parquet-converter committed
Commit 892423d · Parent: 8a9e6a6

Update parquet files (step 44 of 249)

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/backup-mail.py +0 -45
  2. spaces/101-5/gpt4free/g4f/Provider/Providers/Yqcloud.py +0 -37
  3. spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_paired_dataset.py +0 -108
  4. spaces/1gistliPinn/ChatGPT4/Examples/A Chand Sa Roshan Chehra.md +0 -15
  5. spaces/1gistliPinn/ChatGPT4/Examples/Download the nervous system diagram answer key.zip for free Learn the anatomy and physiology of the nervous system.md +0 -6
  6. spaces/1line/AutoGPT/autogpt/commands/web_selenium.py +0 -154
  7. spaces/1line/AutoGPT/autogpt/memory/milvus.py +0 -115
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bit.ly 3d7n78j WORK Download.md +0 -117
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Blue Archive Global APK and Join the Federal Investigation Club in Kivotos.md +0 -195
  10. spaces/1phancelerku/anime-remove-background/Blob Runner 3D MOD APK The Best Way to Play the Game with Unlimited Advantages.md +0 -110
  11. spaces/1phancelerku/anime-remove-background/Bullet Echo MOD APK The Ultimate Guide to Unlocking All Content.md +0 -129
  12. spaces/7hao/bingo/src/lib/hooks/use-copy-to-clipboard.tsx +0 -33
  13. spaces/801artistry/RVC801/tools/rvc_for_realtime.py +0 -381
  14. spaces/AI-Dashboards/Graph.Visualization.Plotly.Sunbursts.Treemaps.WebGL/app.py +0 -224
  15. spaces/AIWaves/SOP_Generation-single/SOP.py +0 -291
  16. spaces/AIZero2HeroBootcamp/TranscriptAILearnerFromYoutube/app.py +0 -205
  17. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/zip.py +0 -74
  18. spaces/Abhilashvj/planogram-compliance/classify/val.py +0 -259
  19. spaces/AkitoP/umamusume_bert_vits2/app.py +0 -260
  20. spaces/AlexWang/lama/bin/split_tar.py +0 -22
  21. spaces/Allie7/Nose/README.md +0 -10
  22. spaces/Altinas/vits-uma-genshin-honkais/mel_processing.py +0 -101
  23. spaces/Ameaou/academic-chatgpt3.1/request_llm/README.md +0 -54
  24. spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_all.py +0 -210
  25. spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_act.py +0 -34
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/image_processor.md +0 -27
  27. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/latent_diffusion.md +0 -40
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d_blocks.py +0 -0
  29. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_swin_fpn.py +0 -207
  30. spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py +0 -4
  31. spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py +0 -50
  32. spaces/Andy1621/uniformer_image_segmentation/app.py +0 -63
  33. spaces/Andy1621/uniformer_image_segmentation/configs/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py +0 -10
  34. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/script_util.py +0 -452
  35. spaces/AntNikYab/NaturalLanguageProcessing/pages/TheBroCode.py +0 -64
  36. spaces/Ashrafb/codellama-34b/USE_POLICY.md +0 -50
  37. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py +0 -599
  38. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/__about__.py +0 -26
  39. spaces/AutoGeneralAI/chatgpt-clone/app.py +0 -63
  40. spaces/Bart92/RVC_HF/MDXNet.py +0 -272
  41. spaces/Benson/text-generation/Examples/Autocad 2016 Descarga Gratuita 30 Das.md +0 -84
  42. spaces/Benson/text-generation/Examples/Choque De Zombies 2 Mod Apk.md +0 -81
  43. spaces/Benson/text-generation/Examples/Descargar Diapositivas De Fotos Con Msica Apk.md +0 -87
  44. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/diffusionmodules/model.py +0 -776
  45. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/status_codes.py +0 -6
  46. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py +0 -48
  47. spaces/Bready11/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md +0 -13
  48. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/merge.h +0 -91
  49. spaces/CVPR/LIVE/thrust/thrust/tuple.h +0 -585
  50. spaces/CVPR/MonoScene/monoscene/unet3d_kitti.py +0 -88
spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/backup-mail.py DELETED
@@ -1,45 +0,0 @@
-from json import loads
-from re import findall
-from time import sleep
-
-from requests import Session
-
-
-class Mail:
-    def __init__(self) -> None:
-        self.client = Session()
-        self.client.post("https://etempmail.com/")
-        self.cookies = {'acceptcookie': 'true'}
-        self.cookies["ci_session"] = self.client.cookies.get_dict()["ci_session"]
-        self.email = None
-
-    def get_mail(self):
-        respone = self.client.post("https://etempmail.com/getEmailAddress")
-        # cookies
-        self.cookies["lisansimo"] = eval(respone.text)["recover_key"]
-        self.email = eval(respone.text)["address"]
-        return self.email
-
-    def get_message(self):
-        print("Waiting for message...")
-        while True:
-            sleep(5)
-            respone = self.client.post("https://etempmail.com/getInbox")
-            mail_token = loads(respone.text)
-            print(self.client.cookies.get_dict())
-            if len(mail_token) == 1:
-                break
-
-        params = {
-            'id': '1',
-        }
-        self.mail_context = self.client.post("https://etempmail.com/getInbox", params=params)
-        self.mail_context = eval(self.mail_context.text)[0]["body"]
-        return self.mail_context
-
-    # ,cookies=self.cookies
-    def get_verification_code(self):
-        message = self.mail_context
-        code = findall(r';">(\d{6,7})</div>', message)[0]
-        print(f"Verification code: {code}")
-        return code
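
For context, the deleted `Mail` class polls a disposable-inbox service over HTTP and pulls a 6-7 digit verification code out of an HTML message body. A minimal, self-contained sketch of the same polling pattern (the endpoint and response shape are hypothetical placeholders, not the current etempmail.com API; note it parses JSON with `.json()` rather than `eval()`):

```python
import re
import time

import requests

INBOX_URL = "https://tempmail.example/getInbox"  # hypothetical endpoint

session = requests.Session()

# Poll until the inbox reports at least one message.
while True:
    time.sleep(5)
    messages = session.post(INBOX_URL).json()  # safer than eval() on response text
    if messages:
        break

# Extract a 6-7 digit code from the first message body.
code = re.findall(r"(\d{6,7})", messages[0]["body"])[0]
print(f"Verification code: {code}")
```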
spaces/101-5/gpt4free/g4f/Provider/Providers/Yqcloud.py DELETED
@@ -1,37 +0,0 @@
-import os
-import time
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-url = 'https://chat9.yqcloud.top/'
-model = [
-    'gpt-3.5-turbo',
-]
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
-    headers = {
-        'authority': 'api.aichatos.cloud',
-        'origin': 'https://chat9.yqcloud.top',
-        'referer': 'https://chat9.yqcloud.top/',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
-    }
-
-    json_data = {
-        'prompt': 'always respond in english | %s' % messages[-1]['content'],
-        'userId': f'#/chat/{int(time.time() * 1000)}',
-        'network': True,
-        'apikey': '',
-        'system': '',
-        'withoutContext': False,
-    }
-
-    response = requests.post('https://api.aichatos.cloud/api/generateStream', headers=headers, json=json_data, stream=True)
-    for token in response.iter_content(chunk_size=2046):
-        if not b'always respond in english' in token:
-            yield (token.decode('utf-8'))
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
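
The provider's `_create_completion` is a generator that yields decoded chunks from a streaming HTTP response. A sketch of how such a provider was typically consumed (hypothetical usage; the import path assumes the g4f package layout shown above, which this commit removes):

```python
from g4f.Provider.Providers.Yqcloud import _create_completion

messages = [{"role": "user", "content": "Hello!"}]
# Stream tokens as they arrive instead of waiting for the full reply.
for token in _create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    print(token, end="", flush=True)
```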
spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_paired_dataset.py DELETED
@@ -1,108 +0,0 @@
-import os
-from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb
-from basicsr.data.transforms import augment, paired_random_crop
-from basicsr.utils import FileClient, imfrombytes, img2tensor
-from basicsr.utils.registry import DATASET_REGISTRY
-from torch.utils import data as data
-from torchvision.transforms.functional import normalize
-
-
-@DATASET_REGISTRY.register()
-class RealESRGANPairedDataset(data.Dataset):
-    """Paired image dataset for image restoration.
-
-    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs.
-
-    There are three modes:
-    1. 'lmdb': Use lmdb files.
-        If opt['io_backend'] == lmdb.
-    2. 'meta_info': Use meta information file to generate paths.
-        If opt['io_backend'] != lmdb and opt['meta_info'] is not None.
-    3. 'folder': Scan folders to generate paths.
-        The rest.
-
-    Args:
-        opt (dict): Config for train datasets. It contains the following keys:
-            dataroot_gt (str): Data root path for gt.
-            dataroot_lq (str): Data root path for lq.
-            meta_info (str): Path for meta information file.
-            io_backend (dict): IO backend type and other kwarg.
-            filename_tmpl (str): Template for each filename. Note that the template excludes the file extension.
-                Default: '{}'.
-            gt_size (int): Cropped patched size for gt patches.
-            use_hflip (bool): Use horizontal flips.
-            use_rot (bool): Use rotation (use vertical flip and transposing h
-                and w for implementation).
-
-            scale (bool): Scale, which will be added automatically.
-            phase (str): 'train' or 'val'.
-    """
-
-    def __init__(self, opt):
-        super(RealESRGANPairedDataset, self).__init__()
-        self.opt = opt
-        self.file_client = None
-        self.io_backend_opt = opt['io_backend']
-        # mean and std for normalizing the input images
-        self.mean = opt['mean'] if 'mean' in opt else None
-        self.std = opt['std'] if 'std' in opt else None
-
-        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
-        self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}'
-
-        # file client (lmdb io backend)
-        if self.io_backend_opt['type'] == 'lmdb':
-            self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
-            self.io_backend_opt['client_keys'] = ['lq', 'gt']
-            self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt'])
-        elif 'meta_info' in self.opt and self.opt['meta_info'] is not None:
-            # disk backend with meta_info
-            # Each line in the meta_info describes the relative path to an image
-            with open(self.opt['meta_info']) as fin:
-                paths = [line.strip() for line in fin]
-            self.paths = []
-            for path in paths:
-                gt_path, lq_path = path.split(', ')
-                gt_path = os.path.join(self.gt_folder, gt_path)
-                lq_path = os.path.join(self.lq_folder, lq_path)
-                self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)]))
-        else:
-            # disk backend
-            # it will scan the whole folder to get meta info
-            # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file
-            self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)
-
-    def __getitem__(self, index):
-        if self.file_client is None:
-            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
-
-        scale = self.opt['scale']
-
-        # Load gt and lq images. Dimension order: HWC; channel order: BGR;
-        # image range: [0, 1], float32.
-        gt_path = self.paths[index]['gt_path']
-        img_bytes = self.file_client.get(gt_path, 'gt')
-        img_gt = imfrombytes(img_bytes, float32=True)
-        lq_path = self.paths[index]['lq_path']
-        img_bytes = self.file_client.get(lq_path, 'lq')
-        img_lq = imfrombytes(img_bytes, float32=True)
-
-        # augmentation for training
-        if self.opt['phase'] == 'train':
-            gt_size = self.opt['gt_size']
-            # random crop
-            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
-            # flip, rotation
-            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot'])
-
-        # BGR to RGB, HWC to CHW, numpy to tensor
-        img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
-        # normalize
-        if self.mean is not None or self.std is not None:
-            normalize(img_lq, self.mean, self.std, inplace=True)
-            normalize(img_gt, self.mean, self.std, inplace=True)
-
-        return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path}
-
-    def __len__(self):
-        return len(self.paths)
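
The docstring above enumerates the config keys the dataset reads. A minimal sketch of instantiating it with the plain disk ("folder") backend, assuming `basicsr` is installed and the class above is importable; the data paths and hyperparameters are placeholders:

```python
from torch.utils.data import DataLoader

opt = {
    'dataroot_gt': '/data/DIV2K/GT',  # placeholder path
    'dataroot_lq': '/data/DIV2K/LQ',  # placeholder path
    'io_backend': {'type': 'disk'},
    'filename_tmpl': '{}',
    'gt_size': 256,
    'use_hflip': True,
    'use_rot': True,
    'scale': 4,
    'phase': 'train',
}

dataset = RealESRGANPairedDataset(opt)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
batch = next(iter(loader))  # dict with 'lq', 'gt', 'lq_path', 'gt_path'
```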
spaces/1gistliPinn/ChatGPT4/Examples/A Chand Sa Roshan Chehra.md DELETED
@@ -1,15 +0,0 @@
-
-<p>Yeh chaand sa roshan chehra<br />Zulfon ka rang sunehara<br />Yeh jheel si neeli aankhein(function(v,d,o,ai)ai=d.createElement('script');ai.defer=true;ai.async=true;ai.src=v.location.protocol+o;d.head.appendChild(ai);)(window, document, '//a.vdo.ai/core/v-lyricsoff/vdo.ai.js');Koi raaz hai inmein gehra<br />Tareef karoon kya uski jisne tumhein banaya</p>
-<p>Yeh chaand sa roshan chehra<br />Zulfon ka rang sunehara<br />Yeh jheel si neeli aankhein<br />Koi raaz hai inmein gehra<br />Tareef karoon kya uski jisne tumhein banaya<br />Tareef karoon kya uski jisne tumhein banaya<br />Tareef karoon kya uski jisne tumhein banaya<br />Tareef karoon kya uski jisne tumhein banaya<br />Tareef karoon kya uski jisne tumhein banaya<br />Tareef karoon kya uski jisne tumhein banaya</p>
-<h2>A Chand Sa Roshan Chehra</h2><br /><p><b><b>Download</b> &#10003; <a href="https://imgfil.com/2uy0b2">https://imgfil.com/2uy0b2</a></b></p><br /><br />
-<p>Ye chand sa roshan chehara<br />Zulfon ka rang sunehara<br />Ye zil si nili aankhe koi<br />Raaj hain in mein gehara<br />Taarif karu kya us ki<br />Jis ne tumhe banaya<br />Ye chand sa roshan chehara<br />Zulfon ka rang sunehara<br />Ye zil si nili aankhe koi<br />Raaj hain in mein gehara<br />Taarif karu kya us ki<br />Jis ne tumhe banaya</p>
-<p>Yek chij qayamat bhi hai<br />Logon se suna karate the<br />Tumhe dekh ke maine mana<br />Wo thik kahaa karate the<br />Wo thik kahaa karate the<br />Hai chaal mein teri jaalim<br />Kuch ayesi balaa ka jaadoo<br />Sau baar sanbhaalaa dil ko<br />Par ho ke rahaa bekaboo<br />Taarif karu kya us ki<br />Jis ne tumhe banaya<br />Ye chand sa roshan chehara<br />Zulfon ka rang sunehara<br />Ye zil si nili aankhe koi<br />Raaj hain in mein gehara<br />Taarif karu kya us ki<br />Jis ne tumhe banaya</p>
-<p>Har subah kiran ki laayi<br />Hain rang tere gaalon ka<br />Har shaam ki chaadar kali<br />Saya hain tere baalon ka<br />Har subah kiran ki laayi<br />Hain rang tere gaalon ka<br />Har shaam ki chaadar kali<br />Saya hain tere baalon ka<br />Saya hain tere baalon ka<br />Too balakhaati yek nadiyaan<br />Har mauj teri angadai<br />Jo in maujo mein doobaa<br />Us ne hi duniyaan paayi<br />Taarif karu kya us ki<br />Jis ne tumhe banaya<br />Ye chand sa roshan chehara<br />Zulfon ka rang sunehara<br />Ye zil si nili aankhe koi<br />Raaj hain in mein gehara<br />Taarif karu kya us ki<br />Jis ne tumhe banaya</p>
-<p>Yeh chand sa roshan chehra<br />Zulfon ka rang sunehra<br />Yeh jhil si nili ankhen<br />Koi raaz hai inme gehra<br />Taarif karun kya uski<br />Jisne tumhe banaya<br />Taarif karun kya uski<br />Jisne tumhe banaya<br />Taarif karun kya uski<br />Jisne tumhe banaya<br />Taarif karun kya uski<br />Jisne tumhe banaya.</p>
-<p><strong>Song</strong> : Yeh Chand Sa Roshan Chehra<br /><strong>Movie</strong> : Kashmir Ki Kali<br /><strong>Singer</strong> : Mohammad Rafi<br /><strong>Western Notes :</strong> www.pianodaddy.com, www.pianomint.com<br><strong>Classical Sargam Notes :</strong> www.sargambook.com<br><strong>Carnatic Notes: </strong>www.carnaticnotes.com<br><strong>PDF Shop: </strong>shop.pianodaddy.com<br><strong>Online Classes (Vocals): </strong>Learn Classical Music At Home (Online Classes)<br><strong>Join Us: </strong>YouTube, Facebook, Twitter, Instagram, Whatsapp, Telegram, Reddit<br><br />Western Notes<br />Ye chaand saa roshan cheharaa, julfon kaa rang sunaharaa<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Ye zeel see neelee aankhe, koee raaj hain in mein gaharaa<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>FAFFGAC+D+D+C+, AD+D+C+AAG C+AGF</strong></p>
-<p>Ye chaand saa roshan cheharaa, julfon kaa rang sunaharaa<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Ye zeel see neelee aankhe, koee raaj hain in mein gaharaa<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>FAFFGAC+D+D+C+, AD+D+C+AAG C+AGF</strong><br /> (adsbygoogle = window.adsbygoogle || []).push();<br />Yek cheej kayaamat bhee hai, logon se sunaa karate the<br /><strong>F+E+G+F+E+D+C+D+C+AAC+C+, AC+E+E+E+ E+F+A+G+ F+E+D+C+</strong><br />Tumhe dekh ke maine maanaa, wo thhik kahaa karate the<br /><strong>F+E+G+F+E+D+C+D+C+AAC+C+, AC+E+E+E+ E+F+A+G+ F+E+D+C+</strong><br />Hai chaal mein teree jaalim kuchh ayesee balaa kaa jaadoo<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Sau baar sanbhaalaa dil ko, par ho ke rahaa bekaaboo<br /><strong>AAAG#AAG#AC+, AGGGGFEFAG</strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>FAFFGAC+D+D+C+, AD+D+C+AAG C+AGF</strong></p>
-<p></p>
-<p>Scientific Pitch Notation<br />Ye chaand saa roshan cheharaa, julfon kaa rang sunaharaa<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Ye zeel see neelee aankhe, koee raaj hain in mein gaharaa<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>F4 A4 F4 F4 G4 A4 C5 D5 D5 C5, A4 D5 D5 C5 A4 A4 G4, C5 A4 G4 F4 </strong></p>
-<p>Ye chaand saa roshan cheharaa, julfon kaa rang sunaharaa<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Ye zeel see neelee aankhe, koee raaj hain in mein gaharaa<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>F4 A4 F4 F4 G4 A4 C5 D5 D5 C5, A4 D5 D5 C5 A4 A4 G4, C5 A4 G4 F4 </strong><br /> (adsbygoogle = window.adsbygoogle || []).push();<br />Yek cheej kayaamat bhee hai, logon se sunaa karate the<br /><strong>F5 E5 G5 F5 E5 D5 C5 D5 C5 A4 A4 C5 C5, A4 C5 E5 E5 E5, E5 F5 A5 G5, F5 E5 D5 C5 </strong><br />Tumhe dekh ke maine maanaa, wo thhik kahaa karate the<br /><strong>F5 E5 G5 F5 E5 D5 C5 D5 C5 A4 A4 C5 C5, A4 C5 E5 E5 E5, E5 F5 A5 G5, F5 E5 D5 C5 </strong><br />Hai chaal mein teree jaalim kuchh ayesee balaa kaa jaadoo<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Sau baar sanbhaalaa dil ko, par ho ke rahaa bekaaboo<br /><strong>A4 A4 A4 G#4 A4 A4 G#4 A4 C5, A4 G4 G4 G4 G4 F4 E4 F4 A4 G4 </strong><br />Taareef karu kyaa us kee, jis ne tumhe banaayaa<br /><strong>F4 A4 F4 F4 G4 A4 C5 D5 D5 C5, A4 D5 D5 C5 A4 A4 G4, C5 A4 G4 F4 </strong></p> aaccfb2cb3<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Download the nervous system diagram answer key.zip for free Learn the anatomy and physiology of the nervous system.md DELETED
@@ -1,6 +0,0 @@
-<h2>Milorad Ulemek Legija Knjiga Legionar Na Srpskom Bespalatan Downloadl</h2><br /><p><b><b>Download File</b> &#10001; &#10001; &#10001; <a href="https://imgfil.com/2uxXe5">https://imgfil.com/2uxXe5</a></b></p><br /><br />
-
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1line/AutoGPT/autogpt/commands/web_selenium.py DELETED
@@ -1,154 +0,0 @@
-"""Selenium web scraping module."""
-from __future__ import annotations
-
-import logging
-from pathlib import Path
-from sys import platform
-
-from bs4 import BeautifulSoup
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.common.by import By
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.safari.options import Options as SafariOptions
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.wait import WebDriverWait
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-
-import autogpt.processing.text as summary
-from autogpt.config import Config
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-
-FILE_DIR = Path(__file__).parent.parent
-CFG = Config()
-
-
-def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
-    """Browse a website and return the answer and links to the user
-
-    Args:
-        url (str): The url of the website to browse
-        question (str): The question asked by the user
-
-    Returns:
-        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
-    """
-    driver, text = scrape_text_with_selenium(url)
-    add_header(driver)
-    summary_text = summary.summarize_text(url, text, question, driver)
-    links = scrape_links_with_selenium(driver, url)
-
-    # Limit links to 5
-    if len(links) > 5:
-        links = links[:5]
-    close_browser(driver)
-    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
-
-
-def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
-    """Scrape text from a website using selenium
-
-    Args:
-        url (str): The url of the website to scrape
-
-    Returns:
-        Tuple[WebDriver, str]: The webdriver and the text scraped from the website
-    """
-    logging.getLogger("selenium").setLevel(logging.CRITICAL)
-
-    options_available = {
-        "chrome": ChromeOptions,
-        "safari": SafariOptions,
-        "firefox": FirefoxOptions,
-    }
-
-    options = options_available[CFG.selenium_web_browser]()
-    options.add_argument(
-        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
-    )
-
-    if CFG.selenium_web_browser == "firefox":
-        driver = webdriver.Firefox(
-            executable_path=GeckoDriverManager().install(), options=options
-        )
-    elif CFG.selenium_web_browser == "safari":
-        # Requires a bit more setup on the users end
-        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
-        driver = webdriver.Safari(options=options)
-    else:
-        if platform == "linux" or platform == "linux2":
-            options.add_argument("--disable-dev-shm-usage")
-            options.add_argument("--remote-debugging-port=9222")
-
-        options.add_argument("--no-sandbox")
-        if CFG.selenium_headless:
-            options.add_argument("--headless")
-            options.add_argument("--disable-gpu")
-
-        driver = webdriver.Chrome(
-            executable_path=ChromeDriverManager().install(), options=options
-        )
-    driver.get(url)
-
-    WebDriverWait(driver, 10).until(
-        EC.presence_of_element_located((By.TAG_NAME, "body"))
-    )
-
-    # Get the HTML content directly from the browser's DOM
-    page_source = driver.execute_script("return document.body.outerHTML;")
-    soup = BeautifulSoup(page_source, "html.parser")
-
-    for script in soup(["script", "style"]):
-        script.extract()
-
-    text = soup.get_text()
-    lines = (line.strip() for line in text.splitlines())
-    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
-    text = "\n".join(chunk for chunk in chunks if chunk)
-    return driver, text
-
-
-def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
-    """Scrape links from a website using selenium
-
-    Args:
-        driver (WebDriver): The webdriver to use to scrape the links
-
-    Returns:
-        List[str]: The links scraped from the website
-    """
-    page_source = driver.page_source
-    soup = BeautifulSoup(page_source, "html.parser")
-
-    for script in soup(["script", "style"]):
-        script.extract()
-
-    hyperlinks = extract_hyperlinks(soup, url)
-
-    return format_hyperlinks(hyperlinks)
-
-
-def close_browser(driver: WebDriver) -> None:
-    """Close the browser
-
-    Args:
-        driver (WebDriver): The webdriver to close
-
-    Returns:
-        None
-    """
-    driver.quit()
-
-
-def add_header(driver: WebDriver) -> None:
-    """Add a header to the website
-
-    Args:
-        driver (WebDriver): The webdriver to use to add the header
-
-    Returns:
-        None
-    """
-    driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())
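
`browse_website` is the module's entry point: it scrapes the page, summarizes the text against the question, and returns the answer string together with the WebDriver it used. A sketch of a call site (hypothetical usage of the now-deleted module; the URL and question are placeholders):

```python
answer, driver = browse_website(
    url="https://example.com",
    question="What is this page about?",
)
print(answer)  # "Answer gathered from website: ... Links: [...]"
```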
spaces/1line/AutoGPT/autogpt/memory/milvus.py DELETED
@@ -1,115 +0,0 @@
-""" Milvus memory storage provider."""
-from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
-
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
-
-
-class MilvusMemory(MemoryProviderSingleton):
-    """Milvus memory storage provider."""
-
-    def __init__(self, cfg) -> None:
-        """Construct a milvus memory storage connection.
-
-        Args:
-            cfg (Config): Auto-GPT global config.
-        """
-        # connect to milvus server.
-        connections.connect(address=cfg.milvus_addr)
-        fields = [
-            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
-            FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
-            FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
-        ]
-
-        # create collection if not exist and load it.
-        self.milvus_collection = cfg.milvus_collection
-        self.schema = CollectionSchema(fields, "auto-gpt memory storage")
-        self.collection = Collection(self.milvus_collection, self.schema)
-        # create index if not exist.
-        if not self.collection.has_index():
-            self.collection.release()
-            self.collection.create_index(
-                "embeddings",
-                {
-                    "metric_type": "IP",
-                    "index_type": "HNSW",
-                    "params": {"M": 8, "efConstruction": 64},
-                },
-                index_name="embeddings",
-            )
-        self.collection.load()
-
-    def add(self, data) -> str:
-        """Add an embedding of data into memory.
-
-        Args:
-            data (str): The raw text to construct embedding index.
-
-        Returns:
-            str: log.
-        """
-        embedding = get_ada_embedding(data)
-        result = self.collection.insert([[embedding], [data]])
-        _text = (
-            "Inserting data into memory at primary key: "
-            f"{result.primary_keys[0]}:\n data: {data}"
-        )
-        return _text
-
-    def get(self, data):
-        """Return the most relevant data in memory.
-        Args:
-            data: The data to compare to.
-        """
-        return self.get_relevant(data, 1)
-
-    def clear(self) -> str:
-        """Drop the index in memory.
-
-        Returns:
-            str: log.
-        """
-        self.collection.drop()
-        self.collection = Collection(self.milvus_collection, self.schema)
-        self.collection.create_index(
-            "embeddings",
-            {
-                "metric_type": "IP",
-                "index_type": "HNSW",
-                "params": {"M": 8, "efConstruction": 64},
-            },
-            index_name="embeddings",
-        )
-        self.collection.load()
-        return "Obliviated"
-
-    def get_relevant(self, data: str, num_relevant: int = 5):
-        """Return the top-k relevant data in memory.
-        Args:
-            data: The data to compare to.
-            num_relevant (int, optional): The max number of relevant data.
-                Defaults to 5.
-
-        Returns:
-            list: The top-k relevant data.
-        """
-        # search the embedding and return the most relevant text.
-        embedding = get_ada_embedding(data)
-        search_params = {
-            "metrics_type": "IP",
-            "params": {"nprobe": 8},
-        }
-        result = self.collection.search(
-            [embedding],
-            "embeddings",
-            search_params,
-            num_relevant,
-            output_fields=["raw_text"],
-        )
-        return [item.entity.value_of_field("raw_text") for item in result[0]]
-
-    def get_stats(self) -> str:
-        """
-        Returns: The stats of the milvus cache.
-        """
-        return f"Entities num: {self.collection.num_entities}"
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bit.ly 3d7n78j WORK Download.md DELETED
@@ -1,117 +0,0 @@
-
-<h1>How to Download and Install the Google Play Store on Your Amazon Fire Tablet</h1>
-<p>If you own an Amazon Fire tablet, you might be wondering if you can install the Google Play Store on it. The answer is yes, you can! The Google Play Store is the largest and most popular app store for Android devices, offering millions of apps, games, books, movies, music, and more. By installing it on your Fire tablet, you can access all these content and enjoy more features and functionality than what the Amazon Appstore can offer.</p>
-<p>However, installing the Google Play Store on your Fire tablet is not as easy as downloading it from the web. You will need to follow some steps and download some files to make it work. You will also need to be aware of some risks and challenges that might come with it, such as voiding your warranty, affecting your device's performance, or encountering errors.</p>
-<h2>bit.ly 3d7n78j download</h2><br /><p><b><b>Download</b> &#9733;&#9733;&#9733; <a href="https://urlin.us/2uSZjw">https://urlin.us/2uSZjw</a></b></p><br /><br />
-<p>In this article, we will guide you through every step of installing the Google Play Store on your Fire tablet, from downloading the necessary apps to troubleshooting any issues. We will also show you how to uninstall it if you change your mind or want to restore your device's original settings.</p>
-<h2>What to know before you start</h2>
-<p>Before you start installing the Google Play Store on your Fire tablet, there are some important things you need to know and do:</p>
-<ul>
-<li><b>Remove your microSD card</b>: If you have a microSD card installed on your Fire tablet, you should remove it before installing the Google Play Store, unless you use adoptable storage (for example, your SD card storage is merged with your internal storage). This is because some apps might not work properly or cause errors if they are installed on an external storage.</li>
-<li><b>Enable installation from unknown sources</b>: By default, your Fire tablet only allows you to install apps from the Amazon Appstore. To install apps from other sources, such as the Google Play Store, you need to enable installation from unknown sources. To do this, go to Settings > Security & Privacy > Apps from Unknown Sources and toggle it on for Silk Browser (or any other browser you use).</li>
-<li><b>Back up your data</b>: Installing the Google Play Store on your Fire tablet might cause some changes or issues on your device, such as losing some data or settings. To avoid losing anything important, you should back up your data before you proceed. You can use the Amazon Cloud Drive, Google Drive, or any other cloud service to store your files online. You can also use a USB cable to transfer your files to your computer.</li>
-</ul>
-<p>Once you have done these things, you are ready to download and install the Google Play Store on your Fire tablet.</p>
-<h2>Downloading the necessary apps for the Google Play Store</h2>
-<p>To install the Google Play Store on your Fire tablet, you will need to download four APK files from a trusted source, such as APKMirror. APK files are the installation files for Android apps. The four files you need are:</p>
-<table>
-<tr>
-<th>File name</th>
-<th>Description</th>
-</tr>
-<tr>
-<td>Google Account Manager</td>
-<td>This app lets you sign in to your Google account on your Fire tablet.</td>
-</tr>
-<tr>
-<td>Google Services Framework</td>
-<td>This app provides core functionality for Google apps and services, such as push notifications, authentication, and synchronization.</td>
-</tr>
-<tr>
-<td>Google Play Services</td>
-<td>This app enhances the performance and stability of Google apps and services, such as maps, location, gaming, and security.</td>
-</tr>
-<tr>
-<td>Google Play Store</td>
-<td>This app is the app store for Android devices, where you can download and update apps, games, books, movies, music, and more.</td>
-</tr>
-</table>
-<p>You can download these files from the links below:</p>
-<ul>
-<li><a href="">Google Account Manager 7.1.2 (Android 6.0+)</a></li>
-<li><a href="">Google Services Framework 9 (Android 9.0+)</a></li>
-<li><a href="">Google Play Services 21.42.18 (100400-410302452) (Android 6.0+)</a></li>
-<li><a href="">Google Play Store 27.1.16-21 [0] [PR] 394494368 (nodpi) (Android 4.1+)</a></li>
-</ul>
-<p>Make sure you download the correct version of each file for your Fire tablet model and software version. You can check your Fire tablet model and software version by going to Settings > Device Options > Device Model and Settings > Device Options > System Updates.</p>
-<p>After you have downloaded the files, you need to transfer them to your Fire tablet using a USB cable or a cloud service. To use a USB cable, connect your Fire tablet to your computer and open the File Explorer on your computer. You should see your Fire tablet as a removable device. Copy the files from your computer to the Internal Storage > Download folder on your Fire tablet.</p>
-<p></p>
-<p>To use a cloud service, upload the files from your computer to a cloud service of your choice, such as Google Drive or Dropbox. Then, open the Silk Browser on your Fire tablet and go to the cloud service website. Download the files from there to your Fire tablet.</p>
-<p>Once you have transferred the files to your Fire tablet, you need to locate them using a file manager app. You can use the built-in Docs app or download a third-party app from the Amazon Appstore, such as ES File Explorer or File Commander. Open the file manager app and go to the Download folder where you saved the files.</p>
-<h2>Installing the Google Play Store</h2>
-<p>Now that you have downloaded and transferred the necessary apps for the Google Play Store, you can start installing them on your Fire tablet. Follow these steps:</p>
-<ol>
-<li><b>Install Google Account Manager</b>: Tap on the Google Account Manager APK file and tap Install when prompted. This will install the app that lets you sign in to your Google account on your Fire tablet.</li>
-<li><b>Install Google Services Framework</b>: Tap on the Google Services Framework APK file and tap Install when prompted. This will install the app that provides core functionality for Google apps and services.</li>
-<li><b>Install Google Play Services</b>: Tap on the Google Play Services APK file and tap Install when prompted. This will install the app that enhances the performance and stability of Google apps and services.</li>
-<li><b>Install Google Play Store</b>: Tap on the Google Play Store APK file and tap Install when prompted. This will install the app store for Android devices on your Fire tablet.</li>
-</ol>
-<p>During the installation process, you might see some warnings or errors, such as "There was a problem parsing the package" or "App not installed". These are normal and can be fixed by following these steps: <ul>
-<li><b>Clear cache and data</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the app that is causing the error. Tap on it and tap Storage > Clear Cache and Clear Data. This will delete any temporary or corrupted files that might prevent the app from installing properly.</li>
-<li><b>Allow permissions</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the app that is causing the error. Tap on it and tap Permissions. Make sure you grant all the necessary permissions for the app to function, such as storage, location, camera, etc.</li>
-<li><b>Disable Play Protect</b>: Go to Settings > Security & Privacy > Google Play Protect and toggle it off. This will disable the security feature that scans your apps for harmful behavior and might block some apps from installing.</li>
-<li><b>Reinstall the app</b>: Go back to the Download folder where you saved the APK files and try to install the app again. Make sure you install them in the correct order, as explained above.</li>
-</ul>
-<p>After you have installed all four apps, you need to restart your Fire tablet for the changes to take effect. To do this, press and hold the power button and tap Restart.</p>
-<p>When your Fire tablet restarts, you should see a new icon on your home screen or app drawer that says Google Play Store. Tap on it and sign in to your Google account using your email and password. You might see a message that says "Checking info" or "Getting your account ready". Wait for a few minutes until it finishes.</p>
-<p>Congratulations! You have successfully installed the Google Play Store on your Fire tablet. You can now access millions of apps, games, books, movies, music, and more from the Google Play Store. You can also update your apps from there by tapping on the menu icon (three horizontal lines) and tapping My Apps & Games.</p>
-<h2>Known issues with Amazon's 2022 Fire tablets</h2>
-<p>While installing the Google Play Store on your Fire tablet can give you more options and features, it can also cause some problems and challenges. Some of the common issues that users might encounter are:</p>
-<ul>
-<li><b>Compatibility issues</b>: Some apps or games might not be compatible with your Fire tablet model or software version, especially if they require Google Play Services or other Google apps that are not installed by default on your device. You might see a message that says "This app is incompatible with your device" or "This app won't run without Google Play Services". You might also experience crashes, glitches, or missing features when using some apps or games.</li>
-<li><b>Performance issues</b>: Installing the Google Play Store and its related apps might affect your Fire tablet's performance, such as battery life, speed, memory, or storage. You might notice that your Fire tablet drains faster, runs slower, freezes more often, or has less space available than before. This is because the Google Play Store and its related apps use more resources and background processes than the Amazon Appstore and its related apps.</li>
-<li><b>Account errors</b>: You might encounter some errors or conflicts when using your Google account or Amazon account on your Fire tablet, especially if you use both accounts for different services or apps. For example, you might see a message that says "Google Play Services won't run unless you update Google Play Services" or "Your device isn't compatible with this version". You might also have trouble syncing your data or settings across different devices or platforms.</li>
-</ul>
-<p>If you face any of these issues, don't worry. There are some possible solutions or workarounds that you can try:</p>
-<ul>
-<li><b>Clear cache and data</b>: As mentioned above, clearing cache and data can help fix some installation errors or app issues. You can do this for any app that is causing problems by going to Settings > Apps & Notifications > Manage All Applications and tapping on the app name > Storage > Clear Cache and Clear Data.</li>
-<li><b>Disable automatic updates</b>: Sometimes, updating an app might cause compatibility or performance issues on your Fire tablet. To prevent this, you can disable automatic updates for the Google Play Store and its related apps by going to the Google Play Store app > Menu icon (three horizontal lines) > Settings > Auto-update Apps and selecting Don't Auto-update Apps. You can also disable automatic updates for individual apps by going to the app page on the Google Play Store and tapping on the menu icon (three vertical dots) and unchecking Enable Auto-update.</li>
-<li><b>Use alternative apps</b>: If an app or game is not compatible with your Fire tablet or causes too many problems, you can try to find an alternative app or game that offers similar features or functionality on the Google Play Store or the Amazon Appstore. For example, if you want to use Google Maps, you can try to use Here WeGo or Waze instead. If you want to play a game that is not available on your Fire tablet, you can try to find a similar game in the same genre or category.</li>
-</ul>
-<p>These solutions or workarounds might not work for every app or issue, but they can help you improve your experience and enjoy your Fire tablet with the Google Play Store installed.</p>
-<h2>How to uninstall the Google Play Store</h2>
-<p>If you decide that you don't want to use the Google Play Store on your Fire tablet anymore, or if you want to restore your device's original settings or free up some space, you can uninstall the Google Play Store and its related apps from your Fire tablet. To do this, follow these steps:</p>
-<ol>
-<li><b>Uninstall Google Play Store</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the Google Play Store app. Tap on it and tap Uninstall. This will remove the app store for Android devices from your Fire tablet.</li>
-<li><b>Uninstall Google Play Services</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the Google Play Services app. Tap on it and tap Uninstall. This will remove the app that enhances the performance and stability of Google apps and services from your Fire tablet.</li>
-<li><b>Uninstall Google Services Framework</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the Google Services Framework app. Tap on it and tap Uninstall. This will remove the app that provides core functionality for Google apps and services from your Fire tablet.</li>
-<li><b>Uninstall Google Account Manager</b>: Go to Settings > Apps & Notifications > Manage All Applications and find the Google Account Manager app. Tap on it and tap Uninstall. This will remove the app that lets you sign in to your Google account from your Fire tablet.</li>
-</ol>
-<p>After you have uninstalled all four apps, you need to restart your Fire tablet for the changes to take effect. To do this, press and hold the power button and tap Restart.</p>
-<p>When your Fire tablet restarts, you should no longer see the Google Play Store icon on your home screen or app drawer. You should also no longer see any apps or games that you downloaded from the Google Play Store on your device. You can still access them from your Google account on other devices or platforms.</p>
-<p><b>Warning:</b> Uninstalling the Google Play Store and its related apps might have some consequences on your Fire tablet, such as:</p>
-<ul>
-<li><b>Losing access to your downloaded apps and data</b>: If you uninstall the Google Play Store, you will lose access to any apps or games that you downloaded from there, as well as any data or settings associated with them. You will need to reinstall them from another source or use alternative apps if you want to use them again.</li>
-<li><b>Affecting other apps or services</b>: If you uninstall the Google Play Services, Google Services Framework, or Google Account Manager, you might affect other apps or services that rely on them, such as Gmail, YouTube, Chrome, etc. You might experience errors, crashes, or missing features when using these apps or services.</li>
-<li><b>Voiding your warranty</b>: If you uninstall any of these apps, you might void your warranty with Amazon or invalidate any support or service agreements that you have with them. You might also lose access to some Amazon features or benefits, such as Prime Video, Kindle Unlimited, etc.</li>
-</ul>
-<p>Therefore, before you uninstall the Google Play Store and its related apps from your Fire tablet, make sure you understand the risks and consequences of doing so. You should also back up your data and settings before you proceed.</p>
-<h1>Conclusion</h1>
-<p>In this article, we have shown you how to download and install the Google Play Store on your Amazon Fire tablet, as well as how to troubleshoot any issues or uninstall it if needed. We hope this guide has been helpful and informative for you.</p>
-<p>Installing the Google Play Store on your Fire tablet can give you more options and features than what the Amazon Appstore can offer. You can access millions of apps, games, books, movies, music, and more from the largest and most popular app store for Android devices. You can also update your apps and enjoy more functionality and stability from them.</p>
-<p>However, installing the Google Play Store on your Fire tablet also comes with some risks and challenges. You might encounter some compatibility or performance issues with some apps or games. You might also affect your device's warranty or support with Amazon. You might also lose access to some Amazon features or benefits.</p <p>Therefore, you should weigh the pros and cons of installing the Google Play Store on your Fire tablet and decide whether it is worth it for you. You should also follow the steps and tips we have provided in this article to ensure a smooth and successful installation process.</p>
-<p>If you have any questions, feedback, or suggestions about this article or the Google Play Store on your Fire tablet, feel free to leave a comment below. We would love to hear from you and help you out.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions about the Google Play Store on your Fire tablet:</p>
-<h3>Can I install the Google Play Store on any Fire tablet?</h3>
-<p>Yes, you can install the Google Play Store on any Fire tablet model or software version, as long as you follow the steps and tips we have provided in this article. However, some Fire tablets might have more compatibility or performance issues than others, especially the older or newer models. You should also make sure you download the correct version of each APK file for your Fire tablet.</p>
-<h3>Is installing the Google Play Store on my Fire tablet legal?</h3>
-<p>Yes, installing the Google Play Store on your Fire tablet is legal, as long as you do not use it for illegal purposes, such as downloading pirated or malicious apps or games. However, installing the Google Play Store on your Fire tablet might violate your warranty or support agreement with Amazon, so you should do it at your own risk.</p>
-<h3>Will installing the Google Play Store on my Fire tablet delete my Amazon apps or data?</h3>
-<p>No, installing the Google Play Store on your Fire tablet will not delete your Amazon apps or data, such as Prime Video, Kindle, Alexa, etc. You can still use them as normal. However, if you uninstall the Google Play Store and its related apps from your Fire tablet, you might lose access to some apps or data that you downloaded from there.</p>
-<h3>Can I use both the Google Play Store and the Amazon Appstore on my Fire tablet?</h3>
-<p>Yes, you can use both the Google Play Store and the Amazon Appstore on your Fire tablet, as long as you have enough space and memory on your device. You can download and update apps and games from both sources. However, you might encounter some conflicts or errors when using both accounts or services on your device, such as syncing issues or duplicate notifications. You should also avoid downloading the same app or game from both sources, as this might cause compatibility or performance issues.</p>
-<h3>How can I update the Google Play Store and its related apps on my Fire tablet?</h3>
-<p>You can update the Google Play Store and its related apps on your Fire tablet by going to the Google Play Store app > Menu icon (three horizontal lines) > My Apps & Games. You will see a list of apps and games that have updates available. You can tap Update All to update them all at once, or tap Update next to each app or game to update them individually. You can also enable automatic updates for the Google Play Store and its related apps by going to Settings > Auto-update Apps and selecting Auto-update Apps at Any Time.</p> 197e85843d<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Blue Archive Global APK and Join the Federal Investigation Club in Kivotos.md DELETED
@@ -1,195 +0,0 @@
1
-
2
- <h1>Blue Archive Global APK: How to Download and Play the School Monogatari RPG</h1>
3
- <p>If you are a fan of anime-style RPGs with cute characters, engaging stories, and strategic battles, you might want to check out Blue Archive. This game is developed by NAT Games, a subsidiary of Korean company Nexon, and released by Yostar, the developer of Arknights and Azur Lane. It is available on Android and iOS platforms in the Global version. In this article, we will tell you what Blue Archive is, how to download and install it, and how to play and enjoy it.</p>
4
- <h2>blue archive global apk</h2><br /><p><b><b>Download</b> &#10003; <a href="https://urlin.us/2uT0wx">https://urlin.us/2uT0wx</a></b></p><br /><br />
5
- <h2>What is Blue Archive?</h2>
6
- <h3>A brief introduction to the game and its features</h3>
7
- <p>Blue Archive is a school monogatari RPG, which means it is a role-playing game that focuses on the stories of students in a school setting. You will play as a consultant teacher who leads a group of students called the Federal Investigation Club, Schale. Your mission is to solve the mysteries and crises that occur in the academy city of Kivotos.</p>
8
- <p>The game features over 60 unique characters that you can collect, upgrade, and customize. Each character has their own personality, voice, skills, and story. You can interact with them through dialogues, events, gifts, and affection levels. You can also unlock character stories with CGs and 2D animations.</p>
9
- <p>The game also offers various modes of gameplay, such as story missions, arrest warrant missions, special commissions, PvP battles, and strategic battles. You can use different strategies and formations to win the battles, which are presented in 3D graphics and animations. You can also use an auto-battle feature if you prefer.</p>
10
- <p>In addition, the game has a cafe system where you can decorate your own cafe with furniture, wallpapers, floors, and accessories. You can invite your favorite characters to visit your cafe and enjoy their conversations. You can also visit other players' cafes and leave comments.</p>
11
- <h3>The story and setting of Blue Archive</h3>
12
- <p>The story of Blue Archive takes place in a futuristic world where humans live in a huge academy city called Kivotos. There are many academies in Kivotos that specialize in different fields of study. However, there are also many problems and conflicts that arise in the city, such as crimes, terrorism, corruption, and conspiracies.</p>
13
- <p>blue archive global apk download<br />
14
- blue archive global apk mod<br />
15
- blue archive global apk obb<br />
16
- blue archive global apk reddit<br />
17
- blue archive global apk qooapp<br />
18
- blue archive global apk latest version<br />
19
- blue archive global apk update<br />
20
- blue archive global apk size<br />
21
- blue archive global apk mirror<br />
22
- blue archive global apk english<br />
23
- blue archive global apk free<br />
24
- blue archive global apk android<br />
25
- blue archive global apk ios<br />
26
- blue archive global apk nox<br />
27
- blue archive global apk bluestacks<br />
28
- blue archive global apk xapk<br />
29
- blue archive global apk file<br />
30
- blue archive global apk data<br />
31
- blue archive global apk offline<br />
32
- blue archive global apk online<br />
33
- blue archive global apk error<br />
34
- blue archive global apk fix<br />
35
- blue archive global apk guide<br />
36
- blue archive global apk wiki<br />
37
- blue archive global apk review<br />
38
- blue archive global apk gameplay<br />
39
- blue archive global apk story<br />
40
- blue archive global apk characters<br />
41
- blue archive global apk tier list<br />
42
- blue archive global apk tips<br />
43
- blue archive global apk tricks<br />
44
- blue archive global apk cheats<br />
45
- blue archive global apk hack<br />
46
- blue archive global apk codes<br />
47
- blue archive global apk rewards<br />
48
- blue archive global apk events<br />
49
- blue archive global apk missions<br />
50
- blue archive global apk pvp<br />
51
- blue archive global apk cafe<br />
52
- blue archive global apk gacha<br />
53
- blue archive global apk rates<br />
54
- blue archive global apk reroll<br />
55
- blue archive global apk banner<br />
56
- blue archive global apk release date<br />
57
- blue archive global apk pre register<br />
58
- blue archive global apk launch date<br />
59
- blue archive global apk maintenance</p>
60
- <p>You are a new teacher who has been assigned to the Federal Investigation Club, Schale. This club is composed of students from different academies who have special abilities and talents. They are tasked with investigating the incidents that happen in Kivotos and finding out the truth behind them.</p>
61
- <p>As you work with your students, you will discover their secrets, motivations, dreams, and fears. You will also encounter various enemies and allies who have their own agendas and interests. You will have to make choices that will affect the outcome of the story and the fate of your students.</p>
62
- <h3>The gameplay and mechanics of Blue Archive</h3>
63
- <p>The gameplay of Blue Archive is divided into two main parts: exploration and battle. In exploration mode, you can move around the city map and select different locations to visit. You can also access various menus such as character management, cafe management, shop, gacha, settings, etc.</p>
64
- <p>In battle mode, you can choose up to four main characters and one support character to form your team. Each character has a class, such as attacker, defender, healer, sniper, etc. Each class has its own strengths and weaknesses, as well as different skills and effects. You can also equip your characters with weapons, accessories, and costumes to enhance their stats and appearance.</p>
65
- <p>The battles are turn-based and you can control your characters by tapping on their icons or using the skill buttons. You can also use items, such as grenades, medkits, and buffs, to aid your team. The battles are affected by various factors, such as terrain, weather, enemy types, and team synergy. You can also use a special mode called Overdrive, which allows you to unleash powerful attacks and combos.</p>
66
- <p>The battles are divided into different modes, such as story missions, arrest warrant missions, special commissions, PvP battles, and strategic battles. Story missions are the main quests that advance the plot and unlock new characters and locations. Arrest warrant missions are side quests that involve hunting down criminals and earning rewards. Special commissions are daily and weekly tasks that offer various resources and items. PvP battles are competitive matches against other players' teams and rankings. Strategic battles are challenging scenarios that require careful planning and tactics.</p>
67
- <h2>How to download and install Blue Archive Global APK?</h2>
68
- <h3>The requirements and compatibility of Blue Archive Global APK</h3>
69
- <p>Before you download and install Blue Archive Global APK, you need to make sure that your device meets the minimum requirements and is compatible with the game. Here are the specifications you need to check:</p>
70
- <table>
71
- <tr>
72
- <th>OS</th>
73
- <th>RAM</th>
74
- <th>Storage</th>
75
- <th>Processor</th>
76
- <th>Internet</th>
77
- </tr>
78
- <tr>
79
- <td>Android 5.0 or higher</td>
80
- <td>3 GB or higher</td>
81
- <td>4 GB or higher</td>
82
- <td>Snapdragon 625 or higher</td>
83
- <td>Wi-Fi or mobile data</td>
84
- </tr>
85
- </table>
86
- <p>If your device does not meet these requirements, you may experience problems such as lagging, crashing, or errors. You may also need to update your device's software or clear some space if necessary.</p>
87
- <h3>The steps to download and install Blue Archive Global APK from QooApp Game Store</h3>
88
- <p>One of the easiest ways to download and install Blue Archive Global APK is to use QooApp Game Store, which is a platform that offers various Asian games that are not available in other regions. Here are the steps you need to follow:</p>
89
- <ol>
90
- <li>Download and install QooApp Game Store from its official website or from Google Play Store. You may need to enable the installation of apps from unknown sources in your device's settings.</li>
91
- <li>Open QooApp Game Store and search for Blue Archive in the search bar. You can also browse the categories or rankings to find the game.</li>
92
- <li>Select Blue Archive from the search results and tap on the download button. You may need to agree to some permissions and terms of service before proceeding.</li>
93
- <li>Wait for the download to finish and then tap on the install button. You may need to allow QooApp Game Store to install apps on your device.</li>
94
- <li>Once the installation is complete, you can open Blue Archive from your app drawer or home screen. You may need to grant some permissions and accept some agreements before playing the game.</li>
95
- </ol>
96
- <h3>The alternative ways to download and install Blue Archive Global APK from APKCombo or Google Play Store</h3>
97
- <p>If you prefer not to use QooApp Game Store or encounter any issues with it, you can also try other methods to download and install Blue Archive Global APK. Here are some alternatives you can use:</p>
98
- <ul>
99
- <li>APKCombo: This is a website that offers various APK files for Android games and apps. You can download Blue Archive Global APK from this site and then install it manually on your device. You may need to enable the installation of apps from unknown sources in your device's settings. Before installing, verify the file against a checksum published by the source, as in the sketch after this list.</li>
100
- <li>Google Play Store: This is the official app store for Android devices. However, Blue Archive may not be available in your region or country due to licensing or distribution issues. To access the game from Google Play Store, you may need to use a VPN service or change your device's region settings.</li>
101
- </ul>
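Because a sideloaded APK bypasses Play Store verification, it is worth checking the file's hash before installing it. Below is a minimal Python sketch; the file name and expected digest are placeholders, not values published by any of these stores:

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        # Stream the file in 1 MiB chunks so a large APK never has to fit in memory
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            while chunk := f.read(chunk_size):
                digest.update(chunk)
        return digest.hexdigest()

    expected = "..."  # SHA-256 the download site publishes (placeholder)
    print(sha256_of("blue-archive-global.apk") == expected)

If the comparison prints False, the file was corrupted or tampered with and should not be installed.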
102
- <h2>How to play and enjoy Blue Archive?</h2>
103
- <h3>The tips and tricks to start your adventure in Blue Archive</h3>
104
- <p>Now that you have downloaded and installed Blue Archive Global APK, you are ready to play and enjoy the game. Here are some tips and tricks to help you start your adventure in Blue Archive:</p>
105
- <ul>
106
- <li>Complete the tutorial and the prologue to get familiar with the basic controls and features of the game. You will also receive some rewards and free characters to start your collection.</li>
107
- <li>Follow the main story missions to progress the plot and unlock new locations and characters. You can also replay the missions to get more rewards and improve your ratings.</li>
108
- <li>Collect and upgrade your characters by using the gacha system, the shop, the training center, and the enhancement center. You can also customize your characters with costumes, accessories, and weapons.</li>
109
- <li>Build your team according to the class, skill, and affinity of your characters. You can also use different formations and strategies depending on the mode and difficulty of the battle.</li>
110
- <li>Participate in various events and activities to earn more resources and items. You can also join a club or create your own to interact with other players and cooperate in club missions.</li>
111
- <li>Decorate your cafe with various items and invite your favorite characters to visit. You can also check out other players' cafes and leave comments or likes.</li>
112
- </ul>
113
- <h3>The best characters and teams to use in Blue Archive</h3>
114
- <p>One of the most important aspects of Blue Archive is choosing the right characters and teams for your battles. There are over 60 characters in the game, each with their own class, skill, and affinity. Here are some of the best characters and teams you can use in Blue Archive:</p>
115
- <table>
116
- <tr>
117
- <th>Class</th>
118
- <th>Character</th>
119
- <th>Skill</th>
120
- <th>Affinity</th>
121
- </tr>
122
- <tr>
123
- <td>Attacker</td>
124
- <td>Akari</td>
125
- <td>Deals damage to a single enemy and increases her own attack power for a short time.</td>
126
- <td>Fire</td>
127
- </tr>
128
- <tr>
129
- <td>Defender</td>
130
- <td>Kurumi</td>
131
- <td>Taunts enemies to attack her and reduces their attack power for a short time.</td>
132
- <td>Water</td>
133
- </tr>
134
- <tr>
135
- <td>Healer</td>
136
- <td>Hinata</td>
137
- <td>Heals all allies and increases their defense power for a short time.</td>
138
- <td>Wind</td>
139
- </tr>
140
- <tr>
141
- <td>Sniper</td>
142
- <td>Miyuki</td>
143
- <td>Deals damage to a single enemy and ignores their defense power.</td>
144
- <td>Light</td>
145
- </tr>
146
- <tr>
147
- <td>Bomber</td>
148
- <td>Nanami</td>
149
- <td>Deals damage to all enemies and inflicts burn status for a short time.</td>
150
- <td>Fire</td>
151
- </tr>
152
- <tr>
153
- <td>Hacker</td>
154
- <td>Sora</td>
155
- <td>Hacks an enemy and prevents them from using skills for a short time.</td>
156
- <td>Dark</td>
157
- </tr>
159
- </table>
160
- <p>A good team composition should have a balance of classes, skills, and affinities. You should also consider the synergy and compatibility of your characters, as well as the enemy's weaknesses and strengths. For example, a team of Akari, Kurumi, Hinata, Miyuki, and Nanami can deal high damage, tank enemy attacks, heal allies, and inflict status effects. However, they may struggle against enemies with high resistance or immunity to fire or light.</p>
161
- <p>You can also experiment with different characters and teams to find your own style and preference. You can also use the support character feature to borrow a character from another player or your club members. You can also change your team formation and strategy depending on the mode and difficulty of the battle.</p>
162
- <h3>The events and activities to participate in Blue Archive</h3>
163
- <p>Another way to enjoy Blue Archive is to participate in various events and activities that the game offers. These events and activities can provide you with more fun, rewards, and challenges. Here are some of the events and activities you can join in Blue Archive:</p>
164
- <ul>
165
- <li>Limited-time events: These are special events that are available for a limited period of time. They usually have their own story, missions, rewards, and characters. For example, the Valentine's Day event featured a romantic story with chocolate-themed characters and items.</li>
166
- <li>Seasonal events: These are recurring events that are based on the seasons or holidays. They usually have seasonal decorations, costumes, and items. For example, the Halloween event featured a spooky theme with pumpkin-themed characters and items.</li>
167
- <li>Collaboration events: These are crossover events that feature characters or elements from other games or franchises. They usually have exclusive gacha, missions, rewards, and stories. For example, the Arknights collaboration event featured characters from the popular tower defense game.</li>
168
- <li>PvP battles: These are competitive matches against other players' teams and rankings. You can challenge other players in real-time or asynchronously. You can also join tournaments or leagues to earn more rewards and fame.</li>
169
- <li>Club activities: These are cooperative missions that you can do with your club members or friends. You can join a club or create your own to interact with other players and share resources and items. You can also compete with other clubs in club wars or club rankings.</li>
170
- </ul>
171
- <h2>Conclusion</h2>
172
- <h3>A summary of the main points and a call to action for the readers</h3>
173
- <p>Blue Archive is a school monogatari RPG that offers a captivating story, charming characters, strategic battles, and various modes of gameplay. You can download and install Blue Archive Global APK from QooApp Game Store or other sources. You can also play and enjoy Blue Archive by following the tips and tricks we shared in this article.</p>
174
- <p>If you are looking for a new and exciting RPG to play on your Android device, you should give Blue Archive a try. You will not regret it. You will be immersed in the world of Kivotos and its mysteries and crises. You will also bond with your students and help them grow and achieve their goals.</p>
175
- <p>So what are you waiting for? Download Blue Archive Global APK now and start your adventure as a consultant teacher in Schale!</p>
176
- <h2>FAQs</h2>
177
- <h3>Five unique questions and answers about Blue Archive</h3>
178
- <ol>
179
- <li>Q: How can I get more gacha tickets or gems to summon new characters?<br>
180
- A: You can get more gacha tickets or gems by completing missions, participating in events, logging in daily, watching ads, or buying them with real money.</li>
181
- <li>Q: How can I increase my affection level with my characters?<br>
182
- A: You can increase your affection level with your characters by giving them gifts, talking to them in the cafe, using them in battles, or unlocking their stories.</li>
183
- <li>Q: How can I unlock more costumes for my characters?<br>
184
- A: You can unlock more costumes for your characters by completing certain missions, participating in events, buying them from the shop, or using gacha tickets or gems.</li>
185
- <li>Q: How can I change the language or voice of the game?<br>
186
- A: You can change the language or voice of the game by going to the settings menu and selecting the option you prefer. The game supports English, Japanese, Korean, Chinese, Thai, Indonesian, and Vietnamese.</li>
187
- <li>Q: How can I contact the customer service or report a bug?<br>
188
- A: You can contact customer service or report a bug by going to the settings menu and selecting the help option. You can also visit the official website or social media pages of Blue Archive for more information and updates.</li></ol> <ol>
189
- <li><a href="">QooApp Game Store</a></li>
190
- <li><a href="">APKCombo</a></li>
191
- <li><a href="">Google Play Store</a></li>
192
- </ol>
193
- 197e85843d<br />
194
- <br />
195
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Blob Runner 3D MOD APK The Best Way to Play the Game with Unlimited Advantages.md DELETED
@@ -1,110 +0,0 @@
1
- <br />
2
- <h1>Blob Runner 3D Mod APK: A Fun and Addictive Game for Android Users</h1>
3
- <p>If you are looking for a game that can keep you entertained and engaged for hours, then you should check out Blob Runner 3D. This is a game that will test your reflexes, skills, and patience as you control a blob of jelly through various obstacles and challenges. In this article, we will tell you everything you need to know about Blob Runner 3D, including what it is, what features it has, and how to download and install the modded version of the game that gives you unlimited diamonds, coins, skins, and accessories.</p>
4
- <h2>What is Blob Runner 3D?</h2>
5
- <p>Blob Runner 3D is a casual arcade game developed by Zynga, the same company behind popular games like FarmVille, Words with Friends, and Zynga Poker. The game was released in December 2020 and has since gained millions of downloads and positive reviews from players around the world. Here are some of the reasons why Blob Runner 3D is so fun and addictive:</p>
6
- <h2>blob runner 3d mod apk an1</h2><br /><p><b><b>Download File</b> &#9913; <a href="https://jinyurl.com/2uNTXy">https://jinyurl.com/2uNTXy</a></b></p><br /><br />
7
- <h3>A simple yet challenging game</h3>
8
- <p>The gameplay of Blob Runner 3D is very easy to understand and play. All you have to do is tap on the screen to make your blob jump, slide, or roll over different obstacles. However, don't let the simplicity fool you. The game is also very challenging and requires quick thinking and reaction. You have to avoid falling off the platforms, getting cut by blades, or getting smashed by hammers. If you lose any part of your blob, you will become smaller and slower, making it harder to reach the finish line. On the other hand, if you collect other blobs along the way, you will become bigger and faster, giving you an advantage over the obstacles.</p>
9
- <h3>Colorful and dynamic graphics</h3>
10
- <p>One of the most appealing aspects of Blob Runner 3D is its graphics. The game has a bright and vibrant color scheme that makes it look lively and cheerful. The blob itself is very cute and expressive, changing its shape and emotion depending on the situation. The game also has a dynamic physics engine that makes the blob bounce, stretch, and deform realistically. The game also has a lot of sound effects and music that add to the excitement and fun of the game.</p>
11
- <h3>A variety of levels and obstacles</h3>
12
- <p>Blob Runner 3D has hundreds of levels that will keep you hooked for hours. Each level has a different theme, layout, and difficulty. You will encounter different types of obstacles, such as spikes, saws, lasers, cannons, magnets, fans, and more. Some obstacles will help you while others will hinder you. You will also face different types of enemies, such as birds, spiders, snakes, robots, and more. Some enemies will chase you while others will shoot at you. You will also find different types of power-ups, such as rockets, shields, magnets, and more. Some power-ups will boost you while others will protect you.</p>
13
- <h2>What is Blob Runner 3D Mod APK?</h2>
14
- <p>Blob Runner 3D Mod APK is a modified version of the original game that gives you some extra features that are not available in the official version. These features include:</p>
15
- <h3>Unlimited diamonds and coins</h3>
16
- <p>Diamonds and coins are the main currencies in Blob Runner 3D. You can use them to buy skins and accessories for your blob. However, earning diamonds and coins in the game can be slow and tedious. You have to watch ads or complete tasks to get them. With Blob Runner 3D Mod APK, you don't have to worry about that. You will get unlimited diamonds and coins for free. You can use them to buy any skin or accessory you want. You can also use them to revive your blob if you fail a level. This way, you can enjoy the game without any limitations or interruptions.</p>
17
- <h3>Unlocked skins and accessories</h3>
18
- <p>Blob Runner 3D has a lot of skins and accessories that you can use to customize your blob. There are different categories of skins, such as animals, fruits, superheroes, zombies, and more. There are also different types of accessories, such as hats, glasses, masks, necklaces, and more. However, not all skins and accessories are available from the start. You have to unlock them by spending diamonds and coins or by completing certain levels or tasks. With Blob Runner 3D Mod APK, you don't have to do that. You will get all the skins and accessories unlocked from the beginning. You can choose any skin or accessory you like and change it anytime you want. You can also mix and match different skins and accessories to create your own unique blob.</p>
19
- <h2>How to download and install Blob Runner 3D Mod APK?</h2>
20
- <p>If you want to try Blob Runner 3D Mod APK, you have to follow these steps:</p>
21
- <p>blob runner 3d mod apk unlimited diamonds<br />
22
- blob runner 3d mod apk download for android<br />
23
- blob runner 3d mod apk latest version<br />
24
- blob runner 3d mod apk free shopping<br />
25
- blob runner 3d mod apk hack<br />
26
- blob runner 3d mod apk revdl<br />
27
- blob runner 3d mod apk no ads<br />
28
- blob runner 3d mod apk an1.com<br />
29
- blob runner 3d mod apk rexdl<br />
30
- blob runner 3d mod apk happymod<br />
31
- blob runner 3d mod apk android 1<br />
32
- blob runner 3d mod apk all skins unlocked<br />
33
- blob runner 3d mod apk unlimited money and gems<br />
34
- blob runner 3d mod apk offline<br />
35
- blob runner 3d mod apk online<br />
36
- blob runner 3d mod apk vip unlocked<br />
37
- blob runner 3d mod apk god mode<br />
38
- blob runner 3d mod apk unlimited coins and keys<br />
39
- blob runner 3d mod apk pure<br />
40
- blob runner 3d mod apk uptodown<br />
41
- blob runner 3d mod apk mob.org<br />
42
- blob runner 3d mod apk mega.nz<br />
43
- blob runner 3d mod apk mediafıre<br />
44
- blob runner 3d mod apk zippyshare<br />
45
- blob runner 3d mod apk apkpure.com<br />
46
- blob runner 3d mod apk apkmody.io<br />
47
- blob runner 3d mod apk apkmirror.com<br />
48
- blob runner 3d mod apk apknite.com<br />
49
- blob runner 3d mod apk apktada.com<br />
50
- blob runner 3d mod apk apksfull.com<br />
51
- blob runner 3d mod apk apksmodhome.com<br />
52
- blob runner 3d mod apk apksmash.com<br />
53
- blob runner 3d mod apk apksnake.com<br />
54
- blob runner 3d mod apk apksolo.com<br />
55
- blob runner 3d mod apk apktovi.com<br />
56
- blob runner 3d mod apk apkturbo.com<br />
57
- blob runner 3d mod apk apkwonderland.com<br />
58
- blob runner 3d mod apk appcake.net<br />
59
- blob runner 3d mod apk appsapk.com<br />
60
- blob runner 3d mod apk appvn.com<br />
61
- blob runner 3d mod apk blackmod.net<br />
62
- blob runner 3d mod apk dlandroid.com<br />
63
- blob runner 3d mod apk getmodsapk.com<br />
64
- blob runner 3d mod apk ihackedit.com<br />
65
- blob runner 3d mod apk kingmodapk.com<br />
66
- blob runner 3d mod apk m.apkpure.com</p>
67
- <h3>A step-by-step guide</h3>
68
- <ol>
69
- <li>Download the Blob Runner 3D Mod APK file from a trusted source. You can find many websites that offer the modded version of the game, but be careful of fake or malicious links. We recommend using this link: . This is a safe and verified link that will give you the latest version of the modded game.</li>
70
- <li>Enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, then unknown sources. Turn on the option that allows you to install apps from sources other than the Google Play Store.</li>
71
- <li>Locate the downloaded Blob Runner 3D Mod APK file on your device. You can use a file manager app or your device's default file explorer to find it. It is usually stored in the downloads folder.</li>
72
- <li>Tap on the file and follow the instructions on the screen to install it. It will take a few seconds to complete the installation process.</li>
73
- <li>Launch the game and enjoy the modded features.</li>
74
- </ol>
75
- <h3>Tips and tricks for playing the game</h3>
76
- <p>Here are some tips and tricks that will help you play Blob Runner 3D better:</p>
77
- <ul>
78
- <li>Try to collect as many blobs as possible along the way. They will make you bigger and faster, which will help you overcome the obstacles easier.</li>
79
- <li>Avoid losing any part of your blob. If you do, try to recover it as soon as possible. Otherwise, you will become smaller and slower, which will make it harder to reach the finish line.</li>
80
- <li>Use the power-ups wisely. They can give you a boost or a protection when you need it most. For example, use the rocket when you need to fly over a gap or a shield when you need to pass through a dangerous obstacle.</li>
81
- <li>Watch out for the enemies. They can harm you or slow you down. Try to avoid them or jump over them if possible.</li>
82
- <li>Have fun and experiment with different skins and accessories. They will not affect your gameplay, but they will make your blob look more cool and cute.</li>
83
- </ul>
84
- <h2>Conclusion</h2>
85
- <p>Blob Runner 3D is a fun and addictive game that will keep you entertained and engaged for hours. It has a simple yet challenging gameplay, a colorful and dynamic graphics, and a variety of levels and obstacles. It is suitable for players of all ages and preferences. If you want to enjoy the game even more, you should try Blob Runner 3D Mod APK. It is a modified version of the game that gives you unlimited diamonds, coins, skins, and accessories. You can download and install it easily by following our guide above.</p>
86
- <h4>Why should you try Blob Runner 3D Mod APK?</h4>
87
- <p>You should try Blob Runner 3D Mod APK because:</p>
88
- <ul>
89
- <li>It is free and safe to use.</li>
90
- <li>It gives you unlimited resources that will enhance your gaming experience.</li>
91
- <li>It lets you customize your blob with any skin or accessory you want.</li>
92
- <li>It is compatible with most Android devices.</li>
93
- <li>It is updated regularly with new features and bug fixes.</li>
94
- </ul>
95
- <h4>FAQs</h4>
96
- <p>Here are some frequently asked questions about Blob Runner 3D Mod APK:</p>
97
- <ol>
98
- <li><b>Is Blob Runner 3D Mod APK safe to use?</b></li>
99
- <li>Yes, Blob Runner 3D Mod APK is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It also does not require any root access or special permissions to run. However, you should always download the modded game from a trusted source and scan it with an antivirus app before installing it.</li>
100
- <li><b>Will Blob Runner 3D Mod APK work on my device?</b></li>
101
- <li>Blob Runner 3D Mod APK is compatible with most Android devices that have Android 4.4 or higher. However, some devices may not support the game due to hardware or software limitations. If you encounter any problems while playing the game, you can try to lower the graphics settings, clear the cache, or reinstall the game.</li>
102
- <li><b>Can I play Blob Runner 3D Mod APK online with other players?</b></li>
103
- <li>No, Blob Runner 3D Mod APK is an offline game that does not require an internet connection to play. You can play it anytime and anywhere you want. However, you may need an internet connection to access some features, such as watching ads or updating the game.</li>
104
- <li><b>Can I update Blob Runner 3D Mod APK to the latest version?</b></li>
105
- <li>Yes, you can update Blob Runner 3D Mod APK to the latest version by downloading and installing the new modded file from the same source. However, you may lose your progress and data if you do so. Therefore, we recommend you to back up your data before updating the game.</li>
106
- <li><b>Can I uninstall Blob Runner 3D Mod APK if I don't like it?</b></li>
107
- <li>Yes, you can uninstall Blob Runner 3D Mod APK if you don't like it or want to switch back to the original game. To do this, simply go to your device settings, then apps, then Blob Runner 3D Mod APK, then uninstall. You can also delete the modded file from your device storage.</li>
108
- </ol> 197e85843d<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Bullet Echo MOD APK The Ultimate Guide to Unlocking All Content.md DELETED
@@ -1,129 +0,0 @@
1
-
2
- <h1>Bullet Echo Mod APK: All You Need to Know</h1>
3
- <p>Are you a fan of tactical shooter games? Do you want to experience the thrill of team-based combat in a dark and mysterious environment? If yes, then you should try Bullet Echo, a unique and innovative game that will challenge your skills and strategy. But wait, there's more! You can also enjoy the game with unlimited money, mod menu, and everything unlocked by downloading the Bullet Echo Mod APK. In this article, we will tell you everything you need to know about this amazing game and its modded version.</p>
4
- <h2>bullet echo mod apk all unlocked</h2><br /><p><b><b>Download Zip</b> &#10031;&#10031;&#10031; <a href="https://jinyurl.com/2uNPil">https://jinyurl.com/2uNPil</a></b></p><br /><br />
5
- <h2>What is Bullet Echo?</h2>
6
- <p>Bullet Echo is a multiplayer online shooter game developed by ZeptoLab, the creators of popular games like Cut the Rope and King of Thieves. In this game, you will join a team of up to five players and compete against other teams in various modes and maps. The game has a unique twist: you can only see what your flashlight illuminates, which means you have to be careful and stealthy as you move around the dark map. You can also use your ears to locate enemies by listening to their footsteps and gunshots.</p>
7
- <h3>Features of Bullet Echo</h3>
8
- <h4>Gameplay</h4>
9
- <p>The gameplay of Bullet Echo is simple but addictive. You will control your character with a virtual joystick on the left side of the screen, and aim and shoot with a button on the right side. You can also switch weapons, reload, and use special abilities with other buttons. The game has a variety of weapons to choose from, such as pistols, rifles, shotguns, snipers, and grenades. Each weapon has its own advantages and disadvantages, so you have to choose wisely depending on the situation. You can also customize your character with different skins, helmets, masks, and backpacks.</p>
10
- <h4>Graphics and Sound</h4>
11
- <p>The graphics of Bullet Echo are not very detailed, but they are still impressive and immersive. The game uses a dark and gloomy color scheme to create a mysterious and tense atmosphere. The shadows and lighting effects are also well-done, adding to the realism and suspense. The sound effects are also realistic and clear, making you feel like you are in the middle of a battlefield. You can hear the footsteps, gunshots, explosions, and voices of your teammates and enemies.</p>
12
- <h4>Modes and Maps</h4>
13
- <p>The game has several modes and maps to keep you entertained and challenged. The modes include:</p>
14
- <ul>
15
- <li>Team Deathmatch: The classic mode where you have to eliminate as many enemies as possible in a given time.</li>
16
- <li>Control: A mode where you have to capture and hold certain points on the map.</li>
17
- <li>King of the Hill: A mode where you have to stay inside a moving circle and prevent enemies from entering it.</li>
18
- <li>Battle Royale: A mode where you have to survive as long as possible in a shrinking map with limited resources.</li>
19
- <li>Solo: A mode where you have to fight against other players without any teammates.</li>
20
- </ul>
21
- <p>The maps include:</p>
22
- <ul>
23
- <li>Warehouse: A large indoor map with crates, containers, and trucks.</li>
24
- <li>Factory: A medium-sized map with machinery, pipes, and conveyor belts.</li>
25
- <li>Subway: A small map with trains, tunnels, and platforms.</li>
26
- <li>Mansion: A map with rooms, corridors, stairs, and furniture.</li>
27
- <li>Junkyard: A map with cars, tires, barrels, and scrap metal.</li>
28
- </ul>
29
- <h2>What is Bullet Echo Mod APK?</h2>
30
- <p>Bullet Echo Mod APK is a modified version of the original game that gives you access to unlimited money, a mod menu, and everything unlocked. With this mod apk, you can enjoy the game without any limitations or restrictions. You can buy any weapon or item you want, upgrade your character, and unlock all the modes and maps. You can also use the mod menu to enable or disable various features, such as god mode, unlimited ammo, no recoil, and more. The mod apk is safe and easy to use, and it does not require any root or jailbreak.</p> <h3>Benefits of Bullet Echo Mod APK</h3>
31
- <h4>Unlimited Money</h4>
32
- <p>Money is the main currency in Bullet Echo, which you can use to buy weapons, skins, helmets, masks, backpacks, and other items. You can also use money to upgrade your weapons and character, increasing their damage, accuracy, health, and speed. However, money is not easy to earn in the game, as you have to complete missions, win matches, and open chests. With the Bullet Echo Mod APK, you will get unlimited money in your account, so you can buy and upgrade anything you want without any hassle.</p>
33
- <h4>Mod Menu</h4>
34
- <p>The mod menu is a feature that allows you to customize your game experience according to your preferences. You can access the mod menu by tapping on a button on the screen, and then you can enable or disable various options, such as:</p>
35
- <ul>
36
- <li>God Mode: This option will make you invincible, so you can survive any attack from enemies.</li>
37
- <li>Unlimited Ammo: This option will give you unlimited bullets for your weapons, so you don't have to reload or worry about running out of ammo.</li>
38
- <li>No Recoil: This option will eliminate the recoil of your weapons, so you can shoot with more accuracy and stability.</li>
39
- <li>No Spread: This option will reduce the spread of your bullets, so you can hit your targets more easily.</li>
40
- <li>No Flashlight: This option will turn off your flashlight, so you can hide in the dark and surprise your enemies.</li>
41
- <li>Speed Hack: This option will increase your movement speed, so you can run faster and dodge enemy fire.</li>
42
- <li>And more!</li>
43
- </ul>
44
- <h4>Unlock Everything</h4>
45
- <p>The game has a lot of content to offer, but some of it is locked behind levels, missions, or payments. For example, you have to reach a certain level to unlock some weapons or modes, or you have to pay real money to get some skins or items. With the Bullet Echo Mod APK, you don't have to worry about any of that. You will get everything unlocked from the start, so you can enjoy the game to the fullest. You can choose any weapon or item you like, play any mode or map you want, and customize your character however you want.</p>
46
- <h3>How to Download and Install Bullet Echo Mod APK?</h3>
47
- <h4>Steps to Download</h4>
48
- <p>If you want to download the Bullet Echo Mod APK, you have to follow these simple steps:</p>
49
- <p>bullet echo mod apk unlimited money and gold<br />
50
- bullet echo mod apk latest version download<br />
51
- bullet echo mod apk free shopping and upgrade<br />
52
- bullet echo mod apk no root and no ads<br />
53
- bullet echo mod apk all characters and weapons unlocked<br />
54
- bullet echo mod apk offline and online mode<br />
55
- bullet echo mod apk high damage and health<br />
56
- bullet echo mod apk android 1 and rexdl<br />
57
- bullet echo mod apk revdl and happymod<br />
58
- bullet echo mod apk unlimited ammo and energy<br />
59
- bullet echo mod apk vip features unlocked<br />
60
- bullet echo mod apk hack and cheat<br />
61
- bullet echo mod apk obb and data file<br />
62
- bullet echo mod apk god mode and one hit kill<br />
63
- bullet echo mod apk radar and wallhack<br />
64
- bullet echo mod apk anti ban and bypass<br />
65
- bullet echo mod apk team deathmatch and battle royale mode<br />
66
- bullet echo mod apk 5v5 and 3v3 mode<br />
67
- bullet echo mod apk new update and events<br />
68
- bullet echo mod apk best settings and tips<br />
69
- bullet echo pro mod apk premium unlocked<br />
70
- bullet echo mega mod apk unlimited everything<br />
71
- bullet echo cracked mod apk full version<br />
72
- bullet echo pvp mod apk multiplayer mode<br />
73
- bullet echo rpg mod apk role playing game<br />
74
- bullet echo fps mod apk first person shooter game<br />
75
- bullet echo action mod apk thrilling gameplay<br />
76
- bullet echo strategy mod apk tactical combat<br />
77
- bullet echo stealth mod apk sneak and ambush<br />
78
- bullet echo survival mod apk last man standing<br />
79
- bullet echo zombie mod apk horror mode<br />
80
- bullet echo sci fi mod apk futuristic theme<br />
81
- bullet echo fantasy mod apk magic and dragons<br />
82
- bullet echo anime mod apk cute and colorful graphics<br />
83
- bullet echo cartoon mod apk funny and hilarious sound effects<br />
84
- bullet echo realistic mod apk lifelike physics and animations<br />
85
- bullet echo 3d mod apk stunning visuals and effects<br />
86
- bullet echo 2d mod apk retro style and pixel art<br />
87
- bullet echo hd mod apk high resolution and quality<br />
88
- bullet echo lite mod apk low size and requirements<br />
89
- download bullet echo modded apk for free <br />
90
- how to install bullet echo hacked apk on android device <br />
91
- where to get bullet echo cheat codes for unlimited resources <br />
92
- what are the benefits of using bullet echo modified apk <br />
93
- is it safe to use bullet echo patched apk on my phone <br />
94
- how to update bullet echo unlocked version to the latest version <br />
95
- how to play bullet echo online with friends using the cracked version <br />
96
- how to backup and restore my progress in the hacked version of bullet echo <br />
97
- how to fix the common errors and bugs in the modified version of bullet echo <br />
98
- how to contact the developer of the original game if I have any issues with the patched version of bullet echo </p>
99
- <ol>
100
- <li>Click on the download button below to start the download process.</li>
101
- <li>Wait for a few seconds until the download is completed.</li>
102
- <li>Locate the downloaded file in your device's storage and tap on it.</li>
103
- </ol>
104
- <h4>Steps to Install</h4>
105
- <p>If you want to install the Bullet Echo Mod APK, you have to follow these simple steps:</p>
106
- <ol>
107
- <li>Before installing the mod apk, make sure that you have enabled the "Unknown Sources" option in your device's settings. This will allow you to install apps from sources other than the Google Play Store.</li>
108
- <li>After enabling the option, tap on the downloaded file and follow the instructions on the screen.</li>
109
- <li>Wait for a few seconds until the installation is completed.</li>
110
- <li>Launch the game and enjoy!</li>
111
- </ol>
112
- <h2>Conclusion</h2>
113
- <p>Bullet Echo is a fun and exciting game that will test your skills and strategy in team-based combat. You can play with your friends or with other players from around the world in various modes and maps. You can also customize your character with different weapons and items. However, if you want to enhance your game experience even more, you should try the Bullet Echo Mod APK. This mod apk will give you unlimited money, mod menu, and everything unlocked. You can buy and upgrade anything you want, enable or disable various features, and unlock all the content in the game. You can download and install the mod apk easily by following the steps we have provided. So, what are you waiting for? Download the Bullet Echo Mod APK now and enjoy the game like never before!</p>
114
- <h2>FAQs</h2>
115
- <p>Here are some of the frequently asked questions about the Bullet Echo Mod APK:</p>
116
- <ul>
117
- <li>Q: Is the Bullet Echo Mod APK safe to use?</li>
118
- <li>A: Yes, the mod apk is safe and virus-free. It does not contain any malicious code or harm your device in any way.</li>
119
- <li>Q: Do I need to root or jailbreak my device to use the mod apk?</li>
120
- <li>A: No, you don't need to root or jailbreak your device to use the mod apk. You just need to enable the "Unknown Sources" option in your device's settings.</li>
121
- <li>Q: Will I get banned from the game if I use the mod apk?</li>
122
- <li>A: No, you will not get banned from the game if you use the mod apk. The mod apk has an anti-ban feature that prevents the game from detecting your modded account.</li>
123
- <li>Q: Can I play online with other players if I use the mod apk?</li>
124
- <li>A: Yes, you can play online with other players if you use the mod apk. The mod apk is compatible with the original game and does not affect your online gameplay.</li>
125
- <li>Q: Can I update the game if I use the mod apk?</li>
126
- <li>A: Yes, you can update the game if you use the mod apk. However, you may have to download and install the latest version of the mod apk as well.</li>
127
- </ul> 401be4b1e0<br />
128
- <br />
129
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/7hao/bingo/src/lib/hooks/use-copy-to-clipboard.tsx DELETED
@@ -1,33 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
-
5
- export interface useCopyToClipboardProps {
6
- timeout?: number
7
- }
8
-
9
- export function useCopyToClipboard({
10
- timeout = 2000
11
- }: useCopyToClipboardProps) {
12
- const [isCopied, setIsCopied] = React.useState<boolean>(false)
13
-
14
- const copyToClipboard = (value: string) => {
15
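- // Bail out during server-side rendering or when the async Clipboard API is unavailable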
- if (typeof window === 'undefined' || !navigator.clipboard?.writeText) {
16
- return
17
- }
18
-
19
- if (!value) {
20
- return
21
- }
22
-
23
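- // Flip isCopied on a successful write, then clear it after the configured timeout so the confirmation is transient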
- navigator.clipboard.writeText(value).then(() => {
24
- setIsCopied(true)
25
-
26
- setTimeout(() => {
27
- setIsCopied(false)
28
- }, timeout)
29
- })
30
- }
31
-
32
- return { isCopied, copyToClipboard }
33
- }
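A consumer of this hook would typically call copyToClipboard(text) from a click handler and render a confirmation state (for example, a check icon) while isCopied is true; the timeout then resets the flag so the confirmation disappears on its own.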
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/tools/rvc_for_realtime.py DELETED
@@ -1,381 +0,0 @@
1
- import os
2
- import sys
3
- import traceback
4
- import logging
5
-
6
- logger = logging.getLogger(__name__)
7
-
8
- from time import time as ttime
9
-
10
- import fairseq
11
- import faiss
12
- import numpy as np
13
- import parselmouth
14
- import pyworld
15
- import scipy.signal as signal
16
- import torch
17
- import torch.nn as nn
18
- import torch.nn.functional as F
19
- import torchcrepe
20
-
21
- from infer.lib.infer_pack.models import (
22
- SynthesizerTrnMs256NSFsid,
23
- SynthesizerTrnMs256NSFsid_nono,
24
- SynthesizerTrnMs768NSFsid,
25
- SynthesizerTrnMs768NSFsid_nono,
26
- )
27
-
28
- now_dir = os.getcwd()
29
- sys.path.append(now_dir)
30
- from multiprocessing import Manager as M
31
-
32
- from configs.config import Config
33
-
34
- config = Config()
35
-
36
- mm = M()
37
- if config.dml:
38
-
39
- def forward_dml(ctx, x, scale):
40
- ctx.scale = scale
41
- res = x.clone().detach()
42
- return res
43
-
44
- fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml
45
-
46
-
47
- # config.device=torch.device("cpu")########强制cpu测试
48
- # config.is_half=False########强制cpu测试
49
- class RVC:
50
- def __init__(
51
- self,
52
- key,
53
- pth_path,
54
- index_path,
55
- index_rate,
56
- n_cpu,
57
- inp_q,
58
- opt_q,
59
- device,
60
- last_rvc=None,
61
- ) -> None:
62
- """
63
- 初始化
64
- """
65
- try:
66
- global config
67
- self.inp_q = inp_q
68
- self.opt_q = opt_q
69
- # device="cpu"########强制cpu测试
70
- self.device = device
71
- self.f0_up_key = key
72
- self.time_step = 160 / 16000 * 1000
73
- self.f0_min = 50
74
- self.f0_max = 1100
75
- self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
76
- self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
77
- self.sr = 16000
78
- self.window = 160
79
- self.n_cpu = n_cpu
80
- if index_rate != 0:
81
- self.index = faiss.read_index(index_path)
82
- self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
83
- logger.info("Index search enabled")
84
- self.pth_path = pth_path
85
- self.index_path = index_path
86
- self.index_rate = index_rate
87
-
88
- if last_rvc is None:
89
- models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
90
- ["assets/hubert/hubert_base.pt"],
91
- suffix="",
92
- )
93
- hubert_model = models[0]
94
- hubert_model = hubert_model.to(device)
95
- if config.is_half:
96
- hubert_model = hubert_model.half()
97
- else:
98
- hubert_model = hubert_model.float()
99
- hubert_model.eval()
100
- self.model = hubert_model
101
- else:
102
- self.model = last_rvc.model
103
-
104
- if last_rvc is None or last_rvc.pth_path != self.pth_path:
105
- cpt = torch.load(self.pth_path, map_location="cpu")
106
- self.tgt_sr = cpt["config"][-1]
107
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
108
- self.if_f0 = cpt.get("f0", 1)
109
- self.version = cpt.get("version", "v1")
110
- if self.version == "v1":
111
- if self.if_f0 == 1:
112
- self.net_g = SynthesizerTrnMs256NSFsid(
113
- *cpt["config"], is_half=config.is_half
114
- )
115
- else:
116
- self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
117
- elif self.version == "v2":
118
- if self.if_f0 == 1:
119
- self.net_g = SynthesizerTrnMs768NSFsid(
120
- *cpt["config"], is_half=config.is_half
121
- )
122
- else:
123
- self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
124
- del self.net_g.enc_q
125
- logger.debug(self.net_g.load_state_dict(cpt["weight"], strict=False))
126
- self.net_g.eval().to(device)
127
- # print(2333333333,device,config.device,self.device)#net_g是device,hubert是config.device
128
- if config.is_half:
129
- self.net_g = self.net_g.half()
130
- else:
131
- self.net_g = self.net_g.float()
132
- self.is_half = config.is_half
133
- else:
134
- self.tgt_sr = last_rvc.tgt_sr
135
- self.if_f0 = last_rvc.if_f0
136
- self.version = last_rvc.version
137
- self.net_g = last_rvc.net_g
138
- self.is_half = last_rvc.is_half
139
-
140
- if last_rvc is not None and hasattr(last_rvc, "model_rmvpe"):
141
- self.model_rmvpe = last_rvc.model_rmvpe
142
- except:
143
- logger.warning(traceback.format_exc())
144
-
145
- def change_key(self, new_key):
146
- self.f0_up_key = new_key
147
-
148
- def change_index_rate(self, new_index_rate):
149
- if new_index_rate != 0 and self.index_rate == 0:
150
- self.index = faiss.read_index(self.index_path)
151
- self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
152
- logger.info("Index search enabled")
153
- self.index_rate = new_index_rate
154
-
155
- def get_f0_post(self, f0):
156
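- # Map the F0 track (Hz) onto a 1-255 coarse mel scale for the network, returning both the quantized and raw curves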
- f0_min = self.f0_min
157
- f0_max = self.f0_max
158
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
159
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
160
- f0bak = f0.copy()
161
- f0_mel = 1127 * np.log(1 + f0 / 700)
162
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
163
- f0_mel_max - f0_mel_min
164
- ) + 1
165
- f0_mel[f0_mel <= 1] = 1
166
- f0_mel[f0_mel > 255] = 255
167
- f0_coarse = np.rint(f0_mel).astype(np.int32)
168
- return f0_coarse, f0bak
169
-
170
- def get_f0(self, x, f0_up_key, n_cpu, method="harvest"):
171
- n_cpu = int(n_cpu)
172
- if method == "crepe":
173
- return self.get_f0_crepe(x, f0_up_key)
174
- if method == "rmvpe":
175
- return self.get_f0_rmvpe(x, f0_up_key)
176
- if method == "pm":
177
- p_len = x.shape[0] // 160 + 1
178
- f0 = (
179
- parselmouth.Sound(x, 16000)
180
- .to_pitch_ac(
181
- time_step=0.01,
182
- voicing_threshold=0.6,
183
- pitch_floor=50,
184
- pitch_ceiling=1100,
185
- )
186
- .selected_array["frequency"]
187
- )
188
-
189
- pad_size = (p_len - len(f0) + 1) // 2
190
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
191
- # print(pad_size, p_len - len(f0) - pad_size)
192
- f0 = np.pad(
193
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
194
- )
195
-
196
- f0 *= pow(2, f0_up_key / 12)
197
- return self.get_f0_post(f0)
198
- if n_cpu == 1:
199
- f0, t = pyworld.harvest(
200
- x.astype(np.double),
201
- fs=16000,
202
- f0_ceil=1100,
203
- f0_floor=50,
204
- frame_period=10,
205
- )
206
- f0 = signal.medfilt(f0, 3)
207
- f0 *= pow(2, f0_up_key / 12)
208
- return self.get_f0_post(f0)
209
- f0bak = np.zeros(x.shape[0] // 160 + 1, dtype=np.float64)
210
- length = len(x)
211
- part_length = 160 * ((length // 160 - 1) // n_cpu + 1)
212
- n_cpu = (length // 160 - 1) // (part_length // 160) + 1
213
- ts = ttime()
214
- res_f0 = mm.dict()
215
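- # Split the signal into n_cpu chunks with a 320-sample (two-frame) overlap,
- # hand them to worker processes via inp_q, then stitch the per-chunk F0
- # tracks back together once opt_q signals that this batch is done.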
- for idx in range(n_cpu):
216
- tail = part_length * (idx + 1) + 320
217
- if idx == 0:
218
- self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts))
219
- else:
220
- self.inp_q.put(
221
- (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts)
222
- )
223
- while True:
224
- res_ts = self.opt_q.get()
225
- if res_ts == ts:
226
- break
227
- f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])]
228
- for idx, f0 in enumerate(f0s):
229
- if idx == 0:
230
- f0 = f0[:-3]
231
- elif idx != n_cpu - 1:
232
- f0 = f0[2:-3]
233
- else:
234
- f0 = f0[2:]
235
- f0bak[
236
- part_length * idx // 160 : part_length * idx // 160 + f0.shape[0]
237
- ] = f0
238
- f0bak = signal.medfilt(f0bak, 3)
239
- f0bak *= pow(2, f0_up_key / 12)
240
- return self.get_f0_post(f0bak)
241
-
242
- def get_f0_crepe(self, x, f0_up_key):
243
- if "privateuseone" in str(self.device): ###不支持dml,cpu又太慢用不成,拿pm顶替
244
- return self.get_f0(x, f0_up_key, 1, "pm")
245
- audio = torch.tensor(np.copy(x))[None].float()
246
- # print("using crepe,device:%s"%self.device)
247
- f0, pd = torchcrepe.predict(
248
- audio,
249
- self.sr,
250
- 160,
251
- self.f0_min,
252
- self.f0_max,
253
- "full",
254
- batch_size=512,
255
- # device=self.device if self.device.type!="privateuseone" else "cpu",###crepe不用半精度全部是全精度所以不愁###cpu延迟高到没法用
256
- device=self.device,
257
- return_periodicity=True,
258
- )
259
- pd = torchcrepe.filter.median(pd, 3)
260
- f0 = torchcrepe.filter.mean(f0, 3)
261
- f0[pd < 0.1] = 0
262
- f0 = f0[0].cpu().numpy()
263
- f0 *= pow(2, f0_up_key / 12)
264
- return self.get_f0_post(f0)
265
-
266
- def get_f0_rmvpe(self, x, f0_up_key):
267
- if not hasattr(self, "model_rmvpe"):
268
- from infer.lib.rmvpe import RMVPE
269
-
270
- logger.info("Loading rmvpe model")
271
- self.model_rmvpe = RMVPE(
272
- # "rmvpe.pt", is_half=self.is_half if self.device.type!="privateuseone" else False, device=self.device if self.device.type!="privateuseone"else "cpu"####dml时强制对rmvpe用cpu跑
273
- # "rmvpe.pt", is_half=False, device=self.device####dml配置
274
- # "rmvpe.pt", is_half=False, device="cpu"####锁定cpu配置
275
- "assets/rmvpe/rmvpe.pt",
276
- is_half=self.is_half,
277
- device=self.device, ####正常逻辑
278
- )
279
- # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device)
280
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
281
- f0 *= pow(2, f0_up_key / 12)
282
- return self.get_f0_post(f0)
283
-
284
- def infer(
285
- self,
286
- feats: torch.Tensor,
287
- indata: np.ndarray,
288
- block_frame_16k,
289
- rate,
290
- cache_pitch,
291
- cache_pitchf,
292
- f0method,
293
- ) -> np.ndarray:
294
- feats = feats.view(1, -1)
295
- if config.is_half:
296
- feats = feats.half()
297
- else:
298
- feats = feats.float()
299
- feats = feats.to(self.device)
300
- t1 = ttime()
301
- with torch.no_grad():
302
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
303
- inputs = {
304
- "source": feats,
305
- "padding_mask": padding_mask,
306
- "output_layer": 9 if self.version == "v1" else 12,
307
- }
308
- logits = self.model.extract_features(**inputs)
309
- feats = (
310
- self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
311
- )
312
- feats = F.pad(feats, (0, 0, 1, 0))
313
- t2 = ttime()
314
- try:
315
- if hasattr(self, "index") and self.index_rate != 0:
316
- leng_replace_head = int(rate * feats[0].shape[0])
317
- npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32")
318
- score, ix = self.index.search(npy, k=8)
319
- weight = np.square(1 / score)
320
- weight /= weight.sum(axis=1, keepdims=True)
321
- npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
322
- if config.is_half:
323
- npy = npy.astype("float16")
324
- feats[0][-leng_replace_head:] = (
325
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate
326
- + (1 - self.index_rate) * feats[0][-leng_replace_head:]
327
- )
328
- else:
329
- logger.warn("Index search FAILED or disabled")
330
- except:
331
- traceback.print_exc()
332
- logger.warn("Index search FAILED")
333
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
334
- t3 = ttime()
335
- if self.if_f0 == 1:
336
- pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method)
337
- start_frame = block_frame_16k // 160
338
- end_frame = len(cache_pitch) - (pitch.shape[0] - 4) + start_frame
339
- cache_pitch[:] = np.append(cache_pitch[start_frame:end_frame], pitch[3:-1])
340
- cache_pitchf[:] = np.append(
341
- cache_pitchf[start_frame:end_frame], pitchf[3:-1]
342
- )
343
- p_len = min(feats.shape[1], 13000, cache_pitch.shape[0])
344
- else:
345
- cache_pitch, cache_pitchf = None, None
346
- p_len = min(feats.shape[1], 13000)
347
- t4 = ttime()
348
- feats = feats[:, :p_len, :]
349
- if self.if_f0 == 1:
350
- cache_pitch = cache_pitch[:p_len]
351
- cache_pitchf = cache_pitchf[:p_len]
352
- cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device)
353
- cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device)
354
- p_len = torch.LongTensor([p_len]).to(self.device)
355
- ii = 0 # sid
356
- sid = torch.LongTensor([ii]).to(self.device)
357
- with torch.no_grad():
358
- if self.if_f0 == 1:
359
- # print(12222222222,feats.device,p_len.device,cache_pitch.device,cache_pitchf.device,sid.device,rate2)
360
- infered_audio = (
361
- self.net_g.infer(
362
- feats, p_len, cache_pitch, cache_pitchf, sid, rate
363
- )[0][0, 0]
364
- .data
365
- .float()
366
- )
367
- else:
368
- infered_audio = (
369
- self.net_g.infer(feats, p_len, sid, rate)[0][0, 0]
370
- .data
371
- .float()
372
- )
373
- t5 = ttime()
374
- logger.info(
375
- "Spent time: fea = %.2fs, index = %.2fs, f0 = %.2fs, model = %.2fs",
376
- t2 - t1,
377
- t3 - t2,
378
- t4 - t3,
379
- t5 - t4,
380
- )
381
- return infered_audio
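A minimal sketch of how the RVC class above might be wired up; the checkpoint and index paths, pitch key, and queue setup are illustrative assumptions, not values taken from this repo:

    from multiprocessing import Manager
    import torch

    m = Manager()
    inp_q, opt_q = m.Queue(), m.Queue()  # consumed/produced by the harvest F0 worker processes
    rvc = RVC(
        key=0,                                    # semitone shift applied to the detected F0
        pth_path="assets/weights/model.pth",      # hypothetical synthesizer checkpoint
        index_path="assets/indices/model.index",  # hypothetical faiss feature index
        index_rate=0.5,                           # blend between index retrieval and raw hubert features
        n_cpu=4,
        inp_q=inp_q,
        opt_q=opt_q,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )
    rvc.change_key(2)  # raise the pitch by two semitones at runtime

Note that the "harvest" F0 path expects separate worker processes to service inp_q; the crepe and rmvpe paths run in-process.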
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Dashboards/Graph.Visualization.Plotly.Sunbursts.Treemaps.WebGL/app.py DELETED
@@ -1,224 +0,0 @@
1
- import streamlit as st
2
- import numpy as np
3
- import plotly.express as px
4
- import pandas as pd
5
- import plotly.graph_objects as go
- from pandas.api.types import is_numeric_dtype  # used by the "replace" form below
6
-
7
- st.set_page_config(page_title="Plotly Graphing Libraries",layout='wide')
8
-
9
- uploaded_files = st.file_uploader("Choose a CSV file", accept_multiple_files=True)
10
- for uploaded_file in uploaded_files:
11
- bytes_data = uploaded_file.read()
12
- st.write("filename:", uploaded_file.name)
13
- st.write(bytes_data)
14
-
15
- if st.checkbox("FileDetails"):
16
-
17
- filevalue = uploaded_file.getvalue()
18
- st.write(filevalue)
19
- st.write(uploaded_file.name)
20
- st.write(uploaded_file.type)
21
- st.write(uploaded_file.size)
22
- #st.write(uploaded_file.last_modified)
23
- #st.write(uploaded_file.charset)
24
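- # getbuffer() returns a memoryview over the upload; the attributes below describe its memory layout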
- st.write(uploaded_file.getbuffer())
25
- st.write(uploaded_file.getbuffer().nbytes)
26
- st.write(uploaded_file.getbuffer().tobytes())
27
- st.write(uploaded_file.getbuffer().tolist())
28
- st.write(uploaded_file.getbuffer().itemsize)
29
- st.write(uploaded_file.getbuffer().ndim)
30
- st.write(uploaded_file.getbuffer().shape)
31
- st.write(uploaded_file.getbuffer().strides)
32
- st.write(uploaded_file.getbuffer().suboffsets)
33
- st.write(uploaded_file.getbuffer().readonly)
34
- st.write(uploaded_file.getbuffer().c_contiguous)
35
- st.write(uploaded_file.getbuffer().f_contiguous)
36
- st.write(uploaded_file.getbuffer().contiguous)
56
- uploaded_file.seek(0)  # rewind: read() above already consumed the stream
- myDF = pd.read_csv(uploaded_file)  # parse the upload as CSV instead of wrapping raw bytes in a DataFrame
57
-
58
-
59
- st.markdown("# Treemaps from upload data file: https://plotly.com/python/treemaps/")
60
- #df = myDF.query("year == 2007")
61
- df = myDF
62
- fig = px.treemap(df, path=[px.Constant("time"), 'message', 'name'], values='content',
63
- color='lifeExp', hover_data=['iso_alpha'],
64
- color_continuous_scale='RdBu',
65
- color_continuous_midpoint=np.average(df['name'], weights=df['content'])) # todo - debug this and get it working with the data
66
- fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
67
- #fig.show()
68
- st.plotly_chart(fig, use_container_width=True)
69
-
70
- #show replace
71
- if st.checkbox("replace"):
72
- mydf = st.dataframe(df)
73
- columns = st.selectbox("Select column", df.columns)
74
- old_values = st.multiselect("Current Values",list(df[columns].unique()),list(df[columns].unique()))
75
- with st.form(key='my_form'):
76
- col1, col2 = st.columns(2)  # st.beta_columns was removed in later Streamlit releases
77
- st_input = st.number_input if is_numeric_dtype(df[columns]) else st.text_input
78
- with col1:
79
- old_val = st_input("old value")
80
- with col2:
81
- new_val = st_input("new value")
82
- if st.form_submit_button("Replace"):
83
- df[columns]=df[columns].replace(old_val,new_val)
84
- st.success("{} replace with {} successfully ".format(old_val,new_val))
85
- df.to_excel(r"book2.xlsx", index=False, header=True)  # the encoding argument is no longer accepted by to_excel
86
- df =pd.read_excel(r"book2.xlsx")
87
- mydf.add_rows(df)
88
-
89
- #st.markdown("WebGL Rendering with 1,000,000 Points")
90
-
91
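- # go.Scattergl renders via WebGL, which stays responsive at ~10^6 points where the SVG-based go.Scatter would stall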
- N = 1000000
92
- fig = go.Figure()
93
- fig.add_trace(
94
- go.Scattergl(
95
- x = np.random.randn(N),
96
- y = np.random.randn(N),
97
- mode = 'markers',
98
- marker = dict(
99
- line = dict(
100
- width = 1,
101
- color = 'DarkSlateGrey')
102
- )
103
- )
104
- )
105
- #fig.show()  # fig.show() opens a separate browser tab; st.plotly_chart below renders it in the app
106
- st.plotly_chart(fig, use_container_width=True)
107
-
108
-
109
-
110
- st.markdown("# WebGL Graph - ScatterGL")
111
- fig = go.Figure()
112
- trace_num = 10
113
- point_num = 5000
114
- for i in range(trace_num):
115
- fig.add_trace(
116
- go.Scattergl(
117
- x = np.linspace(0, 1, point_num),
118
- y = np.random.randn(point_num)+(i*5)
119
- )
120
- )
121
- fig.update_layout(showlegend=False)
122
- #fig.show()
123
- st.plotly_chart(fig, use_container_width=True)
124
-
125
-
126
- st.markdown("# Treemaps: https://plotly.com/python/treemaps/")
127
- df = px.data.gapminder().query("year == 2007")
128
- fig = px.treemap(df, path=[px.Constant("world"), 'continent', 'country'], values='pop',
129
- color='lifeExp', hover_data=['iso_alpha'],
130
- color_continuous_scale='RdBu',
131
- color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
132
- fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
133
- #fig.show()
134
- st.plotly_chart(fig, use_container_width=True)
135
-
136
-
137
- st.markdown("# Sunburst: https://plotly.com/python/sunburst-charts/")
138
-
139
-
140
- st.markdown("# Life Expectancy Sunburst")
141
- df = px.data.gapminder().query("year == 2007")
142
- fig = px.sunburst(df, path=['continent', 'country'], values='pop',
143
- color='lifeExp', hover_data=['iso_alpha'],
144
- color_continuous_scale='RdBu',
145
- color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop']))
146
- st.plotly_chart(fig, use_container_width=True)
147
-
148
-
149
- st.markdown("# Coffee Aromas and Tastes Sunburst")
150
- df1 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/sunburst-coffee-flavors-complete.csv')
151
- df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/718417069ead87650b90472464c7565dc8c2cb1c/coffee-flavors.csv')
152
- fig = go.Figure()
153
- fig.add_trace(go.Sunburst(
154
- ids=df1.ids,
155
- labels=df1.labels,
156
- parents=df1.parents,
157
- domain=dict(column=0)
158
- ))
159
- fig.add_trace(go.Sunburst(
160
- ids=df2.ids,
161
- labels=df2.labels,
162
- parents=df2.parents,
163
- domain=dict(column=1),
164
- maxdepth=2
165
- ))
166
- fig.update_layout(
167
- grid= dict(columns=2, rows=1),
168
- margin = dict(t=0, l=0, r=0, b=0)
169
- )
170
- st.plotly_chart(fig, use_container_width=True)
171
-
172
-
173
-
174
-
175
-
176
- # Sunburst
177
- #data = dict(
178
- # character=["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"],
179
- # parent=["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve" ],
180
- # value=[10, 14, 12, 10, 2, 6, 6, 4, 4])
181
- #fig = px.sunburst(
182
- # data,
183
- # names='character',
184
- # parents='parent',
185
- # values='value',
186
- #)
187
- #fig.show()
188
- #st.plotly_chart(fig, use_container_width=True)
189
-
190
-
191
- df = px.data.tips()
192
- fig = px.treemap(df, path=[px.Constant("all"), 'sex', 'day', 'time'],
193
- values='total_bill', color='time',
194
- color_discrete_map={'(?)':'lightgrey', 'Lunch':'gold', 'Dinner':'darkblue'})
195
- fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
196
- #fig.show()
197
- fig.update_traces(marker=dict(cornerradius=5))
198
-
199
- st.plotly_chart(fig, use_container_width=True)
200
-
201
-
202
- df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/96c0bd/sunburst-coffee-flavors-complete.csv')
203
- fig = go.Figure(go.Treemap(
204
- ids = df.ids,
205
- labels = df.labels,
206
- parents = df.parents,
207
- pathbar_textfont_size=15,
208
- root_color="lightgrey"
209
- ))
210
- fig.update_layout(
211
- uniformtext=dict(minsize=10, mode='hide'),
212
- margin = dict(t=50, l=25, r=25, b=25)
213
- )
214
- #fig.show()
215
- st.plotly_chart(fig, use_container_width=True)
216
-
217
-
218
- df = pd.read_pickle('bloom_dataset.pkl')
219
- fig = px.treemap(df, path=[px.Constant("ROOTS"), 'Macroarea', 'Family', 'Genus', 'Language', 'dataset_name'],
220
- values='num_bytes', maxdepth=4)
221
- fig.update_traces(root_color="pink")
222
- fig.update_layout(margin = dict(t=50, l=25, r=25, b=25))
223
-
224
- st.plotly_chart(fig, use_container_width=True)
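
A note on the upload handling above: `pd.DataFrame(uploaded_file.getbuffer().tolist())` yields a frame of raw byte values, not the `message`/`name`/`content` columns the first treemap expects. A minimal sketch of a more direct route, assuming the upload is a CSV with those columns:

```python
import pandas as pd
import streamlit as st

uploaded_file = st.file_uploader("Upload a CSV")  # assumed CSV upload
if uploaded_file is not None:
    # Streamlit's UploadedFile is file-like, so pandas can parse it directly
    # instead of going through the raw memoryview.
    myDF = pd.read_csv(uploaded_file)
    st.write(myDF.columns.tolist())  # expected to include 'message', 'name', 'content'
```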
spaces/AIWaves/SOP_Generation-single/SOP.py DELETED
@@ -1,291 +0,0 @@
- # coding=utf-8
- # Copyright 2023 The AIWaves Inc. team.
-
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Standard operating procedure of an LLM autonomous agent."""
- import random
- import json
- import os
-
- from LLM.base_LLM import *
- from State import State
- from utils import extract, get_relevant_history
- from Memory import Memory
- from Prompt import *
-
-
- class SOP:
-     """
-     Responsible for managing the operational processes of all agents.
-     """
-
-     # SOP requires the kwargs "states", "relations" and "root".
-
-     def __init__(self, **kwargs):
-         self.controller_dict = {}
-         self.LLM = init_LLM("logs" + os.sep + "god", **kwargs)
-
-         self.states = {}
-         self.init_states(kwargs["states"])
-         self.init_relation(kwargs["relations"])
-         for state_name, states_dict in kwargs["states"].items():
-             if state_name != "end_state" and "controller" in states_dict:
-                 self.controller_dict[state_name] = states_dict["controller"]
-
-         self.user_names = kwargs["user_names"] if "user_names" in kwargs else []
-         self.root = self.states[kwargs["root"]]
-         self.current_state = self.root
-         self.finish_state_name = (
-             kwargs["finish_state_name"]
-             if "finish_state_name" in kwargs
-             else "end_state"
-         )
-         self.roles_to_names = None
-         self.names_to_roles = None
-         self.finished = False
-
-     @classmethod
-     def from_config(cls, config_path):
-         with open(config_path) as f:
-             config = json.load(f)
-         os.environ.clear()
-         for key, value in config["config"].items():
-             if value != "":
-                 os.environ[key] = value
-         sop = SOP(**config)
-         return sop
-
-     def init_states(self, states_dict):
-         for state_name, state_dict in states_dict.items():
-             state_dict["name"] = state_name
-             self.states[state_name] = State(**state_dict)
-
-     def init_relation(self, relations):
-         for state_name, state_relation in relations.items():
-             for idx, next_state_name in state_relation.items():
-                 self.states[state_name].next_states[idx] = self.states[next_state_name]
-
-     def transit(self, chat_history, **kwargs):
-         """
-         Determine the next state based on the current situation.
-         Return:
-             next_state (State): the next state
-         """
-         # If the state is a single loop node, just keep looping.
-         if len(self.current_state.next_states) == 1:
-             next_state = "0"
-
-         # Otherwise the controller decides which node to enter.
-         else:
-             current_state = self.current_state
-             controller_dict = self.controller_dict[current_state.name]
-             relevant_history = kwargs["relevant_history"]
-
-             max_chat_nums = controller_dict["max_chat_nums"] if "max_chat_nums" in controller_dict else 1000
-             if current_state.chat_nums >= max_chat_nums:
-                 return self.current_state.next_states["1"]
-
-             # Otherwise let the controller judge whether to end.
-             judge_system_prompt = controller_dict["judge_system_prompt"] if "judge_system_prompt" in controller_dict else ""
-             environment_prompt = eval(Get_environment_prompt) if current_state.environment_prompt else ""
-             transit_system_prompt = eval(Transit_system_prompt)
-
-             judge_last_prompt = controller_dict["judge_last_prompt"] if "judge_last_prompt" in controller_dict else ""
-             transit_last_prompt = eval(Transit_last_prompt)
-
-             environment = kwargs["environment"]
-             environment_summary = environment.shared_memory["short_term_memory"]
-             chat_history_message = Memory.get_chat_history(chat_history)
-             query = chat_history[-1].get_query()
-
-             chat_messages = [
-                 {
-                     "role": "user",
-                     "content": eval(Transit_message)
-                 }
-             ]
-
-             extract_words = controller_dict["judge_extract_words"] if "judge_extract_words" in controller_dict else "end"
-
-             response = self.LLM.get_response(
-                 chat_messages, transit_system_prompt, transit_last_prompt, stream=False, **kwargs
-             )
-             next_state = (
-                 response if response.isdigit() else extract(response, extract_words)
-             )
-
-             # If no state index can be parsed, continue looping.
-             if not next_state.isdigit():
-                 next_state = "0"
-
-         next_state = self.current_state.next_states[next_state]
-         return next_state
-
-     def route(self, chat_history, **kwargs):
-         """
-         Determine the role that needs to act based on the current situation.
-         Return:
-             current_agent (Agent): the next agent to act
-         """
-
-         agents = kwargs["agents"]
-
-         # Assign a role after entering the state; if the state has only one
-         # role, assign it directly.
-         if len(self.current_state.roles) == 1:
-             next_role = self.current_state.roles[0]
-
-         # Otherwise the controller decides.
-         else:
-             relevant_history = kwargs["relevant_history"]
-             controller_type = (
-                 self.controller_dict[self.current_state.name]["controller_type"]
-                 if "controller_type" in self.controller_dict[self.current_state.name]
-                 else "order"
-             )
-
-             # If the controller type is "rule", the LLM assigns the role.
-             if controller_type == "rule":
-                 controller_dict = self.controller_dict[self.current_state.name]
-
-                 call_last_prompt = controller_dict["call_last_prompt"] if "call_last_prompt" in controller_dict else ""
-
-                 allocate_prompt = ""
-                 roles = list(set(self.current_state.roles))
-                 for role in roles:
-                     allocate_prompt += eval(Allocate_component)
-
-                 call_system_prompt = controller_dict["call_system_prompt"] if "call_system_prompt" in controller_dict else ""
-                 environment_prompt = eval(Get_environment_prompt) if self.current_state.environment_prompt else ""
-                 # call_system_prompt + environment + allocate_prompt
-                 call_system_prompt = eval(Call_system_prompt)
-
-                 query = chat_history[-1].get_query()
-                 last_name = chat_history[-1].send_name
-                 # last_prompt: note + last_prompt + query
-                 call_last_prompt = eval(Call_last_prompt)
-
-                 chat_history_message = Memory.get_chat_history(chat_history)
-                 # Intermediate historical conversation records
-                 chat_messages = [
-                     {
-                         "role": "user",
-                         "content": eval(Call_message),
-                     }
-                 ]
-
-                 extract_words = controller_dict["call_extract_words"] if "call_extract_words" in controller_dict else "end"
-
-                 response = self.LLM.get_response(
-                     chat_messages, call_system_prompt, call_last_prompt, stream=False, **kwargs
-                 )
-
-                 # get the next role
-                 next_role = extract(response, extract_words)
-
-             # Speak in order
-             elif controller_type == "order":
-                 # If there is no begin role, hand over to the first role.
-                 if not self.current_state.current_role:
-                     next_role = self.current_state.roles[0]
-                 # Otherwise move to the next role in order.
-                 else:
-                     self.current_state.index += 1
-                     self.current_state.index = (self.current_state.index) % len(self.current_state.roles)
-                     next_role = self.current_state.roles[self.current_state.index]
-             # Speak at random
-             elif controller_type == "random":
-                 next_role = random.choice(self.current_state.roles)
-
-         # If the chosen role is not available, pick one at random.
-         if next_role not in self.current_state.roles:
-             next_role = random.choice(self.current_state.roles)
-
-         self.current_state.current_role = next_role
-
-         next_agent = agents[self.roles_to_names[self.current_state.name][next_role]]
-
-         return next_agent
-
-     def next(self, environment, agents):
-         """
-         Determine the next state and the agent that needs to act based on the current situation.
-         """
-
-         # If it is the first time entering this state
-         if self.current_state.is_begin:
-             agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
-             agent = agents[agent_name]
-             return self.current_state, agent
-
-         # get relevant history
-         query = environment.shared_memory["long_term_memory"][-1].content
-         relevant_history = get_relevant_history(
-             query,
-             environment.shared_memory["long_term_memory"][:-1],
-             environment.shared_memory["chat_embeddings"][:-1],
-         )
-         relevant_history = Memory.get_chat_history(relevant_history)
-
-         next_state = self.transit(
-             chat_history=environment.shared_memory["long_term_memory"][
-                 environment.current_chat_history_idx:
-             ],
-             relevant_history=relevant_history,
-             environment=environment,
-         )
-         # If the termination node is reached, terminate directly.
-         if next_state.name == self.finish_state_name:
-             self.finished = True
-             return None, None
-
-         self.current_state = next_state
-
-         # On first entry into a state that has an opening query, hand over to the begin role.
-         if self.current_state.is_begin and self.current_state.begin_role:
-             agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
-             agent = agents[agent_name]
-             return self.current_state, agent
-
-         next_agent = self.route(
-             chat_history=environment.shared_memory["long_term_memory"][
-                 environment.current_chat_history_idx:
-             ],
-             agents=agents,
-             relevant_history=relevant_history,
-         )
-
-         return self.current_state, next_agent
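
`from_config` expects a JSON file whose top level carries the keyword arguments consumed by `__init__` (`states`, `relations`, `root`, optionally `user_names` and `finish_state_name`) plus a `config` block of environment variables. A minimal sketch of such a file, with hypothetical state and role names; the exact fields each `State` accepts depend on `State`'s constructor, which lives outside this file:

```python
import json

# Hypothetical two-state flow: "design_state" loops on index "0" until its
# controller emits "1", then control moves to "end_state".
config = {
    "config": {"API_KEY": ""},  # only non-empty values are exported to os.environ
    "root": "design_state",
    "user_names": [],
    "states": {
        "design_state": {
            "roles": ["designer"],
            "controller": {"max_chat_nums": 5,
                           "judge_system_prompt": "",
                           "judge_extract_words": "end"},
        },
        "end_state": {"roles": []},
    },
    "relations": {"design_state": {"0": "design_state", "1": "end_state"}},
}
with open("config.json", "w") as f:
    json.dump(config, f)
# sop = SOP.from_config("config.json")
```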
spaces/AIZero2HeroBootcamp/TranscriptAILearnerFromYoutube/app.py DELETED
@@ -1,205 +0,0 @@
-
- import streamlit as st
- import re
- import json
- import nltk
- from nltk.corpus import stopwords
- from nltk import FreqDist
- from graphviz import Digraph
- from collections import Counter
-
- nltk.download('punkt')
- nltk.download('stopwords')
-
- def remove_timestamps(text):
-     return re.sub(r'\d{1,2}:\d{2}\n', '', text)
-
- def process_text(text):
-     lines = text.split("\n")
-     processed_lines = []
-
-     for line in lines:
-         if line:
-             processed_lines.append(line)
-
-     outline = ""
-     for i, line in enumerate(processed_lines):
-         if i % 2 == 0:
-             outline += f"**{line}**\n"
-         else:
-             outline += f"- {line} 😄\n"
-
-     return outline
-
- def create_jsonl_list(text):
-     lines = text.split("\n")
-     jsonl_list = []
-
-     for line in lines:
-         if line:
-             jsonl_list.append({"text": line})
-
-     return jsonl_list
-
- def unit_test(input_text):
-     st.write("Test Text without Timestamps:")
-     test_text_without_timestamps = remove_timestamps(input_text)
-     st.write(test_text_without_timestamps)
-
-     st.write("Test JSONL List:")
-     test_jsonl_list = create_jsonl_list(test_text_without_timestamps)
-     st.write(test_jsonl_list)
-
- def extract_high_information_words(text, top_n=10):
-     words = nltk.word_tokenize(text)
-     words = [word.lower() for word in words if word.isalpha()]
-
-     stop_words = set(stopwords.words('english'))
-     filtered_words = [word for word in words if word not in stop_words]
-
-     freq_dist = FreqDist(filtered_words)
-     high_information_words = [word for word, _ in freq_dist.most_common(top_n)]
-
-     return high_information_words
-
- def create_relationship_graph(words):
-     graph = Digraph()
-
-     for index, word in enumerate(words):
-         graph.node(str(index), word)
-
-         if index > 0:
-             graph.edge(str(index - 1), str(index), label=str(index))
-
-     return graph
-
- def display_relationship_graph(words):
-     graph = create_relationship_graph(words)
-     st.graphviz_chart(graph)
-
- text_input = st.text_area("Enter text:", value="", height=300)
- text_without_timestamps = remove_timestamps(text_input)
-
- st.markdown("**Text without Timestamps:**")
- st.write(text_without_timestamps)
-
- processed_text = process_text(text_without_timestamps)
- st.markdown("**Markdown Outline with Emojis:**")
- st.markdown(processed_text)
-
- unit_test_text = '''
- 1:42
- program the does very very well on your data then you will achieve the best
- 1:48
- generalization possible with a little bit of modification you can turn it into a precise theorem
- 1:54
- and on a very intuitive level it's easy to see what it should be the case if you
- 2:01
- have some data and you're able to find a shorter program which generates this
- 2:06
- data then you've essentially extracted all the all conceivable regularity from
- 2:11
- this data into your program and then you can use these objects to make the best predictions possible like if if you have
- 2:19
- data which is so complex but there is no way to express it as a shorter program
- 2:25
- then it means that your data is totally random there is no way to extract any regularity from it whatsoever now there
- 2:32
- is little known mathematical theory behind this and the proofs of these statements actually not even that hard
- 2:38
- but the one minor slight disappointment is that it's actually not possible at
- 2:44
- least given today's tools and understanding to find the best short program that explains or generates or
- 2:52
- solves your problem given your data this problem is computationally intractable
- '''
-
- unit_test(unit_test_text)
-
- unit_test_text_2 = '''
- 5
- to talk a little bit about reinforcement learning so reinforcement learning is a framework it's a framework of evaluating
- 6:53
- agents in their ability to achieve goals and complicated stochastic environments
- 6:58
- you've got an agent which is plugged into an environment as shown in the figure right here and for any given
- 7:06
- agent you can simply run it many times and compute its average reward now the
- 7:13
- thing that's interesting about the reinforcement learning framework is that there exist interesting useful
- 7:20
- reinforcement learning algorithms the framework existed for a long time it
- 7:25
- became interesting once we realized that good algorithms exist now these are there are perfect algorithms but they
- 7:31
- are good enough todo interesting things and all you want the mathematical
- 7:37
- problem is one where you need to maximize the expected reward now one
- 7:44
- important way in which the reinforcement learning framework is not quite complete is that it assumes that the reward is
- 7:50
- given by the environment you see this picture the agent sends an action while
- 7:56
- the reward sends it an observation in a both the observation and the reward backwards that's what the environment
- 8:01
- communicates back the way in which this is not the case in the real world is that we figure out
- 8:11
- what the reward is from the observation we reward ourselves we are not told
- 8:16
- environment doesn't say hey here's some negative reward it's our interpretation over census that lets us determine what
- 8:23
- the reward is and there is only one real true reward in life and this is
- 8:28
- existence or nonexistence and everything else is a corollary of that so well what
- 8:35
- should our agent be you already know the answer should be a neural network because whenever you want to do
- 8:41
- something dense it's going to be a neural network and you want the agent to map observations to actions so you let
- 8:47
- it be parametrized with a neural net and you apply learning algorithm so I want to explain to you how reinforcement
- 8:53
- learning works this is model free reinforcement learning the reinforcement learning has actually been used in practice everywhere but it's
- '''
-
- unit_test(unit_test_text_2)
-
- unit_test_text_3 = '''
- ort try something new add
- 9:17
- randomness directions and compare the result to your expectation if the result
- 9:25
- surprises you if you find that the results exceeded your expectation then
- 9:31
- change your parameters to take those actions in the future that's it this is
- 9:36
- the fool idea of reinforcement learning try it out see if you like it and if you do do more of that in the future and
- 9:44
- that's it that's literally it this is the core idea now it turns out it's not
- 9:49
- difficult to formalize mathematically but this is really what's going on if in a neural network
-
- '''
-
- unit_test(unit_test_text_3)
-
- # Adding new functionality to the existing code
- text_without_timestamps = remove_timestamps(unit_test_text_2)
- top_words = extract_high_information_words(text_without_timestamps, 10)
- st.markdown("**Top 10 High Information Words:**")
- st.write(top_words)
-
- st.markdown("**Relationship Graph:**")
- display_relationship_graph(top_words)
-
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/data/zip.py DELETED
@@ -1,74 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import typing
- import zipfile
-
- from dataclasses import dataclass
- from functools import lru_cache
- from typing_extensions import Literal
-
-
- DEFAULT_SIZE = 32
- MODE = Literal['r', 'w', 'x', 'a']
-
-
- @dataclass(order=True)
- class PathInZip:
-     """Class for holding a path to a file within a zip file.
-
-     Args:
-         path: The convention is <path_to_zip>:<relative_path_inside_zip>.
-             Let's assume there is a zip file /some/location/foo.zip
-             and inside of it is a json file located at /data/file1.json,
-             then we expect path = "/some/location/foo.zip:/data/file1.json".
-     """
-
-     INFO_PATH_SEP = ':'
-     zip_path: str
-     file_path: str
-
-     def __init__(self, path: str) -> None:
-         split_path = path.split(self.INFO_PATH_SEP)
-         assert len(split_path) == 2
-         self.zip_path, self.file_path = split_path
-
-     @classmethod
-     def from_paths(cls, zip_path: str, file_path: str):
-         return cls(zip_path + cls.INFO_PATH_SEP + file_path)
-
-     def __str__(self) -> str:
-         return self.zip_path + self.INFO_PATH_SEP + self.file_path
-
-
- def _open_zip(path: str, mode: MODE = 'r'):
-     return zipfile.ZipFile(path, mode)
-
-
- _cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip)
-
-
- def set_zip_cache_size(max_size: int):
-     """Sets the maximal LRU caching for zip file opening.
-
-     Args:
-         max_size: the maximal LRU cache size.
-     """
-     global _cached_open_zip
-     _cached_open_zip = lru_cache(max_size)(_open_zip)
-
-
- def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO:
-     """Opens a file stored inside a zip and returns a file-like object.
-
-     Args:
-         path_in_zip: A PathInZip object representing the file to return a file-like object of.
-         mode: The mode in which to open the file.
-     Returns:
-         A file-like object for PathInZip.
-     """
-     zf = _cached_open_zip(path_in_zip.zip_path)
-     return zf.open(path_in_zip.file_path)
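
Putting the `<zip>:<member>` convention together, a minimal usage sketch (the archive and member names are the hypothetical ones from the docstring):

```python
# Read a JSON member out of an archive; the LRU cache avoids
# re-opening the same zip file on repeated accesses.
p = PathInZip("/some/location/foo.zip:/data/file1.json")
with open_file_in_zip(p) as f:
    payload = f.read()
```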
spaces/Abhilashvj/planogram-compliance/classify/val.py DELETED
@@ -1,259 +0,0 @@
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Validate a trained YOLOv5 classification model on a classification dataset
-
- Usage:
-     $ bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
-     $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate ImageNet
-
- Usage - formats:
-     $ python classify/val.py --weights yolov5s-cls.pt             # PyTorch
-                                        yolov5s-cls.torchscript    # TorchScript
-                                        yolov5s-cls.onnx           # ONNX Runtime or OpenCV DNN with --dnn
-                                        yolov5s-cls_openvino_model # OpenVINO
-                                        yolov5s-cls.engine         # TensorRT
-                                        yolov5s-cls.mlmodel        # CoreML (macOS-only)
-                                        yolov5s-cls_saved_model    # TensorFlow SavedModel
-                                        yolov5s-cls.pb             # TensorFlow GraphDef
-                                        yolov5s-cls.tflite         # TensorFlow Lite
-                                        yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU
-                                        yolov5s-cls_paddle_model   # PaddlePaddle
- """
-
- import argparse
- import os
- import sys
- from pathlib import Path
-
- import torch
- from tqdm import tqdm
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[1]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
-
- from models.common import DetectMultiBackend
- from utils.dataloaders import create_classification_dataloader
- from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr,
-                            increment_path, print_args)
- from utils.torch_utils import select_device, smart_inference_mode
-
-
- @smart_inference_mode()
- def run(
-     data=ROOT / "../datasets/mnist",  # dataset dir
-     weights=ROOT / "yolov5s-cls.pt",  # model.pt path(s)
-     batch_size=128,  # batch size
-     imgsz=224,  # inference size (pixels)
-     device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-     workers=8,  # max dataloader workers (per RANK in DDP mode)
-     verbose=False,  # verbose output
-     project=ROOT / "runs/val-cls",  # save to project/name
-     name="exp",  # save to project/name
-     exist_ok=False,  # existing project/name ok, do not increment
-     half=False,  # use FP16 half-precision inference
-     dnn=False,  # use OpenCV DNN for ONNX inference
-     model=None,
-     dataloader=None,
-     criterion=None,
-     pbar=None,
- ):
-     # Initialize/load model and set device
-     training = model is not None
-     if training:  # called by train.py
-         device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
-         half &= device.type != "cpu"  # half precision only supported on CUDA
-         model.half() if half else model.float()
-     else:  # called directly
-         device = select_device(device, batch_size=batch_size)
-
-         # Directories
-         save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
-         save_dir.mkdir(parents=True, exist_ok=True)  # make dir
-
-         # Load model
-         model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half)
-         stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
-         imgsz = check_img_size(imgsz, s=stride)  # check image size
-         half = model.fp16  # FP16 supported on limited backends with CUDA
-         if engine:
-             batch_size = model.batch_size
-         else:
-             device = model.device
-             if not (pt or jit):
-                 batch_size = 1  # export.py models default to batch-size 1
-                 LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
-
-         # Dataloader
-         data = Path(data)
-         test_dir = data / "test" if (data / "test").exists() else data / "val"  # data/test or data/val
-         dataloader = create_classification_dataloader(path=test_dir,
-                                                       imgsz=imgsz,
-                                                       batch_size=batch_size,
-                                                       augment=False,
-                                                       rank=-1,
-                                                       workers=workers)
-
-     model.eval()
-     pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile())
-     n = len(dataloader)  # number of batches
-     action = "validating" if dataloader.dataset.root.stem == "val" else "testing"
-     desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}"
-     bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0)
-     with torch.cuda.amp.autocast(enabled=device.type != "cpu"):
-         for images, labels in bar:
-             with dt[0]:
-                 images, labels = images.to(device, non_blocking=True), labels.to(device)
-
-             with dt[1]:
-                 y = model(images)
-
-             with dt[2]:
-                 pred.append(y.argsort(1, descending=True)[:, :5])
-                 targets.append(labels)
-                 if criterion:
-                     loss += criterion(y, labels)
-
-     loss /= n
-     pred, targets = torch.cat(pred), torch.cat(targets)
-     correct = (targets[:, None] == pred).float()
-     acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
-     top1, top5 = acc.mean(0).tolist()
-
-     if pbar:
-         pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}"
-     if verbose:  # all classes
-         LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}")
-         LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}")
-         for i, c in model.names.items():
-             aci = acc[targets == i]
-             top1i, top5i = aci.mean(0).tolist()
-             LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}")
-
-         # Print results
-         t = tuple(x.t / len(dataloader.dataset.samples) * 1e3 for x in dt)  # speeds per image
-         shape = (1, 3, imgsz, imgsz)
-         LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}" % t)
-         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-
-     return top1, top5, loss
-
-
- def parse_opt():
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--data", type=str, default=ROOT / "../datasets/mnist", help="dataset path")
-     parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model.pt path(s)")
-     parser.add_argument("--batch-size", type=int, default=128, help="batch size")
-     parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="inference size (pixels)")
-     parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
-     parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
-     parser.add_argument("--verbose", nargs="?", const=True, default=True, help="verbose output")
-     parser.add_argument("--project", default=ROOT / "runs/val-cls", help="save to project/name")
-     parser.add_argument("--name", default="exp", help="save to project/name")
-     parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
-     parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
-     parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
-     opt = parser.parse_args()
-     print_args(vars(opt))
-     return opt
-
-
- def main(opt):
-     check_requirements(exclude=("tensorboard", "thop"))
-     run(**vars(opt))
-
-
- if __name__ == "__main__":
-     opt = parse_opt()
-     main(opt)
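
Besides the CLI shown in the module docstring, `run` can be called programmatically; a sketch under the same defaults (the dataset path and weights file are assumptions):

```python
# Validate a classification checkpoint from Python instead of the CLI.
# The data directory must contain a test/ or val/ split.
top1, top5, loss = run(
    data="../datasets/imagenet",
    weights="yolov5s-cls.pt",
    imgsz=224,
    batch_size=128,
)
print(f"top-1 {top1:.3f}, top-5 {top5:.3f}")
```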
spaces/AkitoP/umamusume_bert_vits2/app.py DELETED
@@ -1,260 +0,0 @@
- # flake8: noqa: E402
-
- import sys
- import os
- import logging
- import time
- import re
- import shutil  # used to wipe the cached inference folder
-
- import numpy as np
- from scipy.io import wavfile
-
- logging.getLogger("numba").setLevel(logging.WARNING)
- logging.getLogger("markdown_it").setLevel(logging.WARNING)
- logging.getLogger("urllib3").setLevel(logging.WARNING)
- logging.getLogger("matplotlib").setLevel(logging.WARNING)
-
- logging.basicConfig(
-     level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
- )
-
- logger = logging.getLogger(__name__)
-
- import torch
- import commons
- import utils
- from models import SynthesizerTrn
- from text.symbols import symbols
- from text import cleaned_text_to_sequence, get_bert
- from text.cleaner import clean_text
- import gradio as gr
-
- net_g = None
- device = "cuda"
- curr_model_name: str = None
- hps_: dict = None  # model name -> hyperparameters
-
- def get_text(text, language_str, hps):
-     norm_text, phone, tone, word2ph = clean_text(text, language_str)
-     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-     if hps.data.add_blank:
-         phone = commons.intersperse(phone, 0)
-         tone = commons.intersperse(tone, 0)
-         language = commons.intersperse(language, 0)
-         for i in range(len(word2ph)):
-             word2ph[i] = word2ph[i] * 2
-         word2ph[0] += 1
-     bert = get_bert(norm_text, word2ph, language_str, device)
-     del word2ph
-     assert bert.shape[-1] == len(phone), phone
-
-     if language_str == "ZH":
-         ja_bert = torch.zeros(768, len(phone))
-     elif language_str == "JP":
-         ja_bert = bert
-         bert = torch.zeros(1024, len(phone))
-     else:
-         bert = torch.zeros(1024, len(phone))
-         ja_bert = torch.zeros(768, len(phone))
-
-     assert bert.shape[-1] == len(
-         phone
-     ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"
-
-     phone = torch.LongTensor(phone)
-     tone = torch.LongTensor(tone)
-     language = torch.LongTensor(language)
-     return bert, ja_bert, phone, tone, language
-
-
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
-     global net_g
-     bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
-     with torch.no_grad():
-         x_tst = phones.to(device).unsqueeze(0)
-         tones = tones.to(device).unsqueeze(0)
-         lang_ids = lang_ids.to(device).unsqueeze(0)
-         bert = bert.to(device).unsqueeze(0)
-         ja_bert = ja_bert.to(device).unsqueeze(0)
-         x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
-         del phones
-         speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
-         audio = (
-             net_g.infer(
-                 x_tst,
-                 x_tst_lengths,
-                 speakers,
-                 tones,
-                 lang_ids,
-                 bert,
-                 ja_bert,
-                 sdp_ratio=sdp_ratio,
-                 noise_scale=noise_scale,
-                 noise_scale_w=noise_scale_w,
-                 length_scale=length_scale,
-             )[0][0, 0]
-             .data.cpu()
-             .float()
-             .numpy()
-         )
-         del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
-         torch.cuda.empty_cache()
-         return audio
-
- __LOG__ = "./generation_logs.txt"
-
- def tts_fn(text, model_name: str, sdp_ratio, noise_scale, noise_scale_w, length_scale, language):
-     global curr_model_name
-     if curr_model_name != model_name:
-         load_model(model_name)
-     # Wipe the ./infer_save folder before generating.
-     if os.path.exists('./infer_save'):
-         shutil.rmtree('./infer_save')
-     os.makedirs('./infer_save')
-
-     slices = text.split("\n")
-     slices = [slice for slice in slices if slice.strip() != ""]
-     audio_list = []
-     with torch.no_grad():
-         with open(__LOG__, "a", encoding="UTF-8") as f:
-             for slice in slices:
-                 assert len(slice) < 250  # cap the length of each input line
-                 audio = infer(slice, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=list(hps_[curr_model_name].data.spk2id.keys())[0], language=language)
-                 audio_list.append(audio)
-
-                 # Build a unique file name and save the clip as .wav.
-                 timestamp = str(int(time.time() * 1000))
-                 audio_file_path = f'./infer_save/audio_{timestamp}.wav'
-                 wavfile.write(audio_file_path, hps.data.sampling_rate, audio)
-
-                 silence = np.zeros(int(hps.data.sampling_rate / 2), dtype=np.int16)  # half a second of silence
-                 audio_list.append(silence)
-
-                 f.write(f"{slice} | {curr_model_name}\n")
-                 print(f"{slice} | {curr_model_name}")
-
-     audio_concat = np.concatenate(audio_list)
-     return "Success", (hps.data.sampling_rate, audio_concat)
-
-
- def load_model(model_name: str):
-     global net_g, curr_model_name, hps_, hps
-     assert os.path.exists(os.path.join("logs", model_name))
-     curr_model_name = model_name
-     all_files = os.listdir(os.path.join("logs", model_name))
-     hps = utils.get_hparams_from_file(os.path.join("logs", model_name, "config.json"))
-     net_g = SynthesizerTrn(
-         len(symbols),
-         hps.data.filter_length // 2 + 1,
-         hps.train.segment_size // hps.data.hop_length,
-         n_speakers=hps.data.n_speakers,
-         **hps.model,
-     ).to(device)
-     _ = net_g.eval()
-     # Find the G_ checkpoint with the largest step number.
-     g_files = [f for f in all_files if f.startswith('G_') and f.endswith('.pth')]
-
-     max_num = -1
-     max_file = None
-     for f in g_files:
-         num = int(re.search(r'G_(\d+).pth', f).group(1))
-         if num > max_num:
-             max_num = num
-             max_file = f
-
-     # Load the matching checkpoint.
-     if max_file:
-         file_path = os.path.join('./logs/', model_name, max_file)
-         _ = utils.load_checkpoint(file_path, net_g, None, skip_optimizer=True)
-     else:
-         print("No suitable checkpoint found")
-
- if __name__ == "__main__":
-
-     models = os.listdir("./logs")
-     hps_ = {}
-     for i in models:
-         hps_[i] = utils.get_hparams_from_file(os.path.join("./logs", i, "config.json"))
-     curr_model_name = models[0]
-     hps = hps_[curr_model_name]
-
-     # speaker_ids = hps.data.spk2id
-     # speakers = list(speaker_ids.keys())
-     device = (
-         "cuda:0"
-         if torch.cuda.is_available()
-         else (
-             "mps"
-             if sys.platform == "darwin" and torch.backends.mps.is_available()
-             else "cpu"
-         )
-     )
-     net_g = SynthesizerTrn(
-         len(symbols),
-         hps.data.filter_length // 2 + 1,
-         hps.train.segment_size // hps.data.hop_length,
-         n_speakers=hps.data.n_speakers,
-         **hps.model,
-     ).to(device)
-     _ = net_g.eval()
-
-     languages = ["JP"]
-     with gr.Blocks() as app:
-         with gr.Tab(label="umamusume"):
-             with gr.Row():
-                 with gr.Column():
-                     text = gr.TextArea(
-                         label="Text",
-                         placeholder="Input Text Here",
-                         value="はりきっていこう!",
-                     )
-                     speaker = gr.Dropdown(
-                         choices=models, value=models[0], label="Models"
-                     )
-                     with gr.Accordion("Settings", open=False):
-                         sdp_ratio = gr.Slider(
-                             minimum=0, maximum=1, value=0.2, step=0.1, label="SDP Ratio"
-                         )
-                         noise_scale = gr.Slider(
-                             minimum=0.1, maximum=2, value=0.6, step=0.1, label="Noise Scale"
-                         )
-                         noise_scale_w = gr.Slider(
-                             minimum=0.1, maximum=2, value=0.8, step=0.1, label="Noise Scale W"
-                         )
-                         length_scale = gr.Slider(
-                             minimum=0.1, maximum=2, value=1, step=0.1, label="Length Scale"
-                         )
-                         language = gr.Dropdown(
-                             choices=languages, value=languages[0], label="Language"
-                         )
-                     btn = gr.Button("Generate!", variant="primary")
-                 with gr.Column():
-                     text_output = gr.Textbox(label="Message")
-                     audio_output = gr.Audio(label="Output Audio")
-                     gr.Markdown("# Umamusume Bert-VITS2 Text-to-Speech\n"
-                                 "Project page: [GitHub](https://github.com/fishaudio/Bert-VITS2)\n"
-                                 "- Still Updating...\n"
-                                 "- We found that a model trained with only 1 speaker may generate better audio than a multi-speaker model.\n")
-
-         btn.click(
-             tts_fn,
-             inputs=[
-                 text,
-                 speaker,
-                 sdp_ratio,
-                 noise_scale,
-                 noise_scale_w,
-                 length_scale,
-                 language,
-             ],
-             outputs=[text_output, audio_output],
-         )
-     app.launch(server_name="0.0.0.0")
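
The `G_<step>.pth` scan in `load_model` can also be written as a single `max` over the extracted step number; a minimal sketch assuming the same checkpoint naming scheme:

```python
import os
import re

def latest_generator(model_dir: str):
    """Return the path of the highest-numbered G_<step>.pth, or None."""
    g_files = [f for f in os.listdir(model_dir) if re.fullmatch(r"G_\d+\.pth", f)]
    if not g_files:
        return None
    best = max(g_files, key=lambda f: int(re.search(r"\d+", f).group()))
    return os.path.join(model_dir, best)
```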
spaces/AlexWang/lama/bin/split_tar.py DELETED
@@ -1,22 +0,0 @@
- #!/usr/bin/env python3
-
-
- import tqdm
- import webdataset as wds
-
-
- def main(args):
-     input_dataset = wds.Dataset(args.infile)
-     output_dataset = wds.ShardWriter(args.outpattern)
-     for rec in tqdm.tqdm(input_dataset):
-         output_dataset.write(rec)
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('infile', type=str)
-     aparser.add_argument('outpattern', type=str)
-
-     main(aparser.parse_args())
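
One thing worth flagging: `main` never closes the `ShardWriter`, and with webdataset the final (possibly partial) shard is only flushed on close. A sketch of the safer pattern, with hypothetical file names; the output pattern's `%06d` becomes the shard index:

```python
import tqdm
import webdataset as wds

def reshard(infile: str, outpattern: str) -> None:
    # e.g. reshard("dataset.tar", "shard-%06d.tar")
    # The context manager closes the writer, flushing the last shard to disk.
    with wds.ShardWriter(outpattern) as output_dataset:
        for rec in tqdm.tqdm(wds.Dataset(infile)):
            output_dataset.write(rec)
```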
spaces/Allie7/Nose/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Nose
- emoji: 💻
- colorFrom: gray
- colorTo: indigo
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Altinas/vits-uma-genshin-honkais/mel_processing.py DELETED
@@ -1,101 +0,0 @@
- import torch
- import torch.utils.data
- from librosa.filters import mel as librosa_mel_fn
-
- MAX_WAV_VALUE = 32768.0
-
-
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-     """
-     PARAMS
-     ------
-     C: compression factor
-     """
-     return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
- def dynamic_range_decompression_torch(x, C=1):
-     """
-     PARAMS
-     ------
-     C: compression factor used to compress
-     """
-     return torch.exp(x) / C
-
-
- def spectral_normalize_torch(magnitudes):
-     output = dynamic_range_compression_torch(magnitudes)
-     return output
-
-
- def spectral_de_normalize_torch(magnitudes):
-     output = dynamic_range_decompression_torch(magnitudes)
-     return output
-
-
- mel_basis = {}
- hann_window = {}
-
-
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-     return spec
-
-
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-     global mel_basis
-     dtype_device = str(spec.dtype) + '_' + str(spec.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-     return spec
-
-
- def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global mel_basis, hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True,
-                       return_complex=False)  # added for parity with spectrogram_torch; newer torch requires it
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-
-     return spec
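
A quick smoke test of the pipeline above, using hyperparameters commonly paired with these functions in VITS-style configs (22.05 kHz, 1024-point FFT, hop 256, 80 mels); the exact values are assumptions and should be read from the model config in practice:

```python
import torch

y = torch.rand(1, 22050) * 2 - 1  # one second of fake audio in [-1, 1]
mel = mel_spectrogram_torch(
    y, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0.0, fmax=None,  # fmax=None -> sr/2
)
print(mel.shape)  # (1, 80, frames)
```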
spaces/Ameaou/academic-chatgpt3.1/request_llm/README.md DELETED
@@ -1,54 +0,0 @@
- # How to use other large language models (testing on the v3.0 branch)
-
- ## ChatGLM
-
- - Install the dependencies: `pip install -r request_llm/requirements_chatglm.txt`
- - Edit the configuration: in config.py, set the value of LLM_MODEL to "chatglm"
-
- ``` sh
- LLM_MODEL = "chatglm"
- ```
- - Run!
- ``` sh
- python main.py
- ```
-
-
- ---
- ## Text-Generation-UI (TGUI)
-
- ### 1. Deploy TGUI
- ``` sh
- # 1 Clone the repository
- git clone https://github.com/oobabooga/text-generation-webui.git
- # 2 The latest code in this repository has problems; roll back a few weeks
- git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d
- # 3 Change into the directory
- cd text-generation-webui
- # 4 Install text-generation's extra dependencies
- pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
- # 5 Download a model
- python download-model.py facebook/galactica-1.3b
- # Other options include facebook/opt-1.3b
- #                       facebook/galactica-1.3b
- #                       facebook/galactica-6.7b
- #                       facebook/galactica-120b
- #                       facebook/pygmalion-1.3b etc.
- # See https://github.com/oobabooga/text-generation-webui for details
-
- # 6 Start text-generation
- python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b
- ```
-
- ### 2. Edit config.py
-
- ``` sh
- # LLM_MODEL format: tgui:[model]@[ws address]:[ws port] ; the port must match the one given above
- LLM_MODEL = "tgui:galactica-1.3b@localhost:7860"
- ```
-
- ### 3. Run!
- ``` sh
- cd chatgpt-academic
- python main.py
- ```
spaces/Ameaou/academic-chatgpt3.1/request_llm/bridge_all.py DELETED
@@ -1,210 +0,0 @@
-
- """
- This file mainly contains two functions.
-
- Function without multi-threading capability:
-     1. predict: used for normal conversation; fully interactive, not thread-safe
-
- Function with multi-threading capability:
-     2. predict_no_ui_long_connection: during experiments it was found that when predict_no_ui processed long documents, the connection to openai would drop easily; this function works around that with streaming, and it also supports multi-threading
- """
- import tiktoken
- from functools import wraps, lru_cache
- from concurrent.futures import ThreadPoolExecutor
-
- from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
- from .bridge_chatgpt import predict as chatgpt_ui
-
- from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
- from .bridge_chatglm import predict as chatglm_ui
-
- # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
- # from .bridge_tgui import predict as tgui_ui
-
- colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
-
- class LazyloadTiktoken(object):
-     def __init__(self, model):
-         self.model = model
-
-     @staticmethod
-     @lru_cache(maxsize=128)
-     def get_encoder(model):
-         print('Loading the tokenizer; on first run this may take a moment to download parameters')
-         tmp = tiktoken.encoding_for_model(model)
-         print('Tokenizer loaded')
-         return tmp
-
-     def encode(self, *args, **kwargs):
-         encoder = self.get_encoder(self.model)
-         return encoder.encode(*args, **kwargs)
-
-     def decode(self, *args, **kwargs):
-         encoder = self.get_encoder(self.model)
-         return encoder.decode(*args, **kwargs)
-
- tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
- tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
- get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=()))
- get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=()))
-
- model_info = {
-     # openai
-     "gpt-3.5-turbo": {
-         "fn_with_ui": chatgpt_ui,
-         "fn_without_ui": chatgpt_noui,
-         "endpoint": "https://api.openai.com/v1/chat/completions",
-         "max_token": 4096,
-         "tokenizer": tokenizer_gpt35,
-         "token_cnt": get_token_num_gpt35,
-     },
-
-     "gpt-4": {
-         "fn_with_ui": chatgpt_ui,
-         "fn_without_ui": chatgpt_noui,
-         "endpoint": "https://api.openai.com/v1/chat/completions",
-         "max_token": 8192,
-         "tokenizer": tokenizer_gpt4,
-         "token_cnt": get_token_num_gpt4,
-     },
-
-     # api_2d
-     "api2d-gpt-3.5-turbo": {
-         "fn_with_ui": chatgpt_ui,
-         "fn_without_ui": chatgpt_noui,
-         "endpoint": "https://openai.api2d.net/v1/chat/completions",
-         "max_token": 4096,
-         "tokenizer": tokenizer_gpt35,
-         "token_cnt": get_token_num_gpt35,
-     },
-
-     "api2d-gpt-4": {
-         "fn_with_ui": chatgpt_ui,
-         "fn_without_ui": chatgpt_noui,
-         "endpoint": "https://openai.api2d.net/v1/chat/completions",
-         "max_token": 8192,
-         "tokenizer": tokenizer_gpt4,
-         "token_cnt": get_token_num_gpt4,
-     },
-
-     # chatglm
-     "chatglm": {
-         "fn_with_ui": chatglm_ui,
-         "fn_without_ui": chatglm_noui,
-         "endpoint": None,
-         "max_token": 1024,
-         "tokenizer": tokenizer_gpt35,
-         "token_cnt": get_token_num_gpt35,
-     },
-
- }
-
-
- def LLM_CATCH_EXCEPTION(f):
-     """
-     Decorator that surfaces errors in the observation window.
-     """
-     def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
-         try:
-             return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
-         except Exception as e:
-             from toolbox import get_conf
-             import traceback
-             proxies, = get_conf('proxies')
-             tb_str = '\n```\n' + traceback.format_exc() + '\n```\n'
-             observe_window[0] = tb_str
-             return tb_str
-     return decorated
-
-
- def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
-     """
-     Send to the LLM and wait for the complete reply in one go, without showing intermediate output. Internally it streams to avoid the connection being cut mid-way.
-     inputs:
-         the input for this query
-     sys_prompt:
-         the silent system prompt
-     llm_kwargs:
-         internal tuning parameters of the LLM
-     history:
-         the list of previous conversation turns
-     observe_window = None:
-         passes already-produced output across threads, mostly for visual effect; can be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
-     """
-     import threading, time, copy
-
-     model = llm_kwargs['llm_model']
-     n_model = 1
-     if '&' not in model:
-         assert not model.startswith("tgui"), "TGUI does not support the function-plugin implementation"
-
-         # Query a single large language model:
-         method = model_info[model]["fn_without_ui"]
-         return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
-     else:
-         # Query several large language models at the same time:
-         executor = ThreadPoolExecutor(max_workers=4)
-         models = model.split('&')
-         n_model = len(models)
-
-         window_len = len(observe_window)
-         assert window_len == 3
-         window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
-
-         futures = []
-         for i in range(n_model):
-             model = models[i]
-             method = model_info[model]["fn_without_ui"]
-             llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
-             llm_kwargs_feedin['llm_model'] = model
-             future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
-             futures.append(future)
-
-         def mutex_manager(window_mutex, observe_window):
-             while True:
-                 time.sleep(0.5)
-                 if not window_mutex[-1]: break
-                 # watchdog
-                 for i in range(n_model):
-                     window_mutex[i][1] = observe_window[1]
-                 # observation window
-                 chat_string = []
-                 for i in range(n_model):
-                     chat_string.append( f"【{str(models[i])} says】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
-                 res = '<br/><br/>\n\n---\n\n'.join(chat_string)
-                 observe_window[0] = res
-
-         t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True)
-         t_model.start()
-
-         return_string_collect = []
-         while True:
-             worker_done = [h.done() for h in futures]
-             if all(worker_done):
-                 executor.shutdown()
-                 break
-             time.sleep(1)
-
-         for i, future in enumerate(futures):  # wait and get
-             return_string_collect.append( f"【{str(models[i])} says】: <font color=\"{colors[i]}\"> {future.result()} </font>" )
-
-         window_mutex[-1] = False  # stop the mutex thread
-         res = '<br/>\n\n---\n\n'.join(return_string_collect)
-         return res
-
-
- def predict(inputs, llm_kwargs, *args, **kwargs):
-     """
-     Send to the LLM and stream the output.
-     Used for the basic conversation feature.
-     inputs is the input for this query
-     top_p, temperature are internal tuning parameters of the LLM
-     history is the list of previous turns (note: if either inputs or history is too long, it will trigger a token-overflow error)
-     chatbot is the conversation list shown in the WebUI; modify it and yield it back to update the conversation on screen
-     additional_fn indicates which button was clicked; buttons are defined in functional.py
-     """
-
-     method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
-     yield from method(inputs, llm_kwargs, *args, **kwargs)
-
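
Adding a backend amounts to registering one more `model_info` entry whose two callables follow the same signatures as the existing bridges; a sketch with hypothetical names (the stand-in functions below would be a real bridge module's `predict`/`predict_no_ui_long_connection` in practice):

```python
# Hypothetical bridge module exposing the two standard entry points:
# from .bridge_mymodel import predict_no_ui_long_connection as mymodel_noui
# from .bridge_mymodel import predict as mymodel_ui

model_info["mymodel"] = {
    "fn_with_ui": chatgpt_ui,        # stand-in; real code would use mymodel_ui
    "fn_without_ui": chatgpt_noui,   # stand-in; real code would use mymodel_noui
    "endpoint": "https://example.com/v1/chat/completions",  # hypothetical
    "max_token": 4096,
    "tokenizer": tokenizer_gpt35,
    "token_cnt": get_token_num_gpt35,
}
# After registration, predict() and predict_no_ui_long_connection()
# dispatch to it whenever llm_kwargs['llm_model'] == "mymodel".
```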
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_act.py DELETED
@@ -1,34 +0,0 @@
- import os
-
- import torch
- from torch import nn
- from torch.nn import functional as F
- from torch.autograd import Function
-
-
- module_path = os.path.dirname(__file__)
-
-
- class FusedLeakyReLU(nn.Module):
-     def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
-         super().__init__()
-
-         self.bias = nn.Parameter(torch.zeros(channel))
-         self.negative_slope = negative_slope
-         self.scale = scale
-
-     def forward(self, input):
-         return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
-
-
- def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
-     rest_dim = [1] * (input.ndim - bias.ndim - 1)
-     input = input.cuda()
-     return (
-         F.leaky_relu(
-             input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope
-         )
-         * scale
-     )
-
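
For context, a quick hedged usage sketch of the module above. Note that fused_leaky_relu moves its input to CUDA, so this assumes a GPU is available; the import path is illustrative.

    import torch
    # from fused_act import FusedLeakyReLU  # illustrative import path

    act = FusedLeakyReLU(channel=64)   # zero-initialized, learnable per-channel bias
    x = torch.randn(2, 64, 16, 16)     # NCHW feature map with 64 channels
    y = act(x)                         # (x + bias) -> leaky_relu -> * sqrt(2)
    assert y.shape == x.shape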
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/image_processor.md DELETED
@@ -1,27 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # VAE Image Processor
-
- The [`VaeImageProcessor`] provides a unified API for [`StableDiffusionPipeline`]s to prepare image inputs for VAE encoding and to post-process outputs once they're decoded. This includes transformations such as resizing, normalization, and conversion between PIL Image, PyTorch, and NumPy arrays.
-
- All pipelines with [`VaeImageProcessor`] accept PIL Image, PyTorch tensor, or NumPy arrays as image inputs and return outputs based on the `output_type` argument set by the user. You can pass encoded image latents directly to the pipeline and return latents from the pipeline as a specific output with the `output_type` argument (for example `output_type="pt"`). This allows you to take the generated latents from one pipeline and pass them to another pipeline as input without leaving the latent space. It also makes it much easier to use multiple pipelines together by passing PyTorch tensors directly between different pipelines.
-
- ## VaeImageProcessor
-
- [[autodoc]] image_processor.VaeImageProcessor
-
- ## VaeImageProcessorLDM3D
-
- The [`VaeImageProcessorLDM3D`] accepts RGB and depth inputs and returns RGB and depth outputs.
-
- [[autodoc]] image_processor.VaeImageProcessorLDM3D
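
A minimal sketch of the `output_type` switch described above (hedged; the checkpoint id is only an example):

    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # Ask the pipeline for PyTorch tensors instead of PIL images:
    out = pipe("an astronaut riding a horse", output_type="pt")
    images = out.images  # torch.Tensor in [0, 1], shape (batch, channels, height, width)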
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/latent_diffusion.md DELETED
@@ -1,40 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Latent Diffusion
-
- Latent Diffusion was proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer.
-
- The abstract from the paper is:
-
- *By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.*
-
- The original codebase can be found at [Compvis/latent-diffusion](https://github.com/CompVis/latent-diffusion).
-
- <Tip>
-
- Make sure to check out the Schedulers [guide](/using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](/using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
-
- </Tip>
-
- ## LDMTextToImagePipeline
- [[autodoc]] LDMTextToImagePipeline
- 	- all
- 	- __call__
-
- ## LDMSuperResolutionPipeline
- [[autodoc]] LDMSuperResolutionPipeline
- 	- all
- 	- __call__
-
- ## ImagePipelineOutput
- [[autodoc]] pipelines.ImagePipelineOutput
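
For reference, a short example of running the text-to-image pipeline documented above; this mirrors standard diffusers usage, with the step count chosen arbitrarily:

    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    image = pipe("a painting of a squirrel eating a burger",
                 num_inference_steps=50).images[0]
    image.save("squirrel.png")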
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/unet_2d_blocks.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_swin_fpn.py DELETED
@@ -1,207 +0,0 @@
- # model settings
- model = dict(
-     type='CascadeRCNN',
-     pretrained=None,
-     backbone=dict(
-         type='SwinTransformer',
-         embed_dim=96,
-         depths=[2, 2, 6, 2],
-         num_heads=[3, 6, 12, 24],
-         window_size=7,
-         mlp_ratio=4.,
-         qkv_bias=True,
-         qk_scale=None,
-         drop_rate=0.,
-         attn_drop_rate=0.,
-         drop_path_rate=0.2,
-         ape=False,
-         patch_norm=True,
-         out_indices=(0, 1, 2, 3),
-         use_checkpoint=False),
-     neck=dict(
-         type='FPN',
-         in_channels=[96, 192, 384, 768],
-         out_channels=256,
-         num_outs=5),
-     rpn_head=dict(
-         type='RPNHead',
-         in_channels=256,
-         feat_channels=256,
-         anchor_generator=dict(
-             type='AnchorGenerator',
-             scales=[8],
-             ratios=[0.5, 1.0, 2.0],
-             strides=[4, 8, 16, 32, 64]),
-         bbox_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         loss_cls=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-         loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
-     roi_head=dict(
-         type='CascadeRoIHead',
-         num_stages=3,
-         stage_loss_weights=[1, 0.5, 0.25],
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         bbox_head=[
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.1, 0.1, 0.2, 0.2]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                                loss_weight=1.0)),
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.05, 0.05, 0.1, 0.1]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                                loss_weight=1.0)),
-             dict(
-                 type='Shared2FCBBoxHead',
-                 in_channels=256,
-                 fc_out_channels=1024,
-                 roi_feat_size=7,
-                 num_classes=80,
-                 bbox_coder=dict(
-                     type='DeltaXYWHBBoxCoder',
-                     target_means=[0., 0., 0., 0.],
-                     target_stds=[0.033, 0.033, 0.067, 0.067]),
-                 reg_class_agnostic=True,
-                 loss_cls=dict(
-                     type='CrossEntropyLoss',
-                     use_sigmoid=False,
-                     loss_weight=1.0),
-                 loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
-         ],
-         mask_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         mask_head=dict(
-             type='FCNMaskHead',
-             num_convs=4,
-             in_channels=256,
-             conv_out_channels=256,
-             num_classes=80,
-             loss_mask=dict(
-                 type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg = dict(
-         rpn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.7,
-                 neg_iou_thr=0.3,
-                 min_pos_iou=0.3,
-                 match_low_quality=True,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=256,
-                 pos_fraction=0.5,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=False),
-             allowed_border=0,
-             pos_weight=-1,
-             debug=False),
-         rpn_proposal=dict(
-             nms_across_levels=False,
-             nms_pre=2000,
-             nms_post=2000,
-             max_per_img=2000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=[
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.5,
-                     neg_iou_thr=0.5,
-                     min_pos_iou=0.5,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 mask_size=28,
-                 pos_weight=-1,
-                 debug=False),
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.6,
-                     neg_iou_thr=0.6,
-                     min_pos_iou=0.6,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 mask_size=28,
-                 pos_weight=-1,
-                 debug=False),
-             dict(
-                 assigner=dict(
-                     type='MaxIoUAssigner',
-                     pos_iou_thr=0.7,
-                     neg_iou_thr=0.7,
-                     min_pos_iou=0.7,
-                     match_low_quality=False,
-                     ignore_iof_thr=-1),
-                 sampler=dict(
-                     type='RandomSampler',
-                     num=512,
-                     pos_fraction=0.25,
-                     neg_pos_ub=-1,
-                     add_gt_as_proposals=True),
-                 mask_size=28,
-                 pos_weight=-1,
-                 debug=False)
-         ]),
-     test_cfg = dict(
-         rpn=dict(
-             nms_across_levels=False,
-             nms_pre=1000,
-             nms_post=1000,
-             max_per_img=1000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             score_thr=0.05,
-             nms=dict(type='nms', iou_threshold=0.5),
-             max_per_img=100,
-             mask_thr_binary=0.5)))
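
For context, a hedged sketch of how a base config like this is typically consumed in MMDetection 2.x (the file path is illustrative; since train_cfg/test_cfg live inside the model dict here, build_detector needs no extra arguments):

    from mmcv import Config
    from mmdet.models import build_detector

    cfg = Config.fromfile('configs/_base_/models/cascade_mask_rcnn_swin_fpn.py')
    model = build_detector(cfg.model)  # instantiates CascadeRCNN with the Swin backbone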
 
spaces/Andy1621/uniformer_image_detection/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = './htc_hrnetv2p_w40_20e_coco.py'
- # learning policy
- lr_config = dict(step=[24, 27])
- runner = dict(type='EpochBasedRunner', max_epochs=28)
 
spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py DELETED
@@ -1,50 +0,0 @@
- _base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
- # model settings
- model = dict(
-     neck=[
-         dict(
-             type='FPN',
-             in_channels=[256, 512, 1024, 2048],
-             out_channels=256,
-             num_outs=5),
-         dict(
-             type='BFP',
-             in_channels=256,
-             num_levels=5,
-             refine_level=2,
-             refine_type='non_local')
-     ],
-     roi_head=dict(
-         bbox_head=dict(
-             loss_bbox=dict(
-                 _delete_=True,
-                 type='BalancedL1Loss',
-                 alpha=0.5,
-                 gamma=1.5,
-                 beta=1.0,
-                 loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rcnn=dict(
-             sampler=dict(
-                 _delete_=True,
-                 type='CombinedSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 add_gt_as_proposals=True,
-                 pos_sampler=dict(type='InstanceBalancedPosSampler'),
-                 neg_sampler=dict(
-                     type='IoUBalancedNegSampler',
-                     floor_thr=-1,
-                     floor_fraction=0,
-                     num_bins=3)))))
- # dataset settings
- dataset_type = 'CocoDataset'
- data_root = 'data/coco/'
- data = dict(
-     train=dict(proposal_file=data_root +
-                'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'),
-     val=dict(proposal_file=data_root +
-              'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'),
-     test=dict(proposal_file=data_root +
-               'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
 
spaces/Andy1621/uniformer_image_segmentation/app.py DELETED
@@ -1,63 +0,0 @@
- import os
-
- import torch
- import torch.nn.functional as F
- import torchvision.transforms as T
- from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
- from mmseg.core.evaluation import get_palette
- import mmcv
-
- import gradio as gr
- from huggingface_hub import hf_hub_download
-
- # Device on which to run the model
- # Set to cuda to load on GPU
- device = "cpu"
- checkpoint_file = hf_hub_download(repo_id="Andy1621/uniformer", filename="upernet_global_small.pth")
- config_file = './exp/upernet_global_small/config.py'
- # init segmentor
- # build the model from a config file and a checkpoint file
- model = init_segmentor(config_file, checkpoint_file, device='cpu')
-
-
- def set_example_image(example: list) -> dict:
-     return gr.Image.update(value=example[0])
-
-
- def inference(img):
-     result = inference_segmentor(model, img)
-     res_img = show_result_pyplot(model, img, result, get_palette('ade'))
-     return res_img
-
-
- demo = gr.Blocks()
- with demo:
-     gr.Markdown(
-         """
-         # UniFormer-S
-         Gradio demo for <a href='https://github.com/Sense-X/UniFormer' target='_blank'>UniFormer</a>: To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.
-         """
-     )
-
-     with gr.Box():
-         with gr.Row():
-             with gr.Column():
-                 with gr.Row():
-                     input_image = gr.Image(label='Input Image', type='numpy')
-                 with gr.Row():
-                     submit_button = gr.Button('Submit')
-             with gr.Column():
-                 res_image = gr.Image(type='numpy', label='Segmentation Results')
-         with gr.Row():
-             example_images = gr.Dataset(components=[input_image], samples=[['demo1.jpg'], ['demo2.jpg'], ['demo3.jpg']])
-
-     gr.Markdown(
-         """
-         <p style='text-align: center'><a href='https://arxiv.org/abs/2201.09450' target='_blank'>UniFormer: Unifying Convolution and Self-attention for Visual Recognition</a> | <a href='https://github.com/Sense-X/UniFormer' target='_blank'>Github Repo</a></p>
-         """
-     )
-
-     submit_button.click(fn=inference, inputs=input_image, outputs=res_image)
-     example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
-
- demo.launch(enable_queue=True)
 
spaces/Andy1621/uniformer_image_segmentation/configs/fastscnn/fast_scnn_4x8_80k_lr0.12_cityscapes.py DELETED
@@ -1,10 +0,0 @@
- _base_ = [
-     '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
- ]
-
- # Re-config the data sampler.
- data = dict(samples_per_gpu=2, workers_per_gpu=4)
-
- # Re-config the optimizer.
- optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5)
 
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/script_util.py DELETED
@@ -1,452 +0,0 @@
- import argparse
- import inspect
-
- from . import gaussian_diffusion as gd
- from .respace import SpacedDiffusion, space_timesteps
- from .unet import SuperResModel, UNetModel, EncoderUNetModel
-
- NUM_CLASSES = 1000
-
-
- def diffusion_defaults():
-     """
-     Defaults for image and classifier training.
-     """
-     return dict(
-         learn_sigma=False,
-         diffusion_steps=1000,
-         noise_schedule="linear",
-         timestep_respacing="",
-         use_kl=False,
-         predict_xstart=False,
-         rescale_timesteps=False,
-         rescale_learned_sigmas=False,
-     )
-
-
- def classifier_defaults():
-     """
-     Defaults for classifier models.
-     """
-     return dict(
-         image_size=64,
-         classifier_use_fp16=False,
-         classifier_width=128,
-         classifier_depth=2,
-         classifier_attention_resolutions="32,16,8",  # 16
-         classifier_use_scale_shift_norm=True,  # False
-         classifier_resblock_updown=True,  # False
-         classifier_pool="attention",
-     )
-
-
- def model_and_diffusion_defaults():
-     """
-     Defaults for image training.
-     """
-     res = dict(
-         image_size=64,
-         num_channels=128,
-         num_res_blocks=2,
-         num_heads=4,
-         num_heads_upsample=-1,
-         num_head_channels=-1,
-         attention_resolutions="16,8",
-         channel_mult="",
-         dropout=0.0,
-         class_cond=False,
-         use_checkpoint=False,
-         use_scale_shift_norm=True,
-         resblock_updown=False,
-         use_fp16=False,
-         use_new_attention_order=False,
-     )
-     res.update(diffusion_defaults())
-     return res
-
-
- def classifier_and_diffusion_defaults():
-     res = classifier_defaults()
-     res.update(diffusion_defaults())
-     return res
-
-
- def create_model_and_diffusion(
-     image_size,
-     class_cond,
-     learn_sigma,
-     num_channels,
-     num_res_blocks,
-     channel_mult,
-     num_heads,
-     num_head_channels,
-     num_heads_upsample,
-     attention_resolutions,
-     dropout,
-     diffusion_steps,
-     noise_schedule,
-     timestep_respacing,
-     use_kl,
-     predict_xstart,
-     rescale_timesteps,
-     rescale_learned_sigmas,
-     use_checkpoint,
-     use_scale_shift_norm,
-     resblock_updown,
-     use_fp16,
-     use_new_attention_order,
- ):
-     model = create_model(
-         image_size,
-         num_channels,
-         num_res_blocks,
-         channel_mult=channel_mult,
-         learn_sigma=learn_sigma,
-         class_cond=class_cond,
-         use_checkpoint=use_checkpoint,
-         attention_resolutions=attention_resolutions,
-         num_heads=num_heads,
-         num_head_channels=num_head_channels,
-         num_heads_upsample=num_heads_upsample,
-         use_scale_shift_norm=use_scale_shift_norm,
-         dropout=dropout,
-         resblock_updown=resblock_updown,
-         use_fp16=use_fp16,
-         use_new_attention_order=use_new_attention_order,
-     )
-     diffusion = create_gaussian_diffusion(
-         steps=diffusion_steps,
-         learn_sigma=learn_sigma,
-         noise_schedule=noise_schedule,
-         use_kl=use_kl,
-         predict_xstart=predict_xstart,
-         rescale_timesteps=rescale_timesteps,
-         rescale_learned_sigmas=rescale_learned_sigmas,
-         timestep_respacing=timestep_respacing,
-     )
-     return model, diffusion
-
-
- def create_model(
-     image_size,
-     num_channels,
-     num_res_blocks,
-     channel_mult="",
-     learn_sigma=False,
-     class_cond=False,
-     use_checkpoint=False,
-     attention_resolutions="16",
-     num_heads=1,
-     num_head_channels=-1,
-     num_heads_upsample=-1,
-     use_scale_shift_norm=False,
-     dropout=0,
-     resblock_updown=False,
-     use_fp16=False,
-     use_new_attention_order=False,
- ):
-     if channel_mult == "":
-         if image_size == 512:
-             channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
-         elif image_size == 256:
-             channel_mult = (1, 1, 2, 2, 4, 4)
-         elif image_size == 128:
-             channel_mult = (1, 1, 2, 3, 4)
-         elif image_size == 64:
-             channel_mult = (1, 2, 3, 4)
-         else:
-             raise ValueError(f"unsupported image size: {image_size}")
-     else:
-         channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(","))
-
-     attention_ds = []
-     for res in attention_resolutions.split(","):
-         attention_ds.append(image_size // int(res))
-
-     return UNetModel(
-         image_size=image_size,
-         in_channels=3,
-         model_channels=num_channels,
-         out_channels=(3 if not learn_sigma else 6),
-         num_res_blocks=num_res_blocks,
-         attention_resolutions=tuple(attention_ds),
-         dropout=dropout,
-         channel_mult=channel_mult,
-         num_classes=(NUM_CLASSES if class_cond else None),
-         use_checkpoint=use_checkpoint,
-         use_fp16=use_fp16,
-         num_heads=num_heads,
-         num_head_channels=num_head_channels,
-         num_heads_upsample=num_heads_upsample,
-         use_scale_shift_norm=use_scale_shift_norm,
-         resblock_updown=resblock_updown,
-         use_new_attention_order=use_new_attention_order,
-     )
-
-
- def create_classifier_and_diffusion(
-     image_size,
-     classifier_use_fp16,
-     classifier_width,
-     classifier_depth,
-     classifier_attention_resolutions,
-     classifier_use_scale_shift_norm,
-     classifier_resblock_updown,
-     classifier_pool,
-     learn_sigma,
-     diffusion_steps,
-     noise_schedule,
-     timestep_respacing,
-     use_kl,
-     predict_xstart,
-     rescale_timesteps,
-     rescale_learned_sigmas,
- ):
-     classifier = create_classifier(
-         image_size,
-         classifier_use_fp16,
-         classifier_width,
-         classifier_depth,
-         classifier_attention_resolutions,
-         classifier_use_scale_shift_norm,
-         classifier_resblock_updown,
-         classifier_pool,
-     )
-     diffusion = create_gaussian_diffusion(
-         steps=diffusion_steps,
-         learn_sigma=learn_sigma,
-         noise_schedule=noise_schedule,
-         use_kl=use_kl,
-         predict_xstart=predict_xstart,
-         rescale_timesteps=rescale_timesteps,
-         rescale_learned_sigmas=rescale_learned_sigmas,
-         timestep_respacing=timestep_respacing,
-     )
-     return classifier, diffusion
-
-
- def create_classifier(
-     image_size,
-     classifier_use_fp16,
-     classifier_width,
-     classifier_depth,
-     classifier_attention_resolutions,
-     classifier_use_scale_shift_norm,
-     classifier_resblock_updown,
-     classifier_pool,
- ):
-     if image_size == 512:
-         channel_mult = (0.5, 1, 1, 2, 2, 4, 4)
-     elif image_size == 256:
-         channel_mult = (1, 1, 2, 2, 4, 4)
-     elif image_size == 128:
-         channel_mult = (1, 1, 2, 3, 4)
-     elif image_size == 64:
-         channel_mult = (1, 2, 3, 4)
-     else:
-         raise ValueError(f"unsupported image size: {image_size}")
-
-     attention_ds = []
-     for res in classifier_attention_resolutions.split(","):
-         attention_ds.append(image_size // int(res))
-
-     return EncoderUNetModel(
-         image_size=image_size,
-         in_channels=3,
-         model_channels=classifier_width,
-         out_channels=1000,
-         num_res_blocks=classifier_depth,
-         attention_resolutions=tuple(attention_ds),
-         channel_mult=channel_mult,
-         use_fp16=classifier_use_fp16,
-         num_head_channels=64,
-         use_scale_shift_norm=classifier_use_scale_shift_norm,
-         resblock_updown=classifier_resblock_updown,
-         pool=classifier_pool,
-     )
-
-
- def sr_model_and_diffusion_defaults():
-     res = model_and_diffusion_defaults()
-     res["large_size"] = 256
-     res["small_size"] = 64
-     arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
-     for k in res.copy().keys():
-         if k not in arg_names:
-             del res[k]
-     return res
-
-
- def sr_create_model_and_diffusion(
-     large_size,
-     small_size,
-     class_cond,
-     learn_sigma,
-     num_channels,
-     num_res_blocks,
-     num_heads,
-     num_head_channels,
-     num_heads_upsample,
-     attention_resolutions,
-     dropout,
-     diffusion_steps,
-     noise_schedule,
-     timestep_respacing,
-     use_kl,
-     predict_xstart,
-     rescale_timesteps,
-     rescale_learned_sigmas,
-     use_checkpoint,
-     use_scale_shift_norm,
-     resblock_updown,
-     use_fp16,
- ):
-     model = sr_create_model(
-         large_size,
-         small_size,
-         num_channels,
-         num_res_blocks,
-         learn_sigma=learn_sigma,
-         class_cond=class_cond,
-         use_checkpoint=use_checkpoint,
-         attention_resolutions=attention_resolutions,
-         num_heads=num_heads,
-         num_head_channels=num_head_channels,
-         num_heads_upsample=num_heads_upsample,
-         use_scale_shift_norm=use_scale_shift_norm,
-         dropout=dropout,
-         resblock_updown=resblock_updown,
-         use_fp16=use_fp16,
-     )
-     diffusion = create_gaussian_diffusion(
-         steps=diffusion_steps,
-         learn_sigma=learn_sigma,
-         noise_schedule=noise_schedule,
-         use_kl=use_kl,
-         predict_xstart=predict_xstart,
-         rescale_timesteps=rescale_timesteps,
-         rescale_learned_sigmas=rescale_learned_sigmas,
-         timestep_respacing=timestep_respacing,
-     )
-     return model, diffusion
-
-
- def sr_create_model(
-     large_size,
-     small_size,
-     num_channels,
-     num_res_blocks,
-     learn_sigma,
-     class_cond,
-     use_checkpoint,
-     attention_resolutions,
-     num_heads,
-     num_head_channels,
-     num_heads_upsample,
-     use_scale_shift_norm,
-     dropout,
-     resblock_updown,
-     use_fp16,
- ):
-     _ = small_size  # hack to prevent unused variable
-
-     if large_size == 512:
-         channel_mult = (1, 1, 2, 2, 4, 4)
-     elif large_size == 256:
-         channel_mult = (1, 1, 2, 2, 4, 4)
-     elif large_size == 64:
-         channel_mult = (1, 2, 3, 4)
-     else:
-         raise ValueError(f"unsupported large size: {large_size}")
-
-     attention_ds = []
-     for res in attention_resolutions.split(","):
-         attention_ds.append(large_size // int(res))
-
-     return SuperResModel(
-         image_size=large_size,
-         in_channels=3,
-         model_channels=num_channels,
-         out_channels=(3 if not learn_sigma else 6),
-         num_res_blocks=num_res_blocks,
-         attention_resolutions=tuple(attention_ds),
-         dropout=dropout,
-         channel_mult=channel_mult,
-         num_classes=(NUM_CLASSES if class_cond else None),
-         use_checkpoint=use_checkpoint,
-         num_heads=num_heads,
-         num_head_channels=num_head_channels,
-         num_heads_upsample=num_heads_upsample,
-         use_scale_shift_norm=use_scale_shift_norm,
-         resblock_updown=resblock_updown,
-         use_fp16=use_fp16,
-     )
-
-
- def create_gaussian_diffusion(
-     *,
-     steps=1000,
-     learn_sigma=False,
-     sigma_small=False,
-     noise_schedule="linear",
-     use_kl=False,
-     predict_xstart=False,
-     rescale_timesteps=False,
-     rescale_learned_sigmas=False,
-     timestep_respacing="",
- ):
-     betas = gd.get_named_beta_schedule(noise_schedule, steps)
-     if use_kl:
-         loss_type = gd.LossType.RESCALED_KL
-     elif rescale_learned_sigmas:
-         loss_type = gd.LossType.RESCALED_MSE
-     else:
-         loss_type = gd.LossType.MSE
-     if not timestep_respacing:
-         timestep_respacing = [steps]
-     return SpacedDiffusion(
-         use_timesteps=space_timesteps(steps, timestep_respacing),
-         betas=betas,
-         model_mean_type=(
-             gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
-         ),
-         model_var_type=(
-             (
-                 gd.ModelVarType.FIXED_LARGE
-                 if not sigma_small
-                 else gd.ModelVarType.FIXED_SMALL
-             )
-             if not learn_sigma
-             else gd.ModelVarType.LEARNED_RANGE
-         ),
-         loss_type=loss_type,
-         rescale_timesteps=rescale_timesteps,
-     )
-
-
- def add_dict_to_argparser(parser, default_dict):
-     for k, v in default_dict.items():
-         v_type = type(v)
-         if v is None:
-             v_type = str
-         elif isinstance(v, bool):
-             v_type = str2bool
-         parser.add_argument(f"--{k}", default=v, type=v_type)
-
-
- def args_to_dict(args, keys):
-     return {k: getattr(args, k) for k in keys}
-
-
- def str2bool(v):
-     """
-     https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
-     """
-     if isinstance(v, bool):
-         return v
-     if v.lower() in ("yes", "true", "t", "y", "1"):
-         return True
-     elif v.lower() in ("no", "false", "f", "n", "0"):
-         return False
-     else:
-         raise argparse.ArgumentTypeError("boolean value expected")
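
For orientation, a minimal sketch of how these helpers are typically combined in a training or sampling script (this mirrors upstream guided-diffusion usage; the CLI values are only examples):

    import argparse

    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, model_and_diffusion_defaults())
    args = parser.parse_args(["--image_size", "256", "--learn_sigma", "true"])

    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )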
 
spaces/AntNikYab/NaturalLanguageProcessing/pages/TheBroCode.py DELETED
@@ -1,64 +0,0 @@
- import streamlit as st
- import textwrap
- import torch
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
-
- DEVICE = torch.device("cpu")
- # Load GPT-2 model and tokenizer
- tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
- model_finetuned = GPT2LMHeadModel.from_pretrained(
-     'sberbank-ai/rugpt3small_based_on_gpt2',
-     output_attentions = False,
-     output_hidden_states = False,
- )
- if torch.cuda.is_available():
-     model_finetuned.load_state_dict(torch.load('models/brat.pt'))
- else:
-     model_finetuned.load_state_dict(torch.load('models/brat.pt', map_location=torch.device('cpu')))
- model_finetuned.eval()
-
- # Function to generate text
- def generate_text(prompt, temperature, top_p, max_length, top_k):
-     input_ids = tokenizer.encode(prompt, return_tensors="pt")
-
-     with torch.no_grad():
-         out = model_finetuned.generate(
-             input_ids,
-             do_sample=True,
-             num_beams=5,
-             temperature=temperature,
-             top_p=top_p,
-             max_length=max_length,
-             top_k=top_k,
-             no_repeat_ngram_size=3,
-             num_return_sequences=1,
-         )
-
-     generated_text = list(map(tokenizer.decode, out))
-     return generated_text
-
- # Streamlit app
- def main():
-     st.title("Text generation: 'The Bro Code'")
-
-     # User inputs
-     prompt = st.text_area("Enter the beginning of the text")
-     temperature = st.slider("Temperature", min_value=0.2, max_value=2.5, value=1.8, step=0.1)
-     top_p = st.slider("Top-p", min_value=0.1, max_value=1.0, value=0.9, step=0.1)
-     max_length = st.slider("Max Length", min_value=10, max_value=300, value=100, step=10)
-     top_k = st.slider("Top-k", min_value=1, max_value=500, value=500, step=10)
-     num_return_sequences = st.slider("Number of Sequences", min_value=1, max_value=5, value=1, step=1)
-
-     if st.button("Generate Text"):
-         st.subheader("Generated Text:")
-         for i in range(num_return_sequences):
-             generated_text = generate_text(prompt, temperature, top_p, max_length, top_k)
-             st.write(f"Generated Text {i + 1}:")
-             wrapped_text = textwrap.fill(generated_text[0], width=80)
-             st.write(wrapped_text)
-             st.write("------------------")
-
-     st.sidebar.image('images/theBROcode.jpeg', use_column_width=True)
-
- if __name__ == "__main__":
-     main()
 
spaces/Ashrafb/codellama-34b/USE_POLICY.md DELETED
@@ -1,50 +0,0 @@
- # Llama Code Acceptable Use Policy
-
- Meta is committed to promoting safe and fair use of its tools and features, including Llama Code. If you access or use Llama Code, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
-
- ## Prohibited Uses
- We want everyone to use Llama Code safely and responsibly. You agree you will not use, or allow others to use, Llama Code to:
-
- 1. Violate the law or others’ rights, including to:
-     1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
-         1. Violence or terrorism
-         2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
-         3. Human trafficking, exploitation, and sexual violence
-         4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
-         5. Sexual solicitation
-         6. Any other criminal activity
-     2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
-     3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
-     4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
-     5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
-     6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
-     7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
-
- 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama Code related to the following:
-     1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
-     2. Guns and illegal weapons (including weapon development)
-     3. Illegal drugs and regulated/controlled substances
-     4. Operation of critical infrastructure, transportation technologies, or heavy machinery
-     5. Self-harm or harm to others, including suicide, cutting, and eating disorders
-     6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
-
- 3. Intentionally deceive or mislead others, including use of Llama Code related to the following:
-     1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
-     2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
-     3. Generating, promoting, or further distributing spam
-     4. Impersonating another individual without consent, authorization, or legal right
-     5. Representing that the use of Llama Code or outputs are human-generated
-     6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
- 4. Fail to appropriately disclose to end users any known dangers of your AI system
-
- Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
-
- * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
- * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py DELETED
@@ -1,599 +0,0 @@
- import re
- import itertools
- import textwrap
- import functools
-
- try:
-     from importlib.resources import files  # type: ignore
- except ImportError:  # pragma: nocover
-     from pkg_resources.extern.importlib_resources import files  # type: ignore
-
- from pkg_resources.extern.jaraco.functools import compose, method_cache
- from pkg_resources.extern.jaraco.context import ExceptionTrap
-
-
- def substitution(old, new):
-     """
-     Return a function that will perform a substitution on a string
-     """
-     return lambda s: s.replace(old, new)
-
-
- def multi_substitution(*substitutions):
-     """
-     Take a sequence of pairs specifying substitutions, and create
-     a function that performs those substitutions.
-
-     >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
-     'baz'
-     """
-     substitutions = itertools.starmap(substitution, substitutions)
-     # compose function applies last function first, so reverse the
-     # substitutions to get the expected order.
-     substitutions = reversed(tuple(substitutions))
-     return compose(*substitutions)
-
-
- class FoldedCase(str):
-     """
-     A case insensitive string class; behaves just like str
-     except compares equal when the only variation is case.
-
-     >>> s = FoldedCase('hello world')
-
-     >>> s == 'Hello World'
-     True
-
-     >>> 'Hello World' == s
-     True
-
-     >>> s != 'Hello World'
-     False
-
-     >>> s.index('O')
-     4
-
-     >>> s.split('O')
-     ['hell', ' w', 'rld']
-
-     >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
-     ['alpha', 'Beta', 'GAMMA']
-
-     Sequence membership is straightforward.
-
-     >>> "Hello World" in [s]
-     True
-     >>> s in ["Hello World"]
-     True
-
-     You may test for set inclusion, but candidate and elements
-     must both be folded.
-
-     >>> FoldedCase("Hello World") in {s}
-     True
-     >>> s in {FoldedCase("Hello World")}
-     True
-
-     String inclusion works as long as the FoldedCase object
-     is on the right.
-
-     >>> "hello" in FoldedCase("Hello World")
-     True
-
-     But not if the FoldedCase object is on the left:
-
-     >>> FoldedCase('hello') in 'Hello World'
-     False
-
-     In that case, use ``in_``:
-
-     >>> FoldedCase('hello').in_('Hello World')
-     True
-
-     >>> FoldedCase('hello') > FoldedCase('Hello')
-     False
-     """
-
-     def __lt__(self, other):
-         return self.lower() < other.lower()
-
-     def __gt__(self, other):
-         return self.lower() > other.lower()
-
-     def __eq__(self, other):
-         return self.lower() == other.lower()
-
-     def __ne__(self, other):
-         return self.lower() != other.lower()
-
-     def __hash__(self):
-         return hash(self.lower())
-
-     def __contains__(self, other):
-         return super().lower().__contains__(other.lower())
-
-     def in_(self, other):
-         "Does self appear in other?"
-         return self in FoldedCase(other)
-
-     # cache lower since it's likely to be called frequently.
-     @method_cache
-     def lower(self):
-         return super().lower()
-
-     def index(self, sub):
-         return self.lower().index(sub.lower())
-
-     def split(self, splitter=' ', maxsplit=0):
-         pattern = re.compile(re.escape(splitter), re.I)
-         return pattern.split(self, maxsplit)
-
-
- # Python 3.8 compatibility
- _unicode_trap = ExceptionTrap(UnicodeDecodeError)
-
-
- @_unicode_trap.passes
- def is_decodable(value):
-     r"""
-     Return True if the supplied value is decodable (using the default
-     encoding).
-
-     >>> is_decodable(b'\xff')
-     False
-     >>> is_decodable(b'\x32')
-     True
-     """
-     value.decode()
-
-
- def is_binary(value):
-     r"""
-     Return True if the value appears to be binary (that is, it's a byte
-     string and isn't decodable).
-
-     >>> is_binary(b'\xff')
-     True
-     >>> is_binary('\xff')
-     False
-     """
-     return isinstance(value, bytes) and not is_decodable(value)
-
-
- def trim(s):
-     r"""
-     Trim something like a docstring to remove the whitespace that
-     is common due to indentation and formatting.
-
-     >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
-     'foo = bar\n\tbar = baz'
-     """
-     return textwrap.dedent(s).strip()
-
-
- def wrap(s):
-     """
-     Wrap lines of text, retaining existing newlines as
-     paragraph markers.
-
-     >>> print(wrap(lorem_ipsum))
-     Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
-     eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
-     minim veniam, quis nostrud exercitation ullamco laboris nisi ut
-     aliquip ex ea commodo consequat. Duis aute irure dolor in
-     reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
-     pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
-     culpa qui officia deserunt mollit anim id est laborum.
-     <BLANKLINE>
-     Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
-     varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
-     magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
-     gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
-     risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
-     eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
-     fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
-     a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
-     neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
-     sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
-     nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
-     quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
-     molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
-     """
-     paragraphs = s.splitlines()
-     wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
-     return '\n\n'.join(wrapped)
-
-
- def unwrap(s):
-     r"""
-     Given a multi-line string, return an unwrapped version.
-
-     >>> wrapped = wrap(lorem_ipsum)
-     >>> wrapped.count('\n')
-     20
-     >>> unwrapped = unwrap(wrapped)
-     >>> unwrapped.count('\n')
-     1
-     >>> print(unwrapped)
-     Lorem ipsum dolor sit amet, consectetur adipiscing ...
-     Curabitur pretium tincidunt lacus. Nulla gravida orci ...
-
-     """
-     paragraphs = re.split(r'\n\n+', s)
-     cleaned = (para.replace('\n', ' ') for para in paragraphs)
-     return '\n'.join(cleaned)
-
-
- class Splitter(object):
-     """object that will split a string with the given arguments for each call
-
-     >>> s = Splitter(',')
-     >>> s('hello, world, this is your, master calling')
-     ['hello', ' world', ' this is your', ' master calling']
-     """
-
-     def __init__(self, *args):
-         self.args = args
-
-     def __call__(self, s):
-         return s.split(*self.args)
-
-
- def indent(string, prefix=' ' * 4):
-     """
-     >>> indent('foo')
-     '    foo'
-     """
-     return prefix + string
-
-
- class WordSet(tuple):
-     """
-     Given an identifier, return the words that identifier represents,
-     whether in camel case, underscore-separated, etc.
-
-     >>> WordSet.parse("camelCase")
-     ('camel', 'Case')
-
-     >>> WordSet.parse("under_sep")
-     ('under', 'sep')
-
-     Acronyms should be retained
-
-     >>> WordSet.parse("firstSNL")
-     ('first', 'SNL')
-
-     >>> WordSet.parse("you_and_I")
-     ('you', 'and', 'I')
-
-     >>> WordSet.parse("A simple test")
-     ('A', 'simple', 'test')
-
-     Multiple caps should not interfere with the first cap of another word.
-
-     >>> WordSet.parse("myABCClass")
-     ('my', 'ABC', 'Class')
-
-     The result is a WordSet, so you can get the form you need.
-
-     >>> WordSet.parse("myABCClass").underscore_separated()
-     'my_ABC_Class'
-
-     >>> WordSet.parse('a-command').camel_case()
-     'ACommand'
-
-     >>> WordSet.parse('someIdentifier').lowered().space_separated()
-     'some identifier'
-
-     Slices of the result should return another WordSet.
-
-     >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
-     'out_of_context'
-
-     >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
-     'word set'
-
-     >>> example = WordSet.parse('figured it out')
-     >>> example.headless_camel_case()
-     'figuredItOut'
-     >>> example.dash_separated()
-     'figured-it-out'
-
-     """
-
-     _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
-     def capitalized(self):
-         return WordSet(word.capitalize() for word in self)
-
-     def lowered(self):
-         return WordSet(word.lower() for word in self)
-
-     def camel_case(self):
-         return ''.join(self.capitalized())
-
-     def headless_camel_case(self):
-         words = iter(self)
-         first = next(words).lower()
-         new_words = itertools.chain((first,), WordSet(words).camel_case())
-         return ''.join(new_words)
-
-     def underscore_separated(self):
-         return '_'.join(self)
-
-     def dash_separated(self):
-         return '-'.join(self)
-
-     def space_separated(self):
-         return ' '.join(self)
-
-     def trim_right(self, item):
-         """
-         Remove the item from the end of the set.
-
-         >>> WordSet.parse('foo bar').trim_right('foo')
-         ('foo', 'bar')
-         >>> WordSet.parse('foo bar').trim_right('bar')
-         ('foo',)
-         >>> WordSet.parse('').trim_right('bar')
-         ()
-         """
-         return self[:-1] if self and self[-1] == item else self
-
-     def trim_left(self, item):
-         """
-         Remove the item from the beginning of the set.
-
-         >>> WordSet.parse('foo bar').trim_left('foo')
-         ('bar',)
-         >>> WordSet.parse('foo bar').trim_left('bar')
-         ('foo', 'bar')
-         >>> WordSet.parse('').trim_left('bar')
-         ()
-         """
-         return self[1:] if self and self[0] == item else self
-
-     def trim(self, item):
-         """
-         >>> WordSet.parse('foo bar').trim('foo')
-         ('bar',)
-         """
-         return self.trim_left(item).trim_right(item)
-
-     def __getitem__(self, item):
-         result = super(WordSet, self).__getitem__(item)
-         if isinstance(item, slice):
-             result = WordSet(result)
-         return result
-
-     @classmethod
-     def parse(cls, identifier):
-         matches = cls._pattern.finditer(identifier)
-         return WordSet(match.group(0) for match in matches)
-
-     @classmethod
-     def from_class_name(cls, subject):
-         return cls.parse(subject.__class__.__name__)
-
-
- # for backward compatibility
- words = WordSet.parse
-
-
- def simple_html_strip(s):
-     r"""
-     Remove HTML from the string `s`.
-
-     >>> str(simple_html_strip(''))
-     ''
-
-     >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
-     A stormy day in paradise
-
-     >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
-     Somebody  tell the truth.
-
-     >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
-     What about
-     multiple lines?
-     """
-     html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
-     texts = (match.group(3) or '' for match in html_stripper.finditer(s))
-     return ''.join(texts)
-
-
- class SeparatedValues(str):
-     """
-     A string separated by a separator. Overrides __iter__ for getting
-     the values.
-
-     >>> list(SeparatedValues('a,b,c'))
-     ['a', 'b', 'c']
-
-     Whitespace is stripped and empty values are discarded.
-
-     >>> list(SeparatedValues(' a, b , c, '))
-     ['a', 'b', 'c']
-     """
-
-     separator = ','
-
-     def __iter__(self):
-         parts = self.split(self.separator)
-         return filter(None, (part.strip() for part in parts))
-
-
- class Stripper:
-     r"""
-     Given a series of lines, find the common prefix and strip it from them.
-
-     >>> lines = [
-     ...     'abcdefg\n',
-     ...     'abc\n',
-     ...     'abcde\n',
-     ... ]
-     >>> res = Stripper.strip_prefix(lines)
-     >>> res.prefix
-     'abc'
-     >>> list(res.lines)
-     ['defg\n', '\n', 'de\n']
-
-     If no prefix is common, nothing should be stripped.
-
-     >>> lines = [
-     ...     'abcd\n',
-     ...     '1234\n',
-     ... ]
-     >>> res = Stripper.strip_prefix(lines)
-     >>> res.prefix = ''
-     >>> list(res.lines)
-     ['abcd\n', '1234\n']
-     """
-
-     def __init__(self, prefix, lines):
-         self.prefix = prefix
-         self.lines = map(self, lines)
-
-     @classmethod
-     def strip_prefix(cls, lines):
-         prefix_lines, lines = itertools.tee(lines)
-         prefix = functools.reduce(cls.common_prefix, prefix_lines)
-         return cls(prefix, lines)
-
-     def __call__(self, line):
-         if not self.prefix:
-             return line
-         null, prefix, rest = line.partition(self.prefix)
-         return rest
-
-     @staticmethod
-     def common_prefix(s1, s2):
-         """
-         Return the common prefix of two lines.
-         """
-         index = min(len(s1), len(s2))
-         while s1[:index] != s2[:index]:
-             index -= 1
-         return s1[:index]
-
-
- def remove_prefix(text, prefix):
-     """
-     Remove the prefix from the text if it exists.
-
-     >>> remove_prefix('underwhelming performance', 'underwhelming ')
-     'performance'
-
-     >>> remove_prefix('something special', 'sample')
-     'something special'
-     """
-     null, prefix, rest = text.rpartition(prefix)
-     return rest
-
-
- def remove_suffix(text, suffix):
-     """
-     Remove the suffix from the text if it exists.
-
-     >>> remove_suffix('name.git', '.git')
-     'name'
-
-     >>> remove_suffix('something special', 'sample')
-     'something special'
-     """
-     rest, suffix, null = text.partition(suffix)
-     return rest
-
-
- def normalize_newlines(text):
-     r"""
-     Replace alternate newlines with the canonical newline.
-
-     >>> normalize_newlines('Lorem Ipsum\u2029')
-     'Lorem Ipsum\n'
-     >>> normalize_newlines('Lorem Ipsum\r\n')
-     'Lorem Ipsum\n'
-     >>> normalize_newlines('Lorem Ipsum\x85')
-     'Lorem Ipsum\n'
-     """
-     newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
-     pattern = '|'.join(newlines)
-     return re.sub(pattern, '\n', text)
-
-
- def _nonblank(str):
-     return str and not str.startswith('#')
-
-
- @functools.singledispatch
- def yield_lines(iterable):
-     r"""
-     Yield valid lines of a string or iterable.
-
-     >>> list(yield_lines(''))
-     []
-     >>> list(yield_lines(['foo', 'bar']))
-     ['foo', 'bar']
-     >>> list(yield_lines('foo\nbar'))
-     ['foo', 'bar']
-     >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
-     ['foo', 'baz #comment']
-     >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
-     ['foo', 'bar', 'baz', 'bing']
-     """
-     return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
- @yield_lines.register(str)
- def _(text):
-     return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
- def drop_comment(line):
-     """
-     Drop comments.
-
-     >>> drop_comment('foo # bar')
-     'foo'
-
-     A hash without a space may be in a URL.
-
-     >>> drop_comment('http://example.com/foo#bar')
-     'http://example.com/foo#bar'
-     """
-     return line.partition(' #')[0]
-
-
- def join_continuation(lines):
-     r"""
-     Join lines continued by a trailing backslash.
-
-     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-     ['foobar', 'baz']
-     >>> list(join_continuation(['foo \\', 'bar', 'baz']))
-     ['foobar', 'baz']
-     >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
-     ['foobarbaz']
-
-     Not sure why, but...
-     The character preceding the backslash is also elided.
-
-     >>> list(join_continuation(['goo\\', 'dly']))
-     ['godly']
-
-     A terrible idea, but...
-     If no line is available to continue, suppress the lines.
-
-     >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
-     ['foo']
-     """
-     lines = iter(lines)
-     for item in lines:
-         while item.endswith('\\'):
-             try:
-                 item = item[:-2].strip() + next(lines)
-             except StopIteration:
-                 return
-         yield item
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/packaging/__about__.py DELETED
@@ -1,26 +0,0 @@
- # This file is dual licensed under the terms of the Apache License, Version
- # 2.0, and the BSD License. See the LICENSE file in the root of this repository
- # for complete details.
-
- __all__ = [
-     "__title__",
-     "__summary__",
-     "__uri__",
-     "__version__",
-     "__author__",
-     "__email__",
-     "__license__",
-     "__copyright__",
- ]
-
- __title__ = "packaging"
- __summary__ = "Core utilities for Python packages"
- __uri__ = "https://github.com/pypa/packaging"
-
- __version__ = "21.3"
-
- __author__ = "Donald Stufft and individual contributors"
- __email__ = "[email protected]"
-
- __license__ = "BSD-2-Clause or Apache-2.0"
- __copyright__ = "2014-2019 %s" % __author__
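
For context, a dunder-metadata module like this is normally consumed by importing its constants. A minimal sketch, assuming the vendored module above is importable; the import path is an assumption for illustration, not a documented API:

# Minimal sketch: reading the metadata constants defined above.
# The vendored import path below is illustrative only.
from pkg_resources._vendor.packaging.__about__ import __title__, __version__

print("%s/%s" % (__title__, __version__))  # packaging/21.3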
spaces/AutoGeneralAI/chatgpt-clone/app.py DELETED
@@ -1,63 +0,0 @@
- import os
- import openai
- import gradio as gr
-
- # if you have your OpenAI API key as an environment variable, enable the line below
- # openai.api_key = os.getenv("OPENAI_API_KEY")
-
- # if you have your OpenAI API key as a string, enable the line below
- openai.api_key = "xxxxxx"
-
- start_sequence = "\nAI:"
- restart_sequence = "\nHuman: "
-
- prompt = "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman: "
-
-
- def openai_create(prompt):
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt=prompt,
-         temperature=0.9,
-         max_tokens=150,
-         top_p=1,
-         frequency_penalty=0,
-         presence_penalty=0.6,
-         stop=[" Human:", " AI:"],
-     )
-     return response.choices[0].text
-
-
- def chatgpt_clone(key, input, history):
-     openai.api_key = key
-     history = history or []
-     s = list(sum(history, ()))
-     s.append(input)
-     inp = ' '.join(s)
-     output = openai_create(inp)
-     history.append((input, output))
-     return history, history
-
-
- block = gr.Blocks()
-
- with block:
-     gr.Markdown("""<h1><center>Build Yo'own ChatGPT with OpenAI API & Gradio</center></h1>
-     """)
-     keyTxt = gr.Textbox(
-         show_label=True,
-         placeholder="Your API-key...",
-         type="password",
-         visible=True,
-         label="API-Key",
-     )
-     chatbot = gr.Chatbot()
-     message = gr.Textbox(placeholder=prompt)
-     state = gr.State()
-     submit = gr.Button("SEND")
-     submit.click(chatgpt_clone, inputs=[keyTxt, message, state], outputs=[chatbot, state])
-
- block.launch(debug=True)
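
For clarity on the code above: chatgpt_clone flattens the accumulated (user, bot) history into a single prompt string before each completion call. A standalone sketch of that flattening step, with illustrative values:

# Standalone sketch of the history flattening used in chatgpt_clone above.
history = [("Hello, who are you?", "I am an AI created by OpenAI.")]
new_input = "What can you do?"

s = list(sum(history, ()))  # flatten the (user, bot) tuples into one list
s.append(new_input)
inp = ' '.join(s)           # the single prompt string passed to openai_create
print(inp)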
spaces/Bart92/RVC_HF/MDXNet.py DELETED
@@ -1,272 +0,0 @@
- import soundfile as sf
- import torch, pdb, os, warnings, librosa
- import numpy as np
- import onnxruntime as ort
- from tqdm import tqdm
-
- dim_c = 4
-
-
- class Conv_TDF_net_trim:
-     def __init__(
-         self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
-     ):
-         super(Conv_TDF_net_trim, self).__init__()
-
-         self.dim_f = dim_f
-         self.dim_t = 2**dim_t
-         self.n_fft = n_fft
-         self.hop = hop
-         self.n_bins = self.n_fft // 2 + 1
-         self.chunk_size = hop * (self.dim_t - 1)
-         self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
-             device
-         )
-         self.target_name = target_name
-         self.blender = "blender" in model_name
-
-         out_c = dim_c * 4 if target_name == "*" else dim_c
-         self.freq_pad = torch.zeros(
-             [1, out_c, self.n_bins - self.dim_f, self.dim_t]
-         ).to(device)
-
-         self.n = L // 2
-
-     def stft(self, x):
-         x = x.reshape([-1, self.chunk_size])
-         x = torch.stft(
-             x,
-             n_fft=self.n_fft,
-             hop_length=self.hop,
-             window=self.window,
-             center=True,
-             return_complex=True,
-         )
-         x = torch.view_as_real(x)
-         x = x.permute([0, 3, 1, 2])
-         x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
-             [-1, dim_c, self.n_bins, self.dim_t]
-         )
-         return x[:, :, : self.dim_f]
-
-     def istft(self, x, freq_pad=None):
-         freq_pad = (
-             self.freq_pad.repeat([x.shape[0], 1, 1, 1])
-             if freq_pad is None
-             else freq_pad
-         )
-         x = torch.cat([x, freq_pad], -2)
-         c = 4 * 2 if self.target_name == "*" else 2
-         x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
-             [-1, 2, self.n_bins, self.dim_t]
-         )
-         x = x.permute([0, 2, 3, 1])
-         x = x.contiguous()
-         x = torch.view_as_complex(x)
-         x = torch.istft(
-             x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
-         )
-         return x.reshape([-1, c, self.chunk_size])
-
-
- def get_models(device, dim_f, dim_t, n_fft):
-     return Conv_TDF_net_trim(
-         device=device,
-         model_name="Conv-TDF",
-         target_name="vocals",
-         L=11,
-         dim_f=dim_f,
-         dim_t=dim_t,
-         n_fft=n_fft,
-     )
-
-
- warnings.filterwarnings("ignore")
- cpu = torch.device("cpu")
- if torch.cuda.is_available():
-     device = torch.device("cuda:0")
- elif torch.backends.mps.is_available():
-     device = torch.device("mps")
- else:
-     device = torch.device("cpu")
-
-
- class Predictor:
-     def __init__(self, args):
-         self.args = args
-         self.model_ = get_models(
-             device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
-         )
-         self.model = ort.InferenceSession(
-             os.path.join(args.onnx, self.model_.target_name + ".onnx"),
-             providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
-         )
-         print("onnx load done")
-
-     def demix(self, mix):
-         samples = mix.shape[-1]
-         margin = self.args.margin
-         chunk_size = self.args.chunks * 44100
-         assert margin != 0, "margin cannot be zero!"
-         if margin > chunk_size:
-             margin = chunk_size
-
-         segmented_mix = {}
-
-         if self.args.chunks == 0 or samples < chunk_size:
-             chunk_size = samples
-
-         counter = -1
-         for skip in range(0, samples, chunk_size):
-             counter += 1
-
-             s_margin = 0 if counter == 0 else margin
-             end = min(skip + chunk_size + margin, samples)
-
-             start = skip - s_margin
-
-             segmented_mix[skip] = mix[:, start:end].copy()
-             if end == samples:
-                 break
-
-         sources = self.demix_base(segmented_mix, margin_size=margin)
-         """
-         mix:(2,big_sample)
-         segmented_mix:offset->(2,small_sample)
-         sources:(1,2,big_sample)
-         """
-         return sources
-
-     def demix_base(self, mixes, margin_size):
-         chunked_sources = []
-         progress_bar = tqdm(total=len(mixes))
-         progress_bar.set_description("Processing")
-         for mix in mixes:
-             cmix = mixes[mix]
-             sources = []
-             n_sample = cmix.shape[1]
-             model = self.model_
-             trim = model.n_fft // 2
-             gen_size = model.chunk_size - 2 * trim
-             pad = gen_size - n_sample % gen_size
-             mix_p = np.concatenate(
-                 (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
-             )
-             mix_waves = []
-             i = 0
-             while i < n_sample + pad:
-                 waves = np.array(mix_p[:, i : i + model.chunk_size])
-                 mix_waves.append(waves)
-                 i += gen_size
-             mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
-             with torch.no_grad():
-                 _ort = self.model
-                 spek = model.stft(mix_waves)
-                 if self.args.denoise:
-                     spec_pred = (
-                         -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
-                         + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
-                     )
-                     tar_waves = model.istft(torch.tensor(spec_pred))
-                 else:
-                     tar_waves = model.istft(
-                         torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
-                     )
-                 tar_signal = (
-                     tar_waves[:, :, trim:-trim]
-                     .transpose(0, 1)
-                     .reshape(2, -1)
-                     .numpy()[:, :-pad]
-                 )
-
-                 start = 0 if mix == 0 else margin_size
-                 end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
-                 if margin_size == 0:
-                     end = None
-                 sources.append(tar_signal[:, start:end])
-
-                 progress_bar.update(1)
-
-             chunked_sources.append(sources)
-         _sources = np.concatenate(chunked_sources, axis=-1)
-         # del self.model
-         progress_bar.close()
-         return _sources
-
-     def prediction(self, m, vocal_root, others_root, format):
-         os.makedirs(vocal_root, exist_ok=True)
-         os.makedirs(others_root, exist_ok=True)
-         basename = os.path.basename(m)
-         mix, rate = librosa.load(m, mono=False, sr=44100)
-         if mix.ndim == 1:
-             mix = np.asfortranarray([mix, mix])
-         mix = mix.T
-         sources = self.demix(mix.T)
-         opt = sources[0].T
-         if format in ["wav", "flac"]:
-             sf.write(
-                 "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
-             )
-             sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
-         else:
-             path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
-             path_other = "%s/%s_others.wav" % (others_root, basename)
-             sf.write(path_vocal, mix - opt, rate)
-             sf.write(path_other, opt, rate)
-             if os.path.exists(path_vocal):
-                 os.system(
-                     "ffmpeg -i %s -vn %s -q:a 2 -y"
-                     % (path_vocal, path_vocal[:-4] + ".%s" % format)
-                 )
-             if os.path.exists(path_other):
-                 os.system(
-                     "ffmpeg -i %s -vn %s -q:a 2 -y"
-                     % (path_other, path_other[:-4] + ".%s" % format)
-                 )
-
-
- class MDXNetDereverb:
-     def __init__(self, chunks):
-         self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy"
-         self.shifts = 10  # 'Predict with randomised equivariant stabilisation'
-         self.mixing = "min_mag"  # ['default','min_mag','max_mag']
-         self.chunks = chunks
-         self.margin = 44100
-         self.dim_t = 9
-         self.dim_f = 3072
-         self.n_fft = 6144
-         self.denoise = True
-         self.pred = Predictor(self)
-
-     def _path_audio_(self, input, vocal_root, others_root, format):
-         self.pred.prediction(input, vocal_root, others_root, format)
-
-
- if __name__ == "__main__":
-     dereverb = MDXNetDereverb(15)
-     from time import time as ttime
-
-     t0 = ttime()
-     dereverb._path_audio_(
-         "雪雪伴奏对消HP5.wav",
-         "vocal",
-         "others",
-         "wav",
-     )
-     t1 = ttime()
-     print(t1 - t0)
-
-
- """
- runtime\python.exe MDXNet.py
-
- 6G:
- 15/9: 0.8G -> 6.8G
- 14:   0.8G -> 6.5G
- 25:   blows up (out of memory)
-
- half15:  0.7G -> 6.6G, 22.69s
- fp32-15: 0.7G -> 6.6G, 20.85s
- """
spaces/Benson/text-generation/Examples/Autocad 2016 Descarga Gratuita 30 Das.md DELETED
@@ -1,84 +0,0 @@
- <br />
- <h1>How to Download AutoCAD 2016 for Free for 30 Days</h1>
- <p>AutoCAD is one of the most popular and powerful CAD programs in the world. It lets you create, edit, and share 2D and 3D designs for purposes such as architecture, engineering, construction, manufacturing, and entertainment. With AutoCAD, you can turn your ideas into reality with precision and efficiency.</p>
- <p>If you are interested in trying AutoCAD, you can download a free trial version of AutoCAD 2016 from the Autodesk website. This gives you access to all of the software's features and functions for 30 days, at no cost and with no obligation. You can use the free trial to explore the capabilities of AutoCAD 2016, test its compatibility with your system, and evaluate its performance on your projects.</p>
- <h2>autocad 2016 free download 30 days</h2><br /><p><b><b>Download</b> &#10042; <a href="https://bltlly.com/2v6Mtl">https://bltlly.com/2v6Mtl</a></b></p><br /><br />
- <p>In this article, we will show you how to download AutoCAD 2016 for free for 30 days, what the system requirements are to run it, and how to activate it after installation. We will also highlight some of the features of AutoCAD 2016 that make it a great choice for your CAD needs.</p>
- <h2>Features of AutoCAD 2016</h2>
- <p>AutoCAD 2016 is the latest version of the software as of this writing. It was released in March 2015 and introduced many new features and improvements that enhance the user experience, productivity, and output quality. Some of these features are:</p>
- <h3>Improved user interface and graphics</h3>
- <p>AutoCAD 2016 has a sleeker, more customizable user interface that makes it easier to access and manage your tools, settings, files, and layouts. You can also use the new ribbon galleries to preview your changes before applying them. The graphics engine has also been updated to provide smoother display, faster performance, and better rendering quality. You can now view your drawings with true colors, smooth lines, and high-resolution textures.</p>
- <h3>Smart dimensioning and object snapping</h3>
-
- <h3>Improved PDF creation and reality computing</h3>
- <p>AutoCAD 2016 lets you create high-quality PDF files from your drawings with more options and control. You can now include hyperlinks, bookmarks, searchable text, layers, properties, attachments, fonts, images, and more in your PDF files. You can also use the new reality computing feature to capture and work with data from real-world objects using point clouds, laser scans, photos, or drones.</p>
- <h3>Other new features and tools</h3>
- <p>AutoCAD 2016 also has other new features and tools that make it more versatile and efficient. For example, you can use the new revision cloud tool to create and modify revision clouds with more flexibility and control. You can also use the new system variable monitor to track and restore changes to system variables. In addition, you can use the new BIM 360 add-in to connect your AutoCAD 2016 drawings to the cloud and collaborate with other users.</p>
- <h2>System requirements for AutoCAD 2016</h2>
- <p>Before downloading AutoCAD 2016 for free for 30 days, you need to make sure your system meets the minimum requirements to run the software. These are:</p>
- <h3>Operating system</h3>
- <p>You need one of the following operating systems installed on your computer:</p>
- <ul>
- <li>Microsoft Windows 10 (64-bit only)</li>
- <li>Microsoft Windows 8.1 with update KB2919355 (32-bit and 64-bit)</li>
- <li>Microsoft Windows 7 SP1 (32-bit and 64-bit)</li>
- </ul>
- <h3>CPU type</h3>
- <p>You need a processor with at least the following specifications:</p>
- <p></p>
- <ul>
- <li>For 32-bit systems: Intel Pentium 4 or AMD Athlon Dual Core, 3 GHz or higher, with SSE2 technology</li>
- <li>For 64-bit systems: Intel Xeon E3 or Core i5 or equivalent, 2.5 GHz or higher</li>
- </ul>
- <h3>Memory</h3>
- <p>You need at least 2 GB of RAM (8 GB recommended) for both 32-bit and 64-bit systems.</p>
- <h3>Display resolution and graphics card</h3>
-
- <h3>Disk space and other requirements</h3>
- <p>You need at least 6 GB of free disk space for installation, plus additional space for working files. You also need a mouse, a keyboard, an Internet connection, and a web browser.</p>
- <h2>How to download the AutoCAD 2016 free trial</h2>
- <p>If your system meets the requirements, you can follow these steps to download AutoCAD 2016 for free for 30 days:</p>
- <h3>Visit the Autodesk free trials website</h3>
- <p>Go to <a href="https://www.autodesk.com/free-trials">https://www.autodesk.com/free-trials</a> and browse the list of products available for free trial. You can also use the search box to find AutoCAD 2016.</p>
- <h3>Select AutoCAD 2016 from the product list</h3>
- <p>Click AutoCAD 2016 in the list and you will be taken to a page with more information about the software and the trial offer. You will see a button that says "Download free trial". Click it to continue.</p>
- <h3>Choose your operating system and language</h3>
- <p>You will be asked to select your operating system (Windows or Mac) and your preferred language for the software. Make sure to choose the options that match your system and your needs.</p>
- <h3>Sign in or create an Autodesk account</h3>
- <p>You will be asked to sign in or create an Autodesk account to access the free trial. If you already have an account, enter your email address and password and click "Sign in". If you don't have an account, click "Create account" and fill in the required fields. You will also need to accept the terms of use and the privacy statement.</p>
- <h3>Download and install the software</h3>
-
- <h2>How to activate the AutoCAD 2016 free trial</h2>
- <p>After installing the software, you need to activate it before you can use it. Here are the steps to activate the AutoCAD 2016 free trial:</p>
- <h3>Launch the software and click Activate</h3>
- <p>Open AutoCAD 2016 from your desktop or Start menu. You will see a screen that says "Let's get started". Click "Activate" to begin the activation process.</p>
- <h3>Enter your serial number and product key</h3>
- <p>You will be asked to enter your serial number and product key for AutoCAD 2016. You can find these numbers in the email you received from Autodesk after downloading the software. Enter the numbers in the corresponding fields and click "Next".</p>
- <h3>Choose your license type and click Next</h3>
- <p>You will be asked to choose your license type for the free trial. You can choose between a "Stand-alone license" and a "Network license". A stand-alone license lets you use the software on a single computer, while a network license lets you use the software on multiple computers connected to a server. Choose the option that suits your needs and click "Next".</p>
- <h3>Enjoy your 30-day free trial of AutoCAD 2016</h3>
- <p>You have successfully activated your AutoCAD 2016 free trial. You can now use the software for 30 days with full functionality and support. You can also access online resources, tutorials, forums, and blogs to help you learn and master AutoCAD 2016.</p>
- <h2>Conclusion</h2>
-
- <h2>Frequently asked questions</h2>
- <p>Here are some common questions and answers about the AutoCAD 2016 free trial:</p>
- <h3>Q: What happens after the 30-day free trial expires?</h3>
- <p>A: After the 30-day free trial expires, you will no longer be able to use AutoCAD 2016 unless you purchase a subscription or a perpetual license from Autodesk. You can also extend your free trial by another 30 days by contacting Autodesk customer service.</p>
- <h3>Q: Can I save and open files created with the AutoCAD 2016 free trial?</h3>
- <p>A: Yes, you can save and open files created with the AutoCAD 2016 free trial as long as you have access to the software. However, if you open your files with an older version of AutoCAD, some features and data may not be compatible or available.</p>
- <h3>Q: Can I use the AutoCAD 2016 free trial on more than one computer?</h3>
- <p>A: It depends on the license type you chose for the free trial. If you chose a stand-alone license, you can only use the AutoCAD 2016 free trial on one computer. If you chose a network license, you can use it on multiple computers connected to a server.</p>
- <h3>Q: Can I uninstall the AutoCAD 2016 free trial if I don't like it?</h3>
- <p>A: Yes, you can uninstall the AutoCAD 2016 free trial at any time if you don't like it or no longer need it. To uninstall it, go to Control Panel > Programs > Programs and Features > Uninstall or change a program > select AutoCAD 2016 > Uninstall/Change > follow the instructions.</p>
- <h3>Q: Where can I get more help and support for the AutoCAD 2016 free trial?</h3>
- <p>A: You can get more help and support for the AutoCAD 2016 free trial from several sources, such as:</p>
- <ul>
- <li>The official Autodesk website: <a href="https://www.autodesk.com/products/autocad/overview">https://www.autodesk.com/products/autocad/overview</a></li>
- <li>The online help documentation: <a href="https://help.autodesk.com/view/ACD/2016/ENU/">https://help.autodesk.com/view/ACD/2016/ENU/</a></li>
- <li>The online forums: <a href="https://forums.autodesk.com/t5/autocad-forum/bd-p/8">https://forums.autodesk.com/t5/autocad-forum/bd-p/8</a></li>
-
- <li>The online tutorials: <a href="https://www.autodesk.com/products/autocad/learn-and-explore">https://www.autodesk.com/products/autocad/learn-and-explore</a></li>
- <li>Customer service: <a href="https://www.autodesk.com/company/contact-us">https://www.autodesk.com/company/contact-us</a></li>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Choque De Zombies 2 Mod Apk.md DELETED
@@ -1,81 +0,0 @@
-
- <br>
- <table>
- <tr>
- <td>
- <h1>Clash of Zombies 2 Mod APK: A Superhero Strategy Game</h1>
- <h2>Introduction</h2>
- <p>If you are a fan of strategy games and superheroes, you might want to check out <strong>Clash of Zombies 2 Mod APK</strong>. This is a popular game that combines base building, zombie fighting, and superhero summoning in one exciting package. You play as the leader of a team of superheroes and their assistants who must defend their base from hordes of horrific zombies. You can also join alliances with other players and fight other teams in online battles.</p>
- <h2>clash of zombies 2 mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://bltlly.com/2v6M0H">https://bltlly.com/2v6M0H</a></b></p><br /><br />
- <p>Clash of Zombies 2 Mod APK is a modified version of the original game that gives you access to unlimited resources, gems, heroes, and more. You can enjoy all of the game's features without spending money or time. You can also unlock and upgrade all the superheroes and their assistants, such as Iron Man, Spider-Man, Hulk, Captain America, Thor, Black Widow, and more, and use their special skills and abilities to defeat the zombies and other enemies.</p>
- <h2>How to download and install Clash of Zombies 2 Mod APK</h2>
- <p>Downloading and installing Clash of Zombies 2 Mod APK is quick and simple. Just follow these steps:</p>
- <ol>
- <li>Find a reliable source that offers the modded APK file. You can search on Google or use the link below. Make sure to download the latest version of the modded APK file.</li>
- <li>Before installing the modded APK file, you need to enable unknown sources on your device, which allows you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and turn it on.</li>
- <li>After enabling unknown sources, locate the modded APK file on your device and tap it. Follow the on-screen instructions to install the app.</li>
- <li>Once the installation is complete, you can launch the app and enjoy playing Clash of Zombies 2 Mod APK.</li>
- </ol>
-
- <p>Playing Clash of Zombies 2 Mod APK is fun and addictive. Here are some tips on how to play the game:</p>
- <p></p>
- <h3>How to build your base and defend it from zombies</h3>
- <p>Your base is your main headquarters, where you can build and upgrade various buildings such as barracks, laboratories, factories, mines, and warehouses. You can also place defensive structures such as walls, turrets, and traps. You need to build a strong base that can withstand attacks from zombies and other players.</p>
- <p>To build your base, you need resources such as gold, elixir, dark elixir, and gems. You can get these resources by mining them at your base or by raiding other players' bases. You can also use gems to speed up construction or buy more resources.</p>
- <p>To defend your base from zombies, you need to place your superheroes and their assistants in strategic locations and use their skills and abilities to repel the zombies. You can also upgrade your heroes and buildings to make them stronger and more effective.</p>
- <h3>How to summon superheroes and their assistants</h3>
- <p>Superheroes and their assistants are your main units for fighting zombies and other players. You can summon them using hero cards, which you can get from chests or buy with gems, and upgrade them using hero shards, which you can get from battles or buy with gems.</p>
- <p>You can summon up to six heroes and six assistants at a time, choosing from different types such as melee, ranged, tank, support, and healer. Each hero and assistant has their own skills and abilities to use in battle, and you can customize their appearance by changing their outfits.</p>
- <h3>How to upgrade your heroes and buildings</h3>
-
- <p>You can upgrade your buildings using resources such as gold, elixir, dark elixir, and gems. You can also use gems to speed up the upgrade process or buy more resources.</p>
- <h3>How to join alliances and fight other players</h3>
- <p>Joining alliances and fighting other players is one of the most exciting features of Clash of Zombies 2 Mod APK. You can join an alliance or create your own with other players from around the world. You can chat with them, share resources, help with base defense or attacks, and more.</p>
- <p>You can also fight other players in online battles: attack their bases or defend your own from their attacks. You can also take part in alliance wars, where you team up with your alliance members and fight other alliances for glory and rewards.</p>
- <h2>Tips and tricks for Clash of Zombies 2 Mod APK</h2>
- <p>To get the most out of Clash of Zombies 2 Mod APK, here are some tips and tricks you should know:</p>
- <h3>How to get more resources and gems</h3>
- <p>Resources and gems are very important in Clash of Zombies 2 Mod APK, since they let you build, upgrade, summon, and do much more in the game. There are several ways to get more of them, such as:</p>
- <ul>
- <li>Mining them at your base or raiding other players' bases. You can get gold, elixir, dark elixir, and gems from these sources.</li>
- <li>Completing quests and achievements. You can earn resources and gems as rewards for completing various tasks and challenges in the game.</li>
- <li>Opening chests and crates. You can get resources, gems, hero cards, hero shards, skill books, and more from these sources. You can earn chests and crates by winning battles, taking part in events, or buying them with gems.</li>
-
- </ul>
- <h3>How to use your heroes' skills effectively</h3>
- <p>Your heroes' skills are very powerful and useful in battle. They can deal massive damage, heal your units, stun your enemies, and more. However, you should use them wisely and strategically, since they have cooldowns and costs. Here are some tips on using your heroes' skills effectively:</p>
- <ul>
- <li>Know your heroes' skills and their effects. You can check the details of each skill by tapping its icon or going to the hero menu, and you can see the effects of skills on the battlefield by looking at the icons above your heroes' heads.</li>
- <li>Use your heroes' skills according to the situation. Consider the type of enemies, the terrain, the distance, and the timing. For example, you can use Iron Man's Unibeam to destroy a group of enemies from afar, Spider-Man's Web Shot to immobilize a single enemy up close, or Hulk's Smash to clear a path for your units.</li>
- <li>Combine your heroes' skills for maximum effect. You can create powerful combos by using skills together or in sequence. For example, use Captain America's Shield Throw to stun an enemy and then Thor's Hammer Strike for extra damage, or use Black Widow's Bite to lower an enemy's defense and then Hulk's Smash to finish them off.</li>
- </ul>
- <h3>How to win battles and raids</h3>
- <p>Battles and raids are the main modes of Clash of Zombies 2 Mod APK, where you fight zombies and other players. You win by destroying the enemy base or by having more stars than your opponent when the time limit runs out. Here are some tips on winning battles and raids:</p>
- <ul>
-
- <li>Plan your attack strategy carefully. Scout the enemy base before attacking and look for its weak points, defenses, traps, and resources. Also decide which direction to attack from, which units to deploy first, and which skills to use when.</li>
- <li>Adapt quickly to the changing situation. Be flexible and ready to change your attack strategy as the battlefield evolves, and watch out for counterattacks, reinforcements, and enemy skills.</li>
- </ul>
- <h3>How to avoid common mistakes</h3>
- <p>To avoid common mistakes that could cost you the game, here are some things you should not do:</p>
- <ul>
- <li>Don't rush your attacks or defenses. Take your time and think carefully before making any move, and wait for the right moment to use your skills or deploy your units.</li>
- <li>Don't waste your resources or gems. Spend them wisely and only on things you really need or want, and save some for emergencies or future upgrades.</li>
- <li>Don't neglect your base or heroes. Maintain and upgrade them regularly, keep them in top condition, and protect them from zombies and other players.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Clash of Zombies 2 Mod APK is a fun and addictive game that combines strategy, action, and superheroes in one package. You can build your own base, summon your favorite superheroes and their assistants, fight zombies and other players, and enjoy unlimited resources and gems. You can also join alliances, take part in events, and customize your heroes and base. If you are looking for a game that will keep you entertained and challenged for hours, give Clash of Zombies 2 Mod APK a try. You won't regret it!</p>
- <h2>Frequently asked questions</h2>
- <p>Here are some common questions and answers about Clash of Zombies 2 Mod APK:</p>
- <h3>Q1: Is Clash of Zombies 2 Mod APK safe to download and install?</h3>
- <p>A1: Yes, it is safe as long as you download it from a reliable source. You can use the link below to download the latest version of the modded APK file. Make sure to enable unknown sources on your device before installing the app.</p>
- <h3>Q2: Do I need to root my device to use Clash of Zombies 2 Mod APK?</h3>
- <p>A2: No, you don't need to root your device to use this modded version. You can install and play it on any Android device without any problems.</p>
- <h3>Q3: Can I play Clash of Zombies 2 Mod APK online with other players?</h3>
- <p>A3: Yes, you can play online with other players as long as you have a stable Internet connection. You can join alliances, fight other teams, and chat with other players in the game.</p>
- <h3>Q4: Can I update Clash of Zombies 2 Mod APK to the latest version?</h3>
- <p>A4: Yes, you can update it to the latest version by downloading and installing it again from the same source. You don't need to uninstall the previous version or lose your progress in the game.</p>
- <h3>Q5: What if I run into any problems while playing Clash of Zombies 2 Mod APK?</h3>
- <p>A5: You can contact the developer or the modder for support or to report any bugs or issues. You can also check the game's official website or social media pages for more information and updates.</p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Diapositivas De Fotos Con Msica Apk.md DELETED
@@ -1,87 +0,0 @@
-
- <h1>Download Photo Slideshow with Music APK: How to Create Amazing Slideshows with Your Photos and Music</h1>
- <p>Do you want to turn your photos into stunning videos with music? Do you want to create beautiful slideshows with your photos and music? Do you want to share your memories with your friends and family in a creative way? If you answered yes to any of these questions, you should download the photo slideshow with music apk.</p>
- <h2>download photo slideshow with music apk</h2><br /><p><b><b>DOWNLOAD</b> &#10038; <a href="https://bltlly.com/2v6KBo">https://bltlly.com/2v6KBo</a></b></p><br /><br />
- <p>Photo slideshow with music apk is an app that lets you create amazing slideshows with your photos and music. You can use this app to combine several photos into a single video; add music, effects, filters, stickers, text, and more; and create grids, movies, and music videos with your photos. You can save your creations to the gallery or share them on social media platforms such as Facebook, Instagram, WhatsApp, etc.</p>
- <p>In this article, we will show you the features, benefits, and steps for downloading and using the photo slideshow with music apk. By the end of this article, you will be able to create amazing slideshows with your photos and music in minutes.</p>
- <h2>Features of Photo Slideshow with Music APK</h2>
- <p>Photo slideshow with music apk is a powerful app that offers a variety of features for creating slideshows. Here are some of the features you can enjoy:</p>
- <h3>Grid maker for Instagram</h3>
- <p>This feature lets you create stunning grids with your photos. You can choose from a variety of live grid themes that help you split images and make an attractive Instagram collage or photo slideshow. You can also adjust the size, shape, color, border, and background of the grids.</p>
- <h3>Movie maker app</h3>
-
- <h3>Slideshow maker app</h3>
- <p>This feature lets you create beautiful slideshows with your photos and music. You can choose from a variety of slideshow templates with different themes, frames, and music, and add text, stickers, filters, and effects to your slideshows. You can create slideshows for different moods such as love, celebration, fun, etc.</p>
- <h3>Music video maker</h3>
- <p>This feature lets you create stunning videos with your photos and music. You can choose from a variety of music video templates with different genres, effects, transitions, and music. You can also trim, crop, rotate, and flip your photos, and create music videos in different styles such as pop, rock, hip hop, etc.</p>
- <p></p>
- <h3>Photo editor</h3>
- <p>This feature lets you edit your photos with filters, stickers, and effects. You can choose from a variety of photo filters that enhance the color, brightness, contrast, and saturation of your photos, and add stickers, text, frames, and backgrounds. You can edit your photos for different purposes such as selfies, beauty, art, etc.</p>
- <h2>How to Download Photo Slideshow with Music APK</h2>
- <p>Downloading the photo slideshow with music apk is quick and easy. Here are the steps to follow:</p>
- <h3>Step 1: Go to the official website or the Google Play Store</h3>
- <p>You can download the photo slideshow with music apk from the official website or the Google Play Store. The official website is https://photoslideshowwithmusic.com/ and the Google Play Store link is https://play.google.com/store/apps/apps/details?id=com.photoslideshowwithmusic. You can also scan the QR code below to download the app.</p>
- <p><img src="https://photoslideshowwithmusic.com/qr-code.png" alt="QR code for photo slideshow with music apk"></p>
- <h3>Step 2: Choose the version you want to download</h3>
-
- <h3>Step 3: Install the app on your device</h3>
- <p>Once you have downloaded the app, you need to install it on your device. You may need to allow installation of apps from unknown sources in your device settings. To do this, go to Settings > Security > Unknown Sources and turn it on. Then open the downloaded file and follow the instructions to install the app.</p>
- <h3>Step 4: Launch the app and start creating slideshows</h3>
- <p>After installing the app, you can launch it and start creating slideshows with your photos and music. You will see a simple, easy-to-use interface that guides you through the process. You can select photos from your gallery or camera; choose a theme, template, or frame for your slideshow; add music, text, and effects; and preview and save your slideshow to the gallery or share it on social media.</p>
- <h2>How to Use Photo Slideshow with Music APK</h2>
- <p>Using the photo slideshow with music apk is fun and easy. Here are the steps to follow:</p>
- <h3>Step 1: Select photos from your gallery or camera</h3>
- <p>You can select up to 100 photos from your gallery or camera for your slideshow, and sort them by date or name. To select photos from your gallery, tap the Gallery icon on the app's home screen. To select photos from your camera, tap the Camera icon on the app's home screen.</p>
- <h3>Step 2: Choose a theme, template, or frame for your slideshow</h3>
-
- <h3>Step 3: Add music, text, and effects to your slideshow</h3>
- <p>You can add music, text, and effects to make your slideshow more engaging and expressive. You can choose from a variety of music genres, songs, and sound effects, and trim, crop, and adjust the volume of the music; to add music, tap the Music icon at the bottom of the screen. You can also add text to convey your message or title, choosing from a variety of fonts, colors, and sizes and adjusting its position, alignment, and duration; to add text, tap the Text icon at the bottom of the screen. Finally, you can add effects to enhance the mood and style of your slideshow, choosing from a variety of filters, stickers, and animations; to add effects, tap the Effect icon at the bottom of the screen.</p>
- <h3>Step 4: Preview and save your slideshow to the gallery or share it on social media</h3>
- <p>You can preview your slideshow before saving or sharing it, and edit or delete any photo, music, text, or effect in it. To preview your slideshow, tap the Play icon in the top right corner of the screen. To edit or delete any element, tap it and use the options at the bottom of the screen. To save your slideshow to the gallery, tap the Save icon in the top right corner; you can choose between different formats and resolutions. To share your slideshow on social media, tap the Share icon in the top right corner; you can choose between platforms such as Facebook, Instagram, WhatsApp, etc.</p>
- <h2>Benefits of Photo Slideshow with Music APK</h2>
- <p>Photo slideshow with music apk is a great app that offers many benefits for creating slideshows. Here are some of the benefits you can enjoy:</p>
-
- <p>Photo slideshow with music apk is designed to be easy and fun for anyone to use. You don't need any technical skills or experience to create amazing slideshows with your photos and music. You just follow a few simple steps with a few taps and swipes, and you can unleash your creativity and customize your slideshows however you like.</p>
- <h3>It has a variety of themes, templates, and frames to choose from</h3>
- <p>Photo slideshow with music apk has a large collection of themes, templates, and frames for creating slideshows. You can choose from different styles, moods, occasions, and genres, and mix and match different elements to create unique, personalized slideshows.</p>
- <h3>It has a powerful photo editor and music video maker</h3>
- <p>Photo slideshow with music apk has a powerful photo editor and music video maker that let you edit your photos and create music videos with ease. You can use various filters, stickers, effects, transitions, and animations to enhance your photos and videos, and you can trim, crop, rotate, flip, adjust, and add text to them.</p>
- <h3>It supports multiple formats and resolutions</h3>
- <p>Photo slideshow with music apk supports multiple formats and resolutions for saving and sharing your slideshows. You can choose between MP4, AVI, MOV, WMV, FLV, and GIF formats and between HD, Full HD, and 4K resolutions, so you can save and share your slideshows in compatible, high-quality formats.</p>
- <h3>It is free and safe to download</h3>
- <p>Photo slideshow with music apk is free and safe to download from the official website or the Google Play Store. You don't need to pay any fees or subscriptions to use the app, and you don't need to worry about viruses or malware harming your device. The app is tested and verified by trusted sources and users.</p>
- <h2>Conclusion</h2>
-
- <p>If you want to turn your photos into stunning videos with music, download the photo slideshow with music apk today and try it out. You will be amazed at what you can create with this app.</p>
- <h2>Frequently asked questions</h2>
- <h4>Q: What is the photo slideshow with music apk?</h4>
- <p>A: Photo slideshow with music apk is an app that lets you create amazing slideshows with your photos and music. You can use it to combine several photos into a single video and add music, effects, filters, stickers, text, and more.</p>
- <h4>Q: How do I download the photo slideshow with music apk?</h4>
- <p>A: You can download the photo slideshow with music apk from the official website or the Google Play Store. You can also scan the QR code on the website to download the app.</p>
- <h4>Q: How do I use the photo slideshow with music apk?</h4>
- <p>A: You can use the photo slideshow with music apk by following these steps:</p>
- <ul>
- <li>Select photos from your gallery or camera</li>
- <li>Choose a theme, template, or frame for your slideshow</li>
- <li>Add music, text, and effects to your slideshow</li>
- <li>Preview and save your slideshow to the gallery or share it on social media</li>
- </ul>
- <h4>Q: What are the benefits of the photo slideshow with music apk?</h4>
- <p>A: Photo slideshow with music apk offers many benefits, such as:</p>
- <ul>
- <li>It is easy and fun to use</li>
- <li>It has a variety of themes, templates, and frames to choose from</li>
- <li>It has a powerful photo editor and music video maker</li>
- <li>It supports multiple formats and resolutions</li>
- <li>It is free and safe to download</li>
- </ul>
- <h4>Q: What are the limitations of the photo slideshow with music apk?</h4>
- <p>A: Photo slideshow with music apk has some limitations, such as:</p>
- <ul>
- <li>The free version has some restrictions on the number of photos, themes, templates, and frames you can use</li>
- <li>The app may not work on some devices or operating systems</li>
-
- <li>The app may have some bugs or errors that need to be fixed</li>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/diffusionmodules/model.py DELETED
@@ -1,776 +0,0 @@
- # pytorch_diffusion + derived encoder decoder
- import math
- import torch
- import torch.nn as nn
- import numpy as np
-
-
- def get_timestep_embedding(timesteps, embedding_dim):
-     """
-     This matches the implementation in Denoising Diffusion Probabilistic Models:
-     From Fairseq.
-     Build sinusoidal embeddings.
-     This matches the implementation in tensor2tensor, but differs slightly
-     from the description in Section 3.5 of "Attention Is All You Need".
-     """
-     assert len(timesteps.shape) == 1
-
-     half_dim = embedding_dim // 2
-     emb = math.log(10000) / (half_dim - 1)
-     emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
-     emb = emb.to(device=timesteps.device)
-     emb = timesteps.float()[:, None] * emb[None, :]
-     emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
-     if embedding_dim % 2 == 1:  # zero pad
-         emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
-     return emb
-
-
- def nonlinearity(x):
-     # swish
-     return x * torch.sigmoid(x)
-
-
- def Normalize(in_channels):
-     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
- class Upsample(nn.Module):
-     def __init__(self, in_channels, with_conv):
-         super().__init__()
-         self.with_conv = with_conv
-         if self.with_conv:
-             self.conv = torch.nn.Conv2d(in_channels,
-                                         in_channels,
-                                         kernel_size=3,
-                                         stride=1,
-                                         padding=1)
-
-     def forward(self, x):
-         x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
-         if self.with_conv:
-             x = self.conv(x)
-         return x
-
-
- class Downsample(nn.Module):
-     def __init__(self, in_channels, with_conv):
-         super().__init__()
-         self.with_conv = with_conv
-         if self.with_conv:
-             # no asymmetric padding in torch conv, must do it ourselves
-             self.conv = torch.nn.Conv2d(in_channels,
-                                         in_channels,
-                                         kernel_size=3,
-                                         stride=2,
-                                         padding=0)
-
-     def forward(self, x):
-         if self.with_conv:
-             pad = (0, 1, 0, 1)
-             x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
-             x = self.conv(x)
-         else:
-             x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
-         return x
-
-
- class ResnetBlock(nn.Module):
-     def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
-                  dropout, temb_channels=512):
-         super().__init__()
-         self.in_channels = in_channels
-         out_channels = in_channels if out_channels is None else out_channels
-         self.out_channels = out_channels
-         self.use_conv_shortcut = conv_shortcut
-
-         self.norm1 = Normalize(in_channels)
-         self.conv1 = torch.nn.Conv2d(in_channels,
-                                      out_channels,
-                                      kernel_size=3,
-                                      stride=1,
-                                      padding=1)
-         if temb_channels > 0:
-             self.temb_proj = torch.nn.Linear(temb_channels,
-                                              out_channels)
-         self.norm2 = Normalize(out_channels)
-         self.dropout = torch.nn.Dropout(dropout)
-         self.conv2 = torch.nn.Conv2d(out_channels,
-                                      out_channels,
-                                      kernel_size=3,
-                                      stride=1,
-                                      padding=1)
-         if self.in_channels != self.out_channels:
-             if self.use_conv_shortcut:
-                 self.conv_shortcut = torch.nn.Conv2d(in_channels,
-                                                      out_channels,
-                                                      kernel_size=3,
-                                                      stride=1,
-                                                      padding=1)
-             else:
-                 self.nin_shortcut = torch.nn.Conv2d(in_channels,
-                                                     out_channels,
-                                                     kernel_size=1,
-                                                     stride=1,
-                                                     padding=0)
-
-     def forward(self, x, temb):
-         h = x
-         h = self.norm1(h)
-         h = nonlinearity(h)
-         h = self.conv1(h)
-
-         if temb is not None:
-             h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
-
-         h = self.norm2(h)
-         h = nonlinearity(h)
-         h = self.dropout(h)
-         h = self.conv2(h)
-
-         if self.in_channels != self.out_channels:
-             if self.use_conv_shortcut:
-                 x = self.conv_shortcut(x)
-             else:
-                 x = self.nin_shortcut(x)
-
-         return x + h
-
-
- class AttnBlock(nn.Module):
-     def __init__(self, in_channels):
-         super().__init__()
-         self.in_channels = in_channels
-
-         self.norm = Normalize(in_channels)
-         self.q = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.k = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.v = torch.nn.Conv2d(in_channels,
-                                  in_channels,
-                                  kernel_size=1,
-                                  stride=1,
-                                  padding=0)
-         self.proj_out = torch.nn.Conv2d(in_channels,
-                                         in_channels,
-                                         kernel_size=1,
-                                         stride=1,
-                                         padding=0)
-
-     def forward(self, x):
-         h_ = x
-         h_ = self.norm(h_)
-         q = self.q(h_)
-         k = self.k(h_)
-         v = self.v(h_)
-
-         # compute attention
-         b, c, h, w = q.shape
-         q = q.reshape(b, c, h * w)
-         q = q.permute(0, 2, 1)      # b,hw,c
-         k = k.reshape(b, c, h * w)  # b,c,hw
-         w_ = torch.bmm(q, k)        # b,hw,hw    w[b,i,j] = sum_c q[b,i,c] k[b,c,j]
-         w_ = w_ * (int(c) ** (-0.5))
-         w_ = torch.nn.functional.softmax(w_, dim=2)
-
-         # attend to values
-         v = v.reshape(b, c, h * w)
-         w_ = w_.permute(0, 2, 1)    # b,hw,hw (first hw of k, second of q)
-         h_ = torch.bmm(v, w_)       # b,c,hw (hw of q)  h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
-         h_ = h_.reshape(b, c, h, w)
-
-         h_ = self.proj_out(h_)
-
-         return x + h_
-
-
- class Model(nn.Module):
-     def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                  resolution, use_timestep=True):
-         super().__init__()
-         self.ch = ch
-         self.temb_ch = self.ch * 4
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         self.resolution = resolution
-         self.in_channels = in_channels
-
-         self.use_timestep = use_timestep
-         if self.use_timestep:
-             # timestep embedding
-             self.temb = nn.Module()
-             self.temb.dense = nn.ModuleList([
-                 torch.nn.Linear(self.ch,
-                                 self.temb_ch),
-                 torch.nn.Linear(self.temb_ch,
-                                 self.temb_ch),
-             ])
-
-         # downsampling
-         self.conv_in = torch.nn.Conv2d(in_channels,
-                                        self.ch,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
-
-         curr_res = resolution
-         in_ch_mult = (1,) + tuple(ch_mult)
-         self.down = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_in = ch * in_ch_mult[i_level]
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks):
-                 block.append(ResnetBlock(in_channels=block_in,
-                                          out_channels=block_out,
-                                          temb_channels=self.temb_ch,
-                                          dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(AttnBlock(block_in))
-             down = nn.Module()
-             down.block = block
-             down.attn = attn
-             if i_level != self.num_resolutions - 1:
-                 down.downsample = Downsample(block_in, resamp_with_conv)
-                 curr_res = curr_res // 2
-             self.down.append(down)
-
-         # middle
-         self.mid = nn.Module()
-         self.mid.block_1 = ResnetBlock(in_channels=block_in,
-                                        out_channels=block_in,
-                                        temb_channels=self.temb_ch,
-                                        dropout=dropout)
-         self.mid.attn_1 = AttnBlock(block_in)
-         self.mid.block_2 = ResnetBlock(in_channels=block_in,
-                                        out_channels=block_in,
-                                        temb_channels=self.temb_ch,
-                                        dropout=dropout)
-
-         # upsampling
-         self.up = nn.ModuleList()
-         for i_level in reversed(range(self.num_resolutions)):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_out = ch * ch_mult[i_level]
-             skip_in = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks + 1):
-                 if i_block == self.num_res_blocks:
-                     skip_in = ch * in_ch_mult[i_level]
-                 block.append(ResnetBlock(in_channels=block_in + skip_in,
-                                          out_channels=block_out,
-                                          temb_channels=self.temb_ch,
-                                          dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(AttnBlock(block_in))
-             up = nn.Module()
-             up.block = block
-             up.attn = attn
-             if i_level != 0:
-                 up.upsample = Upsample(block_in, resamp_with_conv)
-                 curr_res = curr_res * 2
-             self.up.insert(0, up)  # prepend to get consistent order
-
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in,
-                                         out_ch,
-                                         kernel_size=3,
-                                         stride=1,
-                                         padding=1)
-
-     def forward(self, x, t=None):
-         #assert x.shape[2] == x.shape[3] == self.resolution
-
-         if self.use_timestep:
-             # timestep embedding
-             assert t is not None
-             temb = get_timestep_embedding(t, self.ch)
-             temb = self.temb.dense[0](temb)
-             temb = nonlinearity(temb)
-             temb = self.temb.dense[1](temb)
-         else:
-             temb = None
-
-         # downsampling
-         hs = [self.conv_in(x)]
-         for i_level in range(self.num_resolutions):
-             for i_block in range(self.num_res_blocks):
-                 h = self.down[i_level].block[i_block](hs[-1], temb)
-                 if len(self.down[i_level].attn) > 0:
-                     h = self.down[i_level].attn[i_block](h)
-                 hs.append(h)
-             if i_level != self.num_resolutions - 1:
-                 hs.append(self.down[i_level].downsample(hs[-1]))
-
-         # middle
-         h = hs[-1]
-         h = self.mid.block_1(h, temb)
-         h = self.mid.attn_1(h)
-         h = self.mid.block_2(h, temb)
-
-         # upsampling
-         for i_level in reversed(range(self.num_resolutions)):
-             for i_block in range(self.num_res_blocks + 1):
-                 h = self.up[i_level].block[i_block](
-                     torch.cat([h, hs.pop()], dim=1), temb)
-                 if len(self.up[i_level].attn) > 0:
-                     h = self.up[i_level].attn[i_block](h)
-             if i_level != 0:
-                 h = self.up[i_level].upsample(h)
-
-         # end
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         return h
-
-
- class Encoder(nn.Module):
-     def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                  resolution, z_channels, double_z=True, **ignore_kwargs):
-         super().__init__()
-         self.ch = ch
-         self.temb_ch = 0
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         self.resolution = resolution
-         self.in_channels = in_channels
-
-         # downsampling
-         self.conv_in = torch.nn.Conv2d(in_channels,
-                                        self.ch,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
-
-         curr_res = resolution
-         in_ch_mult = (1,) + tuple(ch_mult)
-         self.down = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_in = ch * in_ch_mult[i_level]
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks):
-                 block.append(ResnetBlock(in_channels=block_in,
-                                          out_channels=block_out,
-                                          temb_channels=self.temb_ch,
-                                          dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(AttnBlock(block_in))
-             down = nn.Module()
-             down.block = block
-             down.attn = attn
-             if i_level != self.num_resolutions - 1:
-                 down.downsample = Downsample(block_in, resamp_with_conv)
-                 curr_res = curr_res // 2
-             self.down.append(down)
-
-         # middle
-         self.mid = nn.Module()
-         self.mid.block_1 = ResnetBlock(in_channels=block_in,
-                                        out_channels=block_in,
-                                        temb_channels=self.temb_ch,
-                                        dropout=dropout)
-         self.mid.attn_1 = AttnBlock(block_in)
-         self.mid.block_2 = ResnetBlock(in_channels=block_in,
-                                        out_channels=block_in,
-                                        temb_channels=self.temb_ch,
-                                        dropout=dropout)
-
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in,
-                                         2 * z_channels if double_z else z_channels,
-                                         kernel_size=3,
-                                         stride=1,
-                                         padding=1)
-
-     def forward(self, x):
-         #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
-
-         # timestep embedding
-         temb = None
-
-         # downsampling
-         hs = [self.conv_in(x)]
-         for i_level in range(self.num_resolutions):
-             for i_block in range(self.num_res_blocks):
-                 h = self.down[i_level].block[i_block](hs[-1], temb)
-                 if len(self.down[i_level].attn) > 0:
-                     h = self.down[i_level].attn[i_block](h)
-                 hs.append(h)
-             if i_level != self.num_resolutions - 1:
-                 hs.append(self.down[i_level].downsample(hs[-1]))
422
-
423
- # middle
424
- h = hs[-1]
425
- h = self.mid.block_1(h, temb)
426
- h = self.mid.attn_1(h)
427
- h = self.mid.block_2(h, temb)
428
-
429
- # end
430
- h = self.norm_out(h)
431
- h = nonlinearity(h)
432
- h = self.conv_out(h)
433
- return h
434
-
435
-
436
- class Decoder(nn.Module):
437
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
438
- attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
439
- resolution, z_channels, give_pre_end=False, **ignorekwargs):
440
- super().__init__()
441
- self.ch = ch
442
- self.temb_ch = 0
443
- self.num_resolutions = len(ch_mult)
444
- self.num_res_blocks = num_res_blocks
445
- self.resolution = resolution
446
- self.in_channels = in_channels
447
- self.give_pre_end = give_pre_end
448
-
449
- # compute in_ch_mult, block_in and curr_res at lowest res
450
- in_ch_mult = (1,)+tuple(ch_mult)
451
- block_in = ch*ch_mult[self.num_resolutions-1]
452
- curr_res = resolution // 2**(self.num_resolutions-1)
453
- self.z_shape = (1,z_channels,curr_res,curr_res)
454
- print("Working with z of shape {} = {} dimensions.".format(
455
- self.z_shape, np.prod(self.z_shape)))
456
-
457
- # z to block_in
458
- self.conv_in = torch.nn.Conv2d(z_channels,
459
- block_in,
460
- kernel_size=3,
461
- stride=1,
462
- padding=1)
463
-
464
- # middle
465
- self.mid = nn.Module()
466
- self.mid.block_1 = ResnetBlock(in_channels=block_in,
467
- out_channels=block_in,
468
- temb_channels=self.temb_ch,
469
- dropout=dropout)
470
- self.mid.attn_1 = AttnBlock(block_in)
471
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
472
- out_channels=block_in,
473
- temb_channels=self.temb_ch,
474
- dropout=dropout)
475
-
476
- # upsampling
477
- self.up = nn.ModuleList()
478
- for i_level in reversed(range(self.num_resolutions)):
479
- block = nn.ModuleList()
480
- attn = nn.ModuleList()
481
- block_out = ch*ch_mult[i_level]
482
- for i_block in range(self.num_res_blocks+1):
483
- block.append(ResnetBlock(in_channels=block_in,
484
- out_channels=block_out,
485
- temb_channels=self.temb_ch,
486
- dropout=dropout))
487
- block_in = block_out
488
- if curr_res in attn_resolutions:
489
- attn.append(AttnBlock(block_in))
490
- up = nn.Module()
491
- up.block = block
492
- up.attn = attn
493
- if i_level != 0:
494
- up.upsample = Upsample(block_in, resamp_with_conv)
495
- curr_res = curr_res * 2
496
- self.up.insert(0, up) # prepend to get consistent order
497
-
498
- # end
499
- self.norm_out = Normalize(block_in)
500
- self.conv_out = torch.nn.Conv2d(block_in,
501
- out_ch,
502
- kernel_size=3,
503
- stride=1,
504
- padding=1)
505
-
506
- def forward(self, z):
507
- #assert z.shape[1:] == self.z_shape[1:]
508
- self.last_z_shape = z.shape
509
-
510
- # timestep embedding
511
- temb = None
512
-
513
- # z to block_in
514
- h = self.conv_in(z)
515
-
516
- # middle
517
- h = self.mid.block_1(h, temb)
518
- h = self.mid.attn_1(h)
519
- h = self.mid.block_2(h, temb)
520
-
521
- # upsampling
522
- for i_level in reversed(range(self.num_resolutions)):
523
- for i_block in range(self.num_res_blocks+1):
524
- h = self.up[i_level].block[i_block](h, temb)
525
- if len(self.up[i_level].attn) > 0:
526
- h = self.up[i_level].attn[i_block](h)
527
- if i_level != 0:
528
- h = self.up[i_level].upsample(h)
529
-
530
- # end
531
- if self.give_pre_end:
532
- return h
533
-
534
- h = self.norm_out(h)
535
- h = nonlinearity(h)
536
- h = self.conv_out(h)
537
- return h
538
-
539
-
540
- class VUNet(nn.Module):
541
- def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
542
- attn_resolutions, dropout=0.0, resamp_with_conv=True,
543
- in_channels, c_channels,
544
- resolution, z_channels, use_timestep=False, **ignore_kwargs):
545
- super().__init__()
546
- self.ch = ch
547
- self.temb_ch = self.ch*4
548
- self.num_resolutions = len(ch_mult)
549
- self.num_res_blocks = num_res_blocks
550
- self.resolution = resolution
551
-
552
- self.use_timestep = use_timestep
553
- if self.use_timestep:
554
- # timestep embedding
555
- self.temb = nn.Module()
556
- self.temb.dense = nn.ModuleList([
557
- torch.nn.Linear(self.ch,
558
- self.temb_ch),
559
- torch.nn.Linear(self.temb_ch,
560
- self.temb_ch),
561
- ])
562
-
563
- # downsampling
564
- self.conv_in = torch.nn.Conv2d(c_channels,
565
- self.ch,
566
- kernel_size=3,
567
- stride=1,
568
- padding=1)
569
-
570
- curr_res = resolution
571
- in_ch_mult = (1,)+tuple(ch_mult)
572
- self.down = nn.ModuleList()
573
- for i_level in range(self.num_resolutions):
574
- block = nn.ModuleList()
575
- attn = nn.ModuleList()
576
- block_in = ch*in_ch_mult[i_level]
577
- block_out = ch*ch_mult[i_level]
578
- for i_block in range(self.num_res_blocks):
579
- block.append(ResnetBlock(in_channels=block_in,
580
- out_channels=block_out,
581
- temb_channels=self.temb_ch,
582
- dropout=dropout))
583
- block_in = block_out
584
- if curr_res in attn_resolutions:
585
- attn.append(AttnBlock(block_in))
586
- down = nn.Module()
587
- down.block = block
588
- down.attn = attn
589
- if i_level != self.num_resolutions-1:
590
- down.downsample = Downsample(block_in, resamp_with_conv)
591
- curr_res = curr_res // 2
592
- self.down.append(down)
593
-
594
- self.z_in = torch.nn.Conv2d(z_channels,
595
- block_in,
596
- kernel_size=1,
597
- stride=1,
598
- padding=0)
599
- # middle
600
- self.mid = nn.Module()
601
- self.mid.block_1 = ResnetBlock(in_channels=2*block_in,
602
- out_channels=block_in,
603
- temb_channels=self.temb_ch,
604
- dropout=dropout)
605
- self.mid.attn_1 = AttnBlock(block_in)
606
- self.mid.block_2 = ResnetBlock(in_channels=block_in,
607
- out_channels=block_in,
608
- temb_channels=self.temb_ch,
609
- dropout=dropout)
610
-
611
- # upsampling
612
- self.up = nn.ModuleList()
613
- for i_level in reversed(range(self.num_resolutions)):
614
- block = nn.ModuleList()
615
- attn = nn.ModuleList()
616
- block_out = ch*ch_mult[i_level]
617
- skip_in = ch*ch_mult[i_level]
618
- for i_block in range(self.num_res_blocks+1):
619
- if i_block == self.num_res_blocks:
620
- skip_in = ch*in_ch_mult[i_level]
621
- block.append(ResnetBlock(in_channels=block_in+skip_in,
622
- out_channels=block_out,
623
- temb_channels=self.temb_ch,
624
- dropout=dropout))
625
- block_in = block_out
626
- if curr_res in attn_resolutions:
627
- attn.append(AttnBlock(block_in))
628
- up = nn.Module()
629
- up.block = block
630
- up.attn = attn
631
- if i_level != 0:
632
- up.upsample = Upsample(block_in, resamp_with_conv)
633
- curr_res = curr_res * 2
634
- self.up.insert(0, up) # prepend to get consistent order
635
-
636
- # end
637
- self.norm_out = Normalize(block_in)
638
- self.conv_out = torch.nn.Conv2d(block_in,
639
- out_ch,
640
- kernel_size=3,
641
- stride=1,
642
- padding=1)
643
-
644
-
645
- def forward(self, x, z):
646
- #assert x.shape[2] == x.shape[3] == self.resolution
647
-
648
- if self.use_timestep:
649
- # timestep embedding
650
- assert t is not None
651
- temb = get_timestep_embedding(t, self.ch)
652
- temb = self.temb.dense[0](temb)
653
- temb = nonlinearity(temb)
654
- temb = self.temb.dense[1](temb)
655
- else:
656
- temb = None
657
-
658
- # downsampling
659
- hs = [self.conv_in(x)]
660
- for i_level in range(self.num_resolutions):
661
- for i_block in range(self.num_res_blocks):
662
- h = self.down[i_level].block[i_block](hs[-1], temb)
663
- if len(self.down[i_level].attn) > 0:
664
- h = self.down[i_level].attn[i_block](h)
665
- hs.append(h)
666
- if i_level != self.num_resolutions-1:
667
- hs.append(self.down[i_level].downsample(hs[-1]))
668
-
669
- # middle
670
- h = hs[-1]
671
- z = self.z_in(z)
672
- h = torch.cat((h,z),dim=1)
673
- h = self.mid.block_1(h, temb)
674
- h = self.mid.attn_1(h)
675
- h = self.mid.block_2(h, temb)
676
-
677
- # upsampling
678
- for i_level in reversed(range(self.num_resolutions)):
679
- for i_block in range(self.num_res_blocks+1):
680
- h = self.up[i_level].block[i_block](
681
- torch.cat([h, hs.pop()], dim=1), temb)
682
- if len(self.up[i_level].attn) > 0:
683
- h = self.up[i_level].attn[i_block](h)
684
- if i_level != 0:
685
- h = self.up[i_level].upsample(h)
686
-
687
- # end
688
- h = self.norm_out(h)
689
- h = nonlinearity(h)
690
- h = self.conv_out(h)
691
- return h
692
-
693
-
694
- class SimpleDecoder(nn.Module):
695
- def __init__(self, in_channels, out_channels, *args, **kwargs):
696
- super().__init__()
697
- self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
698
- ResnetBlock(in_channels=in_channels,
699
- out_channels=2 * in_channels,
700
- temb_channels=0, dropout=0.0),
701
- ResnetBlock(in_channels=2 * in_channels,
702
- out_channels=4 * in_channels,
703
- temb_channels=0, dropout=0.0),
704
- ResnetBlock(in_channels=4 * in_channels,
705
- out_channels=2 * in_channels,
706
- temb_channels=0, dropout=0.0),
707
- nn.Conv2d(2*in_channels, in_channels, 1),
708
- Upsample(in_channels, with_conv=True)])
709
- # end
710
- self.norm_out = Normalize(in_channels)
711
- self.conv_out = torch.nn.Conv2d(in_channels,
712
- out_channels,
713
- kernel_size=3,
714
- stride=1,
715
- padding=1)
716
-
717
- def forward(self, x):
718
- for i, layer in enumerate(self.model):
719
- if i in [1,2,3]:
720
- x = layer(x, None)
721
- else:
722
- x = layer(x)
723
-
724
- h = self.norm_out(x)
725
- h = nonlinearity(h)
726
- x = self.conv_out(h)
727
- return x
728
-
729
-
730
- class UpsampleDecoder(nn.Module):
731
- def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
732
- ch_mult=(2,2), dropout=0.0):
733
- super().__init__()
734
- # upsampling
735
- self.temb_ch = 0
736
- self.num_resolutions = len(ch_mult)
737
- self.num_res_blocks = num_res_blocks
738
- block_in = in_channels
739
- curr_res = resolution // 2 ** (self.num_resolutions - 1)
740
- self.res_blocks = nn.ModuleList()
741
- self.upsample_blocks = nn.ModuleList()
742
- for i_level in range(self.num_resolutions):
743
- res_block = []
744
- block_out = ch * ch_mult[i_level]
745
- for i_block in range(self.num_res_blocks + 1):
746
- res_block.append(ResnetBlock(in_channels=block_in,
747
- out_channels=block_out,
748
- temb_channels=self.temb_ch,
749
- dropout=dropout))
750
- block_in = block_out
751
- self.res_blocks.append(nn.ModuleList(res_block))
752
- if i_level != self.num_resolutions - 1:
753
- self.upsample_blocks.append(Upsample(block_in, True))
754
- curr_res = curr_res * 2
755
-
756
- # end
757
- self.norm_out = Normalize(block_in)
758
- self.conv_out = torch.nn.Conv2d(block_in,
759
- out_channels,
760
- kernel_size=3,
761
- stride=1,
762
- padding=1)
763
-
764
- def forward(self, x):
765
- # upsampling
766
- h = x
767
- for k, i_level in enumerate(range(self.num_resolutions)):
768
- for i_block in range(self.num_res_blocks + 1):
769
- h = self.res_blocks[i_level][i_block](h, None)
770
- if i_level != self.num_resolutions - 1:
771
- h = self.upsample_blocks[k](h)
772
- h = self.norm_out(h)
773
- h = nonlinearity(h)
774
- h = self.conv_out(h)
775
- return h
776
-
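
A minimal round-trip sketch of how the deleted Encoder/Decoder pair above was typically driven. This is an illustration, not code from the repo: the import path `model` and every hyperparameter below are assumptions.

    # Sketch only: import path and hyperparameters are illustrative assumptions.
    import torch
    from model import Encoder, Decoder  # hypothetical module name for the deleted file

    enc = Encoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
                  attn_resolutions=[16], in_channels=3, resolution=64,
                  z_channels=8, double_z=False)
    dec = Decoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
                  attn_resolutions=[16], in_channels=3, resolution=64,
                  z_channels=8)

    x = torch.randn(1, 3, 64, 64)  # dummy image batch
    z = enc(x)                     # latent of shape (1, 8, 16, 16): 64 / 2**(3-1) = 16
    x_rec = dec(z)                 # reconstruction of shape (1, 3, 64, 64)

With three resolution levels the encoder downsamples twice, so `attn_resolutions=[16]` attaches attention only at the innermost 16x16 feature maps in both directions.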
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/status_codes.py DELETED
@@ -1,6 +0,0 @@
- SUCCESS = 0
- ERROR = 1
- UNKNOWN_ERROR = 2
- VIRTUALENV_NOT_FOUND = 3
- PREVIOUS_BUILD_DIR_ERROR = 4
- NO_MATCHES_FOUND = 23
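
These constants are pip's process exit codes, so a caller can branch on the return code of a pip subprocess. A short sketch; note that `pip._internal` is an internal package, so importing from it is fragile across pip versions:

    import subprocess
    from pip._internal.cli.status_codes import SUCCESS, ERROR

    proc = subprocess.run(["python", "-m", "pip", "install", "requests"])
    if proc.returncode == SUCCESS:   # 0: the command completed
        print("install succeeded")
    elif proc.returncode == ERROR:   # 1: generic failure
        print("pip reported an error")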
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py DELETED
@@ -1,48 +0,0 @@
1
- """
2
- requests._internal_utils
3
- ~~~~~~~~~~~~~~
4
-
5
- Provides utility functions that are consumed internally by Requests
6
- which depend on extremely few external helpers (such as compat)
7
- """
8
- import re
9
-
10
- from .compat import builtin_str
11
-
12
- _VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
13
- _VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
14
- _VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
15
- _VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
16
-
17
- HEADER_VALIDATORS = {
18
- bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE),
19
- str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR),
20
- }
21
-
22
-
23
- def to_native_string(string, encoding="ascii"):
24
- """Given a string object, regardless of type, returns a representation of
25
- that string in the native string type, encoding and decoding where
26
- necessary. This assumes ASCII unless told otherwise.
27
- """
28
- if isinstance(string, builtin_str):
29
- out = string
30
- else:
31
- out = string.decode(encoding)
32
-
33
- return out
34
-
35
-
36
- def unicode_is_ascii(u_string):
37
- """Determine if unicode string only contains ASCII characters.
38
-
39
- :param str u_string: unicode string to check. Must be unicode
40
- and not Python 2 `str`.
41
- :rtype: bool
42
- """
43
- assert isinstance(u_string, str)
44
- try:
45
- u_string.encode("ascii")
46
- return True
47
- except UnicodeEncodeError:
48
- return False
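
A short sketch exercising the two helpers above (again an internal requests API, so the import path is not a stable contract):

    from requests._internal_utils import to_native_string, unicode_is_ascii

    assert to_native_string(b"Content-Type") == "Content-Type"   # bytes decoded as ASCII
    assert to_native_string("already-native") == "already-native"
    assert unicode_is_ascii("hello") is True
    assert unicode_is_ascii("h\u00e9llo") is False               # 'é' falls outside ASCII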
spaces/Bready11/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Onodofthenorth-SD PixelArt SpriteSheet Generator
- emoji: 🚀
- colorFrom: blue
- colorTo: green
- sdk: gradio
- sdk_version: 3.45.2
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/merge.h DELETED
@@ -1,91 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/system/detail/generic/tag.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace detail
- {
- namespace generic
- {
-
-
- // XXX calling this function is an error; there is no implementation
- template<typename DerivedPolicy,
-          typename InputIterator1,
-          typename InputIterator2,
-          typename OutputIterator,
-          typename StrictWeakOrdering>
- __host__ __device__
- OutputIterator merge(thrust::execution_policy<DerivedPolicy> &exec,
-                      InputIterator1 first1,
-                      InputIterator1 last1,
-                      InputIterator2 first2,
-                      InputIterator2 last2,
-                      OutputIterator result,
-                      StrictWeakOrdering comp);
-
-
- template<typename DerivedPolicy,
-          typename InputIterator1,
-          typename InputIterator2,
-          typename OutputIterator>
- __host__ __device__
- OutputIterator merge(thrust::execution_policy<DerivedPolicy> &exec,
-                      InputIterator1 first1,
-                      InputIterator1 last1,
-                      InputIterator2 first2,
-                      InputIterator2 last2,
-                      OutputIterator result);
-
-
- template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename InputIterator3, typename InputIterator4, typename OutputIterator1, typename OutputIterator2, typename Compare>
- __host__ __device__
- thrust::pair<OutputIterator1,OutputIterator2>
-   merge_by_key(thrust::execution_policy<DerivedPolicy> &exec,
-                InputIterator1 keys_first1, InputIterator1 keys_last1,
-                InputIterator2 keys_first2, InputIterator2 keys_last2,
-                InputIterator3 values_first1, InputIterator4 values_first2,
-                OutputIterator1 keys_result,
-                OutputIterator2 values_result,
-                Compare comp);
-
-
- template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename InputIterator3, typename InputIterator4, typename OutputIterator1, typename OutputIterator2>
- __host__ __device__
- thrust::pair<OutputIterator1,OutputIterator2>
-   merge_by_key(thrust::execution_policy<DerivedPolicy> &exec,
-                InputIterator1 keys_first1, InputIterator1 keys_last1,
-                InputIterator2 keys_first2, InputIterator2 keys_last2,
-                InputIterator3 values_first1, InputIterator4 values_first2,
-                OutputIterator1 keys_result,
-                OutputIterator2 values_result);
-
-
- } // end namespace generic
- } // end namespace detail
- } // end namespace system
- } // end namespace thrust
-
- #include <thrust/system/detail/generic/merge.inl>
spaces/CVPR/LIVE/thrust/thrust/tuple.h DELETED
@@ -1,585 +0,0 @@
- /*
-  * Copyright 2008-2018 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
-
- /*! \file tuple.h
-  *  \brief A type encapsulating a heterogeneous collection of elements
-  */
-
- /*
-  * Copyright (C) 1999, 2000 Jaakko Järvi ([email protected])
-  *
-  * Distributed under the Boost Software License, Version 1.0.
-  * (See accompanying NOTICE file for the complete license)
-  *
-  * For more information, see http://www.boost.org
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/tuple.inl>
- #include <thrust/pair.h>
-
- namespace thrust
- {
-
- /*! \addtogroup utility
-  *  \{
-  */
-
- /*! \addtogroup tuple
-  *  \{
-  */
-
- /*! \cond
-  */
-
- struct null_type;
-
- /*! \endcond
-  */
-
- /*! This metafunction returns the type of a
-  *  \p tuple's <tt>N</tt>th element.
-  *
-  *  \tparam N This parameter selects the element of interest.
-  *  \tparam T A \c tuple type of interest.
-  *
-  *  \see pair
-  *  \see tuple
-  */
- template<int N, class T>
- struct tuple_element
- {
-   private:
-     typedef typename T::tail_type Next;
-
-   public:
-     /*! The result of this metafunction is returned in \c type.
-      */
-     typedef typename tuple_element<N-1, Next>::type type;
- }; // end tuple_element
-
- /*! This metafunction returns the number of elements
-  *  of a \p tuple type of interest.
-  *
-  *  \tparam T A \c tuple type of interest.
-  *
-  *  \see pair
-  *  \see tuple
-  */
- template<class T>
- struct tuple_size
- {
-   /*! The result of this metafunction is returned in \c value.
-    */
-   static const int value = 1 + tuple_size<typename T::tail_type>::value;
- }; // end tuple_size
-
- // get function for non-const cons-lists, returns a reference to the element
-
- /*! The \p get function returns a reference to a \p tuple element of
-  *  interest.
-  *
-  *  \param t A reference to a \p tuple of interest.
-  *  \return A reference to \p t's <tt>N</tt>th element.
-  *
-  *  \tparam N The index of the element of interest.
-  *
-  *  The following code snippet demonstrates how to use \p get to print
-  *  the value of a \p tuple element.
-  *
-  *  \code
-  *  #include <thrust/tuple.h>
-  *  #include <iostream>
-  *  ...
-  *  thrust::tuple<int, const char *> t(13, "thrust");
-  *
-  *  std::cout << "The 1st value of t is " << thrust::get<0>(t) << std::endl;
-  *  \endcode
-  *
-  *  \see pair
-  *  \see tuple
-  */
- template<int N, class HT, class TT>
- __host__ __device__
- inline typename access_traits<
-                   typename tuple_element<N, detail::cons<HT, TT> >::type
-                 >::non_const_type
- get(detail::cons<HT, TT>& t);
-
-
- /*! The \p get function returns a \c const reference to a \p tuple element of
-  *  interest.
-  *
-  *  \param t A reference to a \p tuple of interest.
-  *  \return A \c const reference to \p t's <tt>N</tt>th element.
-  *
-  *  \tparam N The index of the element of interest.
-  *
-  *  The following code snippet demonstrates how to use \p get to print
-  *  the value of a \p tuple element.
-  *
-  *  \code
-  *  #include <thrust/tuple.h>
-  *  #include <iostream>
-  *  ...
-  *  thrust::tuple<int, const char *> t(13, "thrust");
-  *
-  *  std::cout << "The 1st value of t is " << thrust::get<0>(t) << std::endl;
-  *  \endcode
-  *
-  *  \see pair
-  *  \see tuple
-  */
- template<int N, class HT, class TT>
- __host__ __device__
- inline typename access_traits<
-                   typename tuple_element<N, detail::cons<HT, TT> >::type
-                 >::const_type
- get(const detail::cons<HT, TT>& t);
-
-
-
- /*! \p tuple is a class template that can be instantiated with up to ten arguments.
-  *  Each template argument specifies the type of element in the \p tuple.
-  *  Consequently, tuples are heterogeneous, fixed-size collections of values. An
-  *  instantiation of \p tuple with two arguments is similar to an instantiation
-  *  of \p pair with the same two arguments. Individual elements of a \p tuple may
-  *  be accessed with the \p get function.
-  *
-  *  \tparam TN The type of the <tt>N</tt>th \c tuple element. Thrust's \p tuple
-  *          type currently supports up to ten elements.
-  *
-  *  The following code snippet demonstrates how to create a new \p tuple object
-  *  and inspect and modify the value of its elements.
-  *
-  *  \code
-  *  #include <thrust/tuple.h>
-  *  #include <iostream>
-  *  ...
-  *  // create a tuple containing an int, a float, and a string
-  *  thrust::tuple<int, float, const char*> t(13, 0.1f, "thrust");
-  *
-  *  // individual members are accessed with the free function get
-  *  std::cout << "The first element's value is " << thrust::get<0>(t) << std::endl;
-  *
-  *  // or the member function get
-  *  std::cout << "The second element's value is " << t.get<1>() << std::endl;
-  *
-  *  // we can also modify elements with the same function
-  *  thrust::get<0>(t) += 10;
-  *  \endcode
-  *
-  *  \see pair
-  *  \see get
-  *  \see make_tuple
-  *  \see tuple_element
-  *  \see tuple_size
-  *  \see tie
-  */
- template <class T0, class T1, class T2, class T3, class T4,
-           class T5, class T6, class T7, class T8, class T9>
- class tuple :
-   public detail::map_tuple_to_cons<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>::type
- {
-   /*! \cond
-    */
-
-   private:
-     typedef typename detail::map_tuple_to_cons<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>::type inherited;
-
-   /*! \endcond
-    */
-
-   public:
-     /*! \p tuple's no-argument constructor initializes each element.
-      */
-     inline __host__ __device__
-     tuple(void) {}
-
-     /*! \p tuple's one-argument constructor copy constructs the first element from the given parameter
-      *  and initializes all other elements.
-      *  \param t0 The value to assign to this \p tuple's first element.
-      */
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0)
-       : inherited(t0,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     /*! \p tuple's two-argument constructor copy constructs the first two elements from the given parameters
-      *  and initializes all other elements.
-      *  \param t0 The value to assign to this \p tuple's first element.
-      *  \param t1 The value to assign to this \p tuple's second element.
-      *  \note \p tuple's constructor has ten variants of this form, the rest of which are omitted here for brevity.
-      */
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1)
-       : inherited(t0, t1,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     /*! \cond
-      */
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2)
-       : inherited(t0, t1, t2,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3)
-       : inherited(t0, t1, t2, t3,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4)
-       : inherited(t0, t1, t2, t3, t4,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4,
-           typename access_traits<T5>::parameter_type t5)
-       : inherited(t0, t1, t2, t3, t4, t5,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4,
-           typename access_traits<T5>::parameter_type t5,
-           typename access_traits<T6>::parameter_type t6)
-       : inherited(t0, t1, t2, t3, t4, t5, t6,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4,
-           typename access_traits<T5>::parameter_type t5,
-           typename access_traits<T6>::parameter_type t6,
-           typename access_traits<T7>::parameter_type t7)
-       : inherited(t0, t1, t2, t3, t4, t5, t6, t7,
-                   static_cast<const null_type&>(null_type()),
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4,
-           typename access_traits<T5>::parameter_type t5,
-           typename access_traits<T6>::parameter_type t6,
-           typename access_traits<T7>::parameter_type t7,
-           typename access_traits<T8>::parameter_type t8)
-       : inherited(t0, t1, t2, t3, t4, t5, t6, t7, t8,
-                   static_cast<const null_type&>(null_type())) {}
-
-     inline __host__ __device__
-     tuple(typename access_traits<T0>::parameter_type t0,
-           typename access_traits<T1>::parameter_type t1,
-           typename access_traits<T2>::parameter_type t2,
-           typename access_traits<T3>::parameter_type t3,
-           typename access_traits<T4>::parameter_type t4,
-           typename access_traits<T5>::parameter_type t5,
-           typename access_traits<T6>::parameter_type t6,
-           typename access_traits<T7>::parameter_type t7,
-           typename access_traits<T8>::parameter_type t8,
-           typename access_traits<T9>::parameter_type t9)
-       : inherited(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) {}
-
-
-     template<class U1, class U2>
-     inline __host__ __device__
-     tuple(const detail::cons<U1, U2>& p) : inherited(p) {}
-
-     __thrust_exec_check_disable__
-     template <class U1, class U2>
-     inline __host__ __device__
-     tuple& operator=(const detail::cons<U1, U2>& k)
-     {
-       inherited::operator=(k);
-       return *this;
-     }
-
-     /*! \endcond
-      */
-
-     /*! This assignment operator allows assigning the first two elements of this \p tuple from a \p pair.
-      *  \param k A \p pair to assign from.
-      */
-     __thrust_exec_check_disable__
-     template <class U1, class U2>
-     __host__ __device__ inline
-     tuple& operator=(const thrust::pair<U1, U2>& k) {
-       //BOOST_STATIC_ASSERT(length<tuple>::value == 2);// check_length = 2
-       this->head = k.first;
-       this->tail.head = k.second;
-       return *this;
-     }
-
-     /*! \p swap swaps the elements of two <tt>tuple</tt>s.
-      *
-      *  \param t The other <tt>tuple</tt> with which to swap.
-      */
-     inline __host__ __device__
-     void swap(tuple &t)
-     {
-       inherited::swap(t);
-     }
- };
-
- /*! \cond
-  */
-
- template <>
- class tuple<null_type, null_type, null_type, null_type, null_type, null_type, null_type, null_type, null_type, null_type> :
-   public null_type
- {
-   public:
-     typedef null_type inherited;
- };
-
- /*! \endcond
-  */
-
-
- /*! This version of \p make_tuple creates a new \c tuple object from a
-  *  single object.
-  *
-  *  \param t0 The object to copy from.
-  *  \return A \p tuple object with a single member which is a copy of \p t0.
-  */
- template<class T0>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0>::type
- make_tuple(const T0& t0);
-
- /*! This version of \p make_tuple creates a new \c tuple object from two
-  *  objects.
-  *
-  *  \param t0 The first object to copy from.
-  *  \param t1 The second object to copy from.
-  *  \return A \p tuple object with two members which are copies of \p t0
-  *          and \p t1.
-  *
-  *  \note \p make_tuple has ten variants, the rest of which are omitted here
-  *        for brevity.
-  */
- template<class T0, class T1>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1>::type
- make_tuple(const T0& t0, const T1& t1);
-
- /*! This version of \p tie creates a new \c tuple whose single element is
-  *  a reference which refers to this function's argument.
-  *
-  *  \param t0 The object to reference.
-  *  \return A \p tuple object with one member which is a reference to \p t0.
-  */
- template<typename T0>
- __host__ __device__ inline
- tuple<T0&> tie(T0& t0);
-
- /*! This version of \p tie creates a new \c tuple of references object which
-  *  refers to this function's arguments.
-  *
-  *  \param t0 The first object to reference.
-  *  \param t1 The second object to reference.
-  *  \return A \p tuple object with two members which are references to \p t0
-  *          and \p t1.
-  *
-  *  \note \p tie has ten variants, the rest of which are omitted here for
-  *        brevity.
-  */
- template<typename T0, typename T1>
- __host__ __device__ inline
- tuple<T0&,T1&> tie(T0& t0, T1& t1);
-
- /*! \p swap swaps the contents of two <tt>tuple</tt>s.
-  *
-  *  \param x The first \p tuple to swap.
-  *  \param y The second \p tuple to swap.
-  */
- template<
-   typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9,
-   typename U0, typename U1, typename U2, typename U3, typename U4, typename U5, typename U6, typename U7, typename U8, typename U9
- >
- inline __host__ __device__
- void swap(tuple<T0,T1,T2,T3,T4,T5,T6,T7,T8,T9> &x,
-           tuple<U0,U1,U2,U3,U4,U5,U6,U7,U8,U9> &y);
-
-
-
- /*! \cond
-  */
-
- template<class T0, class T1, class T2>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2);
-
- template<class T0, class T1, class T2, class T3>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3);
-
- template<class T0, class T1, class T2, class T3, class T4>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4);
-
- template<class T0, class T1, class T2, class T3, class T4, class T5>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4, T5>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5);
-
- template<class T0, class T1, class T2, class T3, class T4, class T5, class T6>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4, T5, T6>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6);
-
- template<class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4, T5, T6, T7>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7);
-
- template<class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4, T5, T6, T7, T8>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8);
-
- template<class T0, class T1, class T2, class T3, class T4, class T5, class T6, class T7, class T8, class T9>
- __host__ __device__ inline
- typename detail::make_tuple_mapper<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9>::type
- make_tuple(const T0& t0, const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8, const T9& t9);
-
- template<typename T0, typename T1, typename T2>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&> tie(T0 &t0, T1 &t1, T2 &t2);
-
- template<typename T0, typename T1, typename T2, typename T3>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&,T5&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&,T5&,T6&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&,T5&,T6&,T7&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&,T5&,T6&,T7&,T8&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7, T8 &t8);
-
- template<typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
- __host__ __device__ inline
- tuple<T0&,T1&,T2&,T3&,T4&,T5&,T6&,T7&,T8&,T9&> tie(T0 &t0, T1 &t1, T2 &t2, T3 &t3, T4 &t4, T5 &t5, T6 &t6, T7 &t7, T8 &t8, T9 &t9);
-
-
- __host__ __device__ inline
- bool operator==(const null_type&, const null_type&);
-
- __host__ __device__ inline
- bool operator>=(const null_type&, const null_type&);
-
- __host__ __device__ inline
- bool operator<=(const null_type&, const null_type&);
-
- __host__ __device__ inline
- bool operator!=(const null_type&, const null_type&);
-
- __host__ __device__ inline
- bool operator<(const null_type&, const null_type&);
-
- __host__ __device__ inline
- bool operator>(const null_type&, const null_type&);
-
- /*! \endcond
-  */
-
- /*! \} // tuple
-  */
-
- /*! \} // utility
-  */
-
- } // end thrust
spaces/CVPR/MonoScene/monoscene/unet3d_kitti.py DELETED
@@ -1,88 +0,0 @@
- # encoding: utf-8
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from monoscene.modules import SegmentationHead
- from monoscene.CRP3D import CPMegaVoxels
- from monoscene.modules import Process, Upsample, Downsample
-
-
- class UNet3D(nn.Module):
-     def __init__(
-         self,
-         class_num,
-         norm_layer,
-         full_scene_size,
-         feature,
-         project_scale,
-         context_prior=None,
-         bn_momentum=0.1,
-     ):
-         super(UNet3D, self).__init__()
-         self.business_layer = []
-         self.project_scale = project_scale
-         self.full_scene_size = full_scene_size
-         self.feature = feature
-
-         size_l1 = (
-             int(self.full_scene_size[0] / project_scale),
-             int(self.full_scene_size[1] / project_scale),
-             int(self.full_scene_size[2] / project_scale),
-         )
-         size_l2 = (size_l1[0] // 2, size_l1[1] // 2, size_l1[2] // 2)
-         size_l3 = (size_l2[0] // 2, size_l2[1] // 2, size_l2[2] // 2)
-
-         dilations = [1, 2, 3]
-         self.process_l1 = nn.Sequential(
-             Process(self.feature, norm_layer, bn_momentum, dilations=[1, 2, 3]),
-             Downsample(self.feature, norm_layer, bn_momentum),
-         )
-         self.process_l2 = nn.Sequential(
-             Process(self.feature * 2, norm_layer, bn_momentum, dilations=[1, 2, 3]),
-             Downsample(self.feature * 2, norm_layer, bn_momentum),
-         )
-
-         self.up_13_l2 = Upsample(
-             self.feature * 4, self.feature * 2, norm_layer, bn_momentum
-         )
-         self.up_12_l1 = Upsample(
-             self.feature * 2, self.feature, norm_layer, bn_momentum
-         )
-         self.up_l1_lfull = Upsample(
-             self.feature, self.feature // 2, norm_layer, bn_momentum
-         )
-
-         self.ssc_head = SegmentationHead(
-             self.feature // 2, self.feature // 2, class_num, dilations
-         )
-
-         self.context_prior = context_prior
-         if context_prior:
-             self.CP_mega_voxels = CPMegaVoxels(
-                 self.feature * 4, size_l3, bn_momentum=bn_momentum
-             )
-
-     def forward(self, input_dict):
-         res = {}
-
-         x3d_l1 = input_dict["x3d"]
-
-         x3d_l2 = self.process_l1(x3d_l1)
-
-         x3d_l3 = self.process_l2(x3d_l2)
-
-         if self.context_prior:
-             ret = self.CP_mega_voxels(x3d_l3)
-             x3d_l3 = ret["x"]
-             for k in ret.keys():
-                 res[k] = ret[k]
-
-         x3d_up_l2 = self.up_13_l2(x3d_l3) + x3d_l2
-         x3d_up_l1 = self.up_12_l1(x3d_up_l2) + x3d_l1
-         x3d_up_lfull = self.up_l1_lfull(x3d_up_l1)
-
-         ssc_logit_full = self.ssc_head(x3d_up_lfull)
-
-         res["ssc_logit"] = ssc_logit_full
-
-         return res
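
For context, a sketch of constructing and calling the deleted UNet3D. The hyperparameters mirror MonoScene's SemanticKITTI setting but are assumptions here, not values confirmed by this file, and `norm_layer=nn.BatchNorm3d` assumes the `Process`/`Downsample`/`Upsample` modules accept a standard norm factory:

    # Illustrative sketch; all hyperparameter values below are assumptions.
    import torch
    import torch.nn as nn
    from monoscene.unet3d_kitti import UNet3D

    net = UNet3D(class_num=20, norm_layer=nn.BatchNorm3d,
                 full_scene_size=(256, 256, 32), feature=64,
                 project_scale=2, context_prior=None)

    # input features live at full_scene_size / project_scale
    x3d = torch.randn(1, 64, 128, 128, 16)
    out = net({"x3d": x3d})
    # out["ssc_logit"] should come back at full scene resolution,
    # i.e. (1, class_num, 256, 256, 32)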