parquet-converter commited on
Commit
28c3eda
·
1 Parent(s): bc8e748

Update parquet files (step 74 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/0xSynapse/PixelFusion/README.md +0 -13
  2. spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_dataset.py +0 -192
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis True Image 2020 Bootable ISO Build 20770 The Ultimate Backup Solution.md +0 -47
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avira Antivirus for Windows 10 32 Bit A Complete Guide.md +0 -42
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kpg 141d Learn How to Program NX Series Radios with KPG-141D FPU.md +0 -100
  6. spaces/1gistliPinn/ChatGPT4/Examples/Bus Driver Simulator 2019 DLC Unlocker-PLAZA.md +0 -125
  7. spaces/1line/AutoGPT/CONTRIBUTING.md +0 -105
  8. spaces/1line/AutoGPT/autogpt/commands/google_search.py +0 -87
  9. spaces/1phancelerku/anime-remove-background/Download APK Real Boxing and Experience the Ultimate Fighting Game on Android.md +0 -132
  10. spaces/22h/vintedois-diffusion-v0-2/README.md +0 -12
  11. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/docs/eval.md +0 -31
  12. spaces/801artistry/RVC801/infer/modules/ipex/hijacks.py +0 -196
  13. spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/README.md +0 -20
  14. spaces/AIGC-Audio/AudioGPT/mono2binaural/src/utils.py +0 -251
  15. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/conditioners.py +0 -990
  16. spaces/AchyuthGamer/OpenGPT/client/js/icons.js +0 -1
  17. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aichat.py +0 -54
  18. spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Bard.py +0 -92
  19. spaces/Adr740/CV_XPLORER_POC/get_cv.py +0 -39
  20. spaces/AgentVerse/agentVerse/scripts/__init__.py +0 -0
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/quadimage.js +0 -13
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/imagebox/ImageBox.js +0 -2
  23. spaces/Akmyradov/TurkmenTTSweSTT/vits/monotonic_align/__init__.py +0 -19
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_onnx_objects.py +0 -17
  25. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py +0 -5
  26. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py +0 -4
  27. spaces/AndySAnker/DeepStruc/tools/utils.py +0 -279
  28. spaces/Ariharasudhan/Kenya_food_classification/README.md +0 -13
  29. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/__init__.py +0 -0
  30. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/scheduler_list.py +0 -32
  31. spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese_bert.py +0 -47
  32. spaces/Ashrafb/Imdf2/README.md +0 -12
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/collector.py +0 -505
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py +0 -271
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/fancy_getopt.py +0 -470
  36. spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/chinese.py +0 -193
  37. spaces/Benson/text-generation/Examples/Blackpink El Juego Apkmirror.md +0 -18
  38. spaces/Benson/text-generation/Examples/Descargar Cheat Kick El Amigo 2.md +0 -125
  39. spaces/Benson/text-generation/Examples/Descargar Derby De Demolicin 3 Mod Apk.md +0 -58
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/translate.py +0 -78
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py +0 -36
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/eval/result_eval.py +0 -61
  43. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/exec.py +0 -58
  44. spaces/CVPR/LIVE/pybind11/tools/pybind11NewTools.cmake +0 -203
  45. spaces/CVPR/WALT/mmdet/utils/collect_env.py +0 -16
  46. spaces/CVPR/lama-example/saicinpainting/evaluation/evaluator.py +0 -220
  47. spaces/CVPR/regionclip-demo/detectron2/layers/csrc/vision.cpp +0 -129
  48. spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/local.py +0 -136
  49. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/webvid_datasets.py +0 -122
  50. spaces/DHEIVER/ThyroidTumorClassificationModel/app.py +0 -66
spaces/0xSynapse/PixelFusion/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: PixelFusion
3
- emoji: 🔥
4
- colorFrom: green
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- license: gpl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/17TheWord/RealESRGAN/realesrgan/data/realesrgan_dataset.py DELETED
@@ -1,192 +0,0 @@
1
- import cv2
2
- import math
3
- import numpy as np
4
- import os
5
- import os.path as osp
6
- import random
7
- import time
8
- import torch
9
- from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
10
- from basicsr.data.transforms import augment
11
- from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
12
- from basicsr.utils.registry import DATASET_REGISTRY
13
- from torch.utils import data as data
14
-
15
-
16
- @DATASET_REGISTRY.register()
17
- class RealESRGANDataset(data.Dataset):
18
- """Dataset used for Real-ESRGAN model:
19
- Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
20
-
21
- It loads gt (Ground-Truth) images, and augments them.
22
- It also generates blur kernels and sinc kernels for generating low-quality images.
23
- Note that the low-quality images are processed in tensors on GPUS for faster processing.
24
-
25
- Args:
26
- opt (dict): Config for train datasets. It contains the following keys:
27
- dataroot_gt (str): Data root path for gt.
28
- meta_info (str): Path for meta information file.
29
- io_backend (dict): IO backend type and other kwarg.
30
- use_hflip (bool): Use horizontal flips.
31
- use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
32
- Please see more options in the codes.
33
- """
34
-
35
- def __init__(self, opt):
36
- super(RealESRGANDataset, self).__init__()
37
- self.opt = opt
38
- self.file_client = None
39
- self.io_backend_opt = opt['io_backend']
40
- self.gt_folder = opt['dataroot_gt']
41
-
42
- # file client (lmdb io backend)
43
- if self.io_backend_opt['type'] == 'lmdb':
44
- self.io_backend_opt['db_paths'] = [self.gt_folder]
45
- self.io_backend_opt['client_keys'] = ['gt']
46
- if not self.gt_folder.endswith('.lmdb'):
47
- raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
48
- with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
49
- self.paths = [line.split('.')[0] for line in fin]
50
- else:
51
- # disk backend with meta_info
52
- # Each line in the meta_info describes the relative path to an image
53
- with open(self.opt['meta_info']) as fin:
54
- paths = [line.strip().split(' ')[0] for line in fin]
55
- self.paths = [os.path.join(self.gt_folder, v) for v in paths]
56
-
57
- # blur settings for the first degradation
58
- self.blur_kernel_size = opt['blur_kernel_size']
59
- self.kernel_list = opt['kernel_list']
60
- self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability
61
- self.blur_sigma = opt['blur_sigma']
62
- self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels
63
- self.betap_range = opt['betap_range'] # betap used in plateau blur kernels
64
- self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters
65
-
66
- # blur settings for the second degradation
67
- self.blur_kernel_size2 = opt['blur_kernel_size2']
68
- self.kernel_list2 = opt['kernel_list2']
69
- self.kernel_prob2 = opt['kernel_prob2']
70
- self.blur_sigma2 = opt['blur_sigma2']
71
- self.betag_range2 = opt['betag_range2']
72
- self.betap_range2 = opt['betap_range2']
73
- self.sinc_prob2 = opt['sinc_prob2']
74
-
75
- # a final sinc filter
76
- self.final_sinc_prob = opt['final_sinc_prob']
77
-
78
- self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21
79
- # TODO: kernel range is now hard-coded, should be in the configure file
80
- self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect
81
- self.pulse_tensor[10, 10] = 1
82
-
83
- def __getitem__(self, index):
84
- if self.file_client is None:
85
- self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
86
-
87
- # -------------------------------- Load gt images -------------------------------- #
88
- # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
89
- gt_path = self.paths[index]
90
- # avoid errors caused by high latency in reading files
91
- retry = 3
92
- while retry > 0:
93
- try:
94
- img_bytes = self.file_client.get(gt_path, 'gt')
95
- except (IOError, OSError) as e:
96
- logger = get_root_logger()
97
- logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}')
98
- # change another file to read
99
- index = random.randint(0, self.__len__())
100
- gt_path = self.paths[index]
101
- time.sleep(1) # sleep 1s for occasional server congestion
102
- else:
103
- break
104
- finally:
105
- retry -= 1
106
- img_gt = imfrombytes(img_bytes, float32=True)
107
-
108
- # -------------------- Do augmentation for training: flip, rotation -------------------- #
109
- img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])
110
-
111
- # crop or pad to 400
112
- # TODO: 400 is hard-coded. You may change it accordingly
113
- h, w = img_gt.shape[0:2]
114
- crop_pad_size = 400
115
- # pad
116
- if h < crop_pad_size or w < crop_pad_size:
117
- pad_h = max(0, crop_pad_size - h)
118
- pad_w = max(0, crop_pad_size - w)
119
- img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
120
- # crop
121
- if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
122
- h, w = img_gt.shape[0:2]
123
- # randomly choose top and left coordinates
124
- top = random.randint(0, h - crop_pad_size)
125
- left = random.randint(0, w - crop_pad_size)
126
- img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]
127
-
128
- # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
129
- kernel_size = random.choice(self.kernel_range)
130
- if np.random.uniform() < self.opt['sinc_prob']:
131
- # this sinc filter setting is for kernels ranging from [7, 21]
132
- if kernel_size < 13:
133
- omega_c = np.random.uniform(np.pi / 3, np.pi)
134
- else:
135
- omega_c = np.random.uniform(np.pi / 5, np.pi)
136
- kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
137
- else:
138
- kernel = random_mixed_kernels(
139
- self.kernel_list,
140
- self.kernel_prob,
141
- kernel_size,
142
- self.blur_sigma,
143
- self.blur_sigma, [-math.pi, math.pi],
144
- self.betag_range,
145
- self.betap_range,
146
- noise_range=None)
147
- # pad kernel
148
- pad_size = (21 - kernel_size) // 2
149
- kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
150
-
151
- # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
152
- kernel_size = random.choice(self.kernel_range)
153
- if np.random.uniform() < self.opt['sinc_prob2']:
154
- if kernel_size < 13:
155
- omega_c = np.random.uniform(np.pi / 3, np.pi)
156
- else:
157
- omega_c = np.random.uniform(np.pi / 5, np.pi)
158
- kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
159
- else:
160
- kernel2 = random_mixed_kernels(
161
- self.kernel_list2,
162
- self.kernel_prob2,
163
- kernel_size,
164
- self.blur_sigma2,
165
- self.blur_sigma2, [-math.pi, math.pi],
166
- self.betag_range2,
167
- self.betap_range2,
168
- noise_range=None)
169
-
170
- # pad kernel
171
- pad_size = (21 - kernel_size) // 2
172
- kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
173
-
174
- # ------------------------------------- the final sinc kernel ------------------------------------- #
175
- if np.random.uniform() < self.opt['final_sinc_prob']:
176
- kernel_size = random.choice(self.kernel_range)
177
- omega_c = np.random.uniform(np.pi / 3, np.pi)
178
- sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
179
- sinc_kernel = torch.FloatTensor(sinc_kernel)
180
- else:
181
- sinc_kernel = self.pulse_tensor
182
-
183
- # BGR to RGB, HWC to CHW, numpy to tensor
184
- img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
185
- kernel = torch.FloatTensor(kernel)
186
- kernel2 = torch.FloatTensor(kernel2)
187
-
188
- return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
189
- return return_d
190
-
191
- def __len__(self):
192
- return len(self.paths)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Acronis True Image 2020 Bootable ISO Build 20770 The Ultimate Backup Solution.md DELETED
@@ -1,47 +0,0 @@
1
-
2
- <h1>Acronis True Image 2020 Bootable ISO Build 20770: What Is It and Why You Need It</h1>
3
- <p>If you are looking for a reliable and easy-to-use backup and recovery solution for your Windows or Mac computer, you might have heard of <strong>Acronis True Image 2020</strong>. This software allows you to create an exact copy of your system, including all your files, applications, settings, and preferences. You can store this copy on an external drive, a cloud service, or a network location, and use it to restore your system in case of a disaster.</p>
4
- <h2>Acronis True Image 2020 Bootable ISO Build 20770</h2><br /><p><b><b>Download File</b> &#9733; <a href="https://byltly.com/2uKwus">https://byltly.com/2uKwus</a></b></p><br /><br />
5
- <p>But what if your computer does not boot or you have a new computer without any operating system installed on it? How can you access and restore your backup in such situations? This is where <strong>Acronis True Image 2020 Bootable ISO Build 20770</strong> comes in handy. This is a standalone version of Acronis True Image that you can use to boot your computer and restore your system from a backup. In this article, we will explain what this software is, how to create it, and how to use it.</p>
6
- <h2>What is Acronis True Image 2020?</h2>
7
- <p>Acronis True Image 2020 is a comprehensive backup and recovery software that protects your data from any threat, such as ransomware, hardware failure, natural disaster, human error, or theft. With Acronis True Image 2020, you can:</p>
8
- <ul>
9
- <li>Create full-image backups of your entire system or selected partitions.</li>
10
- <li>Create incremental or differential backups to save time and storage space.</li>
11
- <li>Create file-level backups of specific folders or files.</li>
12
- <li>Create disk clones to migrate your system to a new drive.</li>
13
- <li>Create Acronis Survival Kit, a combination of bootable media and full-image backup on a single external drive.</li>
14
- <li>Encrypt your backups with AES-256 encryption.</li>
15
- <li>Compress your backups to save storage space.</li>
16
- <li>Split your backups into multiple files or volumes.</li>
17
- <li>Schedule your backups to run automatically at specific times or events.</li>
18
- <li>Validate your backups to ensure their integrity and reliability.</li>
19
- <li>Browse and recover individual files or folders from your backups.</li>
20
- <li>Restore your entire system or selected partitions from your backups.</li>
21
- <li>Restore your system to dissimilar hardware with Acronis Universal Restore.</li>
22
- <li>Recover your system from ransomware attacks with Acronis Active Protection.</li>
23
- <li>Sync your files across multiple devices with Acronis Cloud Storage.</li>
24
- <li>Access and manage your backups remotely with Acronis Mobile App.</li>
25
- </ul>
26
- <h3>Features and benefits of Acronis True Image 2020</h3>
27
- <p>Acronis True Image 2020 offers many features and benefits that make it one of the best backup and recovery solutions on the market. Here are some of them:</p>
28
- <ul>
29
- <li><strong>Easy-to-use interface:</strong> Acronis True Image 2020 has a simple and intuitive interface that guides you through the backup and recovery process. You can easily select what to back up, where to store it, how often to run it, and how to restore it. You can also monitor the status of your backups, view backup statistics, manage backup settings, and more.</li>
30
- <li><strong>Fast and reliable performance:</strong> Acronis True Image 2020 uses advanced technologies to ensure fast and reliable backup and recovery performance. For example, it uses sector-by-sector imaging to create exact copies of your system, it uses incremental or differential backup methods to back up only the changes since the last backup, it uses compression and deduplication techniques to reduce the size of your backups, it uses validation tools to check the integrity of your backups, and more.</li>
31
- <li><strong>Versatile backup options:</strong> Acronis True Image 2020 gives you the flexibility to choose how to back up your data. You can back up your entire system or selected partitions, you can back up specific folders or files, you can back up to an external drive or a cloud service or a network location, you can back up manually or automatically or on demand, you can back up on a daily or weekly or monthly basis or based on events, you can back up with encryption or compression or splitting options, and more.</li>
32
- <li><strong>Powerful recovery options:</strong> Acronis True Image 2020 gives you the ability to recover your data in any situation. You can recover your entire system or selected partitions from a full-image backup, you can recover individual files or folders from a file-level backup, you can recover your system to the same or dissimilar hardware with Acronis Universal Restore, you can recover your system from ransomware attacks with Acronis Active Protection, you can recover your data from any device with Acronis Mobile App, and more.</li>
33
- </ul>
34
- <h3>What is new in Acronis True Image 2020 Build 20770?</h3>
35
- <p>Acronis True Image 2020 Build 20770 is the latest update for Acronis True Image 2020 that was released on October 5th, 2019. This update introduces some new features and improvements for the software. Here are some of them:</p>
36
- <ul>
37
- <li><strong>New backup format:</strong> This update introduces a new technology for disk-level backup that improves the performance and reliability of the software. The new backup format supports larger disks (up to 10 TB), faster backup creation (up to twice as fast), faster backup validation (up to three times faster), faster backup browsing (up to ten times faster), faster recovery (up to two times faster), better compression (up to ten percent smaller), better encryption (AES-XTS mode), better deduplication (sector-level), better error correction (Reed-Solomon codes), better resilience (self-healing backups), better compatibility (with third-party software), better security (against ransomware attacks), better scalability (for large-scale deployments), and more. The new backup format is available for Windows only. To use it, you need to create new disk-level backups with this update. Existing disk-level backups will remain in the old format until they are recreated with this update.</li>
38
- <li><strong>New notification center:</strong> This update introduces a new notification center that displays all important messages about the status of your backups in one place. You can access the notification center by clicking on the bell icon in the upper right corner of the software interface. The notification center shows you information such as backup errors, warnings, successes, recommendations, tips, news, updates, offers, and more. You can also customize which notifications you want to see and how often you want to see them.</li>
39
- <li><strong>New dark mode:</strong> This update introduces a new dark mode option that changes the appearance of the software interface to a dark theme. The dark mode option is available for Windows only. To enable it, go to Settings > Appearance > Dark mode.</li>
40
- </ul>
41
- <h2>What is a bootable media and how to create it with Acronis True Image 2020?</h2>
42
- <h3>What is a bootable media and why you need it?</h3>
43
- <p>A bootable media is a USB flash drive or a CD/DVD with Acronis True Image software on it. This way, you can boot your computer with this media and access the software without installing it on your hard drive. A bootable media is useful in situations where:</p>
44
- <ul>
45
- <li>Your computer does not</p> 0a6ba089eb<br />
46
- <br />
47
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avira Antivirus for Windows 10 32 Bit A Complete Guide.md DELETED
@@ -1,42 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install Avira Antivirus for Windows 10 32 Bit</h1>
3
- <p>If you are looking for a reliable and free antivirus software for your Windows 10 32 bit computer, you might want to consider Avira Free Antivirus. Avira is one of the most popular and trusted antivirus solutions in the market, with over 35 years of online security experience and millions of satisfied users. In this article, we will show you how to download and install Avira Antivirus for Windows 10 32 bit in a few easy steps.</p>
4
- <h2>Why Choose Avira Antivirus for Windows 10 32 Bit?</h2>
5
- <p>Avira Antivirus for Windows 10 32 bit offers you several advantages over other antivirus programs. Here are some of them:</p>
6
- <h2>avira antivirus free download for windows 10 32 bit with crack</h2><br /><p><b><b>Download</b> - <a href="https://byltly.com/2uKv9u">https://byltly.com/2uKv9u</a></b></p><br /><br />
7
- <ul>
8
- <li>It protects you from all online threats, such as viruses, worms, trojans, ransomware, spyware, adware, and rootkits.</li>
9
- <li>It has a low system impact, meaning it does not slow down your computer or interfere with your performance.</li>
10
- <li>It has a near-perfect detection rate, thanks to its intelligent learning algorithms and award-winning technology.</li>
11
- <li>It has an intuitive interface, making it easy to use and customize.</li>
12
- <li>It is compatible with Windows 10, Windows 11, Windows 8, and Windows 7.</li>
13
- <li>It is completely free and does not show any ads or sell your data.</li>
14
- </ul>
15
- <h2>How to Download Avira Antivirus for Windows 10 32 Bit?</h2>
16
- <p>To download Avira Antivirus for Windows 10 32 bit, follow these steps:</p>
17
- <ol>
18
- <li>Go to the official website of Avira and click on the "Download for free" button.</li>
19
- <li>Choose the option "Avira Free Antivirus for Windows" and click on the "Download now" button.</li>
20
- <li>Save the file to your computer and run it once the download is complete.</li>
21
- </ol>
22
- <h2>How to Install Avira Antivirus for Windows 10 32 Bit?</h2>
23
- <p>To install Avira Antivirus for Windows 10 32 bit, follow these steps:</p>
24
- <ol>
25
- <li>After running the downloaded file, click on the "Accept and install" button to start the installation process.</li>
26
- <li>Wait for the installation to finish. It may take a few minutes depending on your internet speed and system configuration.</li>
27
- <li>Once the installation is done, you will see a confirmation message. Click on the "Open Avira" button to launch the program.</li>
28
- <li>You can also create an account or log in with your existing account to access more features and settings.</li>
29
- </ol>
30
- <h2>How to Use Avira Antivirus for Windows 10 32 Bit?</h2>
31
- <p>To use Avira Antivirus for Windows 10 32 bit, follow these steps:</p>
32
- <ul>
33
- <li>To scan your computer for malware, click on the "Scan" button on the main screen. You can choose between a quick scan, a full scan, or a custom scan.</li>
34
- <li>To update your antivirus database, click on the "Update" button on the main screen. You can also enable automatic updates in the settings.</li>
35
- <li>To adjust your security settings, click on the "Settings" button on the main screen. You can change various options such as real-time protection, firewall, web protection, email protection, ransomware protection, etc.</li>
36
- <li>To access more features and tools, click on the "More tools" button on the main screen. You can find useful utilities such as password manager, VPN service, software updater, system speedup, etc.</li>
37
- </ul>
38
-
39
- <p>We hope this article helped you learn how to download and install Avira Antivirus for Windows 10 32 bit. If you have any questions or feedback, feel free to leave a comment below. Stay safe online with Avira!</p>
40
- <p></p> ddb901b051<br />
41
- <br />
42
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crack Kpg 141d Learn How to Program NX Series Radios with KPG-141D FPU.md DELETED
@@ -1,100 +0,0 @@
1
- <br />
2
- <h1>What is Crack Kpg 141d and why you need it</h1>
3
- <p>If you own a Kenwood radio, you may have heard of Crack Kpg 141d. It is a software that allows you to program your radio without paying for the official Kenwood programming software. But what exactly is Crack Kpg 141d and how does it work? In this article, we will explain everything you need to know about Crack Kpg 141d, including how to download, install, and use it, as well as the benefits and risks of using it. We will also provide some alternatives to Crack Kpg 141d in case you are looking for other options.</p>
4
- <h2>Crack Kpg 141d</h2><br /><p><b><b>Download File</b> >>> <a href="https://byltly.com/2uKxUM">https://byltly.com/2uKxUM</a></b></p><br /><br />
5
- <h2>How to download and install Crack Kpg 141d</h2>
6
- <p>The first step to use Crack Kpg 141d is to download it from a reliable source. There are many websites that offer Crack Kpg 141d for free, but some of them may contain viruses, malware, or spyware that can harm your computer or radio. Therefore, you should be careful when choosing where to download Crack Kpg 141d from. One of the most trusted sources for downloading Crack Kpg 141d is HamFiles, a website that provides various radio programming software, patches, cracks, and serial numbers. Here are the steps to download Crack Kpg 141d from HamFiles:</p>
7
- <ul>
8
- <li>Go to <a href="https://hamfiles.co.uk/index.php?page=downloads&type=entry&id=radio-programming%2Fkpg-141-d-prpgramming_2">this link</a> and log in or register an account.</li>
9
- <li>Click on the "Download" button and wait for the file to be downloaded.</li>
10
- <li>The file name should be "KPG-141D_v5.10.zip" and the size should be about 6 MB.</li>
11
- </ul>
12
- <p>Once you have downloaded Crack Kpg 141d, you need to install it on your computer. Here are the steps to install Crack Kpg 141d:</p>
13
- <ul>
14
- <li>Extract the zip file using a program like WinRAR or WinZip.</li>
15
- <li>Open the folder "KPG-141D_v5.10" and double-click on the file "Setup.exe".</li>
16
- <li>Follow the instructions on the screen and accept the terms and conditions.</li>
17
- <li>When prompted, enter the serial number that is provided in the folder "Serial Number".</li>
18
- <li>Complete the installation process and restart your computer if necessary.</li>
19
- </ul>
20
- <h2>How to use Crack Kpg 141d to program Kenwood radios</h2>
21
- <p>After installing Crack Kpg 141d, you can use it to program your Kenwood radios. Crack Kpg 141d supports various models and frequency ranges of Kenwood radios, such as NX-206, NX-220, NX-306, NX-320, NX-420, NX-720, NX-720G, NX-720H, NX-720HG, NX-820, NX-820G, NX-820H, NX-820HG, NX-920, NX-920G. Here are the steps to use Crack Kpg 141d to program your Kenwood radios:</p>
22
- <ul>
23
- <li>Connect your Kenwood radio to your computer using a USB cable or a programming cable. Make sure your radio is turned off before connecting it.</li>
24
- <li>Launch Crack Kpg 141d from your desktop or start menu.</li>
25
- <li>Select your radio model and frequency range from the drop-down menus at the top left corner of the screen.</li>
26
- <li>Click on "Read" to read the data from your radio or click on "New" to create a new data file.</li>
27
- <li>Customize the settings and features of your radio according to your preferences. You can change things like channel frequency, power output, squelch level, tone code, scan list, signaling system, etc.</li>
28
- <li>Click on "Write" to write the data to your radio or click on "Save" to save the data file on your computer.</li>
29
- <li>Verify that your radio works correctly by testing its functions and features.</li>
30
- </ul>
31
- <h2>The benefits and risks of using Crack Kpg 141d</h2>
32
- <p>Using Crack Kpg 141d has some benefits and risks that you should be aware of before deciding whether to use it or not. Here are some of them:</p>
33
- <h3>Benefit 1: You can access all the functions and options of your Kenwood radio without paying for the official software</h3>
34
- <h3>Benefit 2: You can program multiple radios with different models and frequencies using one software</h3>
35
- <p>Another benefit of using Crack Kpg 141d is that you can program multiple radios with different models and frequencies using one software. This can save you time and money, as you don't need to buy different software for each radio model or frequency range. For example, you can use Crack Kpg 141d to program NX-206, NX-220, NX-306, NX-320, NX-420, NX-720, NX-720G, NX-720H, NX-720HG, NX-820, NX-820G, NX-820H, NX-820HG, NX-920, NX-920G radios with various frequency ranges from 136 MHz to 950 MHz. You can also customize each radio individually or clone the same data to multiple radios.</p>
36
- <h3>Risk 1: You may violate the license agreement and warranty of your Kenwood radio by using unauthorized software</h3>
37
- <p>One of the risks of using Crack Kpg 141d is that you may violate the license agreement and warranty of your Kenwood radio by using unauthorized software. According to Kenwood's website, the use of unauthorized software may result in "damage to your radio or computer equipment" and "loss of warranty coverage". Kenwood also states that "the use of unauthorized software may be illegal" and that "Kenwood is not responsible for any problems caused by unauthorized software". Therefore, you should be aware of the legal and ethical implications of using Crack Kpg 141d before deciding to use it.</p>
38
- <h3>Risk 2: You may damage your radio or computer by using corrupted or infected files</h3>
39
- <p>Another risk of using Crack Kpg 141d is that you may damage your radio or computer by using corrupted or infected files. As mentioned earlier, some websites that offer Crack Kpg 141d for free may contain viruses, malware, or spyware that can harm your computer or radio. Even if you download Crack Kpg 141d from a reliable source like HamFiles, there is no guarantee that the file is safe and error-free. You may encounter problems like data corruption, system crash, device malfunction, or data loss. Therefore, you should always scan the file with an antivirus program before installing it and backup your data before programming your radio.</p>
40
- <h2>The alternatives to Crack Kpg 141d</h2>
41
- <p>If you are not comfortable with using Crack Kpg 141d or if you want to avoid the risks associated with it, you may consider some alternatives to Crack Kpg 141d. Here are some of them:</p>
42
- <h3>Alternative 1: Buy the original Kenwood programming software from an authorized dealer or online store</h3>
43
- <p>The most obvious alternative to Crack Kpg 141d is to buy the original Kenwood programming software from an authorized dealer or online store. This way, you can ensure that you are using a legitimate and safe software that is compatible with your Kenwood radio. You can also enjoy the full support and warranty from Kenwood and avoid any legal or ethical issues. However, the downside of this alternative is that it can be quite expensive and hard to find. For example, according to Radio Software Online, the original price of KPG-141D is $150 USD. You may also need to buy different software for different radio models or frequency ranges.</p>
44
- <p>Kenwood radio programming software download<br />
45
- Kenwood KPG-141D FPU (v5.1) Programming Software – HamFiles<br />
46
- Kenwood KPG-141D FPU (v5.1) Programming Software for NX series<br />
47
- Kenwood KPG-141D FPU (v5.1) Programming Software Supported Languages<br />
48
- Kenwood KPG-141D FPU (v5.1) Programming Software Compatible Operating Systems<br />
49
- Kenwood KPG-141D FPU (v5.1) Programming Software Download Link<br />
50
- Kenwood KPG-141D FPU (v5.1) Programming Software Rating and Reviews<br />
51
- Kenwood KPG-141D FPU (v5.1) Programming Software Images and Screenshots<br />
52
- Kenwood KPG-141D FPU (v5.1) Programming Software Installation Guide<br />
53
- Kenwood KPG-141D FPU (v5.1) Programming Software User Manual<br />
54
- Kenwood KPG-141D FPU (v5.1) Programming Software Serial Number<br />
55
- Kenwood KPG-141D FPU (v5.1) Programming Software Crack Patch Keygen<br />
56
- Kenwood KPG-141D FPU (v5.1) Programming Software License Activation Code<br />
57
- Kenwood KPG-141D FPU (v5.1) Programming Software Free Trial Version<br />
58
- Kenwood KPG-141D FPU (v5.1) Programming Software Full Version Download<br />
59
- Kenwood KPG-141D FPU (v5.1) Programming Software Alternative Software<br />
60
- Kenwood KPG-141D FPU (v5.1) Programming Software Features and Benefits<br />
61
- Kenwood KPG-141D FPU (v5.1) Programming Software Requirements and Specifications<br />
62
- Kenwood KPG-141D FPU (v5.1) Programming Software Troubleshooting and Support<br />
63
- Kenwood KPG-141D FPU (v5.1) Programming Software Updates and Upgrades<br />
64
- How to use Kenwood KPG-141D FPU (v5.1) Programming Software<br />
65
- How to program NX series radios with Kenwood KPG-141D FPU (v5.1) Programming Software<br />
66
- How to crack Kenwood KPG-141D FPU (v5.1) Programming Software<br />
67
- How to activate Kenwood KPG-141D FPU (v5.1) Programming Software license<br />
68
- How to get Kenwood KPG-141D FPU (v5.1) Programming Software for free<br />
69
- How to download Kenwood KPG-141D FPU (v5.1) Programming Software from HamFiles<br />
70
- How to install Kenwood KPG-141D FPU (v5.1) Programming Software on Windows 10<br />
71
- How to update Kenwood KPG-141D FPU (v5.1) Programming Software to the latest version<br />
72
- How to uninstall Kenwood KPG-141D FPU (v5.1) Programming Software from your computer<br />
73
- How to backup and restore your data with Kenwood KPG-141D FPU (v5.1) Programming Software<br />
74
- What is the difference between Kenwood KPG-141D FPU and other programming software<br />
75
- What are the advantages of using Kenwood KPG-141D FPU over other programming software<br />
76
- What are the limitations of using Kenwood KPG-141D FPU for programming radios<br />
77
- What are the best practices for using Kenwood KPG-141D FPU safely and securely<br />
78
- What are the common errors and issues with Kenwood KPG-141D FPU and how to fix them<br />
79
- Where can I find more information about Kenwood KPG-141D FPU and its features<br />
80
- Where can I buy or order Kenwood KPG</p>
81
- <h3>Alternative 2: Use other free or low-cost programming software that are compatible with Kenwood radios, such as CHIRP or RT Systems</h3>
82
- <p>Another alternative to Crack Kpg 141d is to use other free or low-cost programming software that are compatible with Kenwood radios, such as CHIRP or RT Systems. These are third-party software that can program various models and brands of radios, including Kenwood radios. They are usually easy to use and have many features and options. They are also updated regularly and have a large community of users who can provide help and feedback. However, the downside of this alternative is that they may not support all the functions and options of your Kenwood radio. They may also have some bugs or errors that can affect your programming process.</p>
83
- <h1>Conclusion</h1>
84
- <p>In conclusion, Crack Kpg 141d is a software that allows you to program your Kenwood radio without paying for the official Kenwood programming software. It has some benefits and risks that you should weigh before deciding whether to use it or not. It also has some alternatives that you can consider if you are looking for other options. We hope this article has helped you understand what Crack Kpg 141d is and how to use it. However, we do not endorse or recommend using Crack Kpg 141d for any purpose. We advise you to use it at your own risk and responsibility.</p>
85
- <h2>FAQs</h2>
86
- <ul>
87
- <li><b>What is Crack Kpg 141d?</b><br>
88
- Crack Kpg 141d is a software that allows you to program your Kenwood radio without paying for the official Kenwood programming software.</li>
89
- <li><b>Where can I download Crack Kpg 141d?</b><br>
90
- You can download Crack Kpg 141d from various websites that offer it for free, but some of them may contain viruses, malware, or spyware that can harm your computer or radio. One of the most trusted sources for downloading Crack Kpg 141d is HamFiles, a website that provides various radio programming software, patches, cracks, and serial numbers.</li>
91
- <li><b>How do I install Crack Kpg 141d?</b><br>
92
- You need to extract the zip file and run the setup file. Then follow the instructions on the screen and enter the serial number that is provided in the folder "Serial Number".</li>
93
- <li><b>How do I use Crack Kpg 141d?</b><br>
94
- You need to connect your Kenwood radio to your computer using a USB cable or a programming cable. Then launch Crack Kpg 141d and select your radio model and frequency range. Then customize the settings and features of your radio according to your preferences. Then write the data to your radio and verify that it works correctly.</li>
95
- <li><b>What are the benefits and risks of using Crack Kpg 141d?</b><br>
96
- The benefits of using Crack Kpg 141d are that you can access all the functions and options of your Kenwood radio without paying for the official software and that you can program multiple radios with different models and frequencies using one software. The risks of using Crack Kpg 141d are that you may violate the license agreement and warranty of your Kenwood radio by using unauthorized software and that you may damage your radio or computer by using corrupted or infected files.</li>
97
- </ul>
98
- </p> 0a6ba089eb<br />
99
- <br />
100
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Bus Driver Simulator 2019 DLC Unlocker-PLAZA.md DELETED
@@ -1,125 +0,0 @@
1
- <br />
2
- <h1>Bus Driver Simulator 2019 DLC Unlocker-PLAZA: How to Enjoy the Full Experience of Driving Different Buses</h1>
3
-
4
- <p>Bus Driver Simulator 2019 is a game that lets you experience the life of a passenger bus driver. You can drive different buses from different countries and eras, such as Soviet, European, American, and Hungarian buses. You can also explore real cities and their suburbs, such as Moscow, Serpukhov, Cologne, Paris, and Budapest. You can customize your bus with various skins, stickers, horns, and accessories. You can complete various scenarios with pre-set conditions or build your own career in free mode.</p>
5
- <h2>Bus Driver Simulator 2019 DLC Unlocker-PLAZA</h2><br /><p><b><b>Download</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://imgfil.com/2uy1TF">https://imgfil.com/2uy1TF</a></b></p><br /><br />
6
-
7
- <p>However, if you want to enjoy the full experience of the game, you may need to buy some DLCs that add more content and features to the game. These DLCs are:</p>
8
-
9
- <ul>
10
- <li>Bus Driver Simulator 2019 - Hungarian Legend: This DLC adds a legendary Hungarian bus Ikarus 250.93 to the game.</li>
11
- <li>Bus Driver Simulator 2019 - Soviet Legend: This DLC adds a legendary Soviet bus LAZ-695 to the game.</li>
12
- <li>Bus Driver Simulator 2019 - European Minibus: This DLC adds a modern European minibus Mercedes-Benz Sprinter to the game.</li>
13
- <li>Bus Driver Simulator 2019 - Old Legend: This DLC adds an old American school bus International Harvester Loadstar to the game.</li>
14
- </ul>
15
-
16
- <p>But what if you don't want to spend money on these DLCs? What if you want to get them for free? Well, there is a solution for this: Bus Driver Simulator 2019 DLC Unlocker-PLAZA.</p>
17
-
18
- <h2>What is Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
19
-
20
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a file that you can download and install on your PC to unlock all the DLCs for the game. This way, you can play with all the buses, maps, and options that the game has to offer, without paying anything.</p>
21
-
22
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is also compatible with the latest version of the game, v3.9.68, which includes many updates and improvements to make the game more realistic and enjoyable.</p>
23
-
24
- <h2>Where to Download Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
25
-
26
- <p>You can find many websites that offer Bus Driver Simulator 2019 DLC Unlocker-PLAZA for free, but you need to be careful and choose a reliable and trustworthy source. Some websites may contain viruses or malware that can harm your PC or steal your personal information.</p>
27
- <p></p>
28
-
29
- <p>One of the best websites to download Bus Driver Simulator 2019 DLC Unlocker-PLAZA is Skidrow & Reloaded Games. This website has been around for a long time and provides safe and working torrent files for many PC games. You can download Bus Driver Simulator 2019 DLC Unlocker-PLAZA from this link: https://www.skidrowreloaded.com/bus-driver-simulator-2019-plaza/</p>
30
-
31
- <h2>How to Install Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
32
-
33
- <p>Installing Bus Driver Simulator 2019 DLC Unlocker-PLAZA is very easy and simple. Here are the steps you need to follow:</p>
34
-
35
- <ol>
36
- <li>Download Bus Driver Simulator 2019 DLC Unlocker-PLAZA from Skidrow & Reloaded Games or another trusted website.</li>
37
- <li>Extract the file using a program like WinRAR or 7-Zip. You will get a folder called PLAZA.</li>
38
- <li>Copy the folder and paste it into your Bus Driver Simulator 2019 installation folder. This is usually located in C:\Program Files (x86)\Steam\steamapps\common\Bus Driver Simulator 2019\ or C:\Program Files\Steam\steamapps\common\Bus Driver Simulator 2019\ depending on your system.</li>
39
- <li>Merge the folder with the existing one. You may need to confirm this action or provide administrator permission.</li>
40
- <li>Run the game as usual by launching it from Steam or using a shortcut on your desktop.</li>
41
- </ol>
42
-
43
- <p>Congratulations! You have successfully installed Bus Driver Simulator 2019 DLC Unlocker-PLAZA and you can now play with all the DLCs for free.</p>
44
-
45
- <h2>Why Should You Use Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
46
-
47
- <p>There are many benefits of using Bus Driver Simulator 2019 DLC Unlocker-PLAZA to play the game. Here are some of them:</p>
48
-
49
- <ul>
50
- <li>You can save money by not having to buy the DLCs separately.</li>
51
- <li>You can have more fun and variety by playing with different buses, maps, and options.</li>
52
- <li>You can enjoy the game with better performance and stability, as the unlocker eliminates some bugs and errors that may occur with the DLCs.</li>
53
- <li>You can use the unlocker with any version of the game, including v3.9.68, which is the most updated and improved one.</li>
54
- </ul>
55
-
56
- <h2>Conclusion</h2>
57
-
58
- <p>Bus Driver Simulator 2019 is a game that deserves to be played by every simulation fan who loves driving games. However, if you want to enjoy the full experience of the game, you may need to get some DLCs that add more content and features to the game. That's why Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a great solution for this problem.</p>
59
-
60
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a file that you can download and install on your PC to unlock all the DLCs for the game. It is easy to use, compatible with any version of the game, and works with any patch or mod that you want to use. It also improves the performance and stability of the game, making it more enjoyable and realistic.</p>
61
-
62
- <p>If you want to play Bus Driver Simulator 2019 with all the content and features for free, download Bus Driver Simulator 2019 DLC Unlocker-PLAZA today and experience the best driving simulation game ever made.</p>
63
- <h2>What are the Features of Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
64
-
65
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is not just a simple file that unlocks the DLCs for the game. It also adds some features and options that make the game more enjoyable and realistic. Here are some of them:</p>
66
-
67
- <ul>
68
- <li>You can choose from different weather conditions and time of day, such as sunny, cloudy, rainy, snowy, day, night, etc.</li>
69
- <li>You can adjust the traffic density and difficulty level, such as easy, normal, hard, etc.</li>
70
- <li>You can enable or disable the realistic physics and damage system, such as collisions, breakdowns, tire wear, etc.</li>
71
- <li>You can enable or disable the realistic passenger behavior and feedback system, such as boarding, alighting, paying, complaining, etc.</li>
72
- <li>You can enable or disable the realistic traffic rules and regulations system, such as speed limits, traffic lights, signs, fines, etc.</li>
73
- </ul>
74
-
75
- <h2>How to Play Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
76
-
77
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that is easy to play but hard to master. You need to have good driving skills and knowledge of the traffic rules and regulations. You also need to have good management skills and customer service skills. Here are some tips and tricks to help you play the game:</p>
78
-
79
- <ol>
80
- <li>Choose a bus that suits your style and preference. You can choose from different buses with different characteristics, such as speed, acceleration, handling, fuel consumption, capacity, etc.</li>
81
- <li>Choose a map that suits your mood and challenge. You can choose from different maps with different locations, routes, landmarks, scenery, etc.</li>
82
- <li>Choose a mode that suits your goal and interest. You can choose from different modes with different objectives, conditions, rewards, etc. You can play scenarios with pre-set goals and situations. You can play free mode with your own rules and settings. You can also play online multiplayer mode with other players or friends.</li>
83
- <li>Drive carefully and responsibly. You need to follow the traffic rules and regulations. You need to avoid accidents and damages. You need to respect other road users and pedestrians. You need to drive smoothly and safely.</li>
84
- <li>Manage your bus and passengers well. You need to check your bus condition and fuel level. You need to service your bus regularly and repair it when needed. You need to pick up and drop off passengers at designated stops. You need to collect fares and give change. You need to satisfy your passengers and deal with their complaints.</li>
85
- </ol>
86
-
87
- <h2>Conclusion</h2>
88
-
89
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that will give you a lot of fun and satisfaction. It is a game that will let you experience the life of a passenger bus driver. It is a game that will let you drive different buses from different countries and eras. It is a game that will let you explore real cities and their suburbs. It is a game that will let you customize your bus with various skins, stickers, horns, and accessories. It is a game that will let you complete various scenarios with pre-set conditions or build your own career in free mode.</p>
90
-
91
- <p>If you want to play this amazing game without a CD, download Bus Driver Simulator 2019 DLC Unlocker-PLAZA today and enjoy the best driving simulation game ever made.</p>
92
- <h2>What are the Reviews of Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
93
-
94
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that has received positive reviews from many players and critics. Here are some of the reviews that you can find online:</p>
95
-
96
- <ul>
97
- <li>"This game is amazing. I love driving different buses and exploring different cities. The graphics are great and the physics are realistic. The DLCs add more content and variety to the game. The unlocker works perfectly and saves me money. I highly recommend this game to anyone who likes driving games." - Steam user</li>
98
- <li>"This game is a hidden gem. It is one of the best driving simulation games I have ever played. The game is very immersive and challenging. The DLCs are awesome and add more buses, maps, and options to the game. The unlocker is easy to use and compatible with any version of the game. This game is a must-have for any simulation fan." - Skidrow & Reloaded Games user</li>
99
- <li>"This game is a lot of fun and satisfaction. It is a game that lets you experience the life of a passenger bus driver. The game is very realistic and enjoyable. The DLCs are worth it and add more content and features to the game. The unlocker is a great solution for this problem. It unlocks all the DLCs for free and improves the performance and stability of the game. This game is a great value for money." - JJ Riley user</li>
100
- </ul>
101
-
102
- <h2>What are the Alternatives to Bus Driver Simulator 2019 DLC Unlocker-PLAZA?</h2>
103
-
104
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that may not be suitable for everyone. Some people may not like driving games or simulation games. Some people may not like the idea of using an unlocker to get free DLCs. Some people may have technical issues or compatibility problems with the game or the unlocker. If you are one of these people, you may want to look for some alternatives to Bus Driver Simulator 2019 DLC Unlocker-PLAZA. Here are some of them:</p>
105
-
106
- <ul>
107
- <li>Bus Simulator 18: This is another bus driving simulation game that lets you drive different buses in a huge open world map. You can also create your own routes, customize your buses, play online multiplayer mode, and use mods.</li>
108
- <li>Omsi 2: This is another bus driving simulation game that lets you drive different buses from different eras in realistic scenarios. You can also create your own maps, vehicles, scripts, and sounds.</li>
109
- <li>Fernbus Simulator: This is another bus driving simulation game that lets you drive modern coaches across Germany and Europe. You can also experience realistic traffic, weather, passengers, damage, and accidents.</li>
110
- </ul>
111
-
112
- <h2>Conclusion</h2>
113
-
114
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that will give you a lot of fun and satisfaction. It is a game that will let you experience the life of a passenger bus driver. It is a game that will let you drive different buses from different countries and eras. It is a game that will let you explore real cities and their suburbs. It is a game that will let you customize your bus with various skins, stickers, horns, and accessories. It is a game that will let you complete various scenarios with pre-set conditions or build your own career in free mode.</p>
115
-
116
- <p>If you want to play this amazing game without a CD, download Bus Driver Simulator 2019 DLC Unlocker-PLAZA today and enjoy the best driving simulation game ever made.</p>
117
- <h2>Conclusion</h2>
118
-
119
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a game that deserves to be played by every simulation fan who loves driving games. However, if you want to enjoy the full experience of the game, you may need to get some DLCs that add more content and features to the game. That's why Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a great solution for this problem.</p>
120
-
121
- <p>Bus Driver Simulator 2019 DLC Unlocker-PLAZA is a file that you can download and install on your PC to unlock all the DLCs for the game. It is easy to use, compatible with any version of the game, and works with any patch or mod that you want to use. It also improves the performance and stability of the game, making it more enjoyable and realistic.</p>
122
-
123
- <p>If you want to play Bus Driver Simulator 2019 with all the content and features for free, download Bus Driver Simulator 2019 DLC Unlocker-PLAZA today and experience the best driving simulation game ever made.</p> 3cee63e6c2<br />
124
- <br />
125
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1line/AutoGPT/CONTRIBUTING.md DELETED
@@ -1,105 +0,0 @@
1
- # Contributing to ProjectName
2
-
3
- First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
4
-
5
- This document provides guidelines and best practices to help you contribute effectively.
6
-
7
- ## Table of Contents
8
-
9
- - [Code of Conduct](#code-of-conduct)
10
- - [Getting Started](#getting-started)
11
- - [How to Contribute](#how-to-contribute)
12
- - [Reporting Bugs](#reporting-bugs)
13
- - [Suggesting Enhancements](#suggesting-enhancements)
14
- - [Submitting Pull Requests](#submitting-pull-requests)
15
- - [Style Guidelines](#style-guidelines)
16
- - [Code Formatting](#code-formatting)
17
- - [Pre-Commit Hooks](#pre-commit-hooks)
18
-
19
- ## Code of Conduct
20
-
21
- By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.
22
-
23
- ## 📢 A Quick Word
24
- Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.
25
-
26
- However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
27
- > ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates!
28
-
29
- ## Getting Started
30
-
31
- To start contributing, follow these steps:
32
-
33
- 1. Fork the repository and clone your fork.
34
- 2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
35
- 3. Make your changes in the new branch.
36
- 4. Test your changes thoroughly.
37
- 5. Commit and push your changes to your fork.
38
- 6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
39
-
40
- ## How to Contribute
41
-
42
- ### Reporting Bugs
43
-
44
- If you find a bug in the project, please create an issue on GitHub with the following information:
45
-
46
- - A clear, descriptive title for the issue.
47
- - A description of the problem, including steps to reproduce the issue.
48
- - Any relevant logs, screenshots, or other supporting information.
49
-
50
- ### Suggesting Enhancements
51
-
52
- If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:
53
-
54
- - A clear, descriptive title for the issue.
55
- - A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
56
- - Any relevant examples, mockups, or supporting information.
57
-
58
- ### Submitting Pull Requests
59
-
60
- When submitting a pull request, please ensure that your changes meet the following criteria:
61
-
62
- - Your pull request should be atomic and focus on a single change.
63
- - Your pull request should include tests for your change.
64
- - You should have thoroughly tested your changes with multiple different prompts.
65
- - You should have considered potential risks and mitigations for your changes.
66
- - You should have documented your changes clearly and comprehensively.
67
- - You should not include any unrelated or "extra" small tweaks or changes.
68
-
69
- ## Style Guidelines
70
-
71
- ### Code Formatting
72
-
73
- We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:
74
-
75
- ```bash
76
- pip install black
77
- ```
78
-
79
- To format your code, run the following command in the project's root directory:
80
-
81
- ```bash
82
- black .
83
- ```
84
- ### Pre-Commit Hooks
85
- We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:
86
-
87
- Install the pre-commit package using pip:
88
- ```bash
89
- pip install pre-commit
90
- ```
91
-
92
- Run the following command in the project's root directory to install the pre-commit hooks:
93
- ```bash
94
- pre-commit install
95
- ```
96
-
97
- Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
98
-
99
- If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.
100
-
101
- Happy coding, and once again, thank you for your contributions!
102
-
103
- Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
104
-
105
- https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1line/AutoGPT/autogpt/commands/google_search.py DELETED
@@ -1,87 +0,0 @@
1
- """Google search command for Autogpt."""
2
- from __future__ import annotations
3
-
4
- import json
5
-
6
- from duckduckgo_search import ddg
7
-
8
- from autogpt.config import Config
9
-
10
- CFG = Config()
11
-
12
-
13
- def google_search(query: str, num_results: int = 8) -> str:
14
- """Return the results of a Google search
15
-
16
- Args:
17
- query (str): The search query.
18
- num_results (int): The number of results to return.
19
-
20
- Returns:
21
- str: The results of the search.
22
- """
23
- search_results = []
24
- if not query:
25
- return json.dumps(search_results)
26
-
27
- results = ddg(query, max_results=num_results)
28
- if not results:
29
- return json.dumps(search_results)
30
-
31
- for j in results:
32
- search_results.append(j)
33
-
34
- return json.dumps(search_results, ensure_ascii=False, indent=4)
35
-
36
-
37
- def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
38
- """Return the results of a Google search using the official Google API
39
-
40
- Args:
41
- query (str): The search query.
42
- num_results (int): The number of results to return.
43
-
44
- Returns:
45
- str: The results of the search.
46
- """
47
-
48
- from googleapiclient.discovery import build
49
- from googleapiclient.errors import HttpError
50
-
51
- try:
52
- # Get the Google API key and Custom Search Engine ID from the config file
53
- api_key = CFG.google_api_key
54
- custom_search_engine_id = CFG.custom_search_engine_id
55
-
56
- # Initialize the Custom Search API service
57
- service = build("customsearch", "v1", developerKey=api_key)
58
-
59
- # Send the search query and retrieve the results
60
- result = (
61
- service.cse()
62
- .list(q=query, cx=custom_search_engine_id, num=num_results)
63
- .execute()
64
- )
65
-
66
- # Extract the search result items from the response
67
- search_results = result.get("items", [])
68
-
69
- # Create a list of only the URLs from the search results
70
- search_results_links = [item["link"] for item in search_results]
71
-
72
- except HttpError as e:
73
- # Handle errors in the API call
74
- error_details = json.loads(e.content.decode())
75
-
76
- # Check if the error is related to an invalid or missing API key
77
- if error_details.get("error", {}).get(
78
- "code"
79
- ) == 403 and "invalid API key" in error_details.get("error", {}).get(
80
- "message", ""
81
- ):
82
- return "Error: The provided Google API key is invalid or missing."
83
- else:
84
- return f"Error: {e}"
85
-
86
- # Return the list of search result URLs
87
- return search_results_links
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download APK Real Boxing and Experience the Ultimate Fighting Game on Android.md DELETED
@@ -1,132 +0,0 @@
1
- <br />
2
- <h1>APK Real Boxing: A Review of the Best Fighting Game on Android</h1>
3
- <p>If you are a fan of fighting games and boxing simulators, you might have heard of APK Real Boxing, one of the most popular and realistic games on Google Play. But what is APK Real Boxing, and why is it so awesome? In this article, we will give you a comprehensive review of this game, its features, benefits, and how to play it like a pro. Read on to find out more!</p>
4
- <h2>apk real boxing</h2><br /><p><b><b>DOWNLOAD</b> &#127383; <a href="https://jinyurl.com/2uNUmi">https://jinyurl.com/2uNUmi</a></b></p><br /><br />
5
- <h2>What is APK Real Boxing?</h2>
6
- <p>APK Real Boxing is a fighting game and boxing simulator developed by Vivid Games S.A., a studio behind well-known online fighting games on mobile. It is based on the original KO boxing simulator that won the hearts of punching games fans worldwide. It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience. You can download APK Real Boxing from various sources online, such as [APKCombo](^1^), [Google Play](^2^), or [Vivid Games](^3^).</p>
7
- <h3>The features and benefits of APK Real Boxing</h3>
8
- <p>APK Real Boxing has many features and benefits that make it stand out from other fighting games on Android. Here are some of them:</p>
9
- <ul>
10
- <li>It has a full-blown career mode for your boxer, where you can knock out over 30 unique boxers with their own adaptive boxing styles and become the best boxer in the world.</li>
11
- <li>It allows you to customize your own boxer with dozens of unlockable hairstyles, tattoos, and gear. You can also train in various mini-games to boost your speed, strength, and stamina.</li>
12
- <li>It has a variety of punches and combos that you can use in the knockout boxing game. You can also tip the odds in your favor with power-ups and feel every jab, hook, and KO uppercut thanks to the intuitive controls.</li>
13
- <li>It has exhilarating bonus modes where you can test your boxing skills against boxeo bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer.</li>
14
- <li>It has amazing graphics and sound that immerse you in the adrenaline-pumping world of boxing. You can also win prizes with Daily Rewards and Daily Spin.</li>
15
- </ul>
16
- <h3>How to download and install APK Real Boxing</h3>
17
- <p>To download and install APK Real Boxing, you need to follow these simple steps:</p>
18
- <ol>
19
- <li>Choose a reliable source for downloading APK Real Boxing, such as [APKCombo](^1^), [Google Play](^2^), or [Vivid Games](^3^).</li>
20
- <li>Click on the download button or link and wait for the file to be downloaded on your device.</li>
21
- <li>Once the file is downloaded, locate it in your file manager and tap on it to start the installation process.</li>
22
- <li>Follow the instructions on the screen and grant the necessary permissions to install the app.</li>
23
- <li>After the installation is complete, launch the app and enjoy playing APK Real Boxing!</li>
24
- </ol>
25
- <h2>Why APK Real Boxing is the ultimate fighting game</h2>
26
- <p>Now that you know what APK Real Boxing is and how to get it, you might be wondering why it is the ultimate fighting game on Android. Well, there are many reasons why this game is so awesome, but we will focus on three main aspects: the knockout gameplay, the comprehensive career mode, and the exhilarating bonus modes.</p>
27
- <h3>The knockout gameplay and intuitive controls</h3>
28
- <p>The knockout gameplay of APK Real Boxing is one of its best features. It allows you to fight using a variety of punches and combos in a realistic boxing boxeo bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer. You can also play in Real-Time Multiplayer mode and join other boxers in weekly tournaments and special events. The amazing graphics of APK Real Boxing are another reason why this game is so awesome. It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience. You can feel every jab, hook, and KO uppercut thanks to the realistic physics and sound effects. You can also admire the detailed and lifelike models of the boxers and the environments.</p>
29
- <h2>How to play APK Real Boxing like a pro</h2>
30
- <p>Now that you know why APK Real Boxing is the ultimate fighting game on Android, you might want to learn how to play it like a pro. Well, there are many tips and tricks that can help you improve your boxing skills and win more fights. Here are some of them:</p>
31
- <p>real boxing apk mod<br />
32
- real boxing apk download<br />
33
- real boxing apk obb<br />
34
- real boxing apk data<br />
35
- real boxing apk offline<br />
36
- real boxing apk android<br />
37
- real boxing apk latest version<br />
38
- real boxing apk revdl<br />
39
- real boxing apk hack<br />
40
- real boxing apk unlimited money<br />
41
- real boxing apk free download<br />
42
- real boxing apk full version<br />
43
- real boxing apk + sd data<br />
44
- real boxing apk pure<br />
45
- real boxing apk mirror<br />
46
- real boxing apk game<br />
47
- real boxing apk rexdl<br />
48
- real boxing apk for pc<br />
49
- real boxing apk uptodown<br />
50
- real boxing apk andropalace<br />
51
- real boxing apk mob.org<br />
52
- real boxing apk highly compressed<br />
53
- real boxing apk old version<br />
54
- real boxing apk 2.9.0<br />
55
- real boxing apk 2.4.2<br />
56
- real boxing apk 2.6.1<br />
57
- real boxing apk 2.7.6<br />
58
- real boxing apk 2.8.0<br />
59
- real boxing apk 2.5.0<br />
60
- real boxing apk 2.3.3<br />
61
- real boxing apkpure download<br />
62
- real boxing apkmirror download<br />
63
- real boxing apkpure mod<br />
64
- real boxing apkmirror mod<br />
65
- real boxing apkpure hack<br />
66
- real boxing apkmirror hack<br />
67
- real boxing apkpure offline<br />
68
- real boxing apkmirror offline<br />
69
- real boxing apkpure latest version download<br />
70
- real boxing apkmirror latest version download</p>
71
- <h3>The basic moves and combos</h3>
72
- <p>The basic moves and combos of APK Real Boxing are essential for any boxer. You need to master them to be able to fight effectively and efficiently. Here are some of the basic moves and combos you should know:</p>
73
- <ul>
74
- <li>Jab: A quick and straight punch that can be used to keep your opponent at bay or set up other punches.</li>
75
- <li>Hook: A powerful punch that can be thrown from either side and can cause a lot of damage if it lands on the chin or the temple.</li>
76
- <li>Uppercut: A devastating punch that can be thrown from below and can knock out your opponent if it hits the jaw or the nose.</li>
77
- <li>Body shot: A punch that targets the torso or the ribs of your opponent and can weaken their stamina and defense.</li>
78
- <li>Combo: A series of punches that can be chained together to create a more effective attack. For example, you can use a jab-hook-uppercut combo or a body shot-hook-jab combo.</li>
79
- </ul>
80
- <h3>The power-ups and strategies</h3>
81
- <p>The power-ups and strategies of APK Real Boxing are important for any boxer. You need to use them wisely to gain an advantage over your opponent or turn the tide of the fight. Here are some of the power-ups and strategies you should use:</p>
82
- <ul>
83
- <li>Health: A power-up that restores some of your health and can help you survive longer in the fight.</li>
84
- <li>Stamina: A power-up that restores some of your stamina and can help you throw more punches and move faster in the fight.</li>
85
- <li>Shield: A power-up that protects you from incoming punches for a short time and can help you avoid damage in the fight.</li>
86
- <li>Fury: A power-up that increases your damage output for a short time and can help you deal more damage in the fight.</li>
87
- <li>Dodge: A strategy that allows you to evade incoming punches by swiping left or right on the screen and can help you avoid damage in the fight.</li>
88
- <li>Block: A strategy that allows you to defend yourself from incoming punches by holding down the block button on the screen and can help you reduce damage in the fight.</li>
89
- <li>Counter: A strategy that allows you to retaliate after dodging or blocking an incoming punch by tapping on the screen and can help you deal more damage in the fight.</li>
90
- </ul>
91
- <h3>The tips and tricks from the experts</h3>
92
- <p>The tips and tricks from the experts of APK Real Boxing are useful for any boxer. You need to follow them to improve your boxing skills and win more fights. Here are some of the tips and tricks from the experts:</p>
93
- <ul>
94
- <li>Train regularly in the mini-games to boost your speed, strength, and stamina.</li>
95
- <li>Customize your boxer with gear that suits your style and preferences.</li>
96
- <li>Fight against different boxers with different styles and learn from their strengths and weaknesses.</li>
97
- <li>Use different punches and combos depending on the situation and your opponent's behavior.</li>
98
- <li>Use power-ups strategically and don't waste them unnecessarily.</li>
99
- <li>Dodge, block, and counter effectively and don't let your opponent hit you too much.</li>
100
- <li>Be aggressive but not reckless and don't leave yourself open for attacks.</li>
101
- <li>Be patient but not passive and don't let your opponent dictate the pace of the fight.</li>
102
- </ul>
103
- <h2>Conclusion</h2>
104
- <p>In conclusion, APK Real Boxing is a fighting game and boxing simulator that offers a realistic and immersive boxing experience on Android. It has many features and benefits that make it stand out from other fighting games on Android, such as the knockout gameplay, the comprehensive career mode, the exhilarating bonus modes, and the amazing graphics. It also has intuitive controls and customization options that make it easy to play and enjoy. It is a game that will challenge your boxing skills and entertain you for hours. If you are looking for a fighting game and boxing simulator that will give you a realistic and immersive boxing experience on Android, you should definitely try APK Real Boxing. You will not regret it! <h3>Summary of the main points</h3>
105
- <p>To summarize, here are the main points of this article:</p>
106
- <ul>
107
- <li>APK Real Boxing is a fighting game and boxing simulator developed by Vivid Games S.A., a studio behind well-known online fighting games on mobile.</li>
108
- <li>It uses Unreal Engine-powered graphics and realistic motion-captured animations to create a stunning and immersive boxing experience.</li>
109
- <li>It has a full-blown career mode for your boxer, where you can knock out over 30 unique boxers with their own adaptive boxing styles and become the best boxer in the world.</li>
110
- <li>It allows you to customize your own boxer with dozens of unlockable hairstyles, tattoos, and gear. You can also train in various mini-games to boost your speed, strength, and stamina.</li>
111
- <li>It has a variety of punches and combos that you can use in the knockout boxing game. You can also tip the odds in your favor with power-ups and feel every jab, hook, and KO uppercut thanks to the intuitive controls.</li>
112
- <li>It has exhilarating bonus modes where you can test your boxing skills against boxeo bosses in Arcade Mode or take on the Underground Tournament to unlock new gear for your boxer.</li>
113
- <li>It has many tips and tricks that can help you improve your boxing skills and win more fights.</li>
114
- </ul>
115
- <h3>Call to action and recommendation</h3>
116
- <p>If you are interested in APK Real Boxing and want to download it, you can do so from various sources online, such as [APKCombo], [Google Play], or [Vivid Games]. You can also visit the official website of APK Real Boxing to learn more about the game and its features. We highly recommend APK Real Boxing to anyone who loves fighting games and boxing simulators. It is a game that will keep you hooked and entertained for hours. So what are you waiting for? Download APK Real Boxing today and start your boxing career!</p>
117
- <h2>FAQs</h2>
118
- <p>Here are some of the frequently asked questions about APK Real Boxing:</p>
119
- <ol>
120
- <li><b>Is APK Real Boxing free to play?</b><br>
121
- Yes, APK Real Boxing is free to play. However, it contains in-app purchases that can enhance your gaming experience.</li>
122
- <li><b>Is APK Real Boxing safe to download?</b><br>
123
- Yes, APK Real Boxing is safe to download. However, you should always download it from reliable sources, such as [APKCombo], [Google Play], or [Vivid Games].</li>
124
- <li><b>Is APK Real Boxing compatible with my device?</b><br>
125
- APK Real Boxing is compatible with most Android devices that have Android 4.1 or higher. However, some devices may have performance issues or bugs due to different specifications.</li>
126
- <li><b>How can I contact the developers of APK Real Boxing?</b><br>
127
- You can contact the developers of APK Real Boxing by visiting their official website or their social media pages. You can also send them an email at [email protected].</li>
128
- <li><b>How can I give feedback or report a problem with APK Real Boxing?</b><br>
129
- You can give feedback or report a problem with APK Real Boxing by using the in-game feedback option or by sending an email to [email protected].</li>
130
- </ol></p> 401be4b1e0<br />
131
- <br />
132
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/22h/vintedois-diffusion-v0-2/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Vintedois Diffusion V0 2
3
- emoji: 📚
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/docs/eval.md DELETED
@@ -1,31 +0,0 @@
1
- ## Eval on ICCV2021-MFR
2
-
3
- coming soon.
4
-
5
-
6
- ## Eval IJBC
7
- You can eval ijbc with pytorch or onnx.
8
-
9
-
10
- 1. Eval IJBC With Onnx
11
- ```shell
12
- CUDA_VISIBLE_DEVICES=0 python onnx_ijbc.py --model-root ms1mv3_arcface_r50 --image-path IJB_release/IJBC --result-dir ms1mv3_arcface_r50
13
- ```
14
-
15
- 2. Eval IJBC With Pytorch
16
- ```shell
17
- CUDA_VISIBLE_DEVICES=0,1 python eval_ijbc.py \
18
- --model-prefix ms1mv3_arcface_r50/backbone.pth \
19
- --image-path IJB_release/IJBC \
20
- --result-dir ms1mv3_arcface_r50 \
21
- --batch-size 128 \
22
- --job ms1mv3_arcface_r50 \
23
- --target IJBC \
24
- --network iresnet50
25
- ```
26
-
27
- ## Inference
28
-
29
- ```shell
30
- python inference.py --weight ms1mv3_arcface_r50/backbone.pth --network r50
31
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/infer/modules/ipex/hijacks.py DELETED
@@ -1,196 +0,0 @@
1
- import contextlib
2
- import importlib
3
- import torch
4
- import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
5
-
6
- # pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return
7
-
8
- class CondFunc: # pylint: disable=missing-class-docstring
9
- def __new__(cls, orig_func, sub_func, cond_func):
10
- self = super(CondFunc, cls).__new__(cls)
11
- if isinstance(orig_func, str):
12
- func_path = orig_func.split('.')
13
- for i in range(len(func_path)-1, -1, -1):
14
- try:
15
- resolved_obj = importlib.import_module('.'.join(func_path[:i]))
16
- break
17
- except ImportError:
18
- pass
19
- for attr_name in func_path[i:-1]:
20
- resolved_obj = getattr(resolved_obj, attr_name)
21
- orig_func = getattr(resolved_obj, func_path[-1])
22
- setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
23
- self.__init__(orig_func, sub_func, cond_func)
24
- return lambda *args, **kwargs: self(*args, **kwargs)
25
- def __init__(self, orig_func, sub_func, cond_func):
26
- self.__orig_func = orig_func
27
- self.__sub_func = sub_func
28
- self.__cond_func = cond_func
29
- def __call__(self, *args, **kwargs):
30
- if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
31
- return self.__sub_func(self.__orig_func, *args, **kwargs)
32
- else:
33
- return self.__orig_func(*args, **kwargs)
34
-
35
- _utils = torch.utils.data._utils
36
- def _shutdown_workers(self):
37
- if torch.utils.data._utils is None or torch.utils.data._utils.python_exit_status is True or torch.utils.data._utils.python_exit_status is None:
38
- return
39
- if hasattr(self, "_shutdown") and not self._shutdown:
40
- self._shutdown = True
41
- try:
42
- if hasattr(self, '_pin_memory_thread'):
43
- self._pin_memory_thread_done_event.set()
44
- self._worker_result_queue.put((None, None))
45
- self._pin_memory_thread.join()
46
- self._worker_result_queue.cancel_join_thread()
47
- self._worker_result_queue.close()
48
- self._workers_done_event.set()
49
- for worker_id in range(len(self._workers)):
50
- if self._persistent_workers or self._workers_status[worker_id]:
51
- self._mark_worker_as_unavailable(worker_id, shutdown=True)
52
- for w in self._workers: # pylint: disable=invalid-name
53
- w.join(timeout=torch.utils.data._utils.MP_STATUS_CHECK_INTERVAL)
54
- for q in self._index_queues: # pylint: disable=invalid-name
55
- q.cancel_join_thread()
56
- q.close()
57
- finally:
58
- if self._worker_pids_set:
59
- torch.utils.data._utils.signal_handling._remove_worker_pids(id(self))
60
- self._worker_pids_set = False
61
- for w in self._workers: # pylint: disable=invalid-name
62
- if w.is_alive():
63
- w.terminate()
64
-
65
- class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods
66
- def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument
67
- if isinstance(device_ids, list) and len(device_ids) > 1:
68
- print("IPEX backend doesn't support DataParallel on multiple XPU devices")
69
- return module.to("xpu")
70
-
71
- def return_null_context(*args, **kwargs): # pylint: disable=unused-argument
72
- return contextlib.nullcontext()
73
-
74
- def check_device(device):
75
- return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int))
76
-
77
- def return_xpu(device):
78
- return f"xpu:{device[-1]}" if isinstance(device, str) and ":" in device else f"xpu:{device}" if isinstance(device, int) else torch.device("xpu") if isinstance(device, torch.device) else "xpu"
79
-
80
- def ipex_no_cuda(orig_func, *args, **kwargs):
81
- torch.cuda.is_available = lambda: False
82
- orig_func(*args, **kwargs)
83
- torch.cuda.is_available = torch.xpu.is_available
84
-
85
- original_autocast = torch.autocast
86
- def ipex_autocast(*args, **kwargs):
87
- if len(args) > 0 and args[0] == "cuda":
88
- return original_autocast("xpu", *args[1:], **kwargs)
89
- else:
90
- return original_autocast(*args, **kwargs)
91
-
92
- original_torch_cat = torch.cat
93
- def torch_cat(tensor, *args, **kwargs):
94
- if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype):
95
- return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs)
96
- else:
97
- return original_torch_cat(tensor, *args, **kwargs)
98
-
99
- original_interpolate = torch.nn.functional.interpolate
100
- def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments
101
- if antialias or align_corners is not None:
102
- return_device = tensor.device
103
- return_dtype = tensor.dtype
104
- return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode,
105
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype)
106
- else:
107
- return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode,
108
- align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias)
109
-
110
- original_linalg_solve = torch.linalg.solve
111
- def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name
112
- if A.device != torch.device("cpu") or B.device != torch.device("cpu"):
113
- return_device = A.device
114
- return original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device)
115
- else:
116
- return original_linalg_solve(A, B, *args, **kwargs)
117
-
118
- def ipex_hijacks():
119
- CondFunc('torch.Tensor.to',
120
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
121
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
122
- CondFunc('torch.Tensor.cuda',
123
- lambda orig_func, self, device=None, *args, **kwargs: orig_func(self, return_xpu(device), *args, **kwargs),
124
- lambda orig_func, self, device=None, *args, **kwargs: check_device(device))
125
- CondFunc('torch.empty',
126
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
127
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
128
- CondFunc('torch.load',
129
- lambda orig_func, *args, map_location=None, **kwargs: orig_func(*args, return_xpu(map_location), **kwargs),
130
- lambda orig_func, *args, map_location=None, **kwargs: map_location is None or check_device(map_location))
131
- CondFunc('torch.randn',
132
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
133
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
134
- CondFunc('torch.ones',
135
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
136
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
137
- CondFunc('torch.zeros',
138
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
139
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
140
- CondFunc('torch.tensor',
141
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
142
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
143
- CondFunc('torch.linspace',
144
- lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
145
- lambda orig_func, *args, device=None, **kwargs: check_device(device))
146
-
147
- CondFunc('torch.Generator',
148
- lambda orig_func, device=None: torch.xpu.Generator(device),
149
- lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")
150
-
151
- CondFunc('torch.batch_norm',
152
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
153
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
154
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
155
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
156
- CondFunc('torch.instance_norm',
157
- lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
158
- weight if weight is not None else torch.ones(input.size()[1], device=input.device),
159
- bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
160
- lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
161
-
162
- #Functions with dtype errors:
163
- CondFunc('torch.nn.modules.GroupNorm.forward',
164
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
165
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
166
- CondFunc('torch.nn.modules.linear.Linear.forward',
167
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
168
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
169
- CondFunc('torch.nn.modules.conv.Conv2d.forward',
170
- lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
171
- lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
172
- CondFunc('torch.nn.functional.layer_norm',
173
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
174
- orig_func(input.to(weight.data.dtype), normalized_shape, weight, *args, **kwargs),
175
- lambda orig_func, input, normalized_shape=None, weight=None, *args, **kwargs:
176
- weight is not None and input.dtype != weight.data.dtype)
177
-
178
- #Diffusers Float64 (ARC GPUs doesn't support double or Float64):
179
- if not torch.xpu.has_fp64_dtype():
180
- CondFunc('torch.from_numpy',
181
- lambda orig_func, ndarray: orig_func(ndarray.astype('float32')),
182
- lambda orig_func, ndarray: ndarray.dtype == float)
183
-
184
- #Broken functions when torch.cuda.is_available is True:
185
- CondFunc('torch.utils.data.dataloader._BaseDataLoaderIter.__init__',
186
- lambda orig_func, *args, **kwargs: ipex_no_cuda(orig_func, *args, **kwargs),
187
- lambda orig_func, *args, **kwargs: True)
188
-
189
- #Functions that make compile mad with CondFunc:
190
- torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers
191
- torch.nn.DataParallel = DummyDataParallel
192
- torch.autocast = ipex_autocast
193
- torch.cat = torch_cat
194
- torch.linalg.solve = linalg_solve
195
- torch.nn.functional.interpolate = interpolate
196
- torch.backends.cuda.sdp_kernel = return_null_context
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- title: Wizardlm-13b-v1.2.Q4_0.gguf
3
- colorFrom: purple
4
- colorTo: blue
5
- sdk: docker
6
- models:
7
- - WizardLM/WizardLM-13B-V1.2
8
- - TheBloke/WizardLM-13B-V1.2-GGUF
9
- tags:
10
- - inference api
11
- - openai-api compatible
12
- - llama-cpp-python
13
- - WizardLM
14
- - gguf
15
- pinned: false
16
- ---
17
-
18
- # WizardLM-13B-V1.2-GGUF
19
-
20
- Please refer to the [index.html](index.html) for more information.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/mono2binaural/src/utils.py DELETED
@@ -1,251 +0,0 @@
1
- """
2
- Copyright (c) Facebook, Inc. and its affiliates.
3
- All rights reserved.
4
-
5
- This source code is licensed under the license found in the
6
- LICENSE file in the root directory of this source tree.
7
- """
8
-
9
- import numpy as np
10
- import torch as th
11
- #import torchaudio as ta
12
-
13
-
14
- class Net(th.nn.Module):
15
-
16
- def __init__(self, model_name="network", use_cuda=True):
17
- super().__init__()
18
- self.use_cuda = use_cuda
19
- self.model_name = model_name
20
-
21
- def save(self, model_dir, suffix=''):
22
- '''
23
- save the network to model_dir/model_name.suffix.net
24
- :param model_dir: directory to save the model to
25
- :param suffix: suffix to append after model name
26
- '''
27
- if self.use_cuda:
28
- self.cpu()
29
-
30
- if suffix == "":
31
- fname = f"{model_dir}/{self.model_name}.net"
32
- else:
33
- fname = f"{model_dir}/{self.model_name}.{suffix}.net"
34
-
35
- th.save(self.state_dict(), fname)
36
- if self.use_cuda:
37
- self.cuda()
38
-
39
- def load_from_file(self, model_file):
40
- '''
41
- load network parameters from model_file
42
- :param model_file: file containing the model parameters
43
- '''
44
- if self.use_cuda:
45
- self.cpu()
46
-
47
- states = th.load(model_file)
48
- self.load_state_dict(states)
49
-
50
- if self.use_cuda:
51
- self.cuda()
52
- print(f"Loaded: {model_file}")
53
-
54
- def load(self, model_dir, suffix=''):
55
- '''
56
- load network parameters from model_dir/model_name.suffix.net
57
- :param model_dir: directory to load the model from
58
- :param suffix: suffix to append after model name
59
- '''
60
- if suffix == "":
61
- fname = f"{model_dir}/{self.model_name}.net"
62
- else:
63
- fname = f"{model_dir}/{self.model_name}.{suffix}.net"
64
- self.load_from_file(fname)
65
-
66
- def num_trainable_parameters(self):
67
- '''
68
- :return: the number of trainable parameters in the model
69
- '''
70
- return sum(p.numel() for p in self.parameters() if p.requires_grad)
71
-
72
-
73
- # class NewbobAdam(th.optim.Adam):
74
-
75
- # def __init__(self,
76
- # weights,
77
- # net,
78
- # artifacts_dir,
79
- # initial_learning_rate=0.001,
80
- # decay=0.5,
81
- # max_decay=0.01
82
- # ):
83
- # '''
84
- # Newbob learning rate scheduler
85
- # :param weights: weights to optimize
86
- # :param net: the network, must be an instance of type src.utils.Net
87
- # :param artifacts_dir: (str) directory to save/restore models to/from
88
- # :param initial_learning_rate: (float) initial learning rate
89
- # :param decay: (float) value to decrease learning rate by when loss doesn't improve further
90
- # :param max_decay: (float) maximum decay of learning rate
91
- # '''
92
- # super().__init__(weights, lr=initial_learning_rate)
93
- # self.last_epoch_loss = np.inf
94
- # self.total_decay = 1
95
- # self.net = net
96
- # self.decay = decay
97
- # self.max_decay = max_decay
98
- # self.artifacts_dir = artifacts_dir
99
- # # store initial state as backup
100
- # if decay < 1.0:
101
- # net.save(artifacts_dir, suffix="newbob")
102
-
103
- # def update_lr(self, loss):
104
- # '''
105
- # update the learning rate based on the current loss value and historic loss values
106
- # :param loss: the loss after the current iteration
107
- # '''
108
- # if loss > self.last_epoch_loss and self.decay < 1.0 and self.total_decay > self.max_decay:
109
- # self.total_decay = self.total_decay * self.decay
110
- # print(f"NewbobAdam: Decay learning rate (loss degraded from {self.last_epoch_loss} to {loss})."
111
- # f"Total decay: {self.total_decay}")
112
- # # restore previous network state
113
- # self.net.load(self.artifacts_dir, suffix="newbob")
114
- # # decrease learning rate
115
- # for param_group in self.param_groups:
116
- # param_group['lr'] = param_group['lr'] * self.decay
117
- # else:
118
- # self.last_epoch_loss = loss
119
- # # save last snapshot to restore it in case of lr decrease
120
- # if self.decay < 1.0 and self.total_decay > self.max_decay:
121
- # self.net.save(self.artifacts_dir, suffix="newbob")
122
-
123
-
124
- # class FourierTransform:
125
- # def __init__(self,
126
- # fft_bins=2048,
127
- # win_length_ms=40,
128
- # frame_rate_hz=100,
129
- # causal=False,
130
- # preemphasis=0.0,
131
- # sample_rate=48000,
132
- # normalized=False):
133
- # self.sample_rate = sample_rate
134
- # self.frame_rate_hz = frame_rate_hz
135
- # self.preemphasis = preemphasis
136
- # self.fft_bins = fft_bins
137
- # self.win_length = int(sample_rate * win_length_ms / 1000)
138
- # self.hop_length = int(sample_rate / frame_rate_hz)
139
- # self.causal = causal
140
- # self.normalized = normalized
141
- # if self.win_length > self.fft_bins:
142
- # print('FourierTransform Warning: fft_bins should be larger than win_length')
143
-
144
- # def _convert_format(self, data, expected_dims):
145
- # if not type(data) == th.Tensor:
146
- # data = th.Tensor(data)
147
- # if len(data.shape) < expected_dims:
148
- # data = data.unsqueeze(0)
149
- # if not len(data.shape) == expected_dims:
150
- # raise Exception(f"FourierTransform: data needs to be a Tensor with {expected_dims} dimensions but got shape {data.shape}")
151
- # return data
152
-
153
- # def _preemphasis(self, audio):
154
- # if self.preemphasis > 0:
155
- # return th.cat((audio[:, 0:1], audio[:, 1:] - self.preemphasis * audio[:, :-1]), dim=1)
156
- # return audio
157
-
158
- # def _revert_preemphasis(self, audio):
159
- # if self.preemphasis > 0:
160
- # for i in range(1, audio.shape[1]):
161
- # audio[:, i] = audio[:, i] + self.preemphasis * audio[:, i-1]
162
- # return audio
163
-
164
- # def _magphase(self, complex_stft):
165
- # mag, phase = ta.functional.magphase(complex_stft, 1.0)
166
- # return mag, phase
167
-
168
- # def stft(self, audio):
169
- # '''
170
- # wrapper around th.stft
171
- # audio: wave signal as th.Tensor
172
- # '''
173
- # hann = th.hann_window(self.win_length)
174
- # hann = hann.cuda() if audio.is_cuda else hann
175
- # spec = th.stft(audio, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length,
176
- # window=hann, center=not self.causal, normalized=self.normalized)
177
- # return spec.contiguous()
178
-
179
- # def complex_spectrogram(self, audio):
180
- # '''
181
- # audio: wave signal as th.Tensor
182
- # return: th.Tensor of size channels x frequencies x time_steps (channels x y_axis x x_axis)
183
- # '''
184
- # self._convert_format(audio, expected_dims=2)
185
- # audio = self._preemphasis(audio)
186
- # return self.stft(audio)
187
-
188
- # def magnitude_phase(self, audio):
189
- # '''
190
- # audio: wave signal as th.Tensor
191
- # return: tuple containing two th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
192
- # '''
193
- # stft = self.complex_spectrogram(audio)
194
- # return self._magphase(stft)
195
-
196
- # def mag_spectrogram(self, audio):
197
- # '''
198
- # audio: wave signal as th.Tensor
199
- # return: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
200
- # '''
201
- # return self.magnitude_phase(audio)[0]
202
-
203
- # def power_spectrogram(self, audio):
204
- # '''
205
- # audio: wave signal as th.Tensor
206
- # return: power spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
207
- # '''
208
- # return th.pow(self.mag_spectrogram(audio), 2.0)
209
-
210
- # def phase_spectrogram(self, audio):
211
- # '''
212
- # audio: wave signal as th.Tensor
213
- # return: phase spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
214
- # '''
215
- # return self.magnitude_phase(audio)[1]
216
-
217
- # def mel_spectrogram(self, audio, n_mels):
218
- # '''
219
- # audio: wave signal as th.Tensor
220
- # n_mels: number of bins used for mel scale warping
221
- # return: mel spectrogram as th.Tensor of size channels x n_mels x time_steps for magnitude and phase spectrum
222
- # '''
223
- # spec = self.power_spectrogram(audio)
224
- # mel_warping = ta.transforms.MelScale(n_mels, self.sample_rate)
225
- # return mel_warping(spec)
226
-
227
- # def complex_spec2wav(self, complex_spec, length):
228
- # '''
229
- # inverse stft
230
- # complex_spec: complex spectrum as th.Tensor of size channels x frequencies x time_steps x 2 (real part/imaginary part)
231
- # length: length of the audio to be reconstructed (in frames)
232
- # '''
233
- # complex_spec = self._convert_format(complex_spec, expected_dims=4)
234
- # hann = th.hann_window(self.win_length)
235
- # hann = hann.cuda() if complex_spec.is_cuda else hann
236
- # wav = ta.functional.istft(complex_spec, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length, window=hann, length=length, center=not self.causal)
237
- # wav = self._revert_preemphasis(wav)
238
- # return wav
239
-
240
- # def magphase2wav(self, mag_spec, phase_spec, length):
241
- # '''
242
- # reconstruction of wav signal from magnitude and phase spectrum
243
- # mag_spec: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
244
- # phase_spec: phase spectrum as th.Tensor of size channels x frequencies x time_steps
245
- # length: length of the audio to be reconstructed (in frames)
246
- # '''
247
- # mag_spec = self._convert_format(mag_spec, expected_dims=3)
248
- # phase_spec = self._convert_format(phase_spec, expected_dims=3)
249
- # complex_spec = th.stack([mag_spec * th.cos(phase_spec), mag_spec * th.sin(phase_spec)], dim=-1)
250
- # return self.complex_spec2wav(complex_spec, length)
251
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/conditioners.py DELETED
@@ -1,990 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from collections import defaultdict
8
- from copy import deepcopy
9
- from dataclasses import dataclass, field
10
- from itertools import chain
11
- import logging
12
- import math
13
- import random
14
- import re
15
- import typing as tp
16
- import warnings
17
-
18
- from einops import rearrange
19
- from num2words import num2words
20
- import spacy
21
- from transformers import T5EncoderModel, T5Tokenizer # type: ignore
22
- import torchaudio
23
- import torch
24
- from torch import nn
25
- from torch import Tensor
26
- import torch.nn.functional as F
27
- from torch.nn.utils.rnn import pad_sequence
28
-
29
- from .streaming import StreamingModule
30
- from .transformer import create_sin_embedding
31
- from ..data.audio_dataset import SegmentInfo
32
- from ..utils.autocast import TorchAutocast
33
- from ..utils.utils import hash_trick, length_to_mask, collate
34
-
35
-
36
- logger = logging.getLogger(__name__)
37
- TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist)
38
- ConditionType = tp.Tuple[Tensor, Tensor] # condition, mask
39
-
40
-
41
- class WavCondition(tp.NamedTuple):
42
- wav: Tensor
43
- length: Tensor
44
- path: tp.List[tp.Optional[str]] = []
45
-
46
-
47
- def nullify_condition(condition: ConditionType, dim: int = 1):
48
- """This function transforms an input condition to a null condition.
49
- The way it is done by converting it to a single zero vector similarly
50
- to how it is done inside WhiteSpaceTokenizer and NoopTokenizer.
51
-
52
- Args:
53
- condition (ConditionType): a tuple of condition and mask (tp.Tuple[Tensor, Tensor])
54
- dim (int): the dimension that will be truncated (should be the time dimension)
55
- WARNING!: dim should not be the batch dimension!
56
- Returns:
57
- ConditionType: a tuple of null condition and mask
58
- """
59
- assert dim != 0, "dim cannot be the batch dimension!"
60
- assert type(condition) == tuple and \
61
- type(condition[0]) == Tensor and \
62
- type(condition[1]) == Tensor, "'nullify_condition' got an unexpected input type!"
63
- cond, mask = condition
64
- B = cond.shape[0]
65
- last_dim = cond.dim() - 1
66
- out = cond.transpose(dim, last_dim)
67
- out = 0. * out[..., :1]
68
- out = out.transpose(dim, last_dim)
69
- mask = torch.zeros((B, 1), device=out.device).int()
70
- assert cond.dim() == out.dim()
71
- return out, mask
72
-
73
-
74
- def nullify_wav(wav: Tensor) -> WavCondition:
75
- """Create a nullified WavCondition from a wav tensor with appropriate shape.
76
-
77
- Args:
78
- wav (Tensor): tensor of shape [B, T]
79
- Returns:
80
- WavCondition: wav condition with nullified wav.
81
- """
82
- null_wav, _ = nullify_condition((wav, torch.zeros_like(wav)), dim=wav.dim() - 1)
83
- return WavCondition(
84
- wav=null_wav,
85
- length=torch.tensor([0] * wav.shape[0], device=wav.device),
86
- path=['null_wav'] * wav.shape[0]
87
- )
88
-
89
-
90
- @dataclass
91
- class ConditioningAttributes:
92
- text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict)
93
- wav: tp.Dict[str, WavCondition] = field(default_factory=dict)
94
-
95
- def __getitem__(self, item):
96
- return getattr(self, item)
97
-
98
- @property
99
- def text_attributes(self):
100
- return self.text.keys()
101
-
102
- @property
103
- def wav_attributes(self):
104
- return self.wav.keys()
105
-
106
- @property
107
- def attributes(self):
108
- return {"text": self.text_attributes, "wav": self.wav_attributes}
109
-
110
- def to_flat_dict(self):
111
- return {
112
- **{f"text.{k}": v for k, v in self.text.items()},
113
- **{f"wav.{k}": v for k, v in self.wav.items()},
114
- }
115
-
116
- @classmethod
117
- def from_flat_dict(cls, x):
118
- out = cls()
119
- for k, v in x.items():
120
- kind, att = k.split(".")
121
- out[kind][att] = v
122
- return out
123
-
124
-
125
- class SegmentWithAttributes(SegmentInfo):
126
- """Base class for all dataclasses that are used for conditioning.
127
- All child classes should implement `to_condition_attributes` that converts
128
- the existing attributes to a dataclass of type ConditioningAttributes.
129
- """
130
- def to_condition_attributes(self) -> ConditioningAttributes:
131
- raise NotImplementedError()
132
-
133
-
134
- class Tokenizer:
135
- """Base class for all tokenizers
136
- (in case we want to introduce more advances tokenizers in the future).
137
- """
138
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]:
139
- raise NotImplementedError()
140
-
141
-
142
- class WhiteSpaceTokenizer(Tokenizer):
143
- """This tokenizer should be used for natural language descriptions.
144
- For example:
145
- ["he didn't, know he's going home.", 'shorter sentence'] =>
146
- [[78, 62, 31, 4, 78, 25, 19, 34],
147
- [59, 77, 0, 0, 0, 0, 0, 0]]
148
- """
149
- PUNCTUATIONS = "?:!.,;"
150
-
151
- def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm",
152
- lemma: bool = True, stopwords: bool = True) -> None:
153
- self.n_bins = n_bins
154
- self.pad_idx = pad_idx
155
- self.lemma = lemma
156
- self.stopwords = stopwords
157
- try:
158
- self.nlp = spacy.load(language)
159
- except IOError:
160
- spacy.cli.download(language) # type: ignore
161
- self.nlp = spacy.load(language)
162
-
163
- @tp.no_type_check
164
- def __call__(
165
- self,
166
- texts: tp.List[tp.Optional[str]],
167
- return_text: bool = False
168
- ) -> tp.Tuple[Tensor, Tensor]:
169
- """Take a list of strings and convert them to a tensor of indices.
170
-
171
- Args:
172
- texts (tp.List[str]): List of strings.
173
- return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False.
174
- Returns:
175
- tp.Tuple[Tensor, Tensor]:
176
- - Indices of words in the LUT.
177
- - And a mask indicating where the padding tokens are
178
- """
179
- output, lengths = [], []
180
- texts = deepcopy(texts)
181
- for i, text in enumerate(texts):
182
- # if current sample doesn't have a certain attribute, replace with pad token
183
- if text is None:
184
- output.append(Tensor([self.pad_idx]))
185
- lengths.append(0)
186
- continue
187
-
188
- # convert numbers to words
189
- text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore
190
- # normalize text
191
- text = self.nlp(text) # type: ignore
192
- # remove stopwords
193
- if self.stopwords:
194
- text = [w for w in text if not w.is_stop] # type: ignore
195
- # remove punctuations
196
- text = [w for w in text if w.text not in self.PUNCTUATIONS] # type: ignore
197
- # lemmatize if needed
198
- text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore
199
-
200
- texts[i] = " ".join(text)
201
- lengths.append(len(text))
202
- # convert to tensor
203
- tokens = Tensor([hash_trick(w, self.n_bins) for w in text])
204
- output.append(tokens)
205
-
206
- mask = length_to_mask(torch.IntTensor(lengths)).int()
207
- padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t()
208
- if return_text:
209
- return padded_output, mask, texts # type: ignore
210
- return padded_output, mask
211
-
212
-
213
- class NoopTokenizer(Tokenizer):
214
- """This tokenizer should be used for global conditioners such as: artist, genre, key, etc.
215
- The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split
216
- strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will
217
- split it to ["Jeff", "Buckley"] and return an index per word.
218
-
219
- For example:
220
- ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101]
221
- ["Metal", "Rock", "Classical"] => [0, 223, 51]
222
- """
223
- def __init__(self, n_bins: int, pad_idx: int = 0):
224
- self.n_bins = n_bins
225
- self.pad_idx = pad_idx
226
-
227
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[Tensor, Tensor]:
228
- output, lengths = [], []
229
- for text in texts:
230
- # if current sample doesn't have a certain attribute, replace with pad token
231
- if text is None:
232
- output.append(self.pad_idx)
233
- lengths.append(0)
234
- else:
235
- output.append(hash_trick(text, self.n_bins))
236
- lengths.append(1)
237
-
238
- tokens = torch.LongTensor(output).unsqueeze(1)
239
- mask = length_to_mask(torch.IntTensor(lengths)).int()
240
- return tokens, mask
241
-
242
-
243
- class BaseConditioner(nn.Module):
244
- """Base model for all conditioner modules. We allow the output dim to be different
245
- than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large;
246
- 2) make all condition dims consistent.
247
-
248
- Args:
249
- dim (int): Hidden dim of the model (text-encoder/LUT).
250
- output_dim (int): Output dim of the conditioner.
251
- """
252
- def __init__(self, dim, output_dim):
253
- super().__init__()
254
- self.dim = dim
255
- self.output_dim = output_dim
256
- self.output_proj = nn.Linear(dim, output_dim)
257
-
258
- def tokenize(self, *args, **kwargs) -> tp.Any:
259
- """Should be any part of the processing that will lead to a synchronization
260
- point, e.g. BPE tokenization with transfer to the GPU.
261
-
262
- The returned value will be saved and return later when calling forward().
263
- """
264
- raise NotImplementedError()
265
-
266
- def forward(self, inputs: tp.Any) -> ConditionType:
267
- """Gets input that should be used as conditioning (e.g, genre, description or a waveform).
268
- Outputs a ConditionType, after the input data was embedded as a dense vector.
269
-
270
- Returns:
271
- ConditionType:
272
- - A tensor of size [B, T, D] where B is the batch size, T is the length of the
273
- output embedding and D is the dimension of the embedding.
274
- - And a mask indicating where the padding tokens.
275
- """
276
- raise NotImplementedError()
277
-
278
-
279
- class TextConditioner(BaseConditioner):
280
- ...
281
-
282
-
283
- class LUTConditioner(TextConditioner):
284
- """Lookup table TextConditioner.
285
-
286
- Args:
287
- n_bins (int): Number of bins.
288
- dim (int): Hidden dim of the model (text-encoder/LUT).
289
- output_dim (int): Output dim of the conditioner.
290
- tokenizer (str): Name of the tokenizer.
291
- pad_idx (int, optional): Index for padding token. Defaults to 0.
292
- """
293
- def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0):
294
- super().__init__(dim, output_dim)
295
- self.embed = nn.Embedding(n_bins, dim)
296
- self.tokenizer: Tokenizer
297
- if tokenizer == "whitespace":
298
- self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx)
299
- elif tokenizer == "noop":
300
- self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx)
301
- else:
302
- raise ValueError(f"unrecognized tokenizer `{tokenizer}`.")
303
-
304
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
305
- device = self.embed.weight.device
306
- tokens, mask = self.tokenizer(x)
307
- tokens, mask = tokens.to(device), mask.to(device)
308
- return tokens, mask
309
-
310
- def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType:
311
- tokens, mask = inputs
312
- embeds = self.embed(tokens)
313
- embeds = self.output_proj(embeds)
314
- embeds = (embeds * mask.unsqueeze(-1))
315
- return embeds, mask
316
-
317
-
318
- class T5Conditioner(TextConditioner):
319
- """T5-based TextConditioner.
320
-
321
- Args:
322
- name (str): Name of the T5 model.
323
- output_dim (int): Output dim of the conditioner.
324
- finetune (bool): Whether to fine-tune T5 at train time.
325
- device (str): Device for T5 Conditioner.
326
- autocast_dtype (tp.Optional[str], optional): Autocast dtype.
327
- word_dropout (float, optional): Word dropout probability.
328
- normalize_text (bool, optional): Whether to apply text normalization.
329
- """
330
- MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b",
331
- "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large",
332
- "google/flan-t5-xl", "google/flan-t5-xxl"]
333
- MODELS_DIMS = {
334
- "t5-small": 512,
335
- "t5-base": 768,
336
- "t5-large": 1024,
337
- "t5-3b": 1024,
338
- "t5-11b": 1024,
339
- "google/flan-t5-small": 512,
340
- "google/flan-t5-base": 768,
341
- "google/flan-t5-large": 1024,
342
- "google/flan-t5-3b": 1024,
343
- "google/flan-t5-11b": 1024,
344
- }
345
-
346
- def __init__(self, name: str, output_dim: int, finetune: bool, device: str,
347
- autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0.,
348
- normalize_text: bool = False):
349
- assert name in self.MODELS, f"unrecognized t5 model name (should in {self.MODELS})"
350
- super().__init__(self.MODELS_DIMS[name], output_dim)
351
- self.device = device
352
- self.name = name
353
- self.finetune = finetune
354
- self.word_dropout = word_dropout
355
-
356
- if autocast_dtype is None or self.device == 'cpu':
357
- self.autocast = TorchAutocast(enabled=False)
358
- if self.device != 'cpu':
359
- logger.warning("T5 has no autocast, this might lead to NaN")
360
- else:
361
- dtype = getattr(torch, autocast_dtype)
362
- assert isinstance(dtype, torch.dtype)
363
- logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}")
364
- self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
365
- # Let's disable logging temporarily because T5 will vomit some errors otherwise.
366
- # thanks https://gist.github.com/simon-weber/7853144
367
- previous_level = logging.root.manager.disable
368
- logging.disable(logging.ERROR)
369
- with warnings.catch_warnings():
370
- warnings.simplefilter("ignore")
371
- try:
372
- self.t5_tokenizer = T5Tokenizer.from_pretrained(name)
373
- t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune)
374
- finally:
375
- logging.disable(previous_level)
376
- if finetune:
377
- self.t5 = t5
378
- else:
379
- # this makes sure that the t5 models is not part
380
- # of the saved checkpoint
381
- self.__dict__["t5"] = t5.to(device)
382
-
383
- self.normalize_text = normalize_text
384
- if normalize_text:
385
- self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True)
386
-
387
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]:
388
- # if current sample doesn't have a certain attribute, replace with empty string
389
- entries: tp.List[str] = [xi if xi is not None else "" for xi in x]
390
- if self.normalize_text:
391
- _, _, entries = self.text_normalizer(entries, return_text=True)
392
- if self.word_dropout > 0. and self.training:
393
- new_entries = []
394
- for entry in entries:
395
- words = [word for word in entry.split(" ") if random.random() >= self.word_dropout]
396
- new_entries.append(" ".join(words))
397
- entries = new_entries
398
-
399
- empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""])
400
-
401
- inputs = self.t5_tokenizer(entries, return_tensors="pt", padding=True).to(self.device)
402
- mask = inputs["attention_mask"]
403
- mask[empty_idx, :] = 0 # zero-out index where the input is non-existant
404
- return inputs
405
-
406
- def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType:
407
- mask = inputs["attention_mask"]
408
- with torch.set_grad_enabled(self.finetune), self.autocast:
409
- embeds = self.t5(**inputs).last_hidden_state
410
- embeds = self.output_proj(embeds.to(self.output_proj.weight))
411
- embeds = (embeds * mask.unsqueeze(-1))
412
- return embeds, mask
413
-
414
-
415
- class WaveformConditioner(BaseConditioner):
416
- """Base class for all conditioners that take a waveform as input.
417
- Classes that inherit must implement `_get_wav_embedding` that outputs
418
- a continuous tensor, and `_downsampling_factor` that returns the down-sampling
419
- factor of the embedding model.
420
-
421
- Args:
422
- dim (int): The internal representation dimension.
423
- output_dim (int): Output dimension.
424
- device (tp.Union[torch.device, str]): Device.
425
- """
426
- def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
427
- super().__init__(dim, output_dim)
428
- self.device = device
429
-
430
- def tokenize(self, wav_length: WavCondition) -> WavCondition:
431
- wav, length, path = wav_length
432
- assert length is not None
433
- return WavCondition(wav.to(self.device), length.to(self.device), path)
434
-
435
- def _get_wav_embedding(self, wav: Tensor) -> Tensor:
436
- """Gets as input a wav and returns a dense vector of conditions."""
437
- raise NotImplementedError()
438
-
439
- def _downsampling_factor(self):
440
- """Returns the downsampling factor of the embedding model."""
441
- raise NotImplementedError()
442
-
443
- def forward(self, inputs: WavCondition) -> ConditionType:
444
- """
445
- Args:
446
- input (WavCondition): Tuple of (waveform, lengths).
447
- Returns:
448
- ConditionType: Dense vector representing the conditioning along with its' mask.
449
- """
450
- wav, lengths, path = inputs
451
- with torch.no_grad():
452
- embeds = self._get_wav_embedding(wav)
453
- embeds = embeds.to(self.output_proj.weight)
454
- embeds = self.output_proj(embeds)
455
-
456
- if lengths is not None:
457
- lengths = lengths / self._downsampling_factor()
458
- mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore
459
- else:
460
- mask = torch.ones_like(embeds)
461
- embeds = (embeds * mask.unsqueeze(2).to(self.device))
462
-
463
- return embeds, mask
464
-
465
-
466
- class ChromaStemConditioner(WaveformConditioner):
467
- """Chroma conditioner that uses DEMUCS to first filter out drums and bass. The is followed by
468
- the insight the drums and bass often dominate the chroma, leading to the chroma not containing the
469
- information about melody.
470
-
471
- Args:
472
- output_dim (int): Output dimension for the conditioner.
473
- sample_rate (int): Sample rate for the chroma extractor.
474
- n_chroma (int): Number of chroma for the chroma extractor.
475
- radix2_exp (int): Radix2 exponent for the chroma extractor.
476
- duration (float): Duration used during training. This is later used for correct padding
477
- in case we are using chroma as prefix.
478
- match_len_on_eval (bool, optional): If True then all chromas are padded to the training
479
- duration. Defaults to False.
480
- eval_wavs (str, optional): Path to a json egg with waveform, this waveforms are used as
481
- conditions during eval (for cases where we don't want to leak test conditions like MusicCaps).
482
- Defaults to None.
483
- n_eval_wavs (int, optional): Limits the number of waveforms used for conditioning. Defaults to 0.
484
- device (tp.Union[torch.device, str], optional): Device for the conditioner.
485
- **kwargs: Additional parameters for the chroma extractor.
486
- """
487
- def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int,
488
- duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None,
489
- n_eval_wavs: int = 0, device: tp.Union[torch.device, str] = "cpu", **kwargs):
490
- from demucs import pretrained
491
- super().__init__(dim=n_chroma, output_dim=output_dim, device=device)
492
- self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32)
493
- self.sample_rate = sample_rate
494
- self.match_len_on_eval = match_len_on_eval
495
- self.duration = duration
496
- self.__dict__["demucs"] = pretrained.get_model('htdemucs').to(device)
497
- self.stem2idx = {'drums': 0, 'bass': 1, 'other': 2, 'vocal': 3}
498
- self.stem_idx = torch.LongTensor([self.stem2idx['vocal'], self.stem2idx['other']]).to(device)
499
- self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp,
500
- device=device, **kwargs)
501
- self.chroma_len = self._get_chroma_len()
502
-
503
- def _downsampling_factor(self):
504
- return self.chroma.winhop
505
-
506
- def _get_chroma_len(self):
507
- """Get length of chroma during training"""
508
- dummy_wav = torch.zeros((1, self.sample_rate * self.duration), device=self.device)
509
- dummy_chr = self.chroma(dummy_wav)
510
- return dummy_chr.shape[1]
511
-
512
- @torch.no_grad()
513
- def _get_filtered_wav(self, wav):
514
- from demucs.apply import apply_model
515
- from demucs.audio import convert_audio
516
- with self.autocast:
517
- wav = convert_audio(wav, self.sample_rate, self.demucs.samplerate, self.demucs.audio_channels)
518
- stems = apply_model(self.demucs, wav, device=self.device)
519
- stems = stems[:, self.stem_idx] # extract stem
520
- stems = stems.sum(1) # merge extracted stems
521
- stems = stems.mean(1, keepdim=True) # mono
522
- stems = convert_audio(stems, self.demucs.samplerate, self.sample_rate, 1)
523
- return stems
524
-
525
- @torch.no_grad()
526
- def _get_wav_embedding(self, wav):
527
- # avoid 0-size tensors when we are working with null conds
528
- if wav.shape[-1] == 1:
529
- return self.chroma(wav)
530
- stems = self._get_filtered_wav(wav)
531
- chroma = self.chroma(stems)
532
-
533
- if self.match_len_on_eval:
534
- b, t, c = chroma.shape
535
- if t > self.chroma_len:
536
- chroma = chroma[:, :self.chroma_len]
537
- logger.debug(f'chroma was truncated! ({t} -> {chroma.shape[1]})')
538
- elif t < self.chroma_len:
539
- # chroma = F.pad(chroma, (0, 0, 0, self.chroma_len - t))
540
- n_repeat = int(math.ceil(self.chroma_len / t))
541
- chroma = chroma.repeat(1, n_repeat, 1)
542
- chroma = chroma[:, :self.chroma_len]
543
- logger.debug(f'chroma was zero-padded! ({t} -> {chroma.shape[1]})')
544
- return chroma
545
-
546
-
547
- class ChromaExtractor(nn.Module):
548
- """Chroma extraction class, handles chroma extraction and quantization.
549
-
550
- Args:
551
- sample_rate (int): Sample rate.
552
- n_chroma (int): Number of chroma to consider.
553
- radix2_exp (int): Radix2 exponent.
554
- nfft (tp.Optional[int], optional): Number of FFT.
555
- winlen (tp.Optional[int], optional): Window length.
556
- winhop (tp.Optional[int], optional): Window hop size.
557
- argmax (bool, optional): Whether to use argmax. Defaults to False.
558
- norm (float, optional): Norm for chroma normalization. Defaults to inf.
559
- device (tp.Union[torch.device, str], optional): Device to use. Defaults to cpu.
560
- """
561
- def __init__(self, sample_rate: int, n_chroma: int = 12, radix2_exp: int = 12,
562
- nfft: tp.Optional[int] = None, winlen: tp.Optional[int] = None, winhop: tp.Optional[int] = None,
563
- argmax: bool = False, norm: float = torch.inf, device: tp.Union[torch.device, str] = "cpu"):
564
- super().__init__()
565
- from librosa import filters
566
- self.device = device
567
- self.autocast = TorchAutocast(enabled=device != "cpu", device_type=self.device, dtype=torch.float32)
568
- self.winlen = winlen or 2 ** radix2_exp
569
- self.nfft = nfft or self.winlen
570
- self.winhop = winhop or (self.winlen // 4)
571
- self.sr = sample_rate
572
- self.n_chroma = n_chroma
573
- self.norm = norm
574
- self.argmax = argmax
575
- self.window = torch.hann_window(self.winlen).to(device)
576
- self.fbanks = torch.from_numpy(filters.chroma(sr=sample_rate, n_fft=self.nfft, tuning=0,
577
- n_chroma=self.n_chroma)).to(device)
578
- self.spec = torchaudio.transforms.Spectrogram(n_fft=self.nfft, win_length=self.winlen,
579
- hop_length=self.winhop, power=2, center=True,
580
- pad=0, normalized=True).to(device)
581
-
582
- def forward(self, wav):
583
- with self.autocast:
584
- T = wav.shape[-1]
585
- # in case we are getting a wav that was dropped out (nullified)
586
- # make sure wav length is no less that nfft
587
- if T < self.nfft:
588
- pad = self.nfft - T
589
- r = 0 if pad % 2 == 0 else 1
590
- wav = F.pad(wav, (pad // 2, pad // 2 + r), 'constant', 0)
591
- assert wav.shape[-1] == self.nfft, f'expected len {self.nfft} but got {wav.shape[-1]}'
592
- spec = self.spec(wav).squeeze(1)
593
- raw_chroma = torch.einsum("cf,...ft->...ct", self.fbanks, spec)
594
- norm_chroma = torch.nn.functional.normalize(raw_chroma, p=self.norm, dim=-2, eps=1e-6)
595
- norm_chroma = rearrange(norm_chroma, "b d t -> b t d")
596
-
597
- if self.argmax:
598
- idx = norm_chroma.argmax(-1, keepdims=True)
599
- norm_chroma[:] = 0
600
- norm_chroma.scatter_(dim=-1, index=idx, value=1)
601
-
602
- return norm_chroma
603
-
604
-
605
- def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str):
606
- """Utility function for nullifying an attribute inside an ConditioningAttributes object.
607
- If the condition is of type "wav", then nullify it using "nullify_condition".
608
- If the condition is of any other type, set its' value to None.
609
- Works in-place.
610
- """
611
- if condition_type not in ["text", "wav"]:
612
- raise ValueError(
613
- "dropout_condition got an unexpected condition type!"
614
- f" expected 'wav' or 'text' but got '{condition_type}'"
615
- )
616
-
617
- if condition not in getattr(sample, condition_type):
618
- raise ValueError(
619
- "dropout_condition received an unexpected condition!"
620
- f" expected wav={sample.wav.keys()} and text={sample.text.keys()}"
621
- f"but got '{condition}' of type '{condition_type}'!"
622
- )
623
-
624
- if condition_type == "wav":
625
- wav, length, path = sample.wav[condition]
626
- sample.wav[condition] = nullify_wav(wav)
627
- else:
628
- sample.text[condition] = None
629
-
630
- return sample
631
-
632
-
633
- class DropoutModule(nn.Module):
634
- """Base class for all dropout modules."""
635
- def __init__(self, seed: int = 1234):
636
- super().__init__()
637
- self.rng = torch.Generator()
638
- self.rng.manual_seed(seed)
639
-
640
-
641
- class AttributeDropout(DropoutModule):
642
- """Applies dropout with a given probability per attribute. This is different from the behavior of
643
- ClassifierFreeGuidanceDropout as this allows for attributes to be dropped out separately. For example,
644
- "artist" can be dropped while "genre" remains. This is in contrast to ClassifierFreeGuidanceDropout
645
- where if "artist" is dropped "genre" must also be dropped.
646
-
647
- Args:
648
- p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example:
649
- ...
650
- "genre": 0.1,
651
- "artist": 0.5,
652
- "wav": 0.25,
653
- ...
654
- active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False.
655
- seed (int, optional): Random seed.
656
- """
657
- def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234):
658
- super().__init__(seed=seed)
659
- self.active_on_eval = active_on_eval
660
- # construct dict that return the values from p otherwise 0
661
- self.p = {}
662
- for condition_type, probs in p.items():
663
- self.p[condition_type] = defaultdict(lambda: 0, probs)
664
-
665
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
666
- """
667
- Args:
668
- samples (tp.List[ConditioningAttributes]): List of conditions.
669
- Returns:
670
- tp.List[ConditioningAttributes]: List of conditions after certain attributes were set to None.
671
- """
672
- if not self.training and not self.active_on_eval:
673
- return samples
674
-
675
- samples = deepcopy(samples)
676
-
677
- for condition_type, ps in self.p.items(): # for condition types [text, wav]
678
- for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre])
679
- if torch.rand(1, generator=self.rng).item() < p:
680
- for sample in samples:
681
- dropout_condition(sample, condition_type, condition)
682
-
683
- return samples
684
-
685
- def __repr__(self):
686
- return f"AttributeDropout({dict(self.p)})"
687
-
688
-
689
- class ClassifierFreeGuidanceDropout(DropoutModule):
690
- """Applies Classifier Free Guidance dropout, meaning all attributes
691
- are dropped with the same probability.
692
-
693
- Args:
694
- p (float): Probability to apply condition dropout during training.
695
- seed (int): Random seed.
696
- """
697
- def __init__(self, p: float, seed: int = 1234):
698
- super().__init__(seed=seed)
699
- self.p = p
700
-
701
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
702
- """
703
- Args:
704
- samples (tp.List[ConditioningAttributes]): List of conditions.
705
- Returns:
706
- tp.List[ConditioningAttributes]: List of conditions after all attributes were set to None.
707
- """
708
- if not self.training:
709
- return samples
710
-
711
- # decide on which attributes to drop in a batched fashion
712
- drop = torch.rand(1, generator=self.rng).item() < self.p
713
- if not drop:
714
- return samples
715
-
716
- # nullify conditions of all attributes
717
- samples = deepcopy(samples)
718
-
719
- for condition_type in ["wav", "text"]:
720
- for sample in samples:
721
- for condition in sample.attributes[condition_type]:
722
- dropout_condition(sample, condition_type, condition)
723
-
724
- return samples
725
-
726
- def __repr__(self):
727
- return f"ClassifierFreeGuidanceDropout(p={self.p})"
728
-
729
-
730
- class ConditioningProvider(nn.Module):
731
- """Main class to provide conditions given all the supported conditioners.
732
-
733
- Args:
734
- conditioners (dict): Dictionary of conditioners.
735
- merge_text_conditions_p (float, optional): Probability to merge all text sources
736
- into a single text condition. Defaults to 0.
737
- drop_desc_p (float, optional): Probability to drop the original description
738
- when merging all text sources into a single text condition. Defaults to 0.
739
- device (tp.Union[torch.device, str], optional): Device for conditioners and output condition types.
740
- """
741
- def __init__(
742
- self,
743
- conditioners: tp.Dict[str, BaseConditioner],
744
- merge_text_conditions_p: float = 0,
745
- drop_desc_p: float = 0,
746
- device: tp.Union[torch.device, str] = "cpu",
747
- ):
748
- super().__init__()
749
- self.device = device
750
- self.merge_text_conditions_p = merge_text_conditions_p
751
- self.drop_desc_p = drop_desc_p
752
- self.conditioners = nn.ModuleDict(conditioners)
753
-
754
- @property
755
- def text_conditions(self):
756
- return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)]
757
-
758
- @property
759
- def wav_conditions(self):
760
- return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)]
761
-
762
- @property
763
- def has_wav_condition(self):
764
- return len(self.wav_conditions) > 0
765
-
766
- def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]:
767
- """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly.
768
- This should be called before starting any real GPU work to avoid synchronization points.
769
- This will return a dict matching conditioner names to their arbitrary tokenized representations.
770
-
771
- Args:
772
- inputs (list[ConditioningAttribres]): List of ConditioningAttributes objects containing
773
- text and wav conditions.
774
- """
775
- assert all([type(x) == ConditioningAttributes for x in inputs]), \
776
- "got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]" \
777
- f" but types were {set([type(x) for x in inputs])}"
778
-
779
- output = {}
780
- text = self._collate_text(inputs)
781
- wavs = self._collate_wavs(inputs)
782
-
783
- assert set(text.keys() | wavs.keys()).issubset(set(self.conditioners.keys())), \
784
- f"got an unexpected attribute! Expected {self.conditioners.keys()}, got {text.keys(), wavs.keys()}"
785
-
786
- for attribute, batch in chain(text.items(), wavs.items()):
787
- output[attribute] = self.conditioners[attribute].tokenize(batch)
788
- return output
789
-
790
- def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]:
791
- """Compute pairs of `(embedding, mask)` using the configured conditioners
792
- and the tokenized representations. The output is for example:
793
-
794
- {
795
- "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])),
796
- "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])),
797
- ...
798
- }
799
-
800
- Args:
801
- tokenized (dict): Dict of tokenized representations as returned by `tokenize()`.
802
- """
803
- output = {}
804
- for attribute, inputs in tokenized.items():
805
- condition, mask = self.conditioners[attribute](inputs)
806
- output[attribute] = (condition, mask)
807
- return output
808
-
809
- def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]:
810
- """Given a list of ConditioningAttributes objects, compile a dictionary where the keys
811
- are the attributes and the values are the aggregated input per attribute.
812
- For example:
813
- Input:
814
- [
815
- ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...),
816
- ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...),
817
- ]
818
- Output:
819
- {
820
- "genre": ["Rock", "Hip-hop"],
821
- "description": ["A rock song with a guitar solo", "A hip-hop verse"]
822
- }
823
- """
824
- batch_per_attribute: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list)
825
-
826
- def _merge_conds(cond, merge_text_conditions_p=0, drop_desc_p=0):
827
- def is_valid(k, v):
828
- k_valid = k in ['key', 'bpm', 'genre', 'moods', 'instrument']
829
- v_valid = v is not None and isinstance(v, (int, float, str, list))
830
- return k_valid and v_valid
831
-
832
- def process_value(v):
833
- if isinstance(v, (int, float, str)):
834
- return v
835
- if isinstance(v, list):
836
- return ", ".join(v)
837
- else:
838
- RuntimeError(f"unknown type for text value! ({type(v), v})")
839
-
840
- desc = cond.text['description']
841
- meta_data = ""
842
- if random.uniform(0, 1) < merge_text_conditions_p:
843
- meta_pairs = [f'{k}: {process_value(v)}' for k, v in cond.text.items() if is_valid(k, v)]
844
- random.shuffle(meta_pairs)
845
- meta_data = ". ".join(meta_pairs)
846
- desc = desc if not random.uniform(0, 1) < drop_desc_p else None
847
-
848
- if desc is None:
849
- desc = meta_data if len(meta_data) > 1 else None
850
- else:
851
- desc = desc.rstrip('.') + ". " + meta_data
852
- cond.text['description'] = desc.strip() if desc else None
853
-
854
- if self.training and self.merge_text_conditions_p:
855
- for sample in samples:
856
- _merge_conds(sample, self.merge_text_conditions_p, self.drop_desc_p)
857
-
858
- texts = [x.text for x in samples]
859
- for text in texts:
860
- for condition in self.text_conditions:
861
- batch_per_attribute[condition].append(text[condition])
862
-
863
- return batch_per_attribute
864
-
865
- def _collate_wavs(self, samples: tp.List[ConditioningAttributes]):
866
- """Generate a dict where the keys are attributes by which we fetch similar wavs,
867
- and the values are Tensors of wavs according to said attribtues.
868
-
869
- *Note*: by the time the samples reach this function, each sample should have some waveform
870
- inside the "wav" attribute. It should be either:
871
- 1. A real waveform
872
- 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset)
873
- 3. A null waveform due to it being dropped in a dropout module (nullified by dropout)
874
-
875
- Args:
876
- samples (tp.List[ConditioningAttributes]): List of ConditioningAttributes samples.
877
- Returns:
878
- dict: A dicionary mapping an attribute name to wavs.
879
- """
880
- wavs = defaultdict(list)
881
- lens = defaultdict(list)
882
- paths = defaultdict(list)
883
- out = {}
884
-
885
- for sample in samples:
886
- for attribute in self.wav_conditions:
887
- wav, length, path = sample.wav[attribute]
888
- wavs[attribute].append(wav.flatten())
889
- lens[attribute].append(length)
890
- paths[attribute].append(path)
891
-
892
- # stack all wavs to a single tensor
893
- for attribute in self.wav_conditions:
894
- stacked_wav, _ = collate(wavs[attribute], dim=0)
895
- out[attribute] = WavCondition(stacked_wav.unsqueeze(1),
896
- torch.cat(lens['self_wav']), paths[attribute]) # type: ignore
897
-
898
- return out
899
-
900
-
901
- class ConditionFuser(StreamingModule):
902
- """Condition fuser handles the logic to combine the different conditions
903
- to the actual model input.
904
-
905
- Args:
906
- fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse
907
- each condition. For example:
908
- {
909
- "prepend": ["description"],
910
- "sum": ["genre", "bpm"],
911
- "cross": ["description"],
912
- }
913
- cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention.
914
- cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used.
915
- """
916
- FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"]
917
-
918
- def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False,
919
- cross_attention_pos_emb_scale: float = 1.0):
920
- super().__init__()
921
- assert all(
922
- [k in self.FUSING_METHODS for k in fuse2cond.keys()]
923
- ), f"got invalid fuse method, allowed methods: {self.FUSING_MEHTODS}"
924
- self.cross_attention_pos_emb = cross_attention_pos_emb
925
- self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale
926
- self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond
927
- self.cond2fuse: tp.Dict[str, str] = {}
928
- for fuse_method, conditions in fuse2cond.items():
929
- for condition in conditions:
930
- self.cond2fuse[condition] = fuse_method
931
-
932
- def forward(
933
- self,
934
- input: Tensor,
935
- conditions: tp.Dict[str, ConditionType]
936
- ) -> tp.Tuple[Tensor, tp.Optional[Tensor]]:
937
- """Fuse the conditions to the provided model input.
938
-
939
- Args:
940
- input (Tensor): Transformer input.
941
- conditions (tp.Dict[str, ConditionType]): Dict of conditions.
942
- Returns:
943
- tp.Tuple[Tensor, Tensor]: The first tensor is the transformer input
944
- after the conditions have been fused. The second output tensor is the tensor
945
- used for cross-attention or None if no cross attention inputs exist.
946
- """
947
- B, T, _ = input.shape
948
-
949
- if 'offsets' in self._streaming_state:
950
- first_step = False
951
- offsets = self._streaming_state['offsets']
952
- else:
953
- first_step = True
954
- offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device)
955
-
956
- assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \
957
- f"given conditions contain unknown attributes for fuser, " \
958
- f"expected {self.cond2fuse.keys()}, got {conditions.keys()}"
959
- cross_attention_output = None
960
- for cond_type, (cond, cond_mask) in conditions.items():
961
- op = self.cond2fuse[cond_type]
962
- if op == "sum":
963
- input += cond
964
- elif op == "input_interpolate":
965
- cond = rearrange(cond, "b t d -> b d t")
966
- cond = F.interpolate(cond, size=input.shape[1])
967
- input += rearrange(cond, "b d t -> b t d")
968
- elif op == "prepend":
969
- if first_step:
970
- input = torch.cat([cond, input], dim=1)
971
- elif op == "cross":
972
- if cross_attention_output is not None:
973
- cross_attention_output = torch.cat([cross_attention_output, cond], dim=1)
974
- else:
975
- cross_attention_output = cond
976
- else:
977
- raise ValueError(f"unknown op ({op})")
978
-
979
- if self.cross_attention_pos_emb and cross_attention_output is not None:
980
- positions = torch.arange(
981
- cross_attention_output.shape[1],
982
- device=cross_attention_output.device
983
- ).view(1, -1, 1)
984
- pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1])
985
- cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb
986
-
987
- if self._is_streaming:
988
- self._streaming_state['offsets'] = offsets + T
989
-
990
- return input, cross_attention_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/client/js/icons.js DELETED
@@ -1 +0,0 @@
1
- window.FontAwesomeKitConfig={asyncLoading:{enabled:!1},autoA11y:{enabled:!0},baseUrl:"https://ka-f.fontawesome.com",baseUrlKit:"https://kit-pro.fontawesome.com",detectConflictsUntil:null,iconUploads:{},id:96462084,license:"pro",method:"css",minify:{enabled:!0},token:"d0514f1901",v4FontFaceShim:{enabled:!0},v4shim:{enabled:!0},v5FontFaceShim:{enabled:!0},version:"6.1.1"},function(t){"function"==typeof define&&define.amd?define("kit-loader",t):t()}(function(){"use strict";function t(e){return(t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(e)}function e(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function n(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);e&&(o=o.filter(function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable})),n.push.apply(n,o)}return n}function o(t){for(var o=1;o<arguments.length;o++){var r=null!=arguments[o]?arguments[o]:{};o%2?n(Object(r),!0).forEach(function(n){e(t,n,r[n])}):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):n(Object(r)).forEach(function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(r,e))})}return t}function r(t,e){return function(t){if(Array.isArray(t))return t}(t)||function(t,e){if("undefined"!=typeof Symbol&&Symbol.iterator in Object(t)){var n=[],o=!0,r=!1,i=void 0;try{for(var c,a=t[Symbol.iterator]();!(o=(c=a.next()).done)&&(n.push(c.value),!e||n.length!==e);o=!0);}catch(t){r=!0,i=t}finally{try{o||null==a.return||a.return()}finally{if(r)throw i}}return n}}(t,e)||function(t,e){if(t){if("string"==typeof t)return i(t,e);var 
n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?i(t,e):void 0}}(t,e)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function i(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,o=new Array(e);n<e;n++)o[n]=t[n];return o}function c(t,e){var n=e&&e.addOn||"",o=e&&e.baseFilename||t.license+n,r=e&&e.minify?".min":"",i=e&&e.fileSuffix||t.method,c=e&&e.subdir||t.method;return t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/"+c+"/"+o+r+"."+i}function a(t,e){var n=e||["fa"],o="."+Array.prototype.join.call(n,",."),r=t.querySelectorAll(o);Array.prototype.forEach.call(r,function(e){var n=e.getAttribute("title");e.setAttribute("aria-hidden","true");var o=!e.nextElementSibling||!e.nextElementSibling.classList.contains("sr-only");if(n&&o){var r=t.createElement("span");r.innerHTML=n,r.classList.add("sr-only"),e.parentNode.insertBefore(r,e.nextSibling)}})}var u,f=function(){},s="undefined"!=typeof global&&void 0!==global.process&&"function"==typeof global.process.emit,d="undefined"==typeof setImmediate?setTimeout:setImmediate,l=[];function h(){for(var t=0;t<l.length;t++)l[t][0](l[t][1]);l=[],u=!1}function m(t,e){l.push([t,e]),u||(u=!0,d(h,0))}function p(t){var e=t.owner,n=e._state,o=e._data,r=t[n],i=t.then;if("function"==typeof r){n="fulfilled";try{o=r(o)}catch(t){g(i,t)}}v(i,o)||("fulfilled"===n&&b(i,o),"rejected"===n&&g(i,o))}function v(e,n){var o;try{if(e===n)throw new TypeError("A promises callback cannot return that same promise.");if(n&&("function"==typeof n||"object"===t(n))){var r=n.then;if("function"==typeof r)return r.call(n,function(t){o||(o=!0,n===t?y(e,t):b(e,t))},function(t){o||(o=!0,g(e,t))}),!0}}catch(t){return o||g(e,t),!0}return!1}function 
b(t,e){t!==e&&v(t,e)||y(t,e)}function y(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(A,t))}function g(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(S,t))}function w(t){t._then=t._then.forEach(p)}function A(t){t._state="fulfilled",w(t)}function S(t){t._state="rejected",w(t),!t._handled&&s&&global.process.emit("unhandledRejection",t._data,t)}function O(t){global.process.emit("rejectionHandled",t)}function j(t){if("function"!=typeof t)throw new TypeError("Promise resolver "+t+" is not a function");if(this instanceof j==0)throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");this._then=[],function(t,e){function n(t){g(e,t)}try{t(function(t){b(e,t)},n)}catch(t){n(t)}}(t,this)}j.prototype={constructor:j,_state:"pending",_then:null,_data:void 0,_handled:!1,then:function(t,e){var n={owner:this,then:new this.constructor(f),fulfilled:t,rejected:e};return!e&&!t||this._handled||(this._handled=!0,"rejected"===this._state&&s&&m(O,this)),"fulfilled"===this._state||"rejected"===this._state?m(p,n):this._then.push(n),n.then},catch:function(t){return this.then(null,t)}},j.all=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.all().");return new j(function(e,n){var o=[],r=0;function i(t){return r++,function(n){o[t]=n,--r||e(o)}}for(var c,a=0;a<t.length;a++)(c=t[a])&&"function"==typeof c.then?c.then(i(a),n):o[a]=c;r||e(o)})},j.race=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.race().");return new j(function(e,n){for(var o,r=0;r<t.length;r++)(o=t[r])&&"function"==typeof o.then?o.then(e,n):e(o)})},j.resolve=function(e){return e&&"object"===t(e)&&e.constructor===j?e:new j(function(t){t(e)})},j.reject=function(t){return new j(function(e,n){n(t)})};var F="function"==typeof Promise?Promise:j;function E(t,e){var n=e.fetch,o=e.XMLHttpRequest,r=e.token,i=t;return"URLSearchParams"in window?(i=new 
URL(t)).searchParams.set("token",r):i=i+"?token="+encodeURIComponent(r),i=i.toString(),new F(function(t,e){if("function"==typeof n)n(i,{mode:"cors",cache:"default"}).then(function(t){if(t.ok)return t.text();throw new Error("")}).then(function(e){t(e)}).catch(e);else if("function"==typeof o){var r=new o;r.addEventListener("loadend",function(){this.responseText?t(this.responseText):e(new Error(""))}),["abort","error","timeout"].map(function(t){r.addEventListener(t,function(){e(new Error(""))})}),r.open("GET",i),r.send()}else e(new Error(""))})}function _(t,e,n){var o=t;return[[/(url\("?)\.\.\/\.\.\/\.\./g,function(t,n){return"".concat(n).concat(e)}],[/(url\("?)\.\.\/webfonts/g,function(t,o){return"".concat(o).concat(e,"/releases/v").concat(n,"/webfonts")}],[/(url\("?)https:\/\/kit-free([^.])*\.fontawesome\.com/g,function(t,n){return"".concat(n).concat(e)}]].forEach(function(t){var e=r(t,2),n=e[0],i=e[1];o=o.replace(n,i)}),o}function C(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){},r=e.document||r,i=a.bind(a,r,["fa","fab","fas","far","fal","fad","fak"]),u=Object.keys(t.iconUploads||{}).length>0;t.autoA11y.enabled&&n(i);var f=[{id:"fa-main",addOn:void 0}];t.v4shim&&t.v4shim.enabled&&f.push({id:"fa-v4-shims",addOn:"-v4-shims"}),t.v5FontFaceShim&&t.v5FontFaceShim.enabled&&f.push({id:"fa-v5-font-face",addOn:"-v5-font-face"}),t.v4FontFaceShim&&t.v4FontFaceShim.enabled&&f.push({id:"fa-v4-font-face",addOn:"-v4-font-face"}),u&&f.push({id:"fa-kit-upload",customCss:!0});var s=f.map(function(n){return new F(function(r,i){E(n.customCss?function(t){return t.baseUrlKit+"/"+t.token+"/"+t.id+"/kit-upload.css"}(t):c(t,{addOn:n.addOn,minify:t.minify.enabled}),e).then(function(i){r(function(t,e){var n=e.contentFilter||function(t,e){return t},o=document.createElement("style"),r=document.createTextNode(n(t,e));return 
o.appendChild(r),o.media="all",e.id&&o.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&o.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),o}(i,o(o({},e),{},{baseUrl:t.baseUrl,version:t.version,id:n.id,contentFilter:function(t,e){return _(t,e.baseUrl,e.version)}})))}).catch(i)})});return F.all(s)}function P(t,e){var n=document.createElement("SCRIPT"),o=document.createTextNode(t);return n.appendChild(o),n.referrerPolicy="strict-origin",e.id&&n.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n}function U(t){var e,n=[],o=document,r=(o.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(o.readyState);r||o.addEventListener("DOMContentLoaded",e=function(){for(o.removeEventListener("DOMContentLoaded",e),r=1;e=n.shift();)e()}),r?setTimeout(t,0):n.push(t)}try{if(window.FontAwesomeKitConfig){var k=window.FontAwesomeKitConfig,L={detectingConflicts:k.detectConflictsUntil&&new Date<=new Date(k.detectConflictsUntil),detectionIgnoreAttr:"data-fa-detection-ignore",fetch:window.fetch,token:k.token,XMLHttpRequest:window.XMLHttpRequest,document:document},I=document.currentScript,T=I?I.parentElement:document.head;(function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"js"===t.method?function(t,e){e.autoA11y=t.autoA11y.enabled,"pro"===t.license&&(e.autoFetchSvg=!0,e.fetchSvgFrom=t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/svgs",e.fetchUploadedSvgFrom=t.uploadsUrl);var n=[];return t.v4shim.enabled&&n.push(new F(function(n,r){E(c(t,{addOn:"-v4-shims",minify:t.minify.enabled}),e).then(function(t){n(P(t,o(o({},e),{},{id:"fa-v4-shims"})))}).catch(r)})),n.push(new F(function(n,r){E(c(t,{minify:t.minify.enabled}),e).then(function(t){var r=P(t,o(o({},e),{},{id:"fa-main"}));n(function(t,e){var n=e&&void 
0!==e.autoFetchSvg?e.autoFetchSvg:void 0,o=e&&void 0!==e.autoA11y?e.autoA11y:void 0;return void 0!==o&&t.setAttribute("data-auto-a11y",o?"true":"false"),n&&(t.setAttributeNode(document.createAttribute("data-auto-fetch-svg")),t.setAttribute("data-fetch-svg-from",e.fetchSvgFrom),t.setAttribute("data-fetch-uploaded-svg-from",e.fetchUploadedSvgFrom)),t}(r,e))}).catch(r)})),F.all(n)}(t,e):"css"===t.method?C(t,e,function(t){U(t),function(t){"undefined"!=typeof MutationObserver&&new MutationObserver(t).observe(document,{childList:!0,subtree:!0})}(t)}):void 0})(k,L).then(function(t){t.map(function(t){try{T.insertBefore(t,I?I.nextSibling:null)}catch(e){T.appendChild(t)}}),L.detectingConflicts&&I&&U(function(){I.setAttributeNode(document.createAttribute(L.detectionIgnoreAttr));var t=function(t,e){var n=document.createElement("script");return e&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n.src=c(t,{baseFilename:"conflict-detection",fileSuffix:"js",subdir:"js",minify:t.minify.enabled}),n}(k,L);document.body.appendChild(t)})}).catch(function(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))})}}catch(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))}});
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aichat.py DELETED
@@ -1,54 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from aiohttp import ClientSession
4
-
5
- from .base_provider import AsyncProvider, format_prompt
6
-
7
-
8
- class Aichat(AsyncProvider):
9
- url = "https://chat-gpt.org/chat"
10
- working = True
11
- supports_gpt_35_turbo = True
12
-
13
- @staticmethod
14
- async def create_async(
15
- model: str,
16
- messages: list[dict[str, str]],
17
- proxy: str = None,
18
- **kwargs
19
- ) -> str:
20
- headers = {
21
- "authority": "chat-gpt.org",
22
- "accept": "*/*",
23
- "cache-control": "no-cache",
24
- "content-type": "application/json",
25
- "origin": "https://chat-gpt.org",
26
- "pragma": "no-cache",
27
- "referer": "https://chat-gpt.org/chat",
28
- "sec-ch-ua-mobile": "?0",
29
- "sec-ch-ua-platform": '"macOS"',
30
- "sec-fetch-dest": "empty",
31
- "sec-fetch-mode": "cors",
32
- "sec-fetch-site": "same-origin",
33
- "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
34
- }
35
- async with ClientSession(
36
- headers=headers
37
- ) as session:
38
- json_data = {
39
- "message": format_prompt(messages),
40
- "temperature": kwargs.get('temperature', 0.5),
41
- "presence_penalty": 0,
42
- "top_p": kwargs.get('top_p', 1),
43
- "frequency_penalty": 0,
44
- }
45
- async with session.post(
46
- "https://chat-gpt.org/api/text",
47
- proxy=proxy,
48
- json=json_data
49
- ) as response:
50
- response.raise_for_status()
51
- result = await response.json()
52
- if not result['response']:
53
- raise Exception(f"Error Response: {result}")
54
- return result["message"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Bard.py DELETED
@@ -1,92 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import random
5
- import re
6
-
7
- from aiohttp import ClientSession
8
-
9
- from ..base_provider import AsyncProvider, format_prompt, get_cookies
10
-
11
-
12
- class Bard(AsyncProvider):
13
- url = "https://bard.google.com"
14
- needs_auth = True
15
- working = True
16
- _snlm0e = None
17
-
18
- @classmethod
19
- async def create_async(
20
- cls,
21
- model: str,
22
- messages: list[dict[str, str]],
23
- proxy: str = None,
24
- cookies: dict = None,
25
- **kwargs
26
- ) -> str:
27
- prompt = format_prompt(messages)
28
- if proxy and "://" not in proxy:
29
- proxy = f"http://{proxy}"
30
- if not cookies:
31
- cookies = get_cookies(".google.com")
32
-
33
- headers = {
34
- 'authority': 'bard.google.com',
35
- 'origin': 'https://bard.google.com',
36
- 'referer': 'https://bard.google.com/',
37
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
38
- 'x-same-domain': '1',
39
- }
40
-
41
- async with ClientSession(
42
- cookies=cookies,
43
- headers=headers
44
- ) as session:
45
- if not cls._snlm0e:
46
- async with session.get(cls.url, proxy=proxy) as response:
47
- text = await response.text()
48
-
49
- match = re.search(r'SNlM0e\":\"(.*?)\"', text)
50
- if not match:
51
- raise RuntimeError("No snlm0e value.")
52
- cls._snlm0e = match.group(1)
53
-
54
- params = {
55
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
56
- '_reqid': random.randint(1111, 9999),
57
- 'rt': 'c'
58
- }
59
-
60
- data = {
61
- 'at': cls._snlm0e,
62
- 'f.req': json.dumps([None, json.dumps([[prompt]])])
63
- }
64
-
65
- intents = '.'.join([
66
- 'assistant',
67
- 'lamda',
68
- 'BardFrontendService'
69
- ])
70
-
71
- async with session.post(
72
- f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
73
- data=data,
74
- params=params,
75
- proxy=proxy
76
- ) as response:
77
- response = await response.text()
78
- response = json.loads(response.splitlines()[3])[0][2]
79
- response = json.loads(response)[4][0][1][0]
80
- return response
81
-
82
- @classmethod
83
- @property
84
- def params(cls):
85
- params = [
86
- ("model", "str"),
87
- ("messages", "list[dict[str, str]]"),
88
- ("stream", "bool"),
89
- ("proxy", "str"),
90
- ]
91
- param = ", ".join([": ".join(p) for p in params])
92
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Adr740/CV_XPLORER_POC/get_cv.py DELETED
@@ -1,39 +0,0 @@
1
- import pandas as pd
2
- import openai
3
- from data import data as df
4
- import numpy as np
5
- import os
6
-
7
- openai.api_key = os.environ.get("openai")
8
-
9
- def cosine_similarity(a, b):
10
- return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
11
-
12
-
13
- def get_embedding(text, model="text-embedding-ada-002"):
14
- try:
15
- text = text.replace("\n", " ")
16
- except:
17
- None
18
- return openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']
19
-
20
- def search_cv(search, nb=3, pprint=True):
21
- embedding = get_embedding(search, model='text-embedding-ada-002')
22
- dff = df.copy()
23
- dff['similarities'] = dff.embedding.apply(lambda x: cosine_similarity(x, embedding))
24
- res = dff.sort_values('similarities', ascending=False).head(int(nb))
25
- # try:
26
- # res.drop(columns=["id","hadith_id", "embeding"], inplace=True)
27
- # except:
28
- # pass
29
- return res
30
-
31
- def get_cv(text, nb):
32
- result = search_cv(text,nb).to_dict(orient="records")
33
- final_str = ""
34
- for r in result:
35
- final_str += "#### Candidat avec " + str(round(r["similarities"]*100,2)) + "% de similarité :\n"+ str(r["summary"]).replace("#","")
36
- final_str += "\n\n[-> Lien vers le CV complet]("+ str(r["url"]) + ')'
37
- final_str += "\n\n-----------------------------------------------------------------------------------------------------\n\n"
38
- final_str = final_str.replace("`", "")
39
- return final_str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/scripts/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/quadimage.js DELETED
@@ -1,13 +0,0 @@
1
- import QuadImage from './gameobjects/mesh/quad/image/Image.js';
2
- import QuadRenderTexture from './gameobjects/mesh/quad/rendertexture/RenderTexture.js';
3
- import SkewImage from './gameobjects/mesh/quad/skewimage/SkewImage.js';
4
- import SkewRenderTexture from './gameobjects/mesh/quad/skewrendertexture/SkewRenderTexture.js';
5
- import ContainerSkew from './behaviors/containerskew/ContainerSkew.js';
6
-
7
- export {
8
- QuadImage,
9
- QuadRenderTexture,
10
- SkewImage,
11
- SkewRenderTexture,
12
- ContainerSkew
13
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/imagebox/ImageBox.js DELETED
@@ -1,2 +0,0 @@
1
- import ImageBox from '../../../plugins/imagebox.js';
2
- export default ImageBox;
 
 
 
spaces/Akmyradov/TurkmenTTSweSTT/vits/monotonic_align/__init__.py DELETED
@@ -1,19 +0,0 @@
1
- import numpy as np
2
- import torch
3
- from .monotonic_align.core import maximum_path_c
4
-
5
-
6
- def maximum_path(neg_cent, mask):
7
- """ Cython optimized version.
8
- neg_cent: [b, t_t, t_s]
9
- mask: [b, t_t, t_s]
10
- """
11
- device = neg_cent.device
12
- dtype = neg_cent.dtype
13
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
14
- path = np.zeros(neg_cent.shape, dtype=np.int32)
15
-
16
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
17
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
18
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
19
- return torch.from_numpy(path).to(device=device, dtype=dtype)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_onnx_objects.py DELETED
@@ -1,17 +0,0 @@
1
- # This file is autogenerated by the command `make fix-copies`, do not edit.
2
- from ..utils import DummyObject, requires_backends
3
-
4
-
5
- class OnnxRuntimeModel(metaclass=DummyObject):
6
- _backends = ["onnx"]
7
-
8
- def __init__(self, *args, **kwargs):
9
- requires_backends(self, ["onnx"])
10
-
11
- @classmethod
12
- def from_config(cls, *args, **kwargs):
13
- requires_backends(cls, ["onnx"])
14
-
15
- @classmethod
16
- def from_pretrained(cls, *args, **kwargs):
17
- requires_backends(cls, ["onnx"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py DELETED
@@ -1,5 +0,0 @@
1
- _base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py'
2
- # model settings
3
- model = dict(
4
- pretrained='open-mmlab://detectron2/resnet101_caffe',
5
- backbone=dict(depth=101))
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101b-d8_769x769_80k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './fcn_r50-d8_769x769_80k_cityscapes.py'
2
- model = dict(
3
- pretrained='torchvision://resnet101',
4
- backbone=dict(type='ResNet', depth=101))
 
 
 
 
 
spaces/AndySAnker/DeepStruc/tools/utils.py DELETED
@@ -1,279 +0,0 @@
1
- import torch, os, yaml, sys
2
- import numpy as np
3
- import matplotlib.pyplot as plt
4
- import pandas as pd
5
- from tqdm import tqdm
6
- from matplotlib.patches import Ellipse
7
- import matplotlib.lines as mlines
8
- from matplotlib.gridspec import GridSpec
9
- import datetime
10
- from tools.data_loader import save_xyz_file
11
- import streamlit as st
12
-
13
- def get_data(args): # Todo: write your own dataloader.
14
- ct = str(datetime.datetime.now()).replace(' ', '_').replace(':','-').replace('.','-')
15
- project_name = f'{args.save_path}/DeepStruc_{ct}'
16
- print(f'\nProject name is: {project_name}')
17
- if not os.path.isdir(f'{project_name}'):
18
- os.mkdir(f'{project_name}')
19
-
20
- samples = args.num_samples
21
- ## Use the uploaded file. Does not support multiple files. Could be written smarter.
22
- files = ['uploaded_file.gr']
23
- this_path = '.'
24
- #this_path = args.data
25
- #if os.path.isdir(this_path):
26
- # files = sorted(os.listdir(this_path))
27
- #else:
28
- # files = [this_path]
29
- # this_path = '.'
30
-
31
- x_list, y_list, name_list = [], [], []
32
- idxx = 0
33
- np_data = np.zeros((len(files)*samples, 2800))
34
- for idx, file in enumerate(files):
35
- for skip_row in range(100):
36
- try:
37
- data = np.loadtxt(f'{this_path}/{file}', skiprows=skip_row)
38
- except ValueError:
39
- continue
40
- data = data.T
41
- x_list.append(data[0])
42
- y_list.append(data[1])
43
- Gr_ph = data[1]
44
- if round(data[0][1] - data[0][0],2) != 0.01:
45
- raise ValueError("The PDF does not have an r-step of 0.01 Å")
46
- try:
47
- start_PDF = np.where((data[0] > 1.995) & (data[0] < 2.005))[0][0]
48
- except:
49
- Gr_ph = np.concatenate((np.zeros((int((data[0][0])/0.01))), Gr_ph))
50
- print("The PDFs first value is above 2 Å. We have added 0's down to 2 Å as a quick fix.")
51
- try:
52
- end_PDF = np.where((data[0] > 29.995) & (data[0] < 30.005))[0][0]
53
- except:
54
- Gr_ph = np.concatenate((Gr_ph, np.zeros((3000-len(Gr_ph)))))
55
- print("The PDFs last value is before 30 Å. We have added 0's up to 30 Å as a quick fix.")
56
- Gr_ph = Gr_ph[200:3000]
57
-
58
- for i in range(samples):
59
- np_data[idxx] = Gr_ph
60
- np_data[idxx] /= np.amax(np_data[idxx])
61
- idxx += 1
62
- name_list.append(file)
63
- break
64
-
65
- fig, ax = plt.subplots()
66
-
67
- plt.plot(x_list[0], y_list[0], label="Input PDF")
68
- plt.plot(np.arange(2, 30, 0.01), np_data[0], label="DeepStruc PDF")
69
- ax.set_xlabel(r'r / $\mathtt{\AA}$')
70
- ax.set_ylabel('G(r) / a.u.')
71
-
72
- ax.set_xlim(0,30)
73
- plt.legend()
74
- plt.title(f'{files[0]}')
75
- plt.tight_layout()
76
- plt.savefig(f'{project_name}/PDFs.png', dpi=300)
77
-
78
- np_data = np_data.reshape((len(files)*samples, 2800, 1))
79
- np_data = torch.tensor(np_data, dtype=torch.float)
80
- return np_data, name_list, project_name
81
-
82
-
83
- def format_predictions(latent_space, data_names, mus, sigmas, sigma_inc):
84
- df_preds = pd.DataFrame(columns=['x', 'y', 'file_name', 'mu', 'sigma', 'sigma_inc'])
85
- for i,j, mu, sigma in zip(latent_space, data_names, mus, sigmas):
86
- if '/' in j:
87
- j = j.split('/')[-1]
88
-
89
- if '.' in j:
90
- j_idx = j.rindex('.')
91
- j = j[:j_idx]
92
-
93
- info_dict = {
94
- 'x': i[0].detach().cpu().numpy(),
95
- 'y': i[1].detach().cpu().numpy(),
96
- 'file_name': j,
97
- 'mu': mu.detach().cpu().numpy(),
98
- 'sigma': sigma.detach().cpu().numpy(),
99
- 'sigma_inc': sigma_inc,}
100
-
101
-
102
- print ("info dict: ", info_dict)
103
- print ("df_preds initial: ", df_preds.head())
104
-
105
- # Append is deprecated and should use concat instead
106
- df_preds = df_preds.append(info_dict, ignore_index=True)
107
-
108
- return df_preds
109
-
110
-
111
- def plot_ls(df, index_highlight):
112
- ideal_ls = './tools/ls_points.csv'
113
- color_dict = {
114
- 'FCC': '#19ADFF',
115
- 'BCC': '#4F8F00',
116
- 'SC': '#941100',
117
- 'Octahedron': '#212121',
118
- 'Icosahedron': '#005493',
119
- 'Decahedron': '#FF950E',
120
- 'HCP': '#FF8AD8',
121
- }
122
- df_ideal = pd.read_csv(ideal_ls, index_col=0) # Get latent space data
123
- # Plotting inputs
124
- ## Training and validation data
125
- MARKER_SIZE_TR = 60
126
- EDGE_LINEWIDTH_TR = 0.0
127
- ALPHA_TR = 0.3
128
-
129
- ## Figure
130
- FIG_SIZE = (10, 4)
131
- MARKER_SIZE_FG = 60
132
- MARKER_FONT_SIZE = 10
133
- MARKER_SCALE = 1.5
134
-
135
- fig = plt.figure(figsize=FIG_SIZE)
136
- gs = GridSpec(1, 5, figure=fig)
137
- ax = fig.add_subplot(gs[0, :4])
138
- ax_legend = fig.add_subplot(gs[0, 4])
139
-
140
- if int(index_highlight) >= len(df):
141
- print(f'\nIndex argument is to large! Need to be smaller than {len(df)} but was {index_highlight}')
142
- raise IndexError
143
- elif int(index_highlight) < -1:
144
- print(f'\nIndex argument invalid! Must be integer from -1 to number of samples generated.')
145
- raise ValueError
146
- elif int(index_highlight)==-1:
147
- pass
148
- elif len(df['file_name'].unique()) > 1:
149
- print(f'\nCan only show highlight index if --data is specific file but {len(df["file_name"].unique())} files were loaded.')
150
- else:
151
- print(f'\nHighlighting index {index_highlight} from the {df["file_name"].unique()[0]} sampling pool.')
152
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='k', s=40,
153
- linewidth=0.0, marker='o', zorder=3)
154
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='w', s=25,
155
- linewidth=0.0, marker='o', zorder=3)
156
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='k', s=10,
157
- linewidth=0.0, marker='o', zorder=3)
158
- ax.scatter(df.iloc[index_highlight]['x'], df.iloc[index_highlight]['y'], c='w', s=1,
159
- linewidth=0.0, marker='o', zorder=3)
160
-
161
- print('\nPlotting DeepStruc training + validation data.')
162
- ax.scatter(df_ideal.iloc[:]['x'].values, df_ideal.iloc[:]['y'].values,
163
- c=[color_dict[str(s)] for s in df_ideal.iloc[:]['stru_type']],
164
- s=MARKER_SIZE_TR * df_ideal.iloc[:]['size'].values,
165
- edgecolors='k', linewidth=EDGE_LINEWIDTH_TR,
166
- alpha=ALPHA_TR)
167
-
168
-
169
- mlines_list = []
170
- for key in color_dict.keys():
171
- mlines_list.append(
172
- mlines.Line2D([], [], MARKER_SIZE_FG, marker='o', c=color_dict[key], linestyle='None', label=key,
173
- mew=1))
174
-
175
- from matplotlib import cm
176
- cm_subsection = np.linspace(0, 1, len(df.file_name.unique()))
177
- data_color = [cm.magma(x) for x in cm_subsection]
178
-
179
- print('\nPlotting DeepStruc structure sampling.')
180
- pbar = tqdm(total=len(df.file_name.unique()))
181
- for idx, file_name in enumerate(df.file_name.unique()):
182
- this_c = np.array([data_color[idx]])
183
-
184
- df_ph = df[df.file_name==file_name]
185
- df_ph.reset_index(drop=True, inplace=True)
186
-
187
- ax.scatter(df_ph['mu'][0][0],df_ph['mu'][0][1], c=this_c, s=10, edgecolors='k',
188
- linewidth=0.5, marker='D',zorder=1)
189
- ellipse = Ellipse((df_ph['mu'][0][0],df_ph['mu'][0][1]),df_ph['sigma'][0][0],df_ph['sigma'][0][1], ec='k', fc=this_c, alpha=0.5, fill=True, zorder=-1)
190
- ax.add_patch(ellipse)
191
-
192
- ellipse = Ellipse((df_ph['mu'][0][0],df_ph['mu'][0][1]),df_ph['x'].var(),df_ph['y'].var(), ec='k', fc=this_c, alpha=0.2, fill=True, zorder=-1)
193
- ax.add_patch(ellipse)
194
-
195
- mlines_list.append(
196
- mlines.Line2D([], [], MARKER_SIZE_FG, marker='D', c=this_c, linestyle='None', label=file_name, mec='k',
197
- mew=1))
198
-
199
- for index, sample in df_ph.iterrows():
200
- ax.scatter(sample['x'], sample['y'], c=this_c, s=10, edgecolors='k',
201
- linewidth=0.8, marker='o', zorder=2)
202
- pbar.update()
203
- pbar.close()
204
-
205
- ax_legend.legend(handles=mlines_list,fancybox=True, #ncol=2, #, bbox_to_anchor=(0.8, 0.5)
206
- markerscale=MARKER_SCALE, fontsize=MARKER_FONT_SIZE, loc='upper right')
207
-
208
- ax.set_xlabel('Latent space $\mathregular{z_0}$', size=10) # Latent Space Feature 1
209
- ax.set_ylabel('Latent space $\mathregular{z_1}$', size=10)
210
-
211
- ax_legend.spines['top'].set_visible(False)
212
- ax_legend.spines['right'].set_visible(False)
213
- ax_legend.spines['bottom'].set_visible(False)
214
- ax_legend.spines['left'].set_visible(False)
215
- ax_legend.get_xaxis().set_ticks([])
216
- ax_legend.get_yaxis().set_ticks([])
217
- ax.get_xaxis().set_ticks([])
218
- ax.get_yaxis().set_ticks([])
219
-
220
- plt.tight_layout()
221
-
222
- return fig
223
-
224
- def get_model(model_dir):
225
- if model_dir == 'DeepStruc':
226
- with open(f'./models/DeepStruc/model_arch.yaml') as file:
227
- model_arch = yaml.full_load(file)
228
- model_path = './models/DeepStruc/models/DeepStruc.ckpt'
229
- return model_path, model_arch
230
- if os.path.isdir(model_dir):
231
- if 'models' in os.listdir(model_dir):
232
- models = sorted(os.listdir(f'{model_dir}/models'))
233
- models = [model for model in models if '.ckpt' in model]
234
- print(f'No specific model was provided. {models[0]} was chosen.')
235
- print('Dataloader might not be sufficient in loading dimensions.')
236
- model_path = f'{model_dir}/models/{models[0]}'
237
- with open(f'{model_dir}/model_arch.yaml') as file:
238
- model_arch = yaml.full_load(file)
239
-
240
- return model_path, model_arch
241
- else:
242
- print(f'Path not understood: {model_dir}')
243
- else:
244
- idx = model_dir.rindex('/')
245
- with open(f'{model_dir[:idx-6]}model_arch.yaml') as file:
246
- model_arch = yaml.full_load(file)
247
-
248
- return model_dir, model_arch
249
-
250
-
251
- def save_predictions(xyz_pred, df, project_name, model_arch, args):
252
- print('\nSaving predicted structures as XYZ files.')
253
- if not os.path.isdir(f'{project_name}'):
254
- os.mkdir(f'{project_name}')
255
-
256
- with open(f'{project_name}/args.yaml', 'w') as outfile:
257
- yaml.dump(vars(args), outfile, allow_unicode=True, default_flow_style=False)
258
-
259
- """
260
- pbar = tqdm(total=len(df))
261
- for count, (idx, row) in enumerate(df.iterrows()):
262
- if not os.path.isdir(f'{project_name}/{row["file_name"]}'):
263
- os.mkdir(f'{project_name}/{row["file_name"]}')
264
- x = f'{float(row["x"]):+.3f}'.replace('.', '-')
265
- y = f'{float(row["y"]):+.3f}'.replace('.', '-')
266
-
267
- these_cords = save_xyz_file('./',
268
- xyz_pred[idx].detach().cpu().numpy(),
269
- f'{count:05}',
270
- [model_arch['norm_vals']['x'],model_arch['norm_vals']['y'],model_arch['norm_vals']['z']])
271
- pbar.update()
272
- pbar.close()
273
- """
274
- # Does not support multiple structure saving
275
- these_cords = save_xyz_file('./',
276
- xyz_pred[args.index_plot].detach().cpu().numpy(),
277
- 'DummyName',
278
- [model_arch['norm_vals']['x'],model_arch['norm_vals']['y'],model_arch['norm_vals']['z']])
279
- return these_cords
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ariharasudhan/Kenya_food_classification/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Kenya Food Classification
3
- emoji: 📉
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.44.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/__init__.py DELETED
File without changes
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/scheduler_list.py DELETED
@@ -1,32 +0,0 @@
1
- from diffusers import (
2
- DDIMScheduler,
3
- DPMSolverMultistepScheduler,
4
- EulerAncestralDiscreteScheduler,
5
- EulerDiscreteScheduler,
6
- HeunDiscreteScheduler,
7
- LMSDiscreteScheduler,
8
- )
9
-
10
- diff_scheduler_list = ["DDIM", "EulerA", "Euler", "LMS", "Heun", "UniPC", "DPMSolver"]
11
-
12
-
13
- def get_scheduler_list(pipe, scheduler):
14
- if scheduler == diff_scheduler_list[0]:
15
- pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
16
-
17
- elif scheduler == diff_scheduler_list[1]:
18
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
19
-
20
- elif scheduler == diff_scheduler_list[2]:
21
- pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
22
-
23
- elif scheduler == diff_scheduler_list[3]:
24
- pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
25
-
26
- elif scheduler == diff_scheduler_list[4]:
27
- pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
28
-
29
- elif scheduler == diff_scheduler_list[5]:
30
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
31
-
32
- return pipe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/bert_vits2/text/japanese_bert.py DELETED
@@ -1,47 +0,0 @@
1
- import os
2
-
3
- import torch
4
- from transformers import AutoTokenizer, AutoModelForMaskedLM
5
-
6
- import config
7
- from logger import logger
8
- from utils.download import download_and_verify
9
- from config import DEVICE as device
10
-
11
- URLS = [
12
- "https://huggingface.co/cl-tohoku/bert-base-japanese-v3/resolve/main/pytorch_model.bin",
13
- ]
14
- TARGET_PATH = os.path.join(config.ABS_PATH, "bert_vits2/bert/bert-base-japanese-v3/pytorch_model.bin")
15
- EXPECTED_MD5 = None
16
-
17
- if not os.path.exists(TARGET_PATH):
18
- success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5)
19
-
20
- try:
21
- logger.info("Loading bert-base-japanese-v3...")
22
- tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3")
23
- model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/bert-base-japanese-v3").to(
24
- device)
25
- logger.info("Loading finished.")
26
- except Exception as e:
27
- logger.error(e)
28
- logger.error(f"Please download pytorch_model.bin from cl-tohoku/bert-base-japanese-v3.")
29
-
30
-
31
- def get_bert_feature(text, word2ph, device=config.DEVICE):
32
- with torch.no_grad():
33
- inputs = tokenizer(text, return_tensors="pt")
34
- for i in inputs:
35
- inputs[i] = inputs[i].to(device)
36
- res = model(**inputs, output_hidden_states=True)
37
- res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()
38
- assert inputs["input_ids"].shape[-1] == len(word2ph)
39
- word2phone = word2ph
40
- phone_level_feature = []
41
- for i in range(len(word2phone)):
42
- repeat_feature = res[i].repeat(word2phone[i], 1)
43
- phone_level_feature.append(repeat_feature)
44
-
45
- phone_level_feature = torch.cat(phone_level_feature, dim=0)
46
-
47
- return phone_level_feature.T
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ashrafb/Imdf2/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Imdf2
3
- emoji: 🔥
4
- colorFrom: pink
5
- colorTo: yellow
6
- sdk: streamlit
7
- sdk_version: 1.27.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/collector.py DELETED
@@ -1,505 +0,0 @@
1
- """
2
- The main purpose of this module is to expose LinkCollector.collect_sources().
3
- """
4
-
5
- import collections
6
- import email.message
7
- import functools
8
- import itertools
9
- import json
10
- import logging
11
- import os
12
- import urllib.parse
13
- import urllib.request
14
- from html.parser import HTMLParser
15
- from optparse import Values
16
- from typing import (
17
- TYPE_CHECKING,
18
- Callable,
19
- Dict,
20
- Iterable,
21
- List,
22
- MutableMapping,
23
- NamedTuple,
24
- Optional,
25
- Sequence,
26
- Tuple,
27
- Union,
28
- )
29
-
30
- from pip._vendor import requests
31
- from pip._vendor.requests import Response
32
- from pip._vendor.requests.exceptions import RetryError, SSLError
33
-
34
- from pip._internal.exceptions import NetworkConnectionError
35
- from pip._internal.models.link import Link
36
- from pip._internal.models.search_scope import SearchScope
37
- from pip._internal.network.session import PipSession
38
- from pip._internal.network.utils import raise_for_status
39
- from pip._internal.utils.filetypes import is_archive_file
40
- from pip._internal.utils.misc import redact_auth_from_url
41
- from pip._internal.vcs import vcs
42
-
43
- from .sources import CandidatesFromPage, LinkSource, build_source
44
-
45
- if TYPE_CHECKING:
46
- from typing import Protocol
47
- else:
48
- Protocol = object
49
-
50
- logger = logging.getLogger(__name__)
51
-
52
- ResponseHeaders = MutableMapping[str, str]
53
-
54
-
55
- def _match_vcs_scheme(url: str) -> Optional[str]:
56
- """Look for VCS schemes in the URL.
57
-
58
- Returns the matched VCS scheme, or None if there's no match.
59
- """
60
- for scheme in vcs.schemes:
61
- if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
62
- return scheme
63
- return None
64
-
65
-
66
- class _NotAPIContent(Exception):
67
- def __init__(self, content_type: str, request_desc: str) -> None:
68
- super().__init__(content_type, request_desc)
69
- self.content_type = content_type
70
- self.request_desc = request_desc
71
-
72
-
73
- def _ensure_api_header(response: Response) -> None:
74
- """
75
- Check the Content-Type header to ensure the response contains a Simple
76
- API Response.
77
-
78
- Raises `_NotAPIContent` if the content type is not a valid content-type.
79
- """
80
- content_type = response.headers.get("Content-Type", "Unknown")
81
-
82
- content_type_l = content_type.lower()
83
- if content_type_l.startswith(
84
- (
85
- "text/html",
86
- "application/vnd.pypi.simple.v1+html",
87
- "application/vnd.pypi.simple.v1+json",
88
- )
89
- ):
90
- return
91
-
92
- raise _NotAPIContent(content_type, response.request.method)
93
-
94
-
95
- class _NotHTTP(Exception):
96
- pass
97
-
98
-
99
- def _ensure_api_response(url: str, session: PipSession) -> None:
100
- """
101
- Send a HEAD request to the URL, and ensure the response contains a simple
102
- API Response.
103
-
104
- Raises `_NotHTTP` if the URL is not available for a HEAD request, or
105
- `_NotAPIContent` if the content type is not a valid content type.
106
- """
107
- scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
108
- if scheme not in {"http", "https"}:
109
- raise _NotHTTP()
110
-
111
- resp = session.head(url, allow_redirects=True)
112
- raise_for_status(resp)
113
-
114
- _ensure_api_header(resp)
115
-
116
-
117
- def _get_simple_response(url: str, session: PipSession) -> Response:
118
- """Access an Simple API response with GET, and return the response.
119
-
120
- This consists of three parts:
121
-
122
- 1. If the URL looks suspiciously like an archive, send a HEAD first to
123
- check the Content-Type is HTML or Simple API, to avoid downloading a
124
- large file. Raise `_NotHTTP` if the content type cannot be determined, or
125
- `_NotAPIContent` if it is not HTML or a Simple API.
126
- 2. Actually perform the request. Raise HTTP exceptions on network failures.
127
- 3. Check the Content-Type header to make sure we got a Simple API response,
128
- and raise `_NotAPIContent` otherwise.
129
- """
130
- if is_archive_file(Link(url).filename):
131
- _ensure_api_response(url, session=session)
132
-
133
- logger.debug("Getting page %s", redact_auth_from_url(url))
134
-
135
- resp = session.get(
136
- url,
137
- headers={
138
- "Accept": ", ".join(
139
- [
140
- "application/vnd.pypi.simple.v1+json",
141
- "application/vnd.pypi.simple.v1+html; q=0.1",
142
- "text/html; q=0.01",
143
- ]
144
- ),
145
- # We don't want to blindly returned cached data for
146
- # /simple/, because authors generally expecting that
147
- # twine upload && pip install will function, but if
148
- # they've done a pip install in the last ~10 minutes
149
- # it won't. Thus by setting this to zero we will not
150
- # blindly use any cached data, however the benefit of
151
- # using max-age=0 instead of no-cache, is that we will
152
- # still support conditional requests, so we will still
153
- # minimize traffic sent in cases where the page hasn't
154
- # changed at all, we will just always incur the round
155
- # trip for the conditional GET now instead of only
156
- # once per 10 minutes.
157
- # For more information, please see pypa/pip#5670.
158
- "Cache-Control": "max-age=0",
159
- },
160
- )
161
- raise_for_status(resp)
162
-
163
- # The check for archives above only works if the url ends with
164
- # something that looks like an archive. However that is not a
165
- # requirement of an url. Unless we issue a HEAD request on every
166
- # url we cannot know ahead of time for sure if something is a
167
- # Simple API response or not. However we can check after we've
168
- # downloaded it.
169
- _ensure_api_header(resp)
170
-
171
- logger.debug(
172
- "Fetched page %s as %s",
173
- redact_auth_from_url(url),
174
- resp.headers.get("Content-Type", "Unknown"),
175
- )
176
-
177
- return resp
178
-
179
-
180
- def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
181
- """Determine if we have any encoding information in our headers."""
182
- if headers and "Content-Type" in headers:
183
- m = email.message.Message()
184
- m["content-type"] = headers["Content-Type"]
185
- charset = m.get_param("charset")
186
- if charset:
187
- return str(charset)
188
- return None
189
-
190
-
191
- class CacheablePageContent:
192
- def __init__(self, page: "IndexContent") -> None:
193
- assert page.cache_link_parsing
194
- self.page = page
195
-
196
- def __eq__(self, other: object) -> bool:
197
- return isinstance(other, type(self)) and self.page.url == other.page.url
198
-
199
- def __hash__(self) -> int:
200
- return hash(self.page.url)
201
-
202
-
203
- class ParseLinks(Protocol):
204
- def __call__(self, page: "IndexContent") -> Iterable[Link]:
205
- ...
206
-
207
-
208
- def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
209
- """
210
- Given a function that parses an Iterable[Link] from an IndexContent, cache the
211
- function's result (keyed by CacheablePageContent), unless the IndexContent
212
- `page` has `page.cache_link_parsing == False`.
213
- """
214
-
215
- @functools.lru_cache(maxsize=None)
216
- def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
217
- return list(fn(cacheable_page.page))
218
-
219
- @functools.wraps(fn)
220
- def wrapper_wrapper(page: "IndexContent") -> List[Link]:
221
- if page.cache_link_parsing:
222
- return wrapper(CacheablePageContent(page))
223
- return list(fn(page))
224
-
225
- return wrapper_wrapper
226
-
227
-
228
- @with_cached_index_content
229
- def parse_links(page: "IndexContent") -> Iterable[Link]:
230
- """
231
- Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
232
- """
233
-
234
- content_type_l = page.content_type.lower()
235
- if content_type_l.startswith("application/vnd.pypi.simple.v1+json"):
236
- data = json.loads(page.content)
237
- for file in data.get("files", []):
238
- link = Link.from_json(file, page.url)
239
- if link is None:
240
- continue
241
- yield link
242
- return
243
-
244
- parser = HTMLLinkParser(page.url)
245
- encoding = page.encoding or "utf-8"
246
- parser.feed(page.content.decode(encoding))
247
-
248
- url = page.url
249
- base_url = parser.base_url or url
250
- for anchor in parser.anchors:
251
- link = Link.from_element(anchor, page_url=url, base_url=base_url)
252
- if link is None:
253
- continue
254
- yield link
255
-
256
-
257
- class IndexContent:
258
- """Represents one response (or page), along with its URL"""
259
-
260
- def __init__(
261
- self,
262
- content: bytes,
263
- content_type: str,
264
- encoding: Optional[str],
265
- url: str,
266
- cache_link_parsing: bool = True,
267
- ) -> None:
268
- """
269
- :param encoding: the encoding to decode the given content.
270
- :param url: the URL from which the HTML was downloaded.
271
- :param cache_link_parsing: whether links parsed from this page's url
272
- should be cached. PyPI index urls should
273
- have this set to False, for example.
274
- """
275
- self.content = content
276
- self.content_type = content_type
277
- self.encoding = encoding
278
- self.url = url
279
- self.cache_link_parsing = cache_link_parsing
280
-
281
- def __str__(self) -> str:
282
- return redact_auth_from_url(self.url)
283
-
284
-
285
- class HTMLLinkParser(HTMLParser):
286
- """
287
- HTMLParser that keeps the first base HREF and a list of all anchor
288
- elements' attributes.
289
- """
290
-
291
- def __init__(self, url: str) -> None:
292
- super().__init__(convert_charrefs=True)
293
-
294
- self.url: str = url
295
- self.base_url: Optional[str] = None
296
- self.anchors: List[Dict[str, Optional[str]]] = []
297
-
298
- def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
299
- if tag == "base" and self.base_url is None:
300
- href = self.get_href(attrs)
301
- if href is not None:
302
- self.base_url = href
303
- elif tag == "a":
304
- self.anchors.append(dict(attrs))
305
-
306
- def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
307
- for name, value in attrs:
308
- if name == "href":
309
- return value
310
- return None
311
-
312
-
313
- def _handle_get_simple_fail(
314
- link: Link,
315
- reason: Union[str, Exception],
316
- meth: Optional[Callable[..., None]] = None,
317
- ) -> None:
318
- if meth is None:
319
- meth = logger.debug
320
- meth("Could not fetch URL %s: %s - skipping", link, reason)
321
-
322
-
323
- def _make_index_content(
324
- response: Response, cache_link_parsing: bool = True
325
- ) -> IndexContent:
326
- encoding = _get_encoding_from_headers(response.headers)
327
- return IndexContent(
328
- response.content,
329
- response.headers["Content-Type"],
330
- encoding=encoding,
331
- url=response.url,
332
- cache_link_parsing=cache_link_parsing,
333
- )
334
-
335
-
336
- def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
337
- url = link.url.split("#", 1)[0]
338
-
339
- # Check for VCS schemes that do not support lookup as web pages.
340
- vcs_scheme = _match_vcs_scheme(url)
341
- if vcs_scheme:
342
- logger.warning(
343
- "Cannot look at %s URL %s because it does not support lookup as web pages.",
344
- vcs_scheme,
345
- link,
346
- )
347
- return None
348
-
349
- # Tack index.html onto file:// URLs that point to directories
350
- scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
351
- if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
352
- # add trailing slash if not present so urljoin doesn't trim
353
- # final segment
354
- if not url.endswith("/"):
355
- url += "/"
356
- # TODO: In the future, it would be nice if pip supported PEP 691
357
- # style responses in the file:// URLs, however there's no
358
- # standard file extension for application/vnd.pypi.simple.v1+json
359
- # so we'll need to come up with something on our own.
360
- url = urllib.parse.urljoin(url, "index.html")
361
- logger.debug(" file: URL is directory, getting %s", url)
362
-
363
- try:
364
- resp = _get_simple_response(url, session=session)
365
- except _NotHTTP:
366
- logger.warning(
367
- "Skipping page %s because it looks like an archive, and cannot "
368
- "be checked by a HTTP HEAD request.",
369
- link,
370
- )
371
- except _NotAPIContent as exc:
372
- logger.warning(
373
- "Skipping page %s because the %s request got Content-Type: %s. "
374
- "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
375
- "application/vnd.pypi.simple.v1+html, and text/html",
376
- link,
377
- exc.request_desc,
378
- exc.content_type,
379
- )
380
- except NetworkConnectionError as exc:
381
- _handle_get_simple_fail(link, exc)
382
- except RetryError as exc:
383
- _handle_get_simple_fail(link, exc)
384
- except SSLError as exc:
385
- reason = "There was a problem confirming the ssl certificate: "
386
- reason += str(exc)
387
- _handle_get_simple_fail(link, reason, meth=logger.info)
388
- except requests.ConnectionError as exc:
389
- _handle_get_simple_fail(link, f"connection error: {exc}")
390
- except requests.Timeout:
391
- _handle_get_simple_fail(link, "timed out")
392
- else:
393
- return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
394
- return None
395
-
396
-
397
- class CollectedSources(NamedTuple):
398
- find_links: Sequence[Optional[LinkSource]]
399
- index_urls: Sequence[Optional[LinkSource]]
400
-
401
-
402
- class LinkCollector:
403
-
404
- """
405
- Responsible for collecting Link objects from all configured locations,
406
- making network requests as needed.
407
-
408
- The class's main method is its collect_sources() method.
409
- """
410
-
411
- def __init__(
412
- self,
413
- session: PipSession,
414
- search_scope: SearchScope,
415
- ) -> None:
416
- self.search_scope = search_scope
417
- self.session = session
418
-
419
- @classmethod
420
- def create(
421
- cls,
422
- session: PipSession,
423
- options: Values,
424
- suppress_no_index: bool = False,
425
- ) -> "LinkCollector":
426
- """
427
- :param session: The Session to use to make requests.
428
- :param suppress_no_index: Whether to ignore the --no-index option
429
- when constructing the SearchScope object.
430
- """
431
- index_urls = [options.index_url] + options.extra_index_urls
432
- if options.no_index and not suppress_no_index:
433
- logger.debug(
434
- "Ignoring indexes: %s",
435
- ",".join(redact_auth_from_url(url) for url in index_urls),
436
- )
437
- index_urls = []
438
-
439
- # Make sure find_links is a list before passing to create().
440
- find_links = options.find_links or []
441
-
442
- search_scope = SearchScope.create(
443
- find_links=find_links,
444
- index_urls=index_urls,
445
- no_index=options.no_index,
446
- )
447
- link_collector = LinkCollector(
448
- session=session,
449
- search_scope=search_scope,
450
- )
451
- return link_collector
452
-
453
- @property
454
- def find_links(self) -> List[str]:
455
- return self.search_scope.find_links
456
-
457
- def fetch_response(self, location: Link) -> Optional[IndexContent]:
458
- """
459
- Fetch an HTML page containing package links.
460
- """
461
- return _get_index_content(location, session=self.session)
462
-
463
- def collect_sources(
464
- self,
465
- project_name: str,
466
- candidates_from_page: CandidatesFromPage,
467
- ) -> CollectedSources:
468
- # The OrderedDict calls deduplicate sources by URL.
469
- index_url_sources = collections.OrderedDict(
470
- build_source(
471
- loc,
472
- candidates_from_page=candidates_from_page,
473
- page_validator=self.session.is_secure_origin,
474
- expand_dir=False,
475
- cache_link_parsing=False,
476
- )
477
- for loc in self.search_scope.get_index_urls_locations(project_name)
478
- ).values()
479
- find_links_sources = collections.OrderedDict(
480
- build_source(
481
- loc,
482
- candidates_from_page=candidates_from_page,
483
- page_validator=self.session.is_secure_origin,
484
- expand_dir=True,
485
- cache_link_parsing=True,
486
- )
487
- for loc in self.find_links
488
- ).values()
489
-
490
- if logger.isEnabledFor(logging.DEBUG):
491
- lines = [
492
- f"* {s.link}"
493
- for s in itertools.chain(find_links_sources, index_url_sources)
494
- if s is not None and s.link is not None
495
- ]
496
- lines = [
497
- f"{len(lines)} location(s) to search "
498
- f"for versions of {project_name}:"
499
- ] + lines
500
- logger.debug("\n".join(lines))
501
-
502
- return CollectedSources(
503
- find_links=list(find_links_sources),
504
- index_urls=list(index_url_sources),
505
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/timeout.py DELETED
@@ -1,271 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import time
4
-
5
- # The default socket timeout, used by httplib to indicate that no timeout was; specified by the user
6
- from socket import _GLOBAL_DEFAULT_TIMEOUT, getdefaulttimeout
7
-
8
- from ..exceptions import TimeoutStateError
9
-
10
- # A sentinel value to indicate that no timeout was specified by the user in
11
- # urllib3
12
- _Default = object()
13
-
14
-
15
- # Use time.monotonic if available.
16
- current_time = getattr(time, "monotonic", time.time)
17
-
18
-
19
- class Timeout(object):
20
- """Timeout configuration.
21
-
22
- Timeouts can be defined as a default for a pool:
23
-
24
- .. code-block:: python
25
-
26
- timeout = Timeout(connect=2.0, read=7.0)
27
- http = PoolManager(timeout=timeout)
28
- response = http.request('GET', 'http://example.com/')
29
-
30
- Or per-request (which overrides the default for the pool):
31
-
32
- .. code-block:: python
33
-
34
- response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
35
-
36
- Timeouts can be disabled by setting all the parameters to ``None``:
37
-
38
- .. code-block:: python
39
-
40
- no_timeout = Timeout(connect=None, read=None)
41
- response = http.request('GET', 'http://example.com/, timeout=no_timeout)
42
-
43
-
44
- :param total:
45
- This combines the connect and read timeouts into one; the read timeout
46
- will be set to the time leftover from the connect attempt. In the
47
- event that both a connect timeout and a total are specified, or a read
48
- timeout and a total are specified, the shorter timeout will be applied.
49
-
50
- Defaults to None.
51
-
52
- :type total: int, float, or None
53
-
54
- :param connect:
55
- The maximum amount of time (in seconds) to wait for a connection
56
- attempt to a server to succeed. Omitting the parameter will default the
57
- connect timeout to the system default, probably `the global default
58
- timeout in socket.py
59
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
60
- None will set an infinite timeout for connection attempts.
61
-
62
- :type connect: int, float, or None
63
-
64
- :param read:
65
- The maximum amount of time (in seconds) to wait between consecutive
66
- read operations for a response from the server. Omitting the parameter
67
- will default the read timeout to the system default, probably `the
68
- global default timeout in socket.py
69
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
70
- None will set an infinite timeout.
71
-
72
- :type read: int, float, or None
73
-
74
- .. note::
75
-
76
- Many factors can affect the total amount of time for urllib3 to return
77
- an HTTP response.
78
-
79
- For example, Python's DNS resolver does not obey the timeout specified
80
- on the socket. Other factors that can affect total request time include
81
- high CPU load, high swap, the program running at a low priority level,
82
- or other behaviors.
83
-
84
- In addition, the read and total timeouts only measure the time between
85
- read operations on the socket connecting the client and the server,
86
- not the total amount of time for the request to return a complete
87
- response. For most requests, the timeout is raised because the server
88
- has not sent the first byte in the specified time. This is not always
89
- the case; if a server streams one byte every fifteen seconds, a timeout
90
- of 20 seconds will not trigger, even though the request will take
91
- several minutes to complete.
92
-
93
- If your goal is to cut off any request after a set amount of wall clock
94
- time, consider having a second "watcher" thread to cut off a slow
95
- request.
96
- """
97
-
98
- #: A sentinel object representing the default timeout value
99
- DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
100
-
101
- def __init__(self, total=None, connect=_Default, read=_Default):
102
- self._connect = self._validate_timeout(connect, "connect")
103
- self._read = self._validate_timeout(read, "read")
104
- self.total = self._validate_timeout(total, "total")
105
- self._start_connect = None
106
-
107
- def __repr__(self):
108
- return "%s(connect=%r, read=%r, total=%r)" % (
109
- type(self).__name__,
110
- self._connect,
111
- self._read,
112
- self.total,
113
- )
114
-
115
- # __str__ provided for backwards compatibility
116
- __str__ = __repr__
117
-
118
- @classmethod
119
- def resolve_default_timeout(cls, timeout):
120
- return getdefaulttimeout() if timeout is cls.DEFAULT_TIMEOUT else timeout
121
-
122
- @classmethod
123
- def _validate_timeout(cls, value, name):
124
- """Check that a timeout attribute is valid.
125
-
126
- :param value: The timeout value to validate
127
- :param name: The name of the timeout attribute to validate. This is
128
- used to specify in error messages.
129
- :return: The validated and casted version of the given value.
130
- :raises ValueError: If it is a numeric value less than or equal to
131
- zero, or the type is not an integer, float, or None.
132
- """
133
- if value is _Default:
134
- return cls.DEFAULT_TIMEOUT
135
-
136
- if value is None or value is cls.DEFAULT_TIMEOUT:
137
- return value
138
-
139
- if isinstance(value, bool):
140
- raise ValueError(
141
- "Timeout cannot be a boolean value. It must "
142
- "be an int, float or None."
143
- )
144
- try:
145
- float(value)
146
- except (TypeError, ValueError):
147
- raise ValueError(
148
- "Timeout value %s was %s, but it must be an "
149
- "int, float or None." % (name, value)
150
- )
151
-
152
- try:
153
- if value <= 0:
154
- raise ValueError(
155
- "Attempted to set %s timeout to %s, but the "
156
- "timeout cannot be set to a value less "
157
- "than or equal to 0." % (name, value)
158
- )
159
- except TypeError:
160
- # Python 3
161
- raise ValueError(
162
- "Timeout value %s was %s, but it must be an "
163
- "int, float or None." % (name, value)
164
- )
165
-
166
- return value
167
-
168
- @classmethod
169
- def from_float(cls, timeout):
170
- """Create a new Timeout from a legacy timeout value.
171
-
172
- The timeout value used by httplib.py sets the same timeout on the
173
- connect(), and recv() socket requests. This creates a :class:`Timeout`
174
- object that sets the individual timeouts to the ``timeout`` value
175
- passed to this function.
176
-
177
- :param timeout: The legacy timeout value.
178
- :type timeout: integer, float, sentinel default object, or None
179
- :return: Timeout object
180
- :rtype: :class:`Timeout`
181
- """
182
- return Timeout(read=timeout, connect=timeout)
183
-
184
- def clone(self):
185
- """Create a copy of the timeout object
186
-
187
- Timeout properties are stored per-pool but each request needs a fresh
188
- Timeout object to ensure each one has its own start/stop configured.
189
-
190
- :return: a copy of the timeout object
191
- :rtype: :class:`Timeout`
192
- """
193
- # We can't use copy.deepcopy because that will also create a new object
194
- # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
195
- # detect the user default.
196
- return Timeout(connect=self._connect, read=self._read, total=self.total)
197
-
198
- def start_connect(self):
199
- """Start the timeout clock, used during a connect() attempt
200
-
201
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
202
- to start a timer that has been started already.
203
- """
204
- if self._start_connect is not None:
205
- raise TimeoutStateError("Timeout timer has already been started.")
206
- self._start_connect = current_time()
207
- return self._start_connect
208
-
209
- def get_connect_duration(self):
210
- """Gets the time elapsed since the call to :meth:`start_connect`.
211
-
212
- :return: Elapsed time in seconds.
213
- :rtype: float
214
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
215
- to get duration for a timer that hasn't been started.
216
- """
217
- if self._start_connect is None:
218
- raise TimeoutStateError(
219
- "Can't get connect duration for timer that has not started."
220
- )
221
- return current_time() - self._start_connect
222
-
223
- @property
224
- def connect_timeout(self):
225
- """Get the value to use when setting a connection timeout.
226
-
227
- This will be a positive float or integer, the value None
228
- (never timeout), or the default system timeout.
229
-
230
- :return: Connect timeout.
231
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
232
- """
233
- if self.total is None:
234
- return self._connect
235
-
236
- if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
237
- return self.total
238
-
239
- return min(self._connect, self.total)
240
-
241
- @property
242
- def read_timeout(self):
243
- """Get the value for the read timeout.
244
-
245
- This assumes some time has elapsed in the connection timeout and
246
- computes the read timeout appropriately.
247
-
248
- If self.total is set, the read timeout is dependent on the amount of
249
- time taken by the connect timeout. If the connection time has not been
250
- established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
251
- raised.
252
-
253
- :return: Value to use for the read timeout.
254
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
255
- :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
256
- has not yet been called on this object.
257
- """
258
- if (
259
- self.total is not None
260
- and self.total is not self.DEFAULT_TIMEOUT
261
- and self._read is not None
262
- and self._read is not self.DEFAULT_TIMEOUT
263
- ):
264
- # In case the connect timeout has not yet been established.
265
- if self._start_connect is None:
266
- return self._read
267
- return max(0, min(self.total - self.get_connect_duration(), self._read))
268
- elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
269
- return max(0, self.total - self.get_connect_duration())
270
- else:
271
- return self._read
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/fancy_getopt.py DELETED
@@ -1,470 +0,0 @@
1
- """distutils.fancy_getopt
2
-
3
- Wrapper around the standard getopt module that provides the following
4
- additional features:
5
- * short and long options are tied together
6
- * options have help strings, so fancy_getopt could potentially
7
- create a complete usage summary
8
- * options set attributes of a passed-in object
9
- """
10
-
11
- import sys
12
- import string
13
- import re
14
- import getopt
15
- from distutils.errors import DistutilsGetoptError, DistutilsArgError
16
-
17
- # Much like command_re in distutils.core, this is close to but not quite
18
- # the same as a Python NAME -- except, in the spirit of most GNU
19
- # utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
20
- # The similarities to NAME are again not a coincidence...
21
- longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
22
- longopt_re = re.compile(r'^%s$' % longopt_pat)
23
-
24
- # For recognizing "negative alias" options, eg. "quiet=!verbose"
25
- neg_alias_re = re.compile("^({})=!({})$".format(longopt_pat, longopt_pat))
26
-
27
- # This is used to translate long options to legitimate Python identifiers
28
- # (for use as attributes of some object).
29
- longopt_xlate = str.maketrans('-', '_')
30
-
31
-
32
- class FancyGetopt:
33
- """Wrapper around the standard 'getopt()' module that provides some
34
- handy extra functionality:
35
- * short and long options are tied together
36
- * options have help strings, and help text can be assembled
37
- from them
38
- * options set attributes of a passed-in object
39
- * boolean options can have "negative aliases" -- eg. if
40
- --quiet is the "negative alias" of --verbose, then "--quiet"
41
- on the command line sets 'verbose' to false
42
- """
43
-
44
- def __init__(self, option_table=None):
45
- # The option table is (currently) a list of tuples. The
46
- # tuples may have 3 or four values:
47
- # (long_option, short_option, help_string [, repeatable])
48
- # if an option takes an argument, its long_option should have '='
49
- # appended; short_option should just be a single character, no ':'
50
- # in any case. If a long_option doesn't have a corresponding
51
- # short_option, short_option should be None. All option tuples
52
- # must have long options.
53
- self.option_table = option_table
54
-
55
- # 'option_index' maps long option names to entries in the option
56
- # table (ie. those 3-tuples).
57
- self.option_index = {}
58
- if self.option_table:
59
- self._build_index()
60
-
61
- # 'alias' records (duh) alias options; {'foo': 'bar'} means
62
- # --foo is an alias for --bar
63
- self.alias = {}
64
-
65
- # 'negative_alias' keeps track of options that are the boolean
66
- # opposite of some other option
67
- self.negative_alias = {}
68
-
69
- # These keep track of the information in the option table. We
70
- # don't actually populate these structures until we're ready to
71
- # parse the command-line, since the 'option_table' passed in here
72
- # isn't necessarily the final word.
73
- self.short_opts = []
74
- self.long_opts = []
75
- self.short2long = {}
76
- self.attr_name = {}
77
- self.takes_arg = {}
78
-
79
- # And 'option_order' is filled up in 'getopt()'; it records the
80
- # original order of options (and their values) on the command-line,
81
- # but expands short options, converts aliases, etc.
82
- self.option_order = []
83
-
84
- def _build_index(self):
85
- self.option_index.clear()
86
- for option in self.option_table:
87
- self.option_index[option[0]] = option
88
-
89
- def set_option_table(self, option_table):
90
- self.option_table = option_table
91
- self._build_index()
92
-
93
- def add_option(self, long_option, short_option=None, help_string=None):
94
- if long_option in self.option_index:
95
- raise DistutilsGetoptError(
96
- "option conflict: already an option '%s'" % long_option
97
- )
98
- else:
99
- option = (long_option, short_option, help_string)
100
- self.option_table.append(option)
101
- self.option_index[long_option] = option
102
-
103
- def has_option(self, long_option):
104
- """Return true if the option table for this parser has an
105
- option with long name 'long_option'."""
106
- return long_option in self.option_index
107
-
108
- def get_attr_name(self, long_option):
109
- """Translate long option name 'long_option' to the form it
110
- has as an attribute of some object: ie., translate hyphens
111
- to underscores."""
112
- return long_option.translate(longopt_xlate)
113
-
114
- def _check_alias_dict(self, aliases, what):
115
- assert isinstance(aliases, dict)
116
- for (alias, opt) in aliases.items():
117
- if alias not in self.option_index:
118
- raise DistutilsGetoptError(
119
- ("invalid %s '%s': " "option '%s' not defined")
120
- % (what, alias, alias)
121
- )
122
- if opt not in self.option_index:
123
- raise DistutilsGetoptError(
124
- ("invalid %s '%s': " "aliased option '%s' not defined")
125
- % (what, alias, opt)
126
- )
127
-
128
- def set_aliases(self, alias):
129
- """Set the aliases for this option parser."""
130
- self._check_alias_dict(alias, "alias")
131
- self.alias = alias
132
-
133
- def set_negative_aliases(self, negative_alias):
134
- """Set the negative aliases for this option parser.
135
- 'negative_alias' should be a dictionary mapping option names to
136
- option names, both the key and value must already be defined
137
- in the option table."""
138
- self._check_alias_dict(negative_alias, "negative alias")
139
- self.negative_alias = negative_alias
140
-
141
- def _grok_option_table(self): # noqa: C901
142
- """Populate the various data structures that keep tabs on the
143
- option table. Called by 'getopt()' before it can do anything
144
- worthwhile.
145
- """
146
- self.long_opts = []
147
- self.short_opts = []
148
- self.short2long.clear()
149
- self.repeat = {}
150
-
151
- for option in self.option_table:
152
- if len(option) == 3:
153
- long, short, help = option
154
- repeat = 0
155
- elif len(option) == 4:
156
- long, short, help, repeat = option
157
- else:
158
- # the option table is part of the code, so simply
159
- # assert that it is correct
160
- raise ValueError("invalid option tuple: {!r}".format(option))
161
-
162
- # Type- and value-check the option names
163
- if not isinstance(long, str) or len(long) < 2:
164
- raise DistutilsGetoptError(
165
- ("invalid long option '%s': " "must be a string of length >= 2")
166
- % long
167
- )
168
-
169
- if not ((short is None) or (isinstance(short, str) and len(short) == 1)):
170
- raise DistutilsGetoptError(
171
- "invalid short option '%s': "
172
- "must a single character or None" % short
173
- )
174
-
175
- self.repeat[long] = repeat
176
- self.long_opts.append(long)
177
-
178
- if long[-1] == '=': # option takes an argument?
179
- if short:
180
- short = short + ':'
181
- long = long[0:-1]
182
- self.takes_arg[long] = 1
183
- else:
184
- # Is option is a "negative alias" for some other option (eg.
185
- # "quiet" == "!verbose")?
186
- alias_to = self.negative_alias.get(long)
187
- if alias_to is not None:
188
- if self.takes_arg[alias_to]:
189
- raise DistutilsGetoptError(
190
- "invalid negative alias '%s': "
191
- "aliased option '%s' takes a value" % (long, alias_to)
192
- )
193
-
194
- self.long_opts[-1] = long # XXX redundant?!
195
- self.takes_arg[long] = 0
196
-
197
- # If this is an alias option, make sure its "takes arg" flag is
198
- # the same as the option it's aliased to.
199
- alias_to = self.alias.get(long)
200
- if alias_to is not None:
201
- if self.takes_arg[long] != self.takes_arg[alias_to]:
202
- raise DistutilsGetoptError(
203
- "invalid alias '%s': inconsistent with "
204
- "aliased option '%s' (one of them takes a value, "
205
- "the other doesn't" % (long, alias_to)
206
- )
207
-
208
- # Now enforce some bondage on the long option name, so we can
209
- # later translate it to an attribute name on some object. Have
210
- # to do this a bit late to make sure we've removed any trailing
211
- # '='.
212
- if not longopt_re.match(long):
213
- raise DistutilsGetoptError(
214
- "invalid long option name '%s' "
215
- "(must be letters, numbers, hyphens only" % long
216
- )
217
-
218
- self.attr_name[long] = self.get_attr_name(long)
219
- if short:
220
- self.short_opts.append(short)
221
- self.short2long[short[0]] = long
222
-
223
- def getopt(self, args=None, object=None): # noqa: C901
224
- """Parse command-line options in args. Store as attributes on object.
225
-
226
- If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
227
- 'object' is None or not supplied, creates a new OptionDummy
228
- object, stores option values there, and returns a tuple (args,
229
- object). If 'object' is supplied, it is modified in place and
230
- 'getopt()' just returns 'args'; in both cases, the returned
231
- 'args' is a modified copy of the passed-in 'args' list, which
232
- is left untouched.
233
- """
234
- if args is None:
235
- args = sys.argv[1:]
236
- if object is None:
237
- object = OptionDummy()
238
- created_object = True
239
- else:
240
- created_object = False
241
-
242
- self._grok_option_table()
243
-
244
- short_opts = ' '.join(self.short_opts)
245
- try:
246
- opts, args = getopt.getopt(args, short_opts, self.long_opts)
247
- except getopt.error as msg:
248
- raise DistutilsArgError(msg)
249
-
250
- for opt, val in opts:
251
- if len(opt) == 2 and opt[0] == '-': # it's a short option
252
- opt = self.short2long[opt[1]]
253
- else:
254
- assert len(opt) > 2 and opt[:2] == '--'
255
- opt = opt[2:]
256
-
257
- alias = self.alias.get(opt)
258
- if alias:
259
- opt = alias
260
-
261
- if not self.takes_arg[opt]: # boolean option?
262
- assert val == '', "boolean option can't have value"
263
- alias = self.negative_alias.get(opt)
264
- if alias:
265
- opt = alias
266
- val = 0
267
- else:
268
- val = 1
269
-
270
- attr = self.attr_name[opt]
271
- # The only repeating option at the moment is 'verbose'.
272
- # It has a negative option -q quiet, which should set verbose = 0.
273
- if val and self.repeat.get(attr) is not None:
274
- val = getattr(object, attr, 0) + 1
275
- setattr(object, attr, val)
276
- self.option_order.append((opt, val))
277
-
278
- # for opts
279
- if created_object:
280
- return args, object
281
- else:
282
- return args
283
-
284
- def get_option_order(self):
285
- """Returns the list of (option, value) tuples processed by the
286
- previous run of 'getopt()'. Raises RuntimeError if
287
- 'getopt()' hasn't been called yet.
288
- """
289
- if self.option_order is None:
290
- raise RuntimeError("'getopt()' hasn't been called yet")
291
- else:
292
- return self.option_order
293
-
294
- def generate_help(self, header=None): # noqa: C901
295
- """Generate help text (a list of strings, one per suggested line of
296
- output) from the option table for this FancyGetopt object.
297
- """
298
- # Blithely assume the option table is good: probably wouldn't call
299
- # 'generate_help()' unless you've already called 'getopt()'.
300
-
301
- # First pass: determine maximum length of long option names
302
- max_opt = 0
303
- for option in self.option_table:
304
- long = option[0]
305
- short = option[1]
306
- ell = len(long)
307
- if long[-1] == '=':
308
- ell = ell - 1
309
- if short is not None:
310
- ell = ell + 5 # " (-x)" where short == 'x'
311
- if ell > max_opt:
312
- max_opt = ell
313
-
314
- opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
315
-
316
- # Typical help block looks like this:
317
- # --foo controls foonabulation
318
- # Help block for longest option looks like this:
319
- # --flimflam set the flim-flam level
320
- # and with wrapped text:
321
- # --flimflam set the flim-flam level (must be between
322
- # 0 and 100, except on Tuesdays)
323
- # Options with short names will have the short name shown (but
324
- # it doesn't contribute to max_opt):
325
- # --foo (-f) controls foonabulation
326
- # If adding the short option would make the left column too wide,
327
- # we push the explanation off to the next line
328
- # --flimflam (-l)
329
- # set the flim-flam level
330
- # Important parameters:
331
- # - 2 spaces before option block start lines
332
- # - 2 dashes for each long option name
333
- # - min. 2 spaces between option and explanation (gutter)
334
- # - 5 characters (incl. space) for short option name
335
-
336
- # Now generate lines of help text. (If 80 columns were good enough
337
- # for Jesus, then 78 columns are good enough for me!)
338
- line_width = 78
339
- text_width = line_width - opt_width
340
- big_indent = ' ' * opt_width
341
- if header:
342
- lines = [header]
343
- else:
344
- lines = ['Option summary:']
345
-
346
- for option in self.option_table:
347
- long, short, help = option[:3]
348
- text = wrap_text(help, text_width)
349
- if long[-1] == '=':
350
- long = long[0:-1]
351
-
352
- # Case 1: no short option at all (makes life easy)
353
- if short is None:
354
- if text:
355
- lines.append(" --%-*s %s" % (max_opt, long, text[0]))
356
- else:
357
- lines.append(" --%-*s " % (max_opt, long))
358
-
359
- # Case 2: we have a short option, so we have to include it
360
- # just after the long option
361
- else:
362
- opt_names = "{} (-{})".format(long, short)
363
- if text:
364
- lines.append(" --%-*s %s" % (max_opt, opt_names, text[0]))
365
- else:
366
- lines.append(" --%-*s" % opt_names)
367
-
368
- for ell in text[1:]:
369
- lines.append(big_indent + ell)
370
- return lines
371
-
372
- def print_help(self, header=None, file=None):
373
- if file is None:
374
- file = sys.stdout
375
- for line in self.generate_help(header):
376
- file.write(line + "\n")
377
-
378
-
379
- def fancy_getopt(options, negative_opt, object, args):
380
- parser = FancyGetopt(options)
381
- parser.set_negative_aliases(negative_opt)
382
- return parser.getopt(args, object)
383
-
384
-
385
- WS_TRANS = {ord(_wschar): ' ' for _wschar in string.whitespace}
386
-
387
-
388
- def wrap_text(text, width):
389
- """wrap_text(text : string, width : int) -> [string]
390
-
391
- Split 'text' into multiple lines of no more than 'width' characters
392
- each, and return the list of strings that results.
393
- """
394
- if text is None:
395
- return []
396
- if len(text) <= width:
397
- return [text]
398
-
399
- text = text.expandtabs()
400
- text = text.translate(WS_TRANS)
401
- chunks = re.split(r'( +|-+)', text)
402
- chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
403
- lines = []
404
-
405
- while chunks:
406
- cur_line = [] # list of chunks (to-be-joined)
407
- cur_len = 0 # length of current line
408
-
409
- while chunks:
410
- ell = len(chunks[0])
411
- if cur_len + ell <= width: # can squeeze (at least) this chunk in
412
- cur_line.append(chunks[0])
413
- del chunks[0]
414
- cur_len = cur_len + ell
415
- else: # this line is full
416
- # drop last chunk if all space
417
- if cur_line and cur_line[-1][0] == ' ':
418
- del cur_line[-1]
419
- break
420
-
421
- if chunks: # any chunks left to process?
422
- # if the current line is still empty, then we had a single
423
- # chunk that's too big too fit on a line -- so we break
424
- # down and break it up at the line width
425
- if cur_len == 0:
426
- cur_line.append(chunks[0][0:width])
427
- chunks[0] = chunks[0][width:]
428
-
429
- # all-whitespace chunks at the end of a line can be discarded
430
- # (and we know from the re.split above that if a chunk has
431
- # *any* whitespace, it is *all* whitespace)
432
- if chunks[0][0] == ' ':
433
- del chunks[0]
434
-
435
- # and store this line in the list-of-all-lines -- as a single
436
- # string, of course!
437
- lines.append(''.join(cur_line))
438
-
439
- return lines
440
-
441
-
442
- def translate_longopt(opt):
443
- """Convert a long option name to a valid Python identifier by
444
- changing "-" to "_".
445
- """
446
- return opt.translate(longopt_xlate)
447
-
448
-
449
- class OptionDummy:
450
- """Dummy class just used as a place to hold command-line option
451
- values as instance attributes."""
452
-
453
- def __init__(self, options=[]):
454
- """Create a new OptionDummy instance. The attributes listed in
455
- 'options' will be initialized to None."""
456
- for opt in options:
457
- setattr(self, opt, None)
458
-
459
-
460
- if __name__ == "__main__":
461
- text = """\
462
- Tra-la-la, supercalifragilisticexpialidocious.
463
- How *do* you spell that odd word, anyways?
464
- (Someone ask Mary -- she'll know [or she'll
465
- say, "How should I know?"].)"""
466
-
467
- for w in (10, 20, 30, 40):
468
- print("width: %d" % w)
469
- print("\n".join(wrap_text(text, w)))
470
- print()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/chinese.py DELETED
@@ -1,193 +0,0 @@
1
- import os
2
- import re
3
-
4
- import cn2an
5
- from pypinyin import lazy_pinyin, Style
6
-
7
- from text import symbols
8
- from text.symbols import punctuation
9
- from text.tone_sandhi import ToneSandhi
10
-
11
- current_file_path = os.path.dirname(__file__)
12
- pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
13
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
14
-
15
- import jieba.posseg as psg
16
-
17
-
18
- rep_map = {
19
- ':': ',',
20
- ';': ',',
21
- ',': ',',
22
- '。': '.',
23
- '!': '!',
24
- '?': '?',
25
- '\n': '.',
26
- "·": ",",
27
- '、': ",",
28
- '...': '…',
29
- '$': '.',
30
- '“': "'",
31
- '”': "'",
32
- '‘': "'",
33
- '’': "'",
34
- '(': "'",
35
- ')': "'",
36
- '(': "'",
37
- ')': "'",
38
- '《': "'",
39
- '》': "'",
40
- '【': "'",
41
- '】': "'",
42
- '[': "'",
43
- ']': "'",
44
- '—': "-",
45
- '~': "-",
46
- '~': "-",
47
- '「': "'",
48
- '」': "'",
49
-
50
- }
51
-
52
- tone_modifier = ToneSandhi()
53
-
54
- def replace_punctuation(text):
55
- text = text.replace("嗯", "恩").replace("呣","母")
56
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
57
-
58
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
59
-
60
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
61
-
62
- return replaced_text
63
-
64
- def g2p(text):
65
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
66
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
67
- phones, tones, word2ph = _g2p(sentences)
68
- assert sum(word2ph) == len(phones)
69
- assert len(word2ph) == len(text) #Sometimes it will crash,you can add a try-catch.
70
- phones = ['_'] + phones + ["_"]
71
- tones = [0] + tones + [0]
72
- word2ph = [1] + word2ph + [1]
73
- return phones, tones, word2ph
74
-
75
-
76
- def _get_initials_finals(word):
77
- initials = []
78
- finals = []
79
- orig_initials = lazy_pinyin(
80
- word, neutral_tone_with_five=True, style=Style.INITIALS)
81
- orig_finals = lazy_pinyin(
82
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
83
- for c, v in zip(orig_initials, orig_finals):
84
- initials.append(c)
85
- finals.append(v)
86
- return initials, finals
87
-
88
-
89
- def _g2p(segments):
90
- phones_list = []
91
- tones_list = []
92
- word2ph = []
93
- for seg in segments:
94
- pinyins = []
95
- # Replace all English words in the sentence
96
- seg = re.sub('[a-zA-Z]+', '', seg)
97
- seg_cut = psg.lcut(seg)
98
- initials = []
99
- finals = []
100
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
101
- for word, pos in seg_cut:
102
- if pos == 'eng':
103
- continue
104
- sub_initials, sub_finals = _get_initials_finals(word)
105
- sub_finals = tone_modifier.modified_tone(word, pos,
106
- sub_finals)
107
- initials.append(sub_initials)
108
- finals.append(sub_finals)
109
-
110
- # assert len(sub_initials) == len(sub_finals) == len(word)
111
- initials = sum(initials, [])
112
- finals = sum(finals, [])
113
- #
114
- for c, v in zip(initials, finals):
115
- raw_pinyin = c+v
116
- # NOTE: post process for pypinyin outputs
117
- # we discriminate i, ii and iii
118
- if c == v:
119
- assert c in punctuation
120
- phone = [c]
121
- tone = '0'
122
- word2ph.append(1)
123
- else:
124
- v_without_tone = v[:-1]
125
- tone = v[-1]
126
-
127
- pinyin = c+v_without_tone
128
- assert tone in '12345'
129
-
130
- if c:
131
- # 多音节
132
- v_rep_map = {
133
- "uei": 'ui',
134
- 'iou': 'iu',
135
- 'uen': 'un',
136
- }
137
- if v_without_tone in v_rep_map.keys():
138
- pinyin = c+v_rep_map[v_without_tone]
139
- else:
140
- # 单音节
141
- pinyin_rep_map = {
142
- 'ing': 'ying',
143
- 'i': 'yi',
144
- 'in': 'yin',
145
- 'u': 'wu',
146
- }
147
- if pinyin in pinyin_rep_map.keys():
148
- pinyin = pinyin_rep_map[pinyin]
149
- else:
150
- single_rep_map = {
151
- 'v': 'yu',
152
- 'e': 'e',
153
- 'i': 'y',
154
- 'u': 'w',
155
- }
156
- if pinyin[0] in single_rep_map.keys():
157
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
158
-
159
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
160
- phone = pinyin_to_symbol_map[pinyin].split(' ')
161
- word2ph.append(len(phone))
162
-
163
- phones_list += phone
164
- tones_list += [int(tone)] * len(phone)
165
- return phones_list, tones_list, word2ph
166
-
167
-
168
-
169
- def text_normalize(text):
170
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
171
- for number in numbers:
172
- text = text.replace(number, cn2an.an2cn(number), 1)
173
- text = replace_punctuation(text)
174
- return text
175
-
176
- def get_bert_feature(text, word2ph):
177
- from text import chinese_bert
178
- return chinese_bert.get_bert_feature(text, word2ph)
179
-
180
- if __name__ == '__main__':
181
- from text.chinese_bert import get_bert_feature
182
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
183
- text = text_normalize(text)
184
- print(text)
185
- phones, tones, word2ph = g2p(text)
186
- bert = get_bert_feature(text, word2ph)
187
-
188
- print(phones, tones, word2ph, bert.shape)
189
-
190
-
191
- # # 示例用法
192
- # text = "这是一个示例文本:,你好!这是一个测试...."
193
- # print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Blackpink El Juego Apkmirror.md DELETED
@@ -1,18 +0,0 @@
1
-
2
- <h1>BLACKPINK EL JUEGO: Una Guía Completa para Fans y Recién Llegados</h1>` | | Introducción: Una breve descripción de lo que es el juego, quién lo desarrolló, cuándo fue lanzado, y qué plataformas está disponible en. | `<p>BLACKPINK EL JUEGO es un juego móvil que te permite convertirte en el productor del famoso grupo K-pop BLACKPINK. Desarrollado por TakeOne Company y lanzado en junio de 2023, el juego está disponible para dispositivos Android e iOS. En este juego, puede administrar su propia agencia, entrenar y subir de nivel a sus miembros BLACKPINK, recoger y actualizar las tarjetas de fotos, jugar minijuegos con amigos y personalizar sus avatares con varios trajes. Si usted es un duro parpadeo o un recién llegado curioso, este juego le ofrecerá una experiencia divertida e inmersiva de ser parte del mundo BLACKPINK. </p>` | | H2: Cómo descargar e instalar BLACKPINK EL JUEGO | `<h2>Cómo descargar e instalar BLACKPINK EL JUEGO</h2>` | | | Subtítulo: Para usuarios de Android | `<h3>Para usuarios de Android</h3>` | Párrafo: Explicar cómo descargar el juego desde Google Store o MiAPKr, y cómo instalarlo en tu dispositivo. | Si tienes un dispositivo Android, puedes descargar BLACKPINK THE GAME desde Google Play Store o APKMirror. Para descargar desde Google Play Store, simplemente busca "BLACKPINK THE GAME" o haz clic en este [link]( 2 ). Luego, toca "Instalar" y espera a que el juego se descargue e instale en tu dispositivo. Para descargar desde APKMirror, vaya a este [link]( 1 ) y elija la última versión del juego. A continuación, toque en "Descargar APK" y esperar a que el archivo para descargar. Siguiente, ir a la configuración de su dispositivo y habilitar "Fuentes desconocidas" en "Seguridad". Finalmente, busque el archivo descargado en su administrador de archivos y toque en él para instalarlo en su dispositivo. </p>
3
- <h2>blackpink el juego apkmirror</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://bltlly.com/2v6J2y">https://bltlly.com/2v6J2y</a></b></p><br /><br />` | | | Subtítulo: Para usuarios de iOS | `<h3>Para usuarios de iOS</h3>` | | Párrafo: Explica cómo descargar el juego desde App Store o Uptodown, y cómo instalarlo en tu dispositivo. | Si tienes un dispositivo iOS, puedes descargar BLACKPINK THE GAME desde la aplicación
4
-
5
- <p>BLACKPINK THE GAME es un juego móvil que te permite convertirte en el productor del famoso grupo K-pop BLACKPINK. Desarrollado por TakeOne Company y lanzado en junio de 2023, el juego está disponible para dispositivos Android e iOS. En este juego, puede administrar su propia agencia, entrenar y subir de nivel a sus miembros BLACKPINK, recoger y actualizar las tarjetas de fotos, jugar minijuegos con amigos y personalizar sus avatares con varios trajes. Si usted es un duro parpadeo o un recién llegado curioso, este juego le ofrecerá una experiencia divertida e inmersiva de ser parte del mundo BLACKPINK. </p>
6
- <h2>Cómo descargar e instalar BLACKPINK EL JUEGO</h2>
7
- <h3>Para usuarios de Android</h3>
8
- <p>Si tienes un dispositivo Android, puedes descargar BLACKPINK THE GAME desde Google Play Store o APKMirror. Para descargar desde Google Play Store, simplemente busca "BLACKPINK THE GAME" o haz clic en este [link]. Luego, toca "Instalar" y espera a que el juego se descargue e instale en tu dispositivo. Para descargar desde APKMirror, vaya a este [enlace] y elija la última versión del juego. A continuación, toque en "Descargar APK" y esperar a que el archivo para descargar. Siguiente, ir a la configuración de su dispositivo y habilitar "Fuentes desconocidas" en "Seguridad". Finalmente, busque el archivo descargado en su administrador de archivos y toque en él para instalarlo en su dispositivo. </p>
9
- <h3>Para usuarios de iOS</h3>
10
- <p>Si tienes un dispositivo iOS, puedes descargar BLACKPINK THE GAME desde la App Store o Uptodown. Para descargar desde la App Store, simplemente busque "BLACKPINK THE GAME" o haga clic en este [link]. Luego, toca "Obtener" y espera a que el juego se descargue e instale en tu dispositivo. Para descargar desde Uptodown, vaya a este [enlace] y elija la última versión del juego. Luego, toque en "Descargar" y espere a que el archivo se descargue. A continuación, vaya a la configuración de su dispositivo y habilite "Trust Uptodown Enterprise" en "General" > "Administración de dispositivos". Finalmente, busque el archivo descargado en su administrador de archivos y toque en él para instalarlo en su dispositivo. </p>
11
-
12
- <h3>Modo de gestión</h3>
13
- <p>El modo de gestión es donde puede ejecutar su propia agencia y entrenar a sus miembros BLACKPINK. Puede construir varias habitaciones en su estudio que ofrecen diferentes beneficios y funciones. Por ejemplo, la sala de desarrollo de mercancías genera oro cada segundo, mientras que el estudio de grabación distribuye álbumes que se utilizan como energía para jugar puzzles. También puedes construir salas de entrenamiento para la voz, la danza, la actuación, etc., donde puedes mejorar las habilidades de tus miembros. Para construir o mejorar las habitaciones, necesita oro y polvo de estrellas. El oro se puede recoger de las habitaciones o ganar completando tareas y rompecabezas. El polvo de estrellas se puede recoger de habitaciones u obtener combinando tarjetas fotográficas. </p>
14
- <h3>Modo de rompecabezas</h3>
15
- <p>El modo de rompecabezas es donde puede borrar los horarios de BLACKPINK y ganar recompensas como oro, álbumes, polvo de estrellas, tarjetas fotográficas y artículos de bonificación. Para jugar al modo puzzle, necesitas usar álbumes como energía. Cada programa consta de varias etapas con diferentes objetivos y dificultades. Necesitas borrar bloques deslizando tu </p>
16
- <p></p> 64aa2da5cf<br />
17
- <br />
18
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Cheat Kick El Amigo 2.md DELETED
@@ -1,125 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar códigos de trucos para Kick the Buddy 2</h1>
3
- <p>Kick the Buddy 2 es un popular juego de simulación donde puedes liberar tu creatividad e imaginación torturando a un muñeco de trapo llamado Buddy con varias armas, herramientas y elementos. Es un juego divertido y que alivia el estrés que te permite experimentar con diferentes formas de destruir, quemar, cortar, explotar y aplastar a Buddy. También puedes personalizar la apariencia, la voz y el fondo de Buddy, así como jugar minijuegos con él. </p>
4
- <p>Sin embargo, si desea llevar su experiencia de juego al siguiente nivel, es posible que desee intentar usar códigos de trucos para Kick the Buddy 2. Los códigos de trucos son comandos o códigos especiales que pueden modificar o mejorar algunos aspectos del juego, como darle dinero ilimitado, oro, armas u objetos. Mediante el uso de códigos de trucos, puede desbloquear todas las características del juego sin gastar dinero real o tiempo. </p>
5
- <h2>descargar cheat kick el amigo 2</h2><br /><p><b><b>DOWNLOAD</b> &rArr;&rArr;&rArr; <a href="https://bltlly.com/2v6MjG">https://bltlly.com/2v6MjG</a></b></p><br /><br />
6
- <p>Usar códigos de trucos para Kick the Buddy 2 puede tener muchos beneficios, como:</p>
7
- <ul>
8
- <li> Puedes acceder a todas las armas, herramientas y elementos del juego sin tener que ver anuncios o completar tareas. </li>
9
- <li>Puedes experimentar con diferentes combinaciones de armas y elementos para crear escenarios más divertidos e hilarantes. </li>
10
- <li> Puede personalizar la apariencia, la voz y el fondo de Buddy para adaptarse a sus preferencias o estado de ánimo. </li>
11
- <li>Puedes desafiarte a ti mismo probando diferentes niveles de dificultad o modos. </li>
12
- <li> Usted puede tener más diversión y satisfacción por golpear a Buddy en cualquier forma que desee. </li>
13
- </ul>
14
- <p>Si usted está interesado en el uso de códigos de trucos para Kick the Buddy 2, es posible que se pregunte cómo encontrarlos y cómo descargarlos. En este artículo, lo guiaremos a través del proceso de encontrar, descargar y usar códigos de trucos para Kick the Buddy 2 en dispositivos Android e iOS. También te daremos algunos consejos y trucos sobre cómo usar códigos de trucos de manera efectiva y responsable. </p>
15
- <h2>Cómo encontrar códigos de trucos para Kick the Buddy 2</h2>
16
-
17
- <p>Por lo tanto, debe ser cuidadoso y selectivo al buscar códigos de trucos en línea. Aquí hay algunos consejos sobre cómo encontrar fuentes confiables y legítimas para los códigos de trucos:</p>
18
- <ul>
19
- <li>Busca sitios web que tengan reseñas, valoraciones, comentarios o comentarios positivos de otros usuarios. También puede consultar foros o comunidades en línea donde los jugadores comparten sus experiencias y recomendaciones. </li>
20
- <li>Busque sitios web que tienen instrucciones claras y detalladas sobre cómo descargar y usar códigos de trucos. También deben proporcionar capturas de pantalla o videos como prueba de que sus códigos de trucos funcionan. </li>
21
- <li <li>Busca sitios web que tengan una base de datos grande y actualizada de códigos de trucos para varios juegos, incluyendo Kick the Buddy 2. También deberían tener una función de búsqueda o una opción de filtro para ayudarte a encontrar los códigos de trucos que necesitas. </li>
22
- </ul>
23
- <p>Algunos ejemplos de sitios web que proporcionan códigos de trucos de juegos son:</p>
24
- <tabla>
25
- <tr>
26
- <th>Sitio web</th>
27
- <th>Descripción</th>
28
- </tr>
29
- <tr>
30
- <td>[CheatCodes.com]( 1 )</td>
31
- <td>Uno de los sitios de código de trucos de juegos más antiguos y populares con un archivo grande y completo de códigos de trucos para varias plataformas. </td>
32
- </tr>
33
- <tr>
34
- <td>[Radar de juegos]( 1 )</td>
35
- <td>Un sitio de revisión de juegos y noticias que también ofrece códigos de trucos, guías, tutoriales y consejos para varios juegos. </td>
36
- </tr>
37
- <tr>
38
- <td>[Sucede un truco]( 1 )</td>
39
- <td>Un sitio dedicado para códigos de trucos y entrenadores de PC que también cuenta con reseñas de juegos, fondos de pantalla y tutoriales. </td>
40
- </tr>
41
- <tr>
42
- <td>[GameWinners]( 1 )</td>
43
- <td>Un sitio que tiene una gran colección de códigos de trucos, preguntas frecuentes, guías y desbloqueables para varios juegos. </td>
44
- </tr>
45
- <tr>
46
- <td>[Código de trucos Central]( 1 )</td>
47
- <td>Un sitio que tiene una amplia base de datos de códigos de trucos, guías, comentarios y noticias para varios juegos. </td>
48
- </tr>
49
- </tabla>
50
- <h2>Cómo descargar códigos de trucos para Kick the Buddy 2</h2>
51
-
52
- <h3>Cómo descargar códigos de trucos para dispositivos Android</h3>
53
- <ol>
54
- <li>Asegúrese de que su dispositivo está conectado a Internet y tiene suficiente espacio de almacenamiento. </li>
55
- <li>Ir a la página web que ofrece los códigos de trucos que desea descargar y siga las instrucciones sobre cómo descargarlos. Es posible que deba ingresar su dirección de correo electrónico, completar una encuesta o ver un anuncio para obtener acceso al enlace de descarga. </li>
56
- <li>Una vez que tenga el enlace de descarga, toque en él y espere a que el archivo para descargar. El archivo puede estar en forma de un APK (Android Package Kit) o un archivo ZIP (comprimido). </li>
57
- <li>Si el archivo es un archivo APK, es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en el dispositivo. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
58
- <li>Si el archivo es un archivo ZIP, debe extraerlo usando una aplicación de administrador de archivos o una aplicación de extractor ZIP. Puedes descargar estas aplicaciones desde Google Play Store si aún no las tienes. </li>
59
- <li>Después de extraer o instalar el archivo, debería ver un nuevo icono en la pantalla de inicio del dispositivo o en el cajón de aplicaciones. Esta es la aplicación de código de trucos que necesita para iniciar antes de jugar Kick the Buddy 2.</li>
60
- </ol>
61
- <h3>Cómo descargar códigos de trucos para dispositivos iOS</h3>
62
- <ol>
63
- <li>Asegúrese de que su dispositivo está conectado a Internet y tiene suficiente espacio de almacenamiento. </li>
64
- <li>Ir a la página web que ofrece los códigos de trucos que desea descargar y siga las instrucciones sobre cómo descargarlos. Es posible que deba ingresar su dirección de correo electrónico, completar una encuesta o ver un anuncio para obtener acceso al enlace de descarga. </li>
65
- <li>Una vez que tenga el enlace de descarga, toque en él y espere a que el archivo para descargar. El archivo puede estar en forma de un IPA (paquete de iOS App Store) o un archivo ZIP (comprimido). </li>
66
-
67
- <li>Si el archivo es un archivo ZIP, debe extraerlo usando una aplicación de administrador de archivos o una aplicación de extractor ZIP. Puedes descargar estas aplicaciones desde la App Store si aún no las tienes. </li>
68
- <li>Después de extraer o instalar el archivo, debería ver un nuevo icono en la pantalla de inicio del dispositivo o en el cajón de aplicaciones. Esta es la aplicación de código de trucos que necesita para iniciar antes de jugar Kick the Buddy 2.</li>
69
- </ol>
70
- <h2>Cómo usar códigos de trucos para Kick the Buddy 2</h2> <p>Después de descargar e instalar la aplicación de código de trucos, debe activarlo y usarlo en el juego. El proceso puede variar dependiendo del tipo de aplicación de código de trucos que tenga y las características que ofrece. Sin embargo, aquí hay algunos pasos generales a seguir:</p>
71
- <ol>
72
- <li>Inicie la aplicación de código de trucos y otorgue los permisos necesarios para acceder a los datos y configuraciones de su dispositivo. </li>
73
- <li>Seleccione el juego que desea engañar de la lista de juegos soportados. En este caso, seleccione Kick the Buddy 2.</li>
74
- <li>Seleccione los códigos de trucos que desea utilizar de la lista de opciones disponibles. Puedes elegir entre diferentes categorías como dinero, oro, armas, artículos o modos. </li>
75
- <li>Toque en el botón aplicar o activar para habilitar los códigos de trucos. Es posible que tenga que esperar unos segundos o minutos para que los códigos de trucos surtan efecto. </li>
76
- <li>Iniciar el juego y disfrutar de jugar con los códigos de trucos. Deberías ver los cambios en la interfaz de tu juego, como el aumento de dinero, oro o armas desbloqueadas. </li>
77
- </ol>
78
- <h2>Cómo usar códigos de trucos para Kick the Buddy 2</h2>
79
- <p>Ahora que ha activado los códigos de trucos, puede usarlos para mejorar su experiencia de juego y divertirse más con Kick the Buddy 2. Sin embargo, debe ser consciente de los tipos de códigos de trucos que está utilizando y sus efectos en el juego y las características. Aquí hay algunos consejos y trucos sobre cómo usar códigos de trucos de manera efectiva y responsable:</p>
80
- <h3>Los tipos de códigos de trucos disponibles para Kick the Buddy 2</h3>
81
-
82
- <ul>
83
- <li>Códigos de trucos de dinero: Estos códigos de trucos le dan cantidades ilimitadas o mayores de dinero en el juego. Puedes usar dinero para comprar más armas, herramientas, elementos u objetos en el juego. </li>
84
- <li>Códigos de trucos de oro: Estos códigos de trucos te dan cantidades ilimitadas o aumentadas de oro en el juego. Puedes usar oro para desbloquear funciones premium, como membresía VIP, armas exclusivas o modos especiales. </li>
85
- <li>Códigos de trucos de armas: Estos códigos de trucos le dan acceso a todas o algunas de las armas en el juego sin tener que comprarlos o ver anuncios. Puedes usar armas para torturar a Buddy de diferentes maneras. </li>
86
- <li>ítem códigos de trucos: Estos códigos de trucos le dan acceso a todos o algunos de los elementos en el juego sin tener que comprarlos o ver anuncios. Puedes usar elementos para personalizar la apariencia, la voz o el fondo de Buddy. </li>
87
- <li>Códigos de trucos de modo: Estos códigos de trucos le dan acceso a diferentes modos o niveles de dificultad en el juego sin tener que desbloquearlos o completar tareas. Puedes usar modos para desafiarte o probar nuevos escenarios. </li>
88
- </ul>
89
- <h3>Los efectos de los códigos de trucos en el juego y las características</h3>
90
- <p>Los códigos de trucos pueden tener varios efectos en el juego y las caracter��sticas de Kick the Buddy 2, dependiendo del tipo y la cantidad de códigos de trucos que esté utilizando. Algunos de ellos son:</p>
91
- <p></p>
92
- <ul>
93
- <li>Los códigos de trucos pueden hacer el juego más fácil o más difícil para usted, dependiendo de su preferencia y nivel de habilidad. Por ejemplo, si quieres pasar un rato relajante con Buddy, puedes usar códigos de trucos de dinero o oro para comprar más armas y artículos. Si quieres tener un momento difícil con Buddy, puedes usar códigos de trucos de modo para aumentar la dificultad o cambiar las reglas. </li>
94
-
95
- <li>Los códigos de trucos pueden hacer que el juego sea más gratificante o menos gratificante para ti, dependiendo de tu objetivo y motivación. Por ejemplo, si quieres pasar un rato gratificante con Buddy, puedes usar códigos de trucos de modo para desbloquear logros o trofeos. Si quieres tener un tiempo menos gratificante con Buddy, puedes usar códigos de trucos de dinero o oro para saltarte algunas tareas o desafíos. </li>
96
- </ul>
97
- <h3>Los consejos y trucos para utilizar códigos de trucos de manera eficaz y responsable</h3>
98
- <p>Los códigos de trucos pueden ser una gran manera de mejorar tu experiencia de juego y divertirte más con Kick the Buddy 2. Sin embargo, debes usarlos con sabiduría y responsabilidad. Aquí hay algunos consejos y trucos sobre cómo hacerlo:</p>
99
- <ul>
100
- <li>Usa códigos de trucos con moderación y moderación. No los uses demasiado ni confíes demasiado en ellos. De lo contrario, puede perder interés en el juego o arruinar su encanto y atractivo original. </li>
101
- <li>Usa códigos de trucos de forma selectiva y apropiada. No los uses para cada aspecto o característica del juego. Úselos solo para las partes o características que encuentre difíciles, aburridas o inaccesibles. </li>
102
- <li>Usa códigos de trucos de forma creativa y experimental. No los uses para repetir las mismas acciones o escenarios. Úsalos para probar cosas nuevas o descubrir nuevas posibilidades. </li>
103
- <li>Use códigos de trampa ética y legalmente. No los use para dañar, ofender o engañar a otros. Úsalos solo para tu propio disfrute y entretenimiento personal. </li>
104
- </ul>
105
- <h2>Conclusión</h2>
106
- <p>Kick the Buddy 2 es un divertido y relajante juego que te permite torturar a un muñeco de trapo llamado Buddy con varias armas, herramientas y elementos. Sin embargo, si quieres darle vida a tu experiencia de juego y divertirte más, puedes usar códigos de trucos para Kick the Buddy 2. Los códigos de trucos son comandos o códigos especiales que pueden modificar o mejorar algunos aspectos del juego, como darte dinero ilimitado, oro, armas u objetos. </p>
107
-
108
- <p>Si estás interesado en usar códigos de trucos para Kick the Buddy 2, te animamos a probarlos y ver por ti mismo cómo pueden mejorar tu experiencia de juego y satisfacción. Sin embargo, también te recordamos que los uses de forma inteligente y responsable, ya que pueden tener algunos efectos negativos en la jugabilidad y las características. También le aconsejamos que sea cuidadoso y selectivo cuando busque códigos de trucos en línea, ya que algunos de ellos pueden ser falsos o dañinos. </p>
109
- <p>Esperamos que hayas disfrutado leyendo este artículo y hayas aprendido algo nuevo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Gracias por su tiempo y atención. </p>
110
- <h2>Preguntas frecuentes</h2>
111
- <p>Aquí hay algunas preguntas frecuentes sobre códigos de trucos para Kick the Buddy 2:</p>
112
- <ol>
113
- <li>Q: ¿Son legales los códigos de trucos para Kick the Buddy 2? </li>
114
- <li>A: Códigos de trucos para Kick the Buddy 2 no son ilegales, ya que no violan ninguna ley o reglamento. Sin embargo, pueden estar en contra de los términos de servicio o políticas del desarrollador o editor de juegos. Por lo tanto, el uso de códigos de trucos puede resultar en algunas consecuencias, como ser expulsado del juego o perder su progreso. </li>
115
- <li>Q: ¿Son seguros los códigos de trucos para Kick the Buddy 2? </li>
116
- <li>A: Códigos de trucos para Kick the Buddy 2 no siempre son seguros, ya que algunos de ellos pueden contener virus, malware, spyware u otro software dañino que puede dañar su dispositivo o robar sus datos. Por lo tanto, debe ser cuidadoso y selectivo al buscar códigos de trucos en línea. Solo debes descargar códigos de trucos de fuentes confiables y de buena reputación que tengan reseñas, valoraciones, comentarios o comentarios positivos de otros usuarios. </li>
117
- <li>Q: ¿Los códigos de trucos para Kick the Buddy 2 funcionan sin conexión? </li>
118
-
119
- <li>Q: ¿Puedo usar códigos de trucos para Kick the Buddy 2 en otros dispositivos? </li>
120
- <li>A: Códigos de trucos para Kick the Buddy 2 pueden o no funcionar en otros dispositivos, dependiendo de la compatibilidad y las especificaciones de los dispositivos. Algunos códigos de trucos solo pueden funcionar en dispositivos o plataformas específicas, como Android o iOS. Algunos códigos de trucos pueden funcionar en múltiples dispositivos o plataformas, pero pueden requerir diferentes pasos o métodos para descargarlos y usarlos. Por lo tanto, debe verificar la compatibilidad y las especificaciones de los códigos de trucos antes de usarlos. </li>
121
- <li>Q: ¿Puedo usar códigos de trucos para Kick the Buddy 2 con otros juegos? </li>
122
- <li>A: Códigos de trucos para Kick the Buddy 2 pueden o no funcionar con otros juegos, dependiendo de la similitud y compatibilidad de los juegos. Algunos códigos de trucos solo pueden funcionar con Kick the Buddy 2 o su secuela o spin-off juegos. Algunos códigos de trucos pueden funcionar con otros juegos que tienen características o mecánicas similares, pero pueden tener diferentes efectos o resultados. Por lo tanto, es necesario comprobar la similitud y compatibilidad de los juegos antes de usar códigos de trucos. </li>
123
- </ol></p> 64aa2da5cf<br />
124
- <br />
125
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Derby De Demolicin 3 Mod Apk.md DELETED
@@ -1,58 +0,0 @@
1
-
2
- <h1>Descargar Demolición Derby 3 Mod APK y disfrutar del último juego de destrucción de coches</h1>
3
- <p>Si eres un fan de las carreras de coches y los juegos de choque, entonces te encantará Demolition Derby 3. Este es un juego donde puedes conducir tu coche a otros coches y causar tanto daño como sea posible. También puede personalizar su coche con diferentes piezas y trabajos de pintura, y competir con otros jugadores en línea o fuera de línea. En este artículo, le diremos todo lo que necesita saber sobre Demolition Derby 3, y cómo descargar la versión mod APK del juego que le da dinero ilimitado, todos los coches desbloqueados, y sin anuncios. </p>
4
- <h2>¿Qué es Demolition Derby 3?</h2>
5
- <p>Demolition Derby 3 es un juego de destrucción de automóviles desarrollado por Beer Money Games. Es la secuela de la popular Demolition Derby 2, que tiene más de 50 millones de descargas en Google Play. En Demolition Derby 3, puedes elegir entre más de 40 coches diferentes, cada uno con sus propias estadísticas y habilidades. También puede actualizar su coche con varias partes, como motores, armaduras, ruedas, alerones y más. También puede cambiar el color y el diseño de su coche para que parezca único. </p>
6
- <h2>Descargar Derby de Demolición 3 Mod APK</h2><br /><p><b><b>Download Zip</b> &#10037;&#10037;&#10037; <a href="https://bltlly.com/2v6KEf">https://bltlly.com/2v6KEf</a></b></p><br /><br />
7
- <h3>Características de Demolition Derby 3</h3>
8
- <p>Demolition Derby 3 tiene muchas características que hacen que sea divertido y emocionante jugar. Aquí están algunas de ellas:</p>
9
- <h4>Modo multijugador</h4>
10
- <p>Puedes jugar Demolition Derby 3 online con otros jugadores de todo el mundo. Puede unirse o crear un lobby, y elegir entre diferentes modos de juego, como free-for-all, deathmatch equipo, capturar la bandera, rey de la colina, y más. También puedes chatear con otros jugadores y hacer amigos o enemigos. </p>
11
- <h4>Coches personalizables</h4>
12
- <p>Puede personalizar su coche con más de 1000 piezas y calcomanías. Puede cambiar el motor, la transmisión, la suspensión, los frenos, los neumáticos, la armadura, el escape, turbo, nitro y más. También puede cambiar el color y el diseño de su automóvil con varios trabajos de pintura, pegatinas, llamas, rayas y más. Puedes hacer que tu auto se vea genial o loco. </p>
13
-
14
- <p>Puedes conducir tu coche en diferentes arenas y eventos. Hay más de 20 arenas para elegir, cada una con su propio diseño y obstáculos. Algunas arenas tienen rampas, lazos, puentes, túneles, paredes, barriles, cajas y más. También puedes participar en diferentes eventos, como carreras de demolición, carreras de eliminación, último hombre en pie, modo de supervivencia, modo de truco, batallas de jefes y más. Cada evento tiene sus propias reglas y recompensas. </p>
15
- <h4>Física y gráficos realistas</h4>
16
- <p>Demolition Derby 3 tiene física y gráficos realistas que hacen que el juego sea más inmersivo y realista. Los coches tienen modelos realistas de daños que muestran las abolladuras, arañazos, chispas, humo, fuego y explosiones que ocurren cuando chocan. Los coches también tienen sonidos realistas que coinciden con sus motores, frenos, bocinas, accidentes y más. Las arenas tienen iluminación realista y sombras que crean una atmósfera dinámica. </p>
17
- <h2>¿Por qué descargar Demolition Derby 3 Mod APK? </h2>
18
- <p>Demolition Derby 3 es un juego gratuito para jugar en dispositivos Android. Sin embargo, también tiene algunas limitaciones y desventajas que pueden afectar su experiencia de juego. Por ejemplo, es necesario ganar dinero en el juego para comprar y mejorar sus coches, que puede tomar mucho tiempo y esfuerzo. También necesitas ver anuncios para obtener algunas recompensas adicionales, que pueden ser molestas y distracciones. Además, no todos los coches están disponibles en el juego, y algunos de ellos están bloqueados detrás de un muro de pago. </p>
19
- <p>Es por eso que es posible que desee descargar la versión mod APK de Demolition Derby 3. Esta es una versión modificada del juego que le da algunas ventajas y beneficios que no se pueden obtener en la versión original. Estos son algunos de ellos:</p>
20
- <h3>Beneficios de la demolición Derby 3 Mod APK</h3>
21
- <p>Demolición Derby 3 Mod APK tiene muchos beneficios que hacen el juego más agradable y satisfactorio. Estos son algunos de ellos:</p>
22
- <p></p>
23
- <h4>Dinero ilimitado</h4>
24
-
25
- <h4>Todos los coches desbloqueados</h4>
26
- <p>Con Demolition Derby 3 Mod APK, usted tendrá todos los coches desbloqueados en el juego. Esto significa que puede elegir entre más de 40 coches diferentes, cada uno con sus propias estadísticas y habilidades. También puedes personalizar tu coche con más de 1000 piezas y calcomanías. Puedes tener la colección de coches más diversa y única del juego. </p>
27
- <h4>No hay anuncios</h4>
28
- <p>Con Demolition Derby 3 Mod APK, no tendrás anuncios en el juego. Esto significa que puedes jugar el juego sin interrupciones o distracciones. También puedes disfrutar del juego sin perder tiempo ni datos al ver anuncios. Puedes tener la experiencia de juego más fluida y fluida. </p>
29
- <h2>¿Cómo descargar e instalar Demolition Derby 3 Mod APK? </h2>
30
- <p>Si está interesado en descargar e instalar Demolition Derby 3 Mod APK, entonces usted necesita seguir algunos pasos simples. Aquí están:</p>
31
- <h3>Pasos para descargar e instalar Demolition Derby 3 Mod APK</h3>
32
- <h4>Paso 1: Habilitar fuentes desconocidas</h4>
33
- <p>El primer paso es habilitar fuentes desconocidas en su dispositivo Android. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad, luego a fuentes desconocidas y enciéndala. </p>
34
- <h4>Paso 2: Descargar el archivo mod APK</h4>
35
- <p>El siguiente paso es descargar el archivo mod APK de Demolition Derby 3 de una fuente confiable. Puedes usar el siguiente enlace para descargarlo directamente a tu dispositivo. </p>
36
- <p><a href="">Descargar Demolition Derby 3 Mod APK</a></p>
37
- <h4>Paso 3: Instalar el archivo mod APK</h4>
38
- <p>El tercer paso es instalar el archivo mod APK de Demolition Derby 3 en su dispositivo. Para hacer esto, busque el archivo descargado en su administrador de archivos, toque en él y siga las instrucciones en la pantalla. </p>
39
- <h4>Paso 4: Iniciar el juego y disfrutar de</h4>
40
-
41
- <h2>Conclusión</h2>
42
- <p>Demolition Derby 3 es un divertido y emocionante juego de destrucción de coches que te permite conducir tu coche en otros coches y causar tanto daño como sea posible. También puede personalizar su coche con diferentes piezas y trabajos de pintura, y competir con otros jugadores en línea o fuera de línea. Sin embargo, si desea disfrutar del juego más, es posible que desee descargar la versión mod APK del juego que le da dinero ilimitado, todos los coches desbloqueados, y sin anuncios. En este artículo, te hemos dicho todo lo que necesitas saber sobre Demolition Derby 3, y cómo descargar e instalar su versión mod APK. Esperamos que este artículo fue útil para usted, y que usted tendrá un gran tiempo jugando Demolition Derby 3.</p>
43
- <h2>Preguntas frecuentes</h2>
44
- <p>Aquí hay algunas preguntas frecuentes sobre Demolition Derby 3 Mod APK:</p>
45
- <ul>
46
- <li><b> ¿Es Demolition Derby 3 Mod APK seguro de usar? </b></li>
47
- <p>Sí, Demolición Derby 3 Mod APK es seguro de usar siempre y cuando se descarga de una fuente de confianza como la nuestra. No contiene ningún virus o malware que pueda dañar su dispositivo o datos. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, y asegúrese de que tiene una copia de seguridad de sus datos en caso de que algo salga mal. </p>
48
- <li><b> ¿Es Demolition Derby 3 Mod APK compatible con mi dispositivo? </b></li>
49
- <p>Demolición Derby 3 Mod APK es compatible con la mayoría de los dispositivos Android que se ejecutan en Android 4.4 o superior. Sin embargo, algunos dispositivos pueden no soportar el juego o el mod APK debido a diferentes especificaciones o ajustes. Si encuentras algún problema mientras juegas el juego o instalas el mod APK, puedes intentar cambiar la configuración del dispositivo, actualizar el software del dispositivo o contactar al desarrollador del juego para obtener ayuda. </p>
50
- <li><b>¿Puedo jugar Demolition Derby 3 Mod APK en línea con otros jugadores? </b></li>
51
-
52
- <li><b>¿Puedo actualizar Demolition Derby 3 Mod APK a la última versión? </b></li>
53
- <p>Sí, puede actualizar Demolition Derby 3 Mod APK a la última versión cada vez que hay una nueva actualización disponible. Sin embargo, no debes actualizar el juego desde Google Play, ya que esto sobreescribirá la versión mod APK y eliminará todos los beneficios que tengas. En su lugar, usted debe descargar la última versión mod APK de nuestro sitio web, e instalarlo sobre el existente. De esta manera, mantendrás todo tu progreso y beneficios en el juego. </p>
54
- <li><b>¿Puedo solicitar más características o mods para Demolition Derby 3 Mod APK? </b></li>
55
- <p>Sí, puede solicitar más características o mods para Demolition Derby 3 Mod APK dejando un comentario en nuestro sitio web. Haremos todo lo posible para satisfacer sus solicitudes y proporcionarle la mejor experiencia de juego posible. Sin embargo, no podemos garantizar que podamos agregar todas las características o mods que desee, ya que algunos de ellos pueden ser demasiado difíciles o imposibles de implementar. Agradecemos su comprensión y apoyo. </p>
56
- </ul></p> 64aa2da5cf<br />
57
- <br />
58
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/translate.py DELETED
@@ -1,78 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
- import copy
15
-
16
- from botocore.utils import merge_dicts
17
-
18
-
19
def build_retry_config(
    endpoint_prefix, retry_model, definitions, client_retry_config=None
):
    """Build the effective retry configuration for a single service.

    The global ``__default__`` section of the retry model is used as the
    base, service-specific settings are merged on top of it, and finally
    any client-level retry configuration takes precedence over both.

    :param endpoint_prefix: Service key to look up in ``retry_model``.
    :param retry_model: Full retry model, keyed by endpoint prefix.
    :param definitions: Shared definitions used to resolve ``$ref`` nodes.
    :param client_retry_config: Optional client-supplied retry overrides.
    :return: The merged retry configuration dict.
    """
    per_service = retry_model.get(endpoint_prefix, {})
    resolve_references(per_service, definitions)
    # Deep-copy the global defaults so that merging service- or
    # client-level values can never mutate the shared retry model.
    merged = {
        '__default__': copy.deepcopy(retry_model.get('__default__', {}))
    }
    resolve_references(merged, definitions)
    # Service-specific configuration overrides the global defaults.
    merge_dicts(merged, per_service)
    if client_retry_config is not None:
        _merge_client_retry_config(merged, client_retry_config)
    return merged
40
-
41
-
42
- def _merge_client_retry_config(retry_config, client_retry_config):
43
- max_retry_attempts_override = client_retry_config.get('max_attempts')
44
- if max_retry_attempts_override is not None:
45
- # In the retry config, the max_attempts refers to the maximum number
46
- # of requests in general will be made. However, for the client's
47
- # retry config it refers to how many retry attempts will be made at
48
- # most. So to translate this number from the client config, one is
49
- # added to convert it to the maximum number request that will be made
50
- # by including the initial request.
51
- #
52
- # It is also important to note that if we ever support per operation
53
- # configuration in the retry model via the client, we will need to
54
- # revisit this logic to make sure max_attempts gets applied
55
- # per operation.
56
- retry_config['__default__']['max_attempts'] = (
57
- max_retry_attempts_override + 1
58
- )
59
-
60
-
61
def resolve_references(config, definitions):
    """Recursively replace ``{"$ref": name}`` nodes in *config* in place.

    To cut down on duplication, common definitions can be declared once
    (passed in via *definitions*) and then referenced as
    ``{"$ref": "name"}``; each such reference dict is replaced with the
    corresponding value from *definitions*.
    """
    for key, value in config.items():
        if not isinstance(value, dict):
            continue
        inner_keys = list(value)
        if len(inner_keys) == 1 and inner_keys[0] == '$ref':
            # A reference node: swap it for the shared definition.
            config[key] = definitions[value['$ref']]
        else:
            resolve_references(value, definitions)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py DELETED
@@ -1,36 +0,0 @@
1
- """
2
- This module provides means to detect the App Engine environment.
3
- """
4
-
5
- import os
6
-
7
-
8
def is_appengine():
    """Return True when running on App Engine (dev server or production)."""
    return is_local_appengine() or is_prod_appengine()
10
-
11
-
12
def is_appengine_sandbox():
    """Reports if the app is running in the first generation sandbox.

    The second generation runtimes are technically still in a sandbox, but it
    is much less restrictive, so generally you shouldn't need to check for it.
    see https://cloud.google.com/appengine/docs/standard/runtimes
    """
    if not is_appengine():
        return False
    # is_appengine() guarantees APPENGINE_RUNTIME is present in os.environ.
    return os.environ["APPENGINE_RUNTIME"] == "python27"
20
-
21
-
22
def is_local_appengine():
    """Return True when running under the App Engine development server."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
26
-
27
-
28
def is_prod_appengine():
    """Return True when running on production App Engine."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Google App Engine/")
32
-
33
-
34
def is_prod_appengine_mvms():
    """Deprecated; always returns False."""
    return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/datasets/gqa/eval/result_eval.py DELETED
@@ -1,61 +0,0 @@
1
- # --------------------------------------------------------
2
- # OpenVQA
3
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
4
- # --------------------------------------------------------
5
-
6
- from openvqa.datasets.gqa.eval.gqa_eval import GQAEval
7
- import json, pickle
8
- import numpy as np
9
-
10
-
11
def eval(__C, dataset, ans_ix_list, pred_list, result_eval_file, ensemble_file, log_file, valid=False):
    """Write GQA prediction results to disk and optionally score them.

    Maps each predicted answer index back to its answer string, dumps the
    ``[{questionId, prediction}, ...]`` list as JSON, optionally saves the
    raw prediction vectors for ensembling, and (when *valid* is True) runs
    the official GQA evaluation and appends the scores to *log_file*.

    NOTE: this function intentionally shadows the builtin ``eval`` because
    callers import it under this name.

    :param __C: Config object (reads TEST_SAVE_PRED; RAW_PATH/DATASET/SPLIT
        when *valid* is True).
    :param dataset: Dataset providing ``qid_list``, ``ans_size``, ``ix_to_ans``.
    :param ans_ix_list: Predicted answer index per question.
    :param pred_list: Raw prediction vectors (used only if TEST_SAVE_PRED).
    :param result_eval_file: Output path stem; '.json' is appended.
    :param ensemble_file: Pickle path for raw predictions (TEST_SAVE_PRED).
    :param log_file: Log path for evaluation scores (*valid* mode).
    :param valid: Run the GQA evaluator against the validation split.
    """
    result_eval_file = result_eval_file + '.json'

    # Copy so later mutation of the dataset cannot affect the saved result.
    qid_list = list(dataset.qid_list)
    ans_size = dataset.ans_size

    result = [{
        'questionId': qid_list[ix],
        'prediction': dataset.ix_to_ans[str(ans_ix_list[ix])],
    } for ix in range(len(qid_list))]

    print('Save the result to file: {}'.format(result_eval_file))
    # FIX: use a context manager so the file handle is always closed.
    with open(result_eval_file, 'w') as fp:
        json.dump(result, fp)

    if __C.TEST_SAVE_PRED:
        print('Save the prediction vector to file: {}'.format(ensemble_file))

        pred_list = np.array(pred_list).reshape(-1, ans_size)
        # FIX: len(qid_list) instead of qid_list.__len__().
        result_pred = [{
            'pred': pred_list[qix],
            'qid': int(qid_list[qix])
        } for qix in range(len(qid_list))]
        with open(ensemble_file, 'wb+') as fp:
            pickle.dump(result_pred, fp, protocol=-1)

    if valid:
        # Build the GQA evaluator against the validation question file
        # (and the choices file, when the dataset config provides one).
        ques_file_path = __C.RAW_PATH[__C.DATASET][__C.SPLIT['val']]
        choices_path = None
        if __C.SPLIT['val'] + '_choices' in __C.RAW_PATH[__C.DATASET]:
            choices_path = __C.RAW_PATH[__C.DATASET][__C.SPLIT['val'] + '_choices']

        eval_gqa = GQAEval(__C, result_eval_file, ques_file_path, choices_path, EVAL_CONSISTENCY=False)
        result_string, detail_result_string = eval_gqa.get_str_result()

        print('Write to log file: {}'.format(log_file))
        # Append (never truncate) so scores accumulate across epochs.
        with open(log_file, 'a+') as logfile:
            for result_string_ in result_string:
                logfile.write(result_string_)
                logfile.write('\n')
                print(result_string_)

            for detail_result_string_ in detail_result_string:
                logfile.write(detail_result_string_)
                logfile.write("\n")

            logfile.write('\n')
60
-
61
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/exec.py DELETED
@@ -1,58 +0,0 @@
1
- # --------------------------------------------------------
2
- # OpenVQA
3
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
4
- # Modified to add trojan result extraction options
5
- # --------------------------------------------------------
6
-
7
- import os, copy
8
- from openvqa.datasets.dataset_loader import DatasetLoader
9
- from utils.train_engine import train_engine
10
- from utils.test_engine import test_engine
11
- from utils.extract_engine import extract_engine
12
-
13
class Execution:
    """Top-level driver wiring a config to the train/val/test/extract engines."""

    def __init__(self, __C):
        self.__C = __C

        # Extraction runs directly from saved outputs, so no dataset is
        # loaded in that mode.
        if __C.RUN_MODE != 'extract':
            print('Loading dataset........')
            self.dataset = DatasetLoader(__C).DataSet()

        # When per-epoch evaluation is enabled, build a second config that
        # runs in 'val' mode and load its dataset up front.
        self.dataset_eval = None
        if __C.EVAL_EVERY_EPOCH:
            __C_eval = copy.deepcopy(__C)
            setattr(__C_eval, 'RUN_MODE', 'val')
            # modification - force eval set to clean when in train mode
            setattr(__C_eval, 'VER', 'clean')

            print('Loading validation set for per-epoch evaluation........')
            self.dataset_eval = DatasetLoader(__C_eval).DataSet()

    def run(self, run_mode):
        """Dispatch *run_mode* to its engine; exit(-1) on an unknown mode."""
        if run_mode == 'train':
            # A fresh (non-resumed) training run starts with an empty log.
            if self.__C.RESUME is False:
                self.empty_log(self.__C.VERSION)
            train_engine(self.__C, self.dataset, self.dataset_eval)
            return
        if run_mode == 'val':
            test_engine(self.__C, self.dataset, validation=True)
            return
        if run_mode == 'test':
            test_engine(self.__C, self.dataset)
            return
        if run_mode == 'extract':
            extract_engine(self.__C)
            return
        exit(-1)

    def empty_log(self, version):
        """Delete any existing log file for *version* so a new run starts clean."""
        print('Initializing log file........')
        log_path = self.__C.LOG_PATH + '/log_run_' + version + '.txt'
        if os.path.exists(log_path):
            os.remove(log_path)
        print('Finished!')
        print('')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pybind11/tools/pybind11NewTools.cmake DELETED
@@ -1,203 +0,0 @@
1
- # tools/pybind11NewTools.cmake -- Build system for the pybind11 modules
2
- #
3
- # Copyright (c) 2020 Wenzel Jakob <[email protected]> and Henry Schreiner
4
- #
5
- # All rights reserved. Use of this source code is governed by a
6
- # BSD-style license that can be found in the LICENSE file.
7
-
8
# Record whether pybind11 was imported from an installed config package or
# is being built in-tree as a submodule of the consuming project.
get_property(
  is_config
  TARGET pybind11::headers
  PROPERTY IMPORTED)

# Propagate QUIET from find_package(pybind11 ... QUIET) to the Python search.
if(pybind11_FIND_QUIETLY)
  set(_pybind11_quiet QUIET)
endif()

# The "new" FindPython module only exists in CMake 3.12+.
if(CMAKE_VERSION VERSION_LESS 3.12)
  message(FATAL_ERROR "You cannot use the new FindPython module with CMake < 3.12")
endif()

# Only search for Python if the consuming project has not already done so
# with any of the three FindPython entry points.
if(NOT Python_FOUND
   AND NOT Python3_FOUND
   AND NOT Python2_FOUND)
  if(NOT DEFINED Python_FIND_IMPLEMENTATIONS)
    # Accept PyPy as well as CPython unless the caller restricted this.
    set(Python_FIND_IMPLEMENTATIONS CPython PyPy)
  endif()

  # GitHub Actions like activation
  if(NOT DEFINED Python_ROOT_DIR AND DEFINED ENV{pythonLocation})
    set(Python_ROOT_DIR "$ENV{pythonLocation}")
  endif()

  find_package(Python REQUIRED COMPONENTS Interpreter Development ${_pybind11_quiet})

  # If we are in submodule mode, export the Python targets to global targets.
  # If this behavior is not desired, FindPython _before_ pybind11.
  if(NOT is_config)
    set_property(TARGET Python::Python PROPERTY IMPORTED_GLOBAL TRUE)
    set_property(TARGET Python::Interpreter PROPERTY IMPORTED_GLOBAL TRUE)
    if(TARGET Python::Module)
      set_property(TARGET Python::Module PROPERTY IMPORTED_GLOBAL TRUE)
    endif()
  endif()
endif()

# Cache which FindPython flavor is in use; all later code indirects through
# ${_Python} (e.g. ${_Python}_VERSION, ${_Python}_SOABI).
if(Python_FOUND)
  set(_Python
      Python
      CACHE INTERNAL "" FORCE)
elseif(Python3_FOUND AND NOT Python2_FOUND)
  set(_Python
      Python3
      CACHE INTERNAL "" FORCE)
elseif(Python2_FOUND AND NOT Python3_FOUND)
  set(_Python
      Python2
      CACHE INTERNAL "" FORCE)
else()
  # Ambiguous: both Python2 and Python3 were found - disable Python support.
  message(AUTHOR_WARNING "Python2 and Python3 both present, pybind11 in "
                         "PYBIND11_NOPYTHON mode (manually activate to silence warning)")
  set(_pybind11_nopython ON)
  return()
endif()

# When pybind11 itself is the top-level project, report what was found.
if(PYBIND11_MASTER_PROJECT)
  if(${_Python}_INTERPRETER_ID MATCHES "PyPy")
    message(STATUS "PyPy ${${_Python}_PyPy_VERSION} (Py ${${_Python}_VERSION})")
  else()
    message(STATUS "${_Python} ${${_Python}_VERSION}")
  endif()
endif()
72
-
73
# Debug check - see https://stackoverflow.com/questions/646518/python-how-to-detect-debug-Interpreter
# BUG FIX: execute_process needs the interpreter's executable path, not the
# CMake target name "${_Python}::Python" (target names are not resolved by
# execute_process, so the probe could never run). sys.gettotalrefcount only
# exists in Py_DEBUG builds of CPython.
execute_process(
  COMMAND "${${_Python}_EXECUTABLE}" "-c"
          "import sys; print(hasattr(sys, 'gettotalrefcount'))"
  OUTPUT_VARIABLE PYTHON_IS_DEBUG)

# Python debug libraries expose slightly different objects before 3.8
# https://docs.python.org/3.6/c-api/intro.html#debugging-builds
# https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
if(PYTHON_IS_DEBUG)
  set_property(
    TARGET pybind11::pybind11
    APPEND
    PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
endif()
86
-
87
# Check on every access - since Python2 and Python3 could have been used - do nothing in that case.

if(DEFINED ${_Python}_INCLUDE_DIRS)
  # Expose the Python headers through the pybind11::pybind11 interface target.
  set_property(
    TARGET pybind11::pybind11
    APPEND
    PROPERTY INTERFACE_INCLUDE_DIRECTORIES $<BUILD_INTERFACE:${${_Python}_INCLUDE_DIRS}>)
endif()

if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
  # Python 2 requires the no-register compatibility flags.
  set_property(
    TARGET pybind11::pybind11
    APPEND
    PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python2_no_register)
endif()

# In CMake 3.18+, you can find these separately, so include an if
if(TARGET ${_Python}::${_Python})
  # Embedding links against the full interpreter library.
  set_property(
    TARGET pybind11::embed
    APPEND
    PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::${_Python})
endif()

# CMake 3.15+ has this
if(TARGET ${_Python}::Module)
  set_property(
    TARGET pybind11::module
    APPEND
    PROPERTY INTERFACE_LINK_LIBRARIES ${_Python}::Module)
else()
  # Fall back to pybind11's own link helper on older CMake versions.
  set_property(
    TARGET pybind11::module
    APPEND
    PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python_link_helper)
endif()
123
-
124
function(pybind11_add_module target_name)
  # Build a Python extension module (default) or a STATIC/SHARED library for
  # ${target_name}, wiring in pybind11 headers, LTO, stripping, and MSVC
  # extras. Options: STATIC, SHARED, MODULE, THIN_LTO, NO_EXTRAS.
  cmake_parse_arguments(PARSE_ARGV 1 ARG "STATIC;SHARED;MODULE;THIN_LTO;NO_EXTRAS" "" "")

  # BUG FIX: cmake_parse_arguments defines <prefix>_<option>, i.e.
  # ARG_STATIC / ARG_SHARED. The previous ARG_ADD_LIBRARY_STATIC /
  # ARG_ADD_LIBRARY_SHARED names were never set, so STATIC and SHARED
  # requests silently fell through to MODULE.
  if(ARG_STATIC)
    set(type STATIC)
  elseif(ARG_SHARED)
    set(type SHARED)
  else()
    set(type MODULE)
  endif()

  if("${_Python}" STREQUAL "Python")
    python_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
  elseif("${_Python}" STREQUAL "Python3")
    python3_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
  elseif("${_Python}" STREQUAL "Python2")
    python2_add_library(${target_name} ${type} WITH_SOABI ${ARG_UNPARSED_ARGUMENTS})
  else()
    message(FATAL_ERROR "Cannot detect FindPython version: ${_Python}")
  endif()

  target_link_libraries(${target_name} PRIVATE pybind11::headers)

  # Extension modules use the module link helper; static/shared builds need
  # the embedding interface (full interpreter link).
  if(type STREQUAL "MODULE")
    target_link_libraries(${target_name} PRIVATE pybind11::module)
  else()
    target_link_libraries(${target_name} PRIVATE pybind11::embed)
  endif()

  if(MSVC)
    target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
  endif()

  if(DEFINED ${_Python}_VERSION AND ${_Python}_VERSION VERSION_LESS 3)
    target_link_libraries(${target_name} PRIVATE pybind11::python2_no_register)
  endif()

  set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden"
                                                  CUDA_VISIBILITY_PRESET "hidden")

  if(ARG_NO_EXTRAS)
    return()
  endif()

  # Respect an explicit user choice for IPO/LTO; otherwise opt in here.
  if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
    if(ARG_THIN_LTO)
      target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
    else()
      target_link_libraries(${target_name} PRIVATE pybind11::lto)
    endif()
  endif()

  if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
    # Strip unnecessary sections of the binary on Linux/Mac OS
    pybind11_strip(${target_name})
  endif()

  # NOTE: pybind11::windows_extras is already linked above for MSVC; the
  # duplicated link call that used to sit here was redundant and removed.
endfunction()
185
-
186
function(pybind11_extension name)
  # Give target ${name} a Python-extension-style filename: no "lib" prefix,
  # ".pyd" on Windows, and the interpreter's SOABI tag inserted before the
  # platform suffix when FindPython reports one.
  set_property(TARGET ${name} PROPERTY PREFIX "")

  if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
    set_property(TARGET ${name} PROPERTY SUFFIX ".pyd")
  endif()

  if(${_Python}_SOABI)
    # Preserve any suffix already set (e.g. ".pyd" above); otherwise start
    # from the default shared-module suffix for this platform.
    get_property(
      suffix
      TARGET ${name}
      PROPERTY SUFFIX)
    if(NOT suffix)
      set(suffix "${CMAKE_SHARED_MODULE_SUFFIX}")
    endif()
    set_property(TARGET ${name} PROPERTY SUFFIX ".${${_Python}_SOABI}${suffix}")
  endif()
endfunction()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/utils/collect_env.py DELETED
@@ -1,16 +0,0 @@
1
- from mmcv.utils import collect_env as collect_base_env
2
- from mmcv.utils import get_git_hash
3
-
4
- import mmdet
5
-
6
-
7
def collect_env():
    """Return environment info extended with the MMDetection version string."""
    info = collect_base_env()
    # Tag the report with the installed version plus a short git hash.
    info['MMDetection'] = '{}+{}'.format(mmdet.__version__, get_git_hash()[:7])
    return info
12
-
13
-
14
if __name__ == '__main__':
    # Print each environment entry on its own line when run as a script.
    for key, value in collect_env().items():
        print(f'{key}: {value}')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/lama-example/saicinpainting/evaluation/evaluator.py DELETED
@@ -1,220 +0,0 @@
1
- import logging
2
- import math
3
- from typing import Dict
4
-
5
- import numpy as np
6
- import torch
7
- import torch.nn as nn
8
- import tqdm
9
- from torch.utils.data import DataLoader
10
-
11
- from saicinpainting.evaluation.utils import move_to_device
12
-
13
- LOGGER = logging.getLogger(__name__)
14
-
15
-
16
- class InpaintingEvaluator():
17
- def __init__(self, dataset, scores, area_grouping=True, bins=10, batch_size=32, device='cuda',
18
- integral_func=None, integral_title=None, clamp_image_range=None):
19
- """
20
- :param dataset: torch.utils.data.Dataset which contains images and masks
21
- :param scores: dict {score_name: EvaluatorScore object}
22
- :param area_grouping: in addition to the overall scores, allows to compute score for the groups of samples
23
- which are defined by share of area occluded by mask
24
- :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
25
- :param batch_size: batch_size for the dataloader
26
- :param device: device to use
27
- """
28
- self.scores = scores
29
- self.dataset = dataset
30
-
31
- self.area_grouping = area_grouping
32
- self.bins = bins
33
-
34
- self.device = torch.device(device)
35
-
36
- self.dataloader = DataLoader(self.dataset, shuffle=False, batch_size=batch_size)
37
-
38
- self.integral_func = integral_func
39
- self.integral_title = integral_title
40
- self.clamp_image_range = clamp_image_range
41
-
42
- def _get_bin_edges(self):
43
- bin_edges = np.linspace(0, 1, self.bins + 1)
44
-
45
- num_digits = max(0, math.ceil(math.log10(self.bins)) - 1)
46
- interval_names = []
47
- for idx_bin in range(self.bins):
48
- start_percent, end_percent = round(100 * bin_edges[idx_bin], num_digits), \
49
- round(100 * bin_edges[idx_bin + 1], num_digits)
50
- start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
51
- end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
52
- interval_names.append("{0}-{1}%".format(start_percent, end_percent))
53
-
54
- groups = []
55
- for batch in self.dataloader:
56
- mask = batch['mask']
57
- batch_size = mask.shape[0]
58
- area = mask.to(self.device).reshape(batch_size, -1).mean(dim=-1)
59
- bin_indices = np.searchsorted(bin_edges, area.detach().cpu().numpy(), side='right') - 1
60
- # corner case: when area is equal to 1, bin_indices should return bins - 1, not bins for that element
61
- bin_indices[bin_indices == self.bins] = self.bins - 1
62
- groups.append(bin_indices)
63
- groups = np.hstack(groups)
64
-
65
- return groups, interval_names
66
-
67
- def evaluate(self, model=None):
68
- """
69
- :param model: callable with signature (image_batch, mask_batch); should return inpainted_batch
70
- :return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
71
- name of the particular group arranged by area of mask (e.g. '10-20%')
72
- and score statistics for the group as values.
73
- """
74
- results = dict()
75
- if self.area_grouping:
76
- groups, interval_names = self._get_bin_edges()
77
- else:
78
- groups = None
79
-
80
- for score_name, score in tqdm.auto.tqdm(self.scores.items(), desc='scores'):
81
- score.to(self.device)
82
- with torch.no_grad():
83
- score.reset()
84
- for batch in tqdm.auto.tqdm(self.dataloader, desc=score_name, leave=False):
85
- batch = move_to_device(batch, self.device)
86
- image_batch, mask_batch = batch['image'], batch['mask']
87
- if self.clamp_image_range is not None:
88
- image_batch = torch.clamp(image_batch,
89
- min=self.clamp_image_range[0],
90
- max=self.clamp_image_range[1])
91
- if model is None:
92
- assert 'inpainted' in batch, \
93
- 'Model is None, so we expected precomputed inpainting results at key "inpainted"'
94
- inpainted_batch = batch['inpainted']
95
- else:
96
- inpainted_batch = model(image_batch, mask_batch)
97
- score(inpainted_batch, image_batch, mask_batch)
98
- total_results, group_results = score.get_value(groups=groups)
99
-
100
- results[(score_name, 'total')] = total_results
101
- if groups is not None:
102
- for group_index, group_values in group_results.items():
103
- group_name = interval_names[group_index]
104
- results[(score_name, group_name)] = group_values
105
-
106
- if self.integral_func is not None:
107
- results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
108
-
109
- return results
110
-
111
-
112
- def ssim_fid100_f1(metrics, fid_scale=100):
113
- ssim = metrics[('ssim', 'total')]['mean']
114
- fid = metrics[('fid', 'total')]['mean']
115
- fid_rel = max(0, fid_scale - fid) / fid_scale
116
- f1 = 2 * ssim * fid_rel / (ssim + fid_rel + 1e-3)
117
- return f1
118
-
119
-
120
- def lpips_fid100_f1(metrics, fid_scale=100):
121
- neg_lpips = 1 - metrics[('lpips', 'total')]['mean'] # invert, so bigger is better
122
- fid = metrics[('fid', 'total')]['mean']
123
- fid_rel = max(0, fid_scale - fid) / fid_scale
124
- f1 = 2 * neg_lpips * fid_rel / (neg_lpips + fid_rel + 1e-3)
125
- return f1
126
-
127
-
128
-
129
- class InpaintingEvaluatorOnline(nn.Module):
130
- def __init__(self, scores, bins=10, image_key='image', inpainted_key='inpainted',
131
- integral_func=None, integral_title=None, clamp_image_range=None):
132
- """
133
- :param scores: dict {score_name: EvaluatorScore object}
134
- :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1)
135
- :param device: device to use
136
- """
137
- super().__init__()
138
- LOGGER.info(f'{type(self)} init called')
139
- self.scores = nn.ModuleDict(scores)
140
- self.image_key = image_key
141
- self.inpainted_key = inpainted_key
142
- self.bins_num = bins
143
- self.bin_edges = np.linspace(0, 1, self.bins_num + 1)
144
-
145
- num_digits = max(0, math.ceil(math.log10(self.bins_num)) - 1)
146
- self.interval_names = []
147
- for idx_bin in range(self.bins_num):
148
- start_percent, end_percent = round(100 * self.bin_edges[idx_bin], num_digits), \
149
- round(100 * self.bin_edges[idx_bin + 1], num_digits)
150
- start_percent = '{:.{n}f}'.format(start_percent, n=num_digits)
151
- end_percent = '{:.{n}f}'.format(end_percent, n=num_digits)
152
- self.interval_names.append("{0}-{1}%".format(start_percent, end_percent))
153
-
154
- self.groups = []
155
-
156
- self.integral_func = integral_func
157
- self.integral_title = integral_title
158
- self.clamp_image_range = clamp_image_range
159
-
160
- LOGGER.info(f'{type(self)} init done')
161
-
162
- def _get_bins(self, mask_batch):
163
- batch_size = mask_batch.shape[0]
164
- area = mask_batch.view(batch_size, -1).mean(dim=-1).detach().cpu().numpy()
165
- bin_indices = np.clip(np.searchsorted(self.bin_edges, area) - 1, 0, self.bins_num - 1)
166
- return bin_indices
167
-
168
- def forward(self, batch: Dict[str, torch.Tensor]):
169
- """
170
- Calculate and accumulate metrics for batch. To finalize evaluation and obtain final metrics, call evaluation_end
171
- :param batch: batch dict with mandatory fields mask, image, inpainted (can be overriden by self.inpainted_key)
172
- """
173
- result = {}
174
- with torch.no_grad():
175
- image_batch, mask_batch, inpainted_batch = batch[self.image_key], batch['mask'], batch[self.inpainted_key]
176
- if self.clamp_image_range is not None:
177
- image_batch = torch.clamp(image_batch,
178
- min=self.clamp_image_range[0],
179
- max=self.clamp_image_range[1])
180
- self.groups.extend(self._get_bins(mask_batch))
181
-
182
- for score_name, score in self.scores.items():
183
- result[score_name] = score(inpainted_batch, image_batch, mask_batch)
184
- return result
185
-
186
- def process_batch(self, batch: Dict[str, torch.Tensor]):
187
- return self(batch)
188
-
189
- def evaluation_end(self, states=None):
190
- """:return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or
191
- name of the particular group arranged by area of mask (e.g. '10-20%')
192
- and score statistics for the group as values.
193
- """
194
- LOGGER.info(f'{type(self)}: evaluation_end called')
195
-
196
- self.groups = np.array(self.groups)
197
-
198
- results = {}
199
- for score_name, score in self.scores.items():
200
- LOGGER.info(f'Getting value of {score_name}')
201
- cur_states = [s[score_name] for s in states] if states is not None else None
202
- total_results, group_results = score.get_value(groups=self.groups, states=cur_states)
203
- LOGGER.info(f'Getting value of {score_name} done')
204
- results[(score_name, 'total')] = total_results
205
-
206
- for group_index, group_values in group_results.items():
207
- group_name = self.interval_names[group_index]
208
- results[(score_name, group_name)] = group_values
209
-
210
- if self.integral_func is not None:
211
- results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results))
212
-
213
- LOGGER.info(f'{type(self)}: reset scores')
214
- self.groups = []
215
- for sc in self.scores.values():
216
- sc.reset()
217
- LOGGER.info(f'{type(self)}: reset scores done')
218
-
219
- LOGGER.info(f'{type(self)}: evaluation_end done')
220
- return results
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/vision.cpp DELETED
@@ -1,129 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
- #include <torch/extension.h>
4
- #include "ROIAlignRotated/ROIAlignRotated.h"
5
- #include "box_iou_rotated/box_iou_rotated.h"
6
- #include "cocoeval/cocoeval.h"
7
- #include "deformable/deform_conv.h"
8
- #include "nms_rotated/nms_rotated.h"
9
-
10
- namespace detectron2 {
11
-
12
- #if defined(WITH_CUDA) || defined(WITH_HIP)
13
- extern int get_cudart_version();
14
- #endif
15
-
16
- std::string get_cuda_version() {
17
- #if defined(WITH_CUDA) || defined(WITH_HIP)
18
- std::ostringstream oss;
19
-
20
- #if defined(WITH_CUDA)
21
- oss << "CUDA ";
22
- #else
23
- oss << "HIP ";
24
- #endif
25
-
26
- // copied from
27
- // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231
28
- auto printCudaStyleVersion = [&](int v) {
29
- oss << (v / 1000) << "." << (v / 10 % 100);
30
- if (v % 10 != 0) {
31
- oss << "." << (v % 10);
32
- }
33
- };
34
- printCudaStyleVersion(get_cudart_version());
35
- return oss.str();
36
- #else // neither CUDA nor HIP
37
- return std::string("not available");
38
- #endif
39
- }
40
-
41
- bool has_cuda() {
42
- #if defined(WITH_CUDA)
43
- return true;
44
- #else
45
- return false;
46
- #endif
47
- }
48
-
49
- // similar to
50
- // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp
51
- std::string get_compiler_version() {
52
- std::ostringstream ss;
53
- #if defined(__GNUC__)
54
- #ifndef __clang__
55
-
56
- #if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8))
57
- #error "GCC >= 4.9 is required!"
58
- #endif
59
-
60
- { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; }
61
- #endif
62
- #endif
63
-
64
- #if defined(__clang_major__)
65
- {
66
- ss << "clang " << __clang_major__ << "." << __clang_minor__ << "."
67
- << __clang_patchlevel__;
68
- }
69
- #endif
70
-
71
- #if defined(_MSC_VER)
72
- { ss << "MSVC " << _MSC_FULL_VER; }
73
- #endif
74
- return ss.str();
75
- }
76
-
77
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
78
- m.def("get_compiler_version", &get_compiler_version, "get_compiler_version");
79
- m.def("get_cuda_version", &get_cuda_version, "get_cuda_version");
80
- m.def("has_cuda", &has_cuda, "has_cuda");
81
-
82
- m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes");
83
-
84
- m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward");
85
- m.def(
86
- "deform_conv_backward_input",
87
- &deform_conv_backward_input,
88
- "deform_conv_backward_input");
89
- m.def(
90
- "deform_conv_backward_filter",
91
- &deform_conv_backward_filter,
92
- "deform_conv_backward_filter");
93
- m.def(
94
- "modulated_deform_conv_forward",
95
- &modulated_deform_conv_forward,
96
- "modulated_deform_conv_forward");
97
- m.def(
98
- "modulated_deform_conv_backward",
99
- &modulated_deform_conv_backward,
100
- "modulated_deform_conv_backward");
101
-
102
- m.def("nms_rotated", &nms_rotated, "NMS for rotated boxes");
103
-
104
- m.def(
105
- "roi_align_rotated_forward",
106
- &ROIAlignRotated_forward,
107
- "Forward pass for Rotated ROI-Align Operator");
108
- m.def(
109
- "roi_align_rotated_backward",
110
- &ROIAlignRotated_backward,
111
- "Backward pass for Rotated ROI-Align Operator");
112
-
113
- m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate");
114
- m.def(
115
- "COCOevalEvaluateImages",
116
- &COCOeval::EvaluateImages,
117
- "COCOeval::EvaluateImages");
118
- pybind11::class_<COCOeval::InstanceAnnotation>(m, "InstanceAnnotation")
119
- .def(pybind11::init<uint64_t, double, double, bool, bool>());
120
- pybind11::class_<COCOeval::ImageEvaluation>(m, "ImageEvaluation")
121
- .def(pybind11::init<>());
122
- }
123
-
124
- #ifdef TORCH_LIBRARY
125
- TORCH_LIBRARY(detectron2, m) {
126
- m.def("nms_rotated", &nms_rotated);
127
- }
128
- #endif
129
- } // namespace detectron2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/memory/local.py DELETED
@@ -1,136 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import dataclasses
4
- import os
5
- from typing import Any, List
6
-
7
- import numpy as np
8
- import orjson
9
-
10
- from autogpt.llm_utils import create_embedding_with_ada
11
- from autogpt.memory.base import MemoryProviderSingleton
12
-
13
- EMBED_DIM = 1536
14
- SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
15
-
16
-
17
- def create_default_embeddings():
18
- return np.zeros((0, EMBED_DIM)).astype(np.float32)
19
-
20
-
21
- @dataclasses.dataclass
22
- class CacheContent:
23
- texts: List[str] = dataclasses.field(default_factory=list)
24
- embeddings: np.ndarray = dataclasses.field(
25
- default_factory=create_default_embeddings
26
- )
27
-
28
-
29
- class LocalCache(MemoryProviderSingleton):
30
- """A class that stores the memory in a local file"""
31
-
32
- def __init__(self, cfg) -> None:
33
- """Initialize a class instance
34
-
35
- Args:
36
- cfg: Config object
37
-
38
- Returns:
39
- None
40
- """
41
- self.filename = f"{cfg.memory_index}.json"
42
- if os.path.exists(self.filename):
43
- try:
44
- with open(self.filename, "w+b") as f:
45
- file_content = f.read()
46
- if not file_content.strip():
47
- file_content = b"{}"
48
- f.write(file_content)
49
-
50
- loaded = orjson.loads(file_content)
51
- self.data = CacheContent(**loaded)
52
- except orjson.JSONDecodeError:
53
- print(f"Error: The file '{self.filename}' is not in JSON format.")
54
- self.data = CacheContent()
55
- else:
56
- print(
57
- f"Warning: The file '{self.filename}' does not exist. "
58
- "Local memory would not be saved to a file."
59
- )
60
- self.data = CacheContent()
61
-
62
- def add(self, text: str):
63
- """
64
- Add text to our list of texts, add embedding as row to our
65
- embeddings-matrix
66
-
67
- Args:
68
- text: str
69
-
70
- Returns: None
71
- """
72
- if "Command Error:" in text:
73
- return ""
74
- self.data.texts.append(text)
75
-
76
- embedding = create_embedding_with_ada(text)
77
-
78
- vector = np.array(embedding).astype(np.float32)
79
- vector = vector[np.newaxis, :]
80
- self.data.embeddings = np.concatenate(
81
- [
82
- self.data.embeddings,
83
- vector,
84
- ],
85
- axis=0,
86
- )
87
-
88
- with open(self.filename, "wb") as f:
89
- out = orjson.dumps(self.data, option=SAVE_OPTIONS)
90
- f.write(out)
91
- return text
92
-
93
- def clear(self) -> str:
94
- """
95
- Clears the redis server.
96
-
97
- Returns: A message indicating that the memory has been cleared.
98
- """
99
- self.data = CacheContent()
100
- return "Obliviated"
101
-
102
- def get(self, data: str) -> list[Any] | None:
103
- """
104
- Gets the data from the memory that is most relevant to the given data.
105
-
106
- Args:
107
- data: The data to compare to.
108
-
109
- Returns: The most relevant data.
110
- """
111
- return self.get_relevant(data, 1)
112
-
113
- def get_relevant(self, text: str, k: int) -> list[Any]:
114
- """ "
115
- matrix-vector mult to find score-for-each-row-of-matrix
116
- get indices for top-k winning scores
117
- return texts for those indices
118
- Args:
119
- text: str
120
- k: int
121
-
122
- Returns: List[str]
123
- """
124
- embedding = create_embedding_with_ada(text)
125
-
126
- scores = np.dot(self.data.embeddings, embedding)
127
-
128
- top_k_indices = np.argsort(scores)[-k:][::-1]
129
-
130
- return [self.data.texts[i] for i in top_k_indices]
131
-
132
- def get_stats(self) -> tuple[int, tuple[int, ...]]:
133
- """
134
- Returns: The stats of the local cache.
135
- """
136
- return len(self.data.texts), self.data.embeddings.shape
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/webvid_datasets.py DELETED
@@ -1,122 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import os
9
- from video_llama.datasets.datasets.base_dataset import BaseDataset
10
- from video_llama.datasets.datasets.caption_datasets import CaptionDataset
11
- import pandas as pd
12
- import decord
13
- from decord import VideoReader
14
- import random
15
- import torch
16
- from torch.utils.data.dataloader import default_collate
17
- class WebvidDataset(BaseDataset):
18
- def __init__(self, vis_processor, text_processor, vis_root, ann_root):
19
- """
20
- vis_root (string): Root directory of video (e.g. webvid_eval/video/)
21
- ann_root (string): Root directory of video (e.g. webvid_eval/annotations/)
22
- split (string): val or test
23
- """
24
- super().__init__(vis_processor=vis_processor, text_processor=text_processor)
25
-
26
-
27
- # 读取一个路径下所有的
28
-
29
- ts_df = []
30
- for file_name in os.listdir(ann_root):
31
- if file_name.endswith('.csv'):
32
- df = pd.read_csv(os.path.join(ann_root, file_name))
33
- ts_df.append(df)
34
-
35
- merged_df = pd.concat(ts_df)
36
- self.annotation = merged_df
37
- self.vis_root = vis_root
38
- self.resize_size = 224
39
- self.num_frm = 8
40
- self.frm_sampling_strategy = 'headtail'
41
-
42
- def _get_video_path(self, sample):
43
- rel_video_fp = os.path.join(sample['page_dir'], str(sample['videoid']) + '.mp4')
44
- full_video_fp = os.path.join(self.vis_root, rel_video_fp)
45
- return full_video_fp
46
-
47
- def __getitem__(self, index):
48
- num_retries = 10 # skip error videos
49
- for _ in range(num_retries):
50
- sample = self.annotation.iloc[index]
51
- sample_dict = sample.to_dict()
52
- video_id = sample_dict['videoid']
53
-
54
- if 'name' in sample_dict.keys():
55
- text = sample_dict['name'].strip()
56
- else:
57
- raise NotImplementedError("Un-supported text annotation format.")
58
-
59
- # fetch video
60
- video_path = self._get_video_path(sample_dict)
61
- # if os.path.exists(video_path):
62
- try:
63
- video = self.vis_processor(video_path)
64
- except:
65
- print(f"Failed to load examples with video: {video_path}. "
66
- f"Will randomly sample an example as a replacement.")
67
- index = random.randint(0, len(self) - 1)
68
- continue
69
- caption = self.text_processor(text)
70
-
71
- # print(video.size())
72
- if video is None or caption is None \
73
- or video.size()!=torch.Size([3,self.vis_processor.n_frms,224,224]):
74
- print(f"Failed to load examples with video: {video_path}. "
75
- f"Will randomly sample an example as a replacement.")
76
- index = random.randint(0, len(self) - 1)
77
- continue
78
- else:
79
- break
80
- else:
81
- raise RuntimeError(f"Failed to fetch video after {num_retries} retries.")
82
- # "image_id" is kept to stay compatible with the COCO evaluation format
83
- return {
84
- "image": video,
85
- "text_input": caption,
86
- "type":'video',
87
- }
88
-
89
- def __len__(self):
90
- return len(self.annotation)
91
-
92
- # def collater(self, samples):
93
- # new_result = {}
94
- # new_result['image'] = default_collate( [sample["image"] for sample in samples])
95
- # new_result['text_input'] = default_collate( [sample["text_input"] for sample in samples])
96
- # return new_result
97
-
98
- class WebvidDatasetEvalDataset(BaseDataset):
99
- def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
100
- """
101
- vis_root (string): Root directory of images (e.g. coco/images/)
102
- ann_root (string): directory to store the annotation file
103
- split (string): val or test
104
- """
105
- super().__init__(vis_processor, text_processor, vis_root, ann_paths)
106
-
107
- def __getitem__(self, index):
108
-
109
- ann = self.annotation[index]
110
-
111
- vname = ann["video"]
112
- video_path = os.path.join(self.vis_root, vname)
113
-
114
- video = self.vis_processor(video_path)
115
-
116
- return {
117
- "video": video,
118
- "image_id": ann["image_id"],
119
- "instance_id": ann["instance_id"],
120
- }
121
-
122
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DHEIVER/ThyroidTumorClassificationModel/app.py DELETED
@@ -1,66 +0,0 @@
1
- import gradio as gr
2
- from transformers import AutoFeatureExtractor, AutoModelForImageClassification
3
- from PIL import Image
4
- import torch
5
- import datetime
6
-
7
- # Carregue o extrator de recursos e o modelo
8
- extractor = AutoFeatureExtractor.from_pretrained("SerdarHelli/ThyroidTumorClassificationModel")
9
- model = AutoModelForImageClassification.from_pretrained("SerdarHelli/ThyroidTumorClassificationModel")
10
-
11
- # Função para classificar a imagem
12
- def classify_image(image):
13
- # Pré-processa a imagem usando o extrator
14
- inputs = extractor(images=image, return_tensors="pt")
15
-
16
- # Passa a imagem pelo modelo
17
- outputs = model(**inputs)
18
-
19
- # Obtém as probabilidades das classes
20
- logits = outputs.logits
21
-
22
- # Calcula as probabilidades finais usando o softmax
23
- probabilities = torch.softmax(logits, dim=1)
24
-
25
- # Obtém a classe com a maior probabilidade
26
- predicted_class = torch.argmax(probabilities, dim=1).item()
27
-
28
- # Rótulos de classe personalizados com base no seu modelo
29
- class_labels = ["Sem Tumor", "Tumor"]
30
-
31
- # Rótulo da classe prevista
32
- predicted_label = class_labels[predicted_class]
33
-
34
- # Obtém a data e hora atual
35
- current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
36
-
37
- # Formate a saída em HTML com data e hora
38
- result_html = f"""
39
- <h2>Resultado da Classificação</h2>
40
- <p><strong>Classe Predita:</strong> {predicted_label}</p>
41
- <p><strong>Data e Hora:</strong> {current_time}</p>
42
- """
43
-
44
- # Retorna o resultado formatado em HTML
45
- return result_html
46
-
47
- # Crie uma interface Gradio com detalhes sobre o Classificador de Tumor da Tireoide
48
- iface = gr.Interface(
49
- fn=classify_image,
50
- inputs=gr.inputs.Image(),
51
- outputs=gr.outputs.HTML(), # Saída formatada com HTML
52
- title="Classificador de Tumor da Tireoide",
53
- description="""
54
- <p>Este é um classificador de imagens de tumores da tireoide.</p>
55
- <p>Para usá-lo:</p>
56
- <ol>
57
- <li>Clique no botão 'Escolher Arquivo' para fazer o upload de uma imagem da tireoide.</li>
58
- <li>Aguarde a classificação automática.</li>
59
- <li>O resultado mostrará a classe predita e a data e hora da classificação.</li>
60
- </ol>
61
- <p>Este classificador é baseado em um modelo pré-treinado e pode ajudar a identificar a presença de tumores da tireoide em imagens médicas.</p>
62
- """,
63
- )
64
-
65
- # Inicie a interface Gradio
66
- iface.launch()