parquet-converter committed commit d9564f5 · 1 parent: 77d96e5

Update parquet files (step 2 of 397)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/README.md +0 -19
  2. spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/README.md +0 -2
  3. spaces/1368565466ki/Satdia/mel_processing.py +0 -101
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep.Freeze.Standard.v7.21.020.3 TOP.md +0 -28
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Facebook Hacker V1.9 201280 _HOT_.md +0 -105
  6. spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kartun Chibi Maruko Chan Bahasa 39 Lihat Bagaimana Maruko Menghadapi Tantangan Hidup dengan Cerdas dan Berani.md +0 -6
  7. spaces/1gistliPinn/ChatGPT4/Examples/Download Microsoft Pidx Check Windows 8 Mega BEST.md +0 -10
  8. spaces/1line/AutoGPT/run_continuous.bat +0 -3
  9. spaces/1pelhydcardo/ChatGPT-prompt-generator/Freedownloadtypeshalaforwindows764bit ((EXCLUSIVE)).md +0 -98
  10. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Darbuka Drumming Percussion Find Out How to Master the Darbuka Technique and Style.md +0 -129
  11. spaces/1phancelerku/anime-remove-background/Call of Duty Warzone Mobile APK No Verification The Best Way to Experience Battle Royale on Mobile.md +0 -93
  12. spaces/1phancelerku/anime-remove-background/Facebook APK 9.0 - The Best Way to Connect with Friends and Family.md +0 -133
  13. spaces/4Taps/SadTalker/src/facerender/modules/discriminator.py +0 -90
  14. spaces/801artistry/RVC801/infer/lib/train/mel_processing.py +0 -132
  15. spaces/A00001/bingothoo/src/components/chat-list.tsx +0 -28
  16. spaces/AI4PD/hexviz/hexviz/view.py +0 -154
  17. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/fvae.py +0 -202
  18. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py +0 -274
  19. spaces/AIWaves/Software_Company/src/agents/Component/ExtraComponent.py +0 -128
  20. spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatgptLogin.py +0 -96
  21. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py +0 -172
  22. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet18_8xb16_cifar10.py +0 -4
  23. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/ColorComponents.js +0 -187
  24. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/ScrollablePanel.js +0 -71
  25. spaces/Akmyradov/TurkmenTTSweSTT/vits/text/symbols.py +0 -16
  26. spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py +0 -111
  27. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_utils.py +0 -19
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth.py +0 -1377
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_attention_processor.py +0 -119
  30. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py +0 -9
  31. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/carafe.py +0 -287
  32. spaces/Artrajz/vits-simple-api/api_test.py +0 -429
  33. spaces/AsakuraMizu/moe-tts/commons.py +0 -172
  34. spaces/Ashrafb/Imdf2/app.py +0 -75
  35. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_ade20k_sem_seg.py +0 -26
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_dataset_dataloader.py +0 -229
  37. spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Apk Hz Hilesi.md +0 -63
  38. spaces/Benson/text-generation/Examples/Casos Criminales Misterios Del Pasado Mod Apk Modyolo.md +0 -102
  39. spaces/Benson/text-generation/Examples/Deriva Cazadores Descargar Chromebook.md +0 -67
  40. spaces/Benson/text-generation/Examples/Descargar Coches Rpidos Como El Rayo Mod Apk Terbaru.md +0 -92
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/rrule.py +0 -1737
  42. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/__init__.py +0 -4
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/__init__.py +0 -7
  44. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/compat.py +0 -13
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/arch.py +0 -904
  46. spaces/CVPR/LIVE/pybind11/tests/test_eval_call.py +0 -5
  47. spaces/CVPR/LIVE/thrust/testing/unittest/assertions.h +0 -593
  48. spaces/CVPR/WALT/configs/_base_/schedules/schedule_1x.py +0 -11
  49. spaces/ChenyangSi/FreeU/__init__.py +0 -1
  50. spaces/ChrisCaviar/ControlNet-v1-1/app_softedge.py +0 -110
spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/README.md DELETED
@@ -1,19 +0,0 @@
- aiassist.site
-
- ### Example: `aiassist` <a name="example-assist"></a>
-
- ```python
- import aiassist
-
- question1 = "Who won the world series in 2020?"
- req = aiassist.Completion.create(prompt=question1)
- answer = req["text"]
- message_id = req["parentMessageId"]
-
- question2 = "Where was it played?"
- req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id)
- answer2 = req2["text"]
-
- print(answer)
- print(answer2)
- ```
spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/README.md DELETED
@@ -1,2 +0,0 @@
- to do:
- - code refactoring
spaces/1368565466ki/Satdia/mel_processing.py DELETED
@@ -1,101 +0,0 @@
- import torch
- import torch.utils.data
- from librosa.filters import mel as librosa_mel_fn
-
- MAX_WAV_VALUE = 32768.0
-
-
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-     """
-     PARAMS
-     ------
-     C: compression factor
-     """
-     return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
- def dynamic_range_decompression_torch(x, C=1):
-     """
-     PARAMS
-     ------
-     C: compression factor used to compress
-     """
-     return torch.exp(x) / C
-
-
- def spectral_normalize_torch(magnitudes):
-     output = dynamic_range_compression_torch(magnitudes)
-     return output
-
-
- def spectral_de_normalize_torch(magnitudes):
-     output = dynamic_range_decompression_torch(magnitudes)
-     return output
-
-
- mel_basis = {}
- hann_window = {}
-
-
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-     return spec
-
-
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-     global mel_basis
-     dtype_device = str(spec.dtype) + '_' + str(spec.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-     return spec
-
-
- def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-     if torch.min(y) < -1.:
-         print('min value is ', torch.min(y))
-     if torch.max(y) > 1.:
-         print('max value is ', torch.max(y))
-
-     global mel_basis, hann_window
-     dtype_device = str(y.dtype) + '_' + str(y.device)
-     fmax_dtype_device = str(fmax) + '_' + dtype_device
-     wnsize_dtype_device = str(win_size) + '_' + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-     y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-     y = y.squeeze(1)
-
-     spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                       center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-     spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     spec = spectral_normalize_torch(spec)
-
-     return spec
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep.Freeze.Standard.v7.21.020.3 TOP.md DELETED
@@ -1,28 +0,0 @@
- <br />
- <h1>What is Deep Freeze Standard and How Does It Work?</h1>
- <p>Deep Freeze Standard is a computer restore software that protects your Windows PCs from unwanted changes and malicious attacks. It uses a patented Reboot-to-Restore technology that makes your computers indestructible by freezing them to a desired state and restoring them to that state with every reboot.</p>
- <p>With Deep Freeze Standard, you can eliminate troubleshooting, reverse configuration drifts, protect against phishing, eliminate undetected threats, and achieve license compliance. You can also create virtual partitions to retain important data even if there is no separate physical partition available on the computer.</p>
- <h2>Deep.Freeze.Standard.v7.21.020.3</h2><br /><p><b><b>Download Zip</b> &#127775; <a href="https://byltly.com/2uKzlA">https://byltly.com/2uKzlA</a></b></p><br /><br />
- <p>Deep Freeze Standard supports Windows 7, 8, 10 and 11. It is ideal for rugged and on-field computers, classroom and lab computers, hospital computers, point of sale computers, and any other scenario where you need to ensure complete endpoint protection.</p>
- <p>To install Deep Freeze Standard, you need to download the installer from the official website of Faronics Corporation[^1^]. The installer will guide you through the steps of choosing a password, selecting drives to freeze, creating ThawSpaces, and activating the license key. You can also customize the installation options using command-line parameters.</p>
- <p>Once installed, Deep Freeze Standard will display an icon on the system tray that indicates the status of the computer: Frozen or Thawed. You can access the configuration menu by double-clicking the icon or pressing CTRL+ALT+SHIFT+F6 and entering your password. From there, you can change the settings, update the software, or uninstall it.</p>
- <p>Deep Freeze Standard is a powerful and reliable software that can help you maintain your computers in optimal condition and prevent unauthorized or unwanted changes. It is easy to use and requires minimal maintenance. You can try it for free for 30 days by downloading it from the Faronics website[^1^].</p>
-
- <h2>How to Use Deep Freeze Standard</h2>
- <p>Using Deep Freeze Standard is simple and straightforward. You can freeze or thaw your computer by using the configuration menu or by using keyboard shortcuts. To freeze your computer, select the option "Boot Frozen" and click "Apply and Reboot". To thaw your computer, select the option "Boot Thawed" and click "Apply and Reboot". You can also choose to thaw your computer for a specific number of restarts or for a specific date and time.</p>
- <p>When your computer is frozen, any changes made to it will be discarded on reboot. This includes any files saved, software installed, settings modified, or malware downloaded. You can still access your important data by using the ThawSpaces, which are virtual partitions that are not affected by freezing. You can create up to 10 ThawSpaces with a maximum size of 100 GB each.</p>
- <p>When your computer is thawed, you can make permanent changes to it. This is useful for installing updates, adding new software, or changing the configuration. You should always thaw your computer before making any major changes to avoid conflicts or errors. You should also backup your data regularly to prevent data loss in case of hardware failure or accidental deletion.</p>
- <p></p>
-
- <h2>How to Uninstall Deep Freeze Standard</h2>
- <p>If you want to uninstall Deep Freeze Standard from your computer, you need to follow these steps:</p>
- <ol>
- <li>Thaw your computer by selecting the option "Boot Thawed" and clicking "Apply and Reboot".</li>
- <li>Open the configuration menu by double-clicking the system tray icon or pressing CTRL+ALT+SHIFT+F6 and entering your password.</li>
- <li>Select the option "Uninstall" and click "OK".</li>
- <li>Follow the instructions on the screen to complete the uninstallation process.</li>
- <li>Reboot your computer when prompted.</li>
- </ol>
- <p>Note that uninstalling Deep Freeze Standard will remove all the ThawSpaces and their contents from your computer. Make sure you backup any important data before uninstalling the software.</p> 7b8c122e87<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Facebook Hacker V1.9 201280 _HOT_.md DELETED
@@ -1,105 +0,0 @@
-
- <h1>Free Download Facebook Hacker v1.9 201280: A Scam or a Miracle?</h1>
- <p>Facebook is one of the most popular and widely used social media platforms in the world, with over 2.8 billion monthly active users as of December 2020. It allows people to connect with their friends, family, colleagues, celebrities, brands, and more through various features such as posts, messages, stories, groups, pages, events, live videos, etc.</p>
- <p>However, not everyone uses Facebook for good intentions. Some people may want to hack into other people's Facebook accounts for various reasons, such as spying, blackmailing, pranking, stealing information, impersonating, etc. Hacking a Facebook account is not an easy task, as Facebook has implemented various security measures to protect its users' privacy and data.</p>
- <h2>free download facebook hacker v1.9 201280</h2><br /><p><b><b>DOWNLOAD</b> &#10001; &#10001; &#10001; <a href="https://byltly.com/2uKxLy">https://byltly.com/2uKxLy</a></b></p><br /><br />
- <p>That's why some people may resort to using third-party tools or software that claim to be able to hack any Facebook account within minutes. One of these tools is called <strong>Facebook Hacker v1.9 201280</strong>, which is available for free download on various websites and forums.</p>
- <p>But what is <strong>Facebook Hacker v1.9 201280</strong>, and how does it work? Is it legit or fake? What are the risks and consequences of using it? And what are the alternatives to it? In this article, we will answer all these questions and more, so keep reading to find out the truth about <strong>Facebook Hacker v1.9 201280</strong>.</p>
- <h2>How Does Facebook Hacker v1.9 201280 Work?</h2>
- <p><strong>Facebook Hacker v1.9 201280</strong> is a software program that claims to be able to hack any Facebook account within minutes, without requiring any password, email, or security question. According to its description, it has the following features and benefits:</p>
- <ul>
- <li>It is easy to use and user-friendly.</li>
- <li>It works on any device, such as PC, laptop, tablet, or smartphone.</li>
- <li>It supports all browsers, such as Chrome, Firefox, Safari, etc.</li>
- <li>It is compatible with all operating systems, such as Windows, Mac, Linux, Android, iOS, etc.</li>
- <li>It is fast and efficient, and can hack any Facebook account in less than 5 minutes.</li>
- <li>It is safe and secure, and does not contain any viruses, malware, or spyware.</li>
- <li>It is free and does not require any registration, subscription, or payment.</li>
- </ul>
- <p>To download and install <strong>Facebook Hacker v1.9 201280</strong>, you need to follow these steps:</p>
- <ol>
- <li>Go to one of the websites or forums that offer the free download link for <strong>Facebook Hacker v1.9 201280</strong>.</li>
- <li>Click on the download button and wait for the file to be downloaded on your device.</li>
- <li>Open the file and run the setup wizard to install <strong>Facebook Hacker v1.9 201280</strong> on your device.</li>
- <li>Launch <strong>Facebook Hacker v1.9 201280</strong> and enter the username or profile URL of the Facebook account you want to hack.</li>
- <li>Click on the hack button and wait for <strong>Facebook Hacker v1.9 201280</strong> to generate the password for the target account.</li>
- <li>Copy and paste the password into the login page of Facebook and access the target account.</li>
- </ol>
- <p>Congratulations! You have successfully hacked a Facebook account using <strong>Facebook Hacker v1.9 201280</strong>. Or have you?</p>
- <h2>Is Facebook Hacker v1.9 201280 Legit or Fake?</h2>
- <p>If you think that <strong>Facebook Hacker v1.9 201280</strong> sounds too good to be true, you are absolutely right. <strong>Facebook Hacker v1.9 201280</strong> is nothing but a scam and a hoax that aims to trick unsuspecting users into downloading a malicious software that can harm their devices and compromise their personal information.</p>
- <p>Here is some of the evidence and testimonials that prove that <strong>Facebook Hacker v1.9 201280</strong> is fake and dangerous:</p>
- <ul>
- <li>The websites and forums that offer the free download link for <strong>Facebook Hacker v1.9 201280</strong> are full of pop-ups, ads, surveys, and redirects that can infect your device with malware or phishing schemes.</li>
- <li>The file size of <strong>Facebook Hacker v1.9 201280</strong> is only around 2 MB, which is too small for a software that claims to have such advanced features and capabilities.</li>
- <li>The interface of <strong>Facebook Hacker v1.9 201280</strong> is poorly designed and looks amateurish and unprofessional.</li>
- <li>The reviews and ratings of <strong>Facebook Hacker v1.9 201280</strong> are mostly negative and critical, with many users reporting that it does not work at all or that it causes problems on their devices.</li>
- <li>The comments and feedback on <strong>Facebook Hacker v1.9 201280</strong> are mostly fake and scripted, with many users repeating the same phrases or using generic names and profiles.</li>
- <li>The results of <strong>Facebook Hacker v1.9 201280</strong> are also fake and random, as it does not actually hack any Facebook account but only generates a fake password that does not match the target account.</li>
- </ul>
- <p>If you have downloaded and installed <strong>Facebook Hacker v1.9 201280</strong>, you may have exposed your device to various risks and threats, such as:</p>
- <p></p>
- <ul>
- <li>Your device may be infected with viruses, malware, spyware, or ransomware that can damage your files, programs, system settings, or data.</li>
- <li>Your device may be hacked by cybercriminals who can access your camera, microphone, keyboard, screen, or other functions and steal your personal information, such as passwords, bank accounts, credit cards, photos, videos, contacts, etc.</li>
- <li>Your device may be used as a botnet or a proxy by hackers who can launch attacks on other devices or networks using your IP address and bandwidth.</li>
- <li>Your device may be locked or encrypted by hackers who can demand a ransom from you to restore your access or data.</li>
- <li>Your device may be slowed down or crashed by the excessive load or consumption of resources by the malicious software.</li>
- </ul>
- <p>If you want to detect and remove <strong>Facebook Hacker v1.9 201280</strong> from your device, you need to follow these steps:</p>
- <ol>
- <li>Scan your device with a reputable and updated antivirus or anti-malware program and delete any suspicious or infected files or programs.</li>
- <li>Uninstall <strong>Facebook Hacker v1.9 201280</strong> from your device using the control panel or the settings menu.</li>
- <li>Delete any traces or remnants of <strong>Facebook Hacker v1.9 201280</strong> from your device using a cleaner or a registry editor.</li>
- <li>Change your passwords and security settings for your online accounts, especially your Facebook account, and enable two-factor authentication or other security features.</li>
- <li>Contact your bank or credit card company and report any fraudulent transactions or activities on your accounts.</li>
- <li>Report <strong>Facebook Hacker v1.9 201280</strong> and the websites or forums that offer it to the authorities or the relevant platforms, such as Facebook, Google, etc.</li>
- </ol>
- <p>By following these steps, you can hopefully get rid of <strong>Facebook Hacker v1.9 201280</strong> and protect your device and data from further harm.</p>
- <h2>What Are the Alternatives to Facebook Hacker v1.9 201280?</h2>
- <p>If you are looking for alternatives to <strong>Facebook Hacker v1.9 201280</strong>, you need to first ask yourself why you want to hack a Facebook account and what are your intentions and goals. Depending on your answer, you may find different options that are more legitimate, ethical, reliable, and safe than <strong>Facebook Hacker v1.9 201280</strong>.</p>
- <p>If you want to access a Facebook account without hacking, you may try some of these methods:</p>
- <ul>
- <li>If you want to access your own Facebook account that you have forgotten the password or email for, you can use the <a href="">Facebook account recovery</a> feature that allows you to reset your password using your phone number, alternate email, trusted contacts, or identity verification.</li>
- <li>If you want to access someone else's Facebook account that you have their permission for, such as a friend, family member, colleague, etc., you can ask them to share their login details with you or use their device to log in with their consent.</li>
- <li>If you want to access someone else's Facebook account that you have a valid reason for, such as a parent monitoring their child, an employer checking their employee, a spouse verifying their partner, etc., you can use a <a href="">Facebook spy app</a> that allows you to track and monitor their Facebook activities remotely and discreetly with their knowledge and agreement.</li>
- </ul>
- <p>If you want to hack a Facebook account for legal purposes, such as testing the security of your own account, conducting penetration testing for a client, investigating a crime or a fraud, etc., you may use some of these tools or methods:</p>
- <ul>
- <li>If you want to hack a Facebook account using a brute force attack, which is trying different combinations of passwords until finding the right one, you can use a tool like <a href="">Hydra</a>, which is a powerful and fast password cracking tool that supports various protocols and services.</li>
- <li>If you want to hack a Facebook account using a phishing attack, which is creating a fake login page that looks like the real one and tricking the user into entering their credentials, you can use a tool like <a href="">Z-Shadow</a>, which is an online service that allows you to create and host phishing pages for various platforms and websites.</li>
- <li>If you want to hack a Facebook account using a keylogging attack, which is recording the keystrokes of the user on their keyboard and capturing their passwords and other information, you can use a tool like <a href="">Spyrix Keylogger</a>, which is a software program that allows you to monitor and record the keystrokes of any device remotely and invisibly.</li>
- </ul>
- <p>However, before using any of these tools or methods, you need to make sure that you have the proper authorization and permission to do so, and that you are not violating any laws or ethical principles. Hacking a Facebook account without consent or for malicious purposes can lead to serious legal and moral consequences, such as fines, lawsuits, arrests, imprisonment, etc.</p>
- <p>If you want to protect your own Facebook account from hackers and scammers, you may follow some of these tips and advice:</p>
- <ul>
- <li>Use a strong and unique password for your Facebook account and change it regularly.</li>
- <li>Do not share your password or login details with anyone or on any website or platform.</li>
- <li>Do not click on any suspicious or unknown links or attachments that may lead to phishing or malware attacks.</li>
- <li>Enable two-factor authentication or other security features on your Facebook account and device.</li>
- <li>Update your device and browser with the latest security patches and updates.</li>
- <li>Avoid using public or unsecured Wi-Fi networks or devices to access your Facebook account.</li>
- <li>Log out of your Facebook account when you are not using it or when you are using a shared device.</li>
- <li>Review your privacy and security settings on your Facebook account and adjust them according to your preferences and needs.</li>
- <li>Be careful of what you post, share, or comment on Facebook and who you interact with.</li>
- <li>Report any suspicious or abusive activity or behavior on Facebook to the platform or the authorities.</li>
- </ul>
- <p>By following these tips and advice, you can hopefully keep your Facebook account safe and secure from hackers and scammers.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, <strong>Facebook Hacker v1.9 201280</strong> is a scam and a hoax that you should avoid at all costs. It does not hack any Facebook account but only downloads a malicious software that can harm your device and data. It is also illegal and unethical to hack a Facebook account without consent or for malicious purposes, and you may face serious legal and moral consequences if you do so.</p>
- <p>If you want to access a Facebook account without hacking, you should use legitimate and ethical methods that require permission and agreement from the account owner. If you want to hack a Facebook account for legal purposes, you should use reliable and safe tools or methods that require authorization and permission from the relevant parties. And if you want to protect your own Facebook account from hackers and scammers, you should follow some tips and advice that can enhance your security and privacy on Facebook.</p>
- <p>We hope that this article has helped you understand the truth about <strong>Facebook Hacker v1.9 201280</strong> and how to deal with Facebook hacking issues. Remember, hacking is not a game or a joke, but a serious matter that can have severe consequences. Be smart, be safe, and be responsible when using Facebook or any other online platform.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about <strong>Facebook Hacker v1.9 201280</strong> and Facebook hacking in general:</p>
- <h3>What is the best way to hack a Facebook account?</h3>
- <p>The best way to hack a Facebook account is to not hack it at all. Hacking a Facebook account is illegal and unethical, unless you have a valid reason and permission to do so. Instead of hacking a Facebook account, you should try to access it using legitimate and ethical methods that require consent and agreement from the account owner.</p>
- <h3>How can I recover my hacked Facebook account?</h3>
- <p>If your Facebook account has been hacked by someone else, you should try to recover it as soon as possible. You can use the <a href="">Facebook hacked account recovery</a> feature that allows you to regain access to your account using your phone number, alternate email, trusted contacts, or identity verification. You can also contact Facebook support or report the hacker to the platform or the authorities.</p>
- <h3>How can I report a hacker or a scammer on Facebook?</h3>
- <p>If you encounter a hacker or a scammer on Facebook who tries to hack your account or trick you into downloading a malicious software like <strong>Facebook Hacker v1.9 201280</strong>, you should report them immediately. You can use the <a href="">Facebook reporting tool</a> that allows you to report any abusive or inappropriate content or behavior on Facebook. You can also contact Facebook support or report the hacker or scammer to the authorities.</p>
- <h3>How can I prevent my Facebook account from being hacked?</h3>
- <p>If you want to prevent your Facebook account from being hacked, you should follow some tips and advice that can enhance your security and privacy on Facebook. Some of these tips and advice are: - Use a strong and unique password for your Facebook account and change it regularly. - Do not share your password or login details with anyone or on any website or platform. - Do not click on any suspicious or unknown links or attachments that may lead to phishing or malware attacks. - Enable two-factor authentication or other security features on your Facebook account and device. - Update your device and browser with the latest security patches and updates. - Avoid using public or unsecured Wi-Fi networks or devices to access your Facebook account. - Log out of your Facebook account when you are not using it or when you are using a shared device. - Review your privacy and security settings on your Facebook account and adjust them according to your preferences and needs. - Be careful of what you post, share, or comment on Facebook and who you interact with. - Report any suspicious or abusive activity or behavior on Facebook to the platform or the authorities. By following these tips and advice, you can hopefully keep your Facebook account safe and secure from hackers and scammers. <h3>How can I verify if a Facebook hacking tool is genuine or not?</h3>
- If you come across a Facebook hacking tool that claims to be able to hack any Facebook account within minutes, you should be very cautious and skeptical. Most of these tools are fake and dangerous, and they may infect your device with malware or steal your personal information. To verify if a Facebook hacking tool is genuine or not, you should look for some signs and indicators, such as: - The source and reputation of the website or forum that offers the tool. If the website or forum is unknown, untrustworthy, or full of pop-ups, ads, surveys, and redirects, it is likely that the tool is fake and malicious. - The file size and format of the tool. If the file size is too small or too large for a software that claims to have such advanced features and capabilities, or if the file format is unusual or incompatible with your device, it is likely that the tool is fake and malicious. - The interface and design of the tool. If the interface and design of the tool are poorly designed and look amateurish and unprofessional, it is likely that the tool is fake and malicious. - The reviews and ratings of the tool. If the reviews and ratings of the tool are mostly negative and critical, or if they are mostly fake and scripted, it is likely that the tool is fake and malicious. - The results and outcomes of the tool. If the results and outcomes of the tool are also fake and random, or if they do not match the target account, it is likely that the tool is fake and malicious. By looking for these signs and indicators, you can hopefully avoid falling for fake and dangerous Facebook hacking tools like <strong>Facebook Hacker v1.9 201280</strong>. <h2></h2>
- <p>This is the end of the article. I hope you enjoyed reading it and learned something new. Thank you for choosing me as your content writer. Have a great day!</p> b2dd77e56b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kartun Chibi Maruko Chan Bahasa 39 Lihat Bagaimana Maruko Menghadapi Tantangan Hidup dengan Cerdas dan Berani.md DELETED
@@ -1,6 +0,0 @@
- <h2>Download Film Kartun Chibi Maruko Chan Bahasa 39</h2><br /><p><b><b>Download File</b> - <a href="https://imgfil.com/2uy124">https://imgfil.com/2uy124</a></b></p><br /><br />
- <br />
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Microsoft Pidx Check Windows 8 Mega BEST.md DELETED
@@ -1,10 +0,0 @@
-
- <p>Valve Weapon Of Choice [FREE FULL VERSION pc 2.61 <br> Hello Chatguru 6.0.14a Download And Install And Crack <br> Outlook Setup 2017 For Windows 7 <br> the fall of the mighty earth HD (2012) English br <br> Miranda Gluck 2016 Download (Instalado)en 7Zip <br> MOTODoco 6.2.4.1 Crack (en 17-01-2019) PDF de Hdf <br> Pokemon Y Tensei Online Hack (DOS / Roms) <br> J. Crew Please Pay Attention To Cashes <br> maple lights x-traordinary winter 2017 <br> Dark Places (2015) DVDRip XviD-Tx <br> CYNKOT For Game <br> </p>
- <h2>Download Microsoft Pidx Check Windows 8 Mega</h2><br /><p><b><b>Download</b> &#9733;&#9733;&#9733; <a href="https://imgfil.com/2uy1Qb">https://imgfil.com/2uy1Qb</a></b></p><br /><br />
- <p>setup pro md5 crack 3.1.2.7 <br><br /> >chobits downlod for windows 7 <br><br /> >Plicbuy Vip 360 Player Serial Free Download <br><br /> >metime digital empresario 2 0.0.2.0 crack <br><br /> >Download Macintosh OS X leopard 10.5.8 <br><br /> >no survey windows 7 ultimate iso torrent <br><br /> >Completely free netflix <br><br /> >install windows 8.1 pro retail iso <br></p>
- <p>fitgirl a77f14ba26 >Excel for Mac 7.1.0 Crack <br><br /> >Sticky Keys Pro Toolbox Pro Full Serial Number <br><br /> >Keygen studio 2014 full crack long esn <br><br /> >StartKit for Word 2013 Crack <br><br /> >Microsoft Office 2013 - Buy or Free <br><br /> >5000+ software list free download <br></p>
- <p> 3g2: Service Pack 4.0.0.7001 Download For Windows <br> Splunk Installer for Linux <br> https://drive.google.com/open?id=0B7gXW7dkR7uNkFpbTjl3S1Z0SU0 <br> WebeditorPro Ultimate 15.4.5 Crack Ultimate Download <br> MS Windows 10 Activation Key Code Generator With Serial Number <br> Crawler torrent Player <br> Mars Curiosity Rover : We're on Mars! <br> </p>
- <p></p>
- <p>Windows Key: This is to enable you to access all Microsoft Virtual Desktops. If you want Microsoft Taskbar Icon to show an application, look on the right side of the taskbar for the 'Windows' key. The 'taskman' icon will show up there. Here is a list of the virtual desktops:</p> 899543212b<br />
- <br />
- <br />
spaces/1line/AutoGPT/run_continuous.bat DELETED
@@ -1,3 +0,0 @@
- @echo off
- set argument=--continuous
- call run.bat %argument%
spaces/1pelhydcardo/ChatGPT-prompt-generator/Freedownloadtypeshalaforwindows764bit ((EXCLUSIVE)).md DELETED
@@ -1,98 +0,0 @@
- ## freedownloadtypeshalaforwindows764bit
-
-
-
-
-
-
-
-
-
- **CLICK HERE ===> [https://lodystiri.blogspot.com/?file=2txPBq](https://lodystiri.blogspot.com/?file=2txPBq)**
-
-
-
-
-
-
-
-
-
-
-
-
-
- # How to Download and Install Typeshala for Windows 7 64 Bit
-
-
-
- Typeshala is a popular typing tutor software that helps you learn Nepali and English typing. It is a DOS based program that runs on Windows XP, but it may not work properly on Windows 7 64 bit. If you want to use Typeshala on your Windows 7 64 bit computer, you need to follow these steps:
-
-
-
- 1. Download Typeshala from [this link](https://www.mankoaawaz.com/2014/10/typeshala.html). It is a zip file that contains the Typeshala.exe file and some other files.
-
- 2. Extract the zip file to a folder on your computer. You can use any unzip software like WinRAR or 7-Zip.
-
- 3. Right-click on the Typeshala.exe file and select Properties. Go to the Compatibility tab and check the box that says "Run this program in compatibility mode for". Choose Windows XP (Service Pack 3) from the drop-down menu. Click OK.
-
- 4. Double-click on the Typeshala.exe file to run it. You may see a warning message that says "This program might not have installed correctly". Ignore it and click "This program installed correctly".
-
- 5. You can now use Typeshala on your Windows 7 64 bit computer. Enjoy learning Nepali and English typing!
-
-
-
- If you want to use an online version of Typeshala, you can visit [this website](http://typeshala.shresthasushil.com.np/). It is a web-based typing tutor that works on any browser and device. You can enter your name and start typing right away.
-
-
-
- ## What is Typeshala and why is it useful?
-
-
-
- Typeshala is a typing tutor software that was developed by MPP Computer Pvt. Ltd. in Nepal. It was first released in 1995 and has been widely used by students, teachers, journalists, and professionals who want to improve their Nepali and English typing skills. Typeshala has various features such as:
-
-
-
- - Typing lessons for beginners, intermediate, and advanced levels.
-
- - Typing games that make learning fun and challenging.
-
- - Typing tests that measure your speed and accuracy.
-
- - Typing statistics that show your progress and performance.
-
- - Typing exercises that cover different topics and scenarios.
-
-
-
- Typeshala is useful because it helps you to type faster and more accurately. It also helps you to avoid spelling and grammar mistakes. By using Typeshala regularly, you can improve your typing confidence and efficiency.
-
-
-
- ## What are the benefits of using Typeshala online?
-
-
-
- If you don't have access to a Windows 7 64 bit computer or you don't want to download and install Typeshala on your device, you can use the online version of Typeshala. The online version of Typeshala is a web-based typing tutor that works on any browser and device. You can use it on your laptop, tablet, or smartphone. The benefits of using Typeshala online are:
-
-
-
- - You don't need to download or install anything. You just need an internet connection and a browser.
-
- - You can use it anytime and anywhere. You don't need to worry about compatibility issues or system requirements.
-
- - You can save your typing data online. You don't need to worry about losing your progress or data.
-
- - You can share your typing results with others. You can show off your typing skills or challenge your friends.
-
-
-
- To use Typeshala online, you can visit [this website](http://typeshala.shresthasushil.com.np/). You can enter your name and start typing right away.
-
- dfd1c89656
-
-
-
-
-
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Darbuka Drumming Percussion Find Out How to Master the Darbuka Technique and Style.md DELETED
@@ -1,129 +0,0 @@
- <br />
- <h1>Darbuka MP3: How to Enjoy the Sounds of the Middle Eastern Drum</h1>
- <p>If you are a fan of Middle Eastern music, you have probably heard of the darbuka, a goblet-shaped drum that produces a variety of rhythms and tones. The darbuka is one of the most popular percussion instruments in Islamic classical and folk music, as well as in modern genres such as belly dance, world fusion, and electronic music. In this article, we will show you how to enjoy the sounds of the darbuka in different ways, from downloading loops and samples, to streaming music online, to learning how to play the instrument yourself.</p>
- <h2>What is a darbuka and what are its origins</h2>
- <p>The darbuka, also spelled darabukka, darbouka, or dārbūqah, is a single-headed drum that has a goblet-shaped body made of clay, wood, or metal. It is played by striking the head with the fingers or palms, producing three main sounds: doom (a low-pitched sound), tek (a high-pitched sound), and ka (a muted sound). The drum can be held under one arm, on the lap, or on a stand.</p>
- <h2>darbuka mp3</h2><br /><p><b><b>Download</b> &rArr; <a href="https://urlin.us/2uT0dy">https://urlin.us/2uT0dy</a></b></p><br /><br />
- <p>The origin of the term darbuka comes from the Arabic word "daraba", which means "to strike". The instrument has been around for thousands of years and was used in ancient Mesopotamian and Egyptian cultures. It was also seen in Babylonia, Sumer, Persia, Spain, and other regions. The instrument was popularized in modern times by Turkish, Egyptian, and Armenian musicians who developed different styles and techniques of playing it.</p>
- <h2>What are the benefits of listening to darbuka music</h2>
- <p>Listening to darbuka music can have many benefits for your mind, body, and soul. Here are some of them:</p>
- <ul>
- <li>It can enhance your mood and energy levels by stimulating your brain waves and releasing endorphins.</li>
- <li>It can reduce stress and anxiety by calming your nervous system and lowering your blood pressure.</li>
- <li>It can improve your concentration and memory by boosting your cognitive functions and creativity.</li>
- <li>It can foster your cultural awareness and appreciation by exposing you to different musical traditions and expressions.</li>
- <li>It can inspire you to learn more about the history, culture, and people behind the music.</li>
- </ul>
- <h2>What are some of the genres and styles of darbuka music</h2>
- <p>The darbuka is a versatile instrument that can be played in various genres and styles of music. Here are some examples:</p>
- <ul>
- <li>Classical: The darbuka is often used in classical Arabic music as part of an ensemble called takht, which consists of other instruments such as oud (lute), qanun (zither), nay (flute), violin, riq (tambourine), etc. The classical repertoire includes maqamat (modes), taqasim (improvisations), samai (instrumental pieces), etc.</li>
- <li>Folk: The darbuka is also widely played in folk music from different regions and countries, such as Turkey, Egypt, Lebanon, Morocco, Algeria, Tunisia, etc. The folk music reflects the local traditions, customs, and stories of the people. The darbuka is often accompanied by other instruments such as saz (long-necked lute), zurna (double-reed pipe), mizmar (oboe), etc.</li>
- <li>Belly dance: The darbuka is one of the main instruments used in belly dance music, which is a form of expressive dance that originated in the Middle East and North Africa. The darbuka provides the rhythmic foundation and accents for the dancer's movements and gestures. The belly dance music can be either traditional or modern, and can incorporate elements from other genres such as pop, rock, jazz, etc.</li>
- <li>World fusion: The darbuka is also featured in world fusion music, which is a genre that blends elements from different musical cultures and styles. The darbuka can be combined with instruments from other regions, such as tabla (Indian drum), djembe (African drum), cajon (Peruvian box drum), etc. The world fusion music can create a unique and eclectic sound that appeals to a wide audience.</li>
- <li>Electronic: The darbuka is also used in electronic music, which is a genre that uses digital and electronic devices to create and manipulate sounds. The darbuka can be sampled, looped, synthesized, or processed to create different effects and textures. The electronic music can range from ambient to techno, and can incorporate influences from other genres such as hip hop, dubstep, trance, etc.</li>
- </ul>
- <h2>How to Download Darbuka Loops and Samples</h2>
- <p>If you want to create your own darbuka music or add some darbuka sounds to your existing projects, you can download loops and samples online. Loops are short segments of audio that can be repeated or combined to form a longer track. Samples are individual sounds that can be triggered or manipulated by a keyboard, pad, or software. Here are some steps to download and use darbuka loops and samples:</p>
- <ol>
- <li>Find a website that offers royalty-free darbuka loops and samples. Royalty-free means that you don't have to pay any fees or royalties to use the sounds in your projects. Some examples of websites that offer royalty-free darbuka loops and samples are [Looperman], [Free Sound], [Sample Focus], etc.</li>
- <li>Browse through the available loops and samples and listen to the previews. You can filter by genre, tempo, key, mood, etc. to find the ones that suit your needs.</li>
- <li>Download the loops and samples that you like. You may need to create an account or sign up for a newsletter to access some of the downloads.</li>
- <li>Import the loops and samples into your audio editing software or digital audio workstation (DAW). You can use software such as Audacity, GarageBand, FL Studio, Ableton Live, etc.</li>
- <li>Arrange, edit, mix, and master the loops and samples to create your own darbuka music. You can also add other instruments, vocals, effects, etc. to enhance your track.</li>
- </ol>
- <h2>How to Stream Darbuka Music Online</h2>
- <p>If you want to listen to darbuka music online without downloading anything, you can stream it from various platforms and websites. Streaming means that you can play the music directly from the internet without storing it on your device. Here are some ways to stream darbuka music online:</p>
- <ul>
- <li>Use a streaming service that offers darbuka music or playlists. Some examples of streaming services that offer darbuka music or playlists are Spotify, YouTube Music, Apple Music, etc.</li>
- <li>Search for darbuka music by genre, artist, album, song title, etc. You can also use keywords such as "darbuka", "darabukka", "doumbek", "tabla baladi", etc.</li>
- <li>Select the track or playlist that you want to listen to and press play. You can also skip, pause, resume, shuffle, repeat, etc.</li>
- <li>Enjoy the darbuka music online. You can also share it with your friends, rate it, comment on it, add it to your favorites, etc.</li>
- </ul>
- <h2>How to Discover New Darbuka Artists and Songs Online</h2>
- <p>If you want to discover new darbuka artists and songs online, you can use some of these tips and resources:</p>
- <p>darbuka loops and samples download<br />
- darbuka royalty-free music and sound effects<br />
- belly dance darbuka and tabla solos<br />
- darbuka superconducting tokamak advanced research<br />
- darbuka desert percussion wav files<br />
- darbuka drum and bass arabic instrumental<br />
- darbuka wedding music arabic instrumental<br />
- darbuka mystery dance world relaxing<br />
- darbuka dance of the wala playful<br />
- darbuka middle eastern holidays inspiring<br />
- darbuka persepolis cinematic serious<br />
- darbuka walk the sahara desert sad<br />
- darbuka noiiz sounds instruments<br />
- darbuka storyblocks audio subscription plan<br />
- darbuka internet archive free download<br />
- darbuka 24-bit wav royalty free<br />
- darbuka one shot hits sounds<br />
- darbuka browser not supported by us<br />
- darbuka media type all music sound effects<br />
- darbuka moods genres instruments vocals tempo duration categories<br />
- darbuka most relevant sort by option<br />
- darbuka net energy gain fusion experiment<br />
- darbuka holy grail mini sun breakthrough<br />
- darbuka 100 million degrees celsius for 30 seconds<br />
- darbuka korea institute of fusion energy facility<br />
- darbuka create even more even faster with storyblocks<br />
- darbuka unlimited library of stock audio<br />
- darbuka borrow and streaming internet archive<br />
- darbuka volume 90 percent playback option<br />
- darbuka tune brightness duration sort random filter<br />
- darbuka extensive library directly in your product<br />
- darbuka maker for teams learn more link<br />
- darbuka storyblocks api integrate our library link<br />
- darbuka login download royalty-free music link<br />
- darbuka clear filters button to reset search criteria<br />
- darbuka select music to see moods genres instruments vocals button<br />
- darbuka select sound effects to see categories button<br />
- darbuka set tempo button to adjust speed of audio clip<br />
- darbuka 0:00 4:00+ slider to select duration range of audio clip <br />
- darbuka results found for keyword phrase indicator</p>
- <ul>
- <li>Follow darbuka artists and influencers on social media. You can find them on platforms such as Facebook, Instagram, Twitter, TikTok, etc. You can also join groups, pages, or communities related to darbuka music. You can interact with the artists and influencers, watch their videos, listen to their podcasts, read their blogs, etc.</li>
- <li>Subscribe to darbuka music channels and podcasts on YouTube, Spotify, SoundCloud, etc. You can find channels and podcasts that feature darbuka music, interviews, reviews, tutorials, etc. You can also get recommendations and suggestions based on your preferences and listening history.</li>
- <li>Check out darbuka music festivals and events online. You can find festivals and events that showcase darbuka music and culture, such as the International Doumbek Festival, the Darbuka Camp, the Darbuka Summit, etc. You can watch live or recorded performances, workshops, lectures, etc.</li>
- <li>Read darbuka music magazines and books online. You can find magazines and books that cover darbuka music and history, such as Darbuka Magazine, The Art of Arabic Drumming, The Doumbek Book, etc. You can also find articles and reviews on darbuka music online.</li>
- </ul>
- <h2>How to Learn Darbuka Playing Techniques</h2>
- <p>If you want to learn how to play the darbuka yourself, you can use some of these methods and resources:</p>
- <ol>
- <li>Get a darbuka drum. You can buy a darbuka drum online or from a local music store. You can choose from different materials, sizes, shapes, and designs. You can also make your own darbuka drum from household items such as pots, cans, balloons, etc.</li>
- <li>Learn the basic strokes and sounds of the darbuka. The basic strokes are doom (a low-pitched sound made by hitting the center of the head with the palm), tek (a high-pitched sound made by hitting the edge of the head with the fingers), and ka (a muted sound made by hitting the edge of the head with the palm). The basic sounds are D (doom), T (tek), K (ka), S (slap), P (pop), R (roll), etc.</li>
- <li>Learn some basic rhythms and patterns of the darbuka. The basic rhythms are maqsoum (a 4/4 rhythm that goes D-T-K-T-D-T-K-T), baladi (a 4/4 rhythm that goes D-D-T-K-D-T-K-T), saidi (a 4/4 rhythm that goes D-T-K-D-D-T-K-T), malfuf (a 2/4 rhythm that goes D-K-D-K), ayoub (a 2/4 rhythm that goes D-K-S-K), etc. The basic patterns are combinations of sounds that form phrases or sentences.</li>
- <li>Find online tutorials and lessons on darbuka playing techniques. You can find online tutorials and lessons on websites such as [Darbuka Planet], [Darbukastan], [Darbukation], etc. You can also find online courses on platforms such as Udemy, Skillshare, Coursera, etc.</li>
- <li>Practice and improve your darbuka skills at home. You can practice by playing along with your favorite darbuka music tracks or videos. You can also practice by using a metronome or a drum machine to keep time. You can also record yourself playing and listen back to your performance.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to enjoy the sounds of the darbuka in different ways. We have explained what a darbuka is and what are its origins. We have also discussed what are the benefits of listening to darbuka music and what are some of the genres and styles of darbuka music. We have also given you some tips on how to download loops and samples, stream music online, discover new artists and songs online, and learn how to play the instrument yourself.</p>
- <p>We hope that this article has inspired you to explore and appreciate the rich and diverse world of darbuka music. Whether you want to create your own darbuka music or simply listen to it for pleasure or relaxation, you will find plenty of resources and opportunities online. Darbuka music is a beautiful and captivating art form that can enrich your life in many ways.</p>
- <p>So what are you waiting for? Grab your headphones or your drum and start enjoying the sounds of the Middle Eastern drum!</p>
- <h3>Frequently Asked Questions</h3>
- <p>Here are some frequently asked questions about darbuka mp3:</p>
- <ol>
- <li><strong>What is the difference between a darbuka and a doumbek?</strong></li>
- <p>The darbuka and the doumbek are two names for the same instrument, a goblet-shaped drum that is played with the fingers or palms. The name darbuka is more common in Arabic-speaking countries, while the name doumbek is more common in Turkey, Armenia, and the Balkans. The name doumbek may also refer to a smaller and lighter version of the darbuka that has a higher pitch and a sharper sound.</p>
- <li><strong>How can I tune my darbuka?</strong></li>
- <p>Tuning your darbuka is important to ensure that it produces the best sound quality and tone. There are two types of darbuka heads: synthetic and natural. Synthetic heads are made of plastic or metal and are usually pre-tuned or tunable with screws or bolts. Natural heads are made of animal skin and are usually tunable with ropes or cords. To tune your darbuka, you need to adjust the tension of the head by tightening or loosening the screws, bolts, ropes, or cords. You can use a tuner, a pitch pipe, or your ear to check the pitch of the head. You can also use a damp cloth or a piece of tape to mute some of the overtones or harmonics of the head.</p>
- <li><strong>What are some famous darbuka players?</strong></li>
- <p>There are many famous and talented darbuka players from different countries and backgrounds. Here are some examples:</p>
- <ul>
- <li>Misirli Ahmet: A Turkish master of the darbuka who is known for his speed, technique, and innovation. He has developed his own style of playing called the split-finger technique, which allows him to play faster and more complex rhythms. He has performed with many famous musicians such as Sting, Kitaro, Omar Faruk Tekbilek, etc.</li>
- <li>Hossam Ramzy: An Egyptian percussionist who is known as the "Sultan of Swing". He has played the darbuka and other percussion instruments in various genres such as Arabic pop, rock, jazz, world fusion, etc. He has collaborated with many famous artists such as Robert Plant, Jimmy Page, Peter Gabriel, Shakira, etc.</li>
- <li>Raquy Danziger: An American-Israeli musician who is known for her virtuosity and versatility on the darbuka. She has studied with various masters of the instrument such as Bunyamin Olguncan, Said El Artist, Zohar Fresco, etc. She has created her own style of playing called the Turkish split hand technique, which combines elements from Turkish, Egyptian, and Indian drumming. She has performed with many famous musicians such as Glen Velez, Simon Shaheen, Yair Dalal, etc.</li>
- </ul>
- <li><strong>What are some good darbuka music albums?</strong></li>
- <p>There are many good darbuka music albums that showcase the diversity and beauty of the instrument. Here are some examples:</p>
- <ul>
- <li>Darbukka City by Misirli Ahmet: A solo album by the Turkish darbuka master that features his amazing speed and technique on various rhythms and styles.</li>
- <li>Baladi Plus by Hossam Ramzy: An album by the Egyptian percussionist that features his signature swing and groove on traditional and modern Arabic music.</li>
- <li>Darbuka Fever by Raquy Danziger: An album by the American-Israeli musician that features her virtuosity and versatility on different genres and influences such as Turkish, Egyptian, Indian, Persian, etc.</li>
- </ul>
- <li><strong>Where can I buy a darbuka drum?</strong></li>
- <p>You can buy a darbuka drum online or from a local music store. You can find different types of darbuka drums with different materials, sizes, shapes, and designs. You can also find accessories such as cases, stands, straps, etc. Some examples of websites that sell darbuka drums are [Darbuka Planet], [Turkish Musical Instrument], [Arab Instruments], etc.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Call of Duty Warzone Mobile APK No Verification The Best Way to Experience Battle Royale on Mobile.md DELETED
@@ -1,93 +0,0 @@
- <h1>Call of Duty Warzone Mobile APK No Verification: How to Download and Play the Latest Mobile Battle Royale</h1>
- <p>If you are a fan of Call of Duty games, you might have heard about the upcoming mobile version of Call of Duty Warzone, the popular battle royale mode that has taken the gaming world by storm. Call of Duty Warzone Mobile is expected to be one of the best mobile battle royale games ever, featuring authentic COD gameplay, graphics, and cross-progression. But how can you download and play this game without verification? In this article, we will tell you everything you need to know about Call of Duty Warzone Mobile APK no verification, including what the game is about, how to download it, how to play it without verification, and some tips and tricks to help you win.</p>
- <h2>What is Call of Duty Warzone Mobile?</h2>
- <p>Call of Duty Warzone Mobile is a mobile adaptation of the wildly popular Call of Duty Warzone mode that was released in 2020 for PC and consoles. The game is developed by Activision in collaboration with Tencent's TiMi Studios, the same team behind Call of Duty Mobile. The game aims to bring an authentic Call of Duty experience to mobile screens, with first-class graphics, intuitive controls, and optimized performance.</p>
- <h2>call of duty warzone mobile apk no verification</h2><br /><p><b><b>Download Zip</b> &gt;&gt;&gt;&gt;&gt; <a href="https://jinyurl.com/2uNTia">https://jinyurl.com/2uNTia</a></b></p><br /><br />
- <h3>A mobile adaptation of the popular PC and console game</h3>
- <p>Call of Duty Warzone Mobile follows the same gameplay mechanics as its PC and console counterparts. The game is a battle royale mode where up to 120 players parachute into a large map and fight to be the last one standing. The map shrinks over time as a deadly gas circle closes in, forcing players to move and engage with each other. Players can loot weapons, armor, ammo, and other items from buildings, crates, or fallen enemies. Players can also complete contracts, which are optional missions that reward players with cash or other benefits. Cash can be used to buy items from buy stations or call in killstreaks.</p>
- <h3>Features authentic COD gameplay, graphics, and cross-progression</h3>
- <p>Call of Duty Warzone Mobile delivers authentic COD gameplay on mobile devices, with everything from movement, aiming, weapon handling, physics, animations, and sound optimized for mobile gamers. The game also boasts high-quality graphics that rival some PC and console games, and it supports cross-progression with Call of Duty Modern Warfare II and Call of Duty Warzone.</p>
- <h3>Supports up to 120 live players in a match on iconic maps like Verdansk</h3>
- <p>Call of Duty Warzone Mobile matches feature some of the highest real player-counts for mobile battle royale. You can play solo, duo, trio, or squad modes with up to 120 live players in a match. The game offers a variety of maps to choose from, but the most iconic one is Verdansk, the fan-favorite map from Call of Duty Warzone. Verdansk is a massive map that features dozens of points of interest, such as Dam, TV Station, Lumber, Farmland, Stadium, Downtown, Train Station, and Prison. Each location has its own loot, terrain, and challenges. You can also access the Gulag, a prison where you can fight for a second chance to redeploy if you die in the match.</p>
- <h2>How to Download Call of Duty Warzone Mobile APK?</h2>
- <p>Call of Duty Warzone Mobile is expected to launch worldwide in Fall 2023, but you can pre-register or try the early access version now. Here are the steps to download the game on your Android or iOS device.</p>
- <h3>The official release date is expected in Fall 2023</h3>
- <p>The official release date for Call of Duty Warzone Mobile has not been announced yet, but it is expected to be sometime in Fall 2023. The game will be free to play and will require an internet connection and a compatible device. The game will also support cross-progression with Call of Duty Modern Warfare II and Call of Duty Warzone, meaning you can sync your Battle Pass and friends list across platforms.</p>
- <h3>The game is available for pre-registration on Google Play and App Store</h3>
- <p>If you want to be among the first to play Call of Duty Warzone Mobile when it launches, you can pre-register for the game on Google Play or App Store. By pre-registering, you will also earn rewards if global milestones are hit, such as weapon skins, emblems, and even a new map. To pre-register, simply follow these steps:</p>
- <ol>
- <li>Go to [Call of Duty Warzone Mobile] on Google Play or App Store</li>
- <li>Tap on the Pre-Register or Pre-Order button</li>
- <li>Confirm your registration or order</li>
- <li>Wait for the game to be available for download</li>
- </ol>
- <h3>The game is also available for early access in some regions</h3>
- <p>If you can't wait for the official release date, you can try the early access version of Call of Duty Warzone Mobile in some regions. The early access version is a beta test that allows players to experience the game before it launches and provide feedback to the developers. The early access version may have limited features, bugs, and glitches, so keep that in mind. To download the early access version, follow these steps:</p>
- <ol>
- <li>Go to [Call of Duty Warzone Mobile] on Google Play</li>
- <li>Tap on the Install button</li>
- <li>Wait for the game to download and install</li>
- <li>Launch the game and enjoy</li>
- </ol>
- <h3>The minimum and recommended system requirements for Android and iOS devices</h3>
- <p>Before you download Call of Duty Warzone Mobile, make sure your device meets the minimum and recommended system requirements. Here are the specifications for Android and iOS devices:</p>
- | Device | Minimum | Recommended |
- | ------ | ------- | ----------- |
- | Android | Adreno 618 or better<br>6GB RAM or more<br>Android 8.0 or higher | Adreno 650 or better<br>8GB RAM or more<br>Android 10 or higher |
- | iOS | iPhone 8 or better<br>iOS 13 or higher | iPhone X or better<br>iOS 14 or higher |
- <h2>How to Play Call of Duty Warzone Mobile without Verification?</h2>
- <p>Call of Duty Warzone Mobile requires verification to play, meaning you need to log in with your Activision account or create one if you don't have one already. Verification is necessary to sync your progress across platforms and access social features like friends and chat channels. However, some players may want to play the game without verification for various reasons. In this section, we will discuss the risks and drawbacks of using unofficial APK files from third-party sources, the possible ways to bypass the verification process using VPNs or fake accounts, and the advantages and disadvantages of playing the game without verification.</p>
- <h3>The risks and drawbacks of using unofficial APK files from third-party sources</h3>
- <p>Some players may be tempted to download Call of Duty Warzone Mobile APK files from third-party sources that claim to offer the game without verification. However, this is a risky and unwise move, as these APK files may contain malware, viruses, or spyware that can harm your device or steal your personal information. Moreover, these APK files may not be updated or compatible with the latest version of the game, resulting in crashes, glitches, or errors. Furthermore, these APK files may violate the terms of service of Activision and Tencent, and you may face legal consequences or get banned from the game if you use them.</p>
- <h3>The possible ways to bypass the verification process using VPNs or fake accounts</h3>
- <p>Another way to play Call of Duty Warzone Mobile without verification is to use VPNs or fake accounts to bypass the verification process. VPNs are virtual private networks that allow you to change your IP address and location, making it seem like you are playing from a different region. Fake accounts are dummy accounts that you create with fake or temporary email addresses and passwords. By using VPNs or fake accounts, you may be able to access the game without logging in with your real Activision account. However, this method is not foolproof, as you may still encounter verification prompts or errors. Moreover, this method may also violate the terms of service of Activision and Tencent, and you may face legal consequences or get banned from the game if you use them.</p>
- <h3>The advantages and disadvantages of playing the game without verification</h3>
- <p>Playing Call of Duty Warzone Mobile without verification may have some advantages and disadvantages. Some of the possible advantages are:</p>
- <ul>
- <li>You can play the game anonymously and protect your privacy</li>
- <li>You can avoid spam or unwanted messages from other players</li>
- <li>You can switch between different regions or servers easily</li>
- </ul>
- <p>Some of the possible disadvantages are:</p>
- <ul>
- <li>You may miss out on some features or rewards that require verification</li>
- <li>You may lose your progress or data if you delete the game or change your device</li>
- <li>You may face technical issues or errors that require verification to fix</li>
- <li>You may risk getting banned or sued by Activision and Tencent</li>
- </ul>
- <h2>Tips and Tricks for Call of Duty Warzone Mobile</h2>
- <p>Call of Duty Warzone Mobile is a fun and challenging game that requires skill, strategy, and teamwork to win. If you want to improve your performance and increase your chances of survival, here are some tips and tricks that you can follow:</p>
- <h3>Choose the best controls, settings, and loadouts for your playstyle</h3>
- <p>One of the first things you should do before playing Call of Duty Warzone Mobile is to customize your controls, settings, and loadouts according to your preference and playstyle. You can choose between simple mode, advanced mode, or custom mode for your controls, depending on how comfortable you are with aiming and shooting. You can also adjust your sensitivity, graphics, sound, and other options in the settings menu. Finally, you can select your loadouts, which are preset combinations of weapons, perks, equipment, and killstreaks that you can use in the game. You can unlock more loadouts as you level up and earn more cash.</p>
- <h3>Communicate with your team, use your mini-map, and don't stand still</h3>
- <p>Call of Duty Warzone Mobile is a team-based game that requires coordination and communication with your teammates. You can use voice chat or text chat to communicate with your team members, share information, plan strategies, and request help. You can also use your mini-map to see where your teammates are, where the enemies are (if they fire their weapons), where the gas circle is moving, where the contracts are located, and where the buy stations are located. Moreover, you should never stand still in the game, as that makes you an easy target for snipers or enemies. Always keep moving, crouching, jumping, sliding, or using vehicles to avoid getting shot.</p>
- <h3>Use cover, aim down sights, and switch to your pistol when needed</h3>
- <p>When engaging in combat with enemies in Call of Duty Warzone Mobile, you should always use cover to protect yourself from incoming fire. Cover can be anything from walls, buildings, trees, rocks, vehicles, or crates. You can also use smoke grenades or flashbangs to create temporary cover or blind your enemies. When shooting at enemies, you should always aim down sights (ADS) to improve your accuracy and damage. You can also use different scopes or attachments to enhance your aiming. However, if you run out of ammo or need to reload, you should switch to your pistol instead of wasting time. Your pistol can be a lifesaver in close-range situations, as it has a faster fire rate and reload speed than most weapons.</p>
- <h3>Log in daily, join a clan, and participate in Clan Wars for rewards</h3>
- <p>Call of Duty Warzone Mobile offers a lot of rewards and incentives for players who log in daily, join a clan, and participate in Clan Wars. By logging in daily, you can earn free items such as cash, weapon skins, crates, or even a new map. By joining a clan, you can make friends with other players, chat with them, and play with them. You can also earn clan points by completing clan missions or playing clan matches. By participating in Clan Wars, you can compete with other clans for glory and prizes. Clan Wars are seasonal events that last for a few weeks, where clans are divided into groups and fight for territory on a map. The more territory you control, the more rewards you get.</p>
- <h2>Conclusion</h2>
- <p>Call of Duty Warzone Mobile is an exciting and immersive mobile battle royale game that brings the authentic COD experience to your fingertips. The game is expected to launch worldwide in Fall 2023, but you can pre-register or try the early access version now. The game requires verification to play, but there are some ways to avoid it at your own risk. The game offers a lot of fun and challenge, but you can improve your skills with some tips and tricks. If you are looking for a new mobile game to play, Call of Duty Warzone Mobile is definitely worth checking out.</p>
- <h2>FAQs</h2>
- <h3>Is Call of Duty Warzone Mobile free to play?</h3>
- <p>Yes, Call of Duty Warzone Mobile is free to play and does not require any purchase or subscription to download or play. However, the game may offer optional in-app purchases or ads that can enhance your gameplay or support the developers.</p>
- <h3>Is Call of Duty Warzone Mobile cross-platform?</h3>
- <p>Yes, Call of Duty Warzone Mobile supports cross-platform play with Call of Duty Modern Warfare II and Call of Duty Warzone on PC and consoles. This means you can play with or against players on different devices and platforms. You can also sync your progress and Battle Pass across platforms using your Activision account.</p>
- <h3>How do I update Call of Duty Warzone Mobile?</h3>
- <p>To update Call of Duty Warzone Mobile, you need to go to Google Play or App Store and check for updates. If there is an update available, you need to download and install it before you can play the game. You may also need to restart your device after updating the game.</p>
- <h3>How do I report a bug or a hacker in Call of Duty Warzone Mobile?</h3>
- <p>To report a bug or a hacker in Call of Duty Warzone Mobile, you need to go to the settings menu and tap on the feedback button. You can then choose the type of feedback you want to send, such as bug report, hacker report, suggestion, or praise. You can also attach screenshots or videos to support your feedback. You will then receive a confirmation message that your feedback has been sent.</p>
- <h3>How do I contact customer support for Call of Duty Warzone Mobile?</h3>
- <p>To contact customer support for Call of Duty Warzone Mobile, you need to go to the settings menu and tap on the help button. You can then choose the topic that relates to your issue, such as account, gameplay, purchase, or technical. You can then browse through the FAQs or contact the support team via email or chat.</p>

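The install walkthroughs in the deleted article above assume on-device taps. For completeness, the same sideload can be scripted from a computer over USB with adb (Android Debug Bridge). This is a minimal sketch, not part of the original page: it assumes adb is on your PATH, USB debugging is enabled on the phone, and the APK filename is a placeholder.

```python
# Sideload a locally downloaded APK with adb via subprocess.
# "warzone-mobile.apk" is a hypothetical filename, not a real download.
import subprocess

result = subprocess.run(
    ["adb", "install", "-r", "warzone-mobile.apk"],  # -r reinstalls, keeping app data
    capture_output=True, text=True, check=True,
)
print(result.stdout)  # adb prints "Success" when the install completes
```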
spaces/1phancelerku/anime-remove-background/Facebook APK 9.0 - The Best Way to Connect with Friends and Family.md DELETED
@@ -1,133 +0,0 @@
- <h1>Facebook APK 9.0: What You Need to Know</h1>
- <p>Facebook is one of the most popular social media platforms in the world, with over 2 billion monthly active users. It allows you to connect with your friends and family, discover new things, and communicate with ease. But did you know that there is a way to enjoy Facebook even more on your Android device? It's called Facebook APK 9.0, and it's a modified version of the official Facebook app that offers faster performance, access to beta features, and no need to update manually.</p>
- <h2>facebook apk 9.0</h2><br /><p><b><b>Download File</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNMzM">https://jinyurl.com/2uNMzM</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about Facebook APK 9.0, including how to download and install it on your Android device, what features it offers, what its pros and cons are, and whether you should give it a try or not.</p>
- <h2>Introduction</h2>
- <h3>What is Facebook APK 9.0 and why you should download it</h3>
- <p>Facebook APK 9.0 is an Android application package (APK) file that contains a modified version of the official Facebook app for Android devices. An APK file is a compressed file that contains all the code, resources, and assets needed to run an app on an Android device.</p>
- <p>Facebook APK 9.0 is not available on the Google Play Store, which means you have to download it from a third-party source (such as [APKCombo] or [Uptodown]). You can think of it as a beta version of the official app, which means you can enjoy some features and updates that are not yet available to the public. Some of these features include:</p>
- <ul>
- <li>A faster and smoother user interface that consumes less battery and data</li>
- <li>A dark mode option that reduces eye strain and saves battery life</li>
- <li>A privacy mode option that lets you hide your online status and read receipts</li>
- <li>A video downloader option that lets you save videos from Facebook to your device</li>
- <li>A customizer option that lets you change the theme, font, and icon of the app</li>
- </ul>
- <p>By downloading Facebook APK 9.0, you can enjoy a better Facebook experience on your Android device, without having to wait for the official app to update.</p>
- <h3>How to download and install Facebook APK 9.0 on your Android device</h3>
- <p>Downloading and installing Facebook APK 9.0 on your Android device is easy and simple, but you need to follow some steps carefully to avoid any problems. Here are the steps you need to follow:</p>
- <ol>
- <li>First, you need to uninstall the official Facebook app from your device, if you have it installed. You can do this by going to Settings > Apps > Facebook > Uninstall.</li>
- <li>Next, you need to enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources > Toggle On.</li>
- <li>Then, you need to download the Facebook APK 9.0 file from a trusted source (such as [APKCombo] or [Uptodown]). You can use your browser or a file manager app to do this.</li>
- <li>After downloading the file, you need to locate it on your device and tap on it to start the installation process. You may see a warning message asking you to confirm the installation. Tap on Install and wait for the process to finish.</li>
- <li>Finally, you need to launch the app and log in with your Facebook account. You may see a message asking you to allow some permissions for the app. Tap on Allow and enjoy Facebook APK 9.0 on your device.</li>
- </ol>
- <p>Note: If you encounter any issues or errors during the installation process, you may need to clear the cache and data of the app by going to Settings > Apps > Facebook APK 9.0 > Storage > Clear Cache and Clear Data.</p>
- <h2>Features of Facebook APK 9.0</h2>
- <h3>Connect with Friends and Family</h3>
- <p>One of the main features of Facebook APK 9.0 is that it allows you to connect with your friends and family in various ways. You can:</p>
- <ul>
- <li>Set status updates and use Facebook emoji to let people know what's on your mind or how you feel</li>
- <li>Share photos, videos, and memories with your friends or on your timeline, and edit them with filters, stickers, and effects</li>
- <li>Get notifications when friends like and comment on your posts, or when they tag you in photos or posts</li>
- </ul>
- <p>Facebook APK 9.0 also lets you see what your friends are up to by showing you their posts in your news feed. You can like, comment, or react to their posts, or share them with others. You can also create stories that disappear after 24 hours, or watch stories from your friends and pages you follow.</p>
- <h3>Discover New Things</h3>
- <p>Another feature of Facebook APK 9.0 is that it helps you discover new things that match your interests or curiosity. You can:</p>
- <ul>
- <li>Join groups and pages that relate to your hobbies, passions, or causes, and interact with other members who share your views or goals</li>
- <li>Follow celebrities, influencers, and news sources that you admire or want to learn from, and see their posts, videos, and live streams</li>
- <li>Watch live videos and stories from around the world, covering topics such as sports, entertainment, politics, or education</li>
- </ul>
- <p>Facebook APK 9.0 also gives you access to a variety of games, apps, and services that you can use for fun or convenience. You can play games with your friends or other people online, use apps that enhance your productivity or creativity, or use services that offer shopping, dating, or travel options.</p>
- <h3>Communicate with Ease</h3>
- <p>The last feature of Facebook APK 9.0 is that it enables you to communicate with ease with anyone on Facebook. You can:</p>
- <ul>
- <li>Send and receive messages with Facebook Messenger, which is integrated with the app. You can chat with one person or a group of people, and send photos, videos, voice messages, or documents</li>
- <li>Make voice and video calls with friends and family, either one-on-one or in groups, and use filters, effects, or masks to make them more fun</li>
- <li>Use stickers, GIFs, and emojis to express yourself better and add some humor or emotion to your conversations</li>
- </ul>
- <p>Facebook APK 9.0 also allows you to communicate with people who are not on Facebook, by using their phone numbers or email addresses. You can also sync your contacts with the app, so you can see who is on Facebook and who is not.</p>
- <h2>Pros and Cons of Facebook APK 9.0</h2>
- <h3>Pros</h3>
- <p>As you can see, Facebook APK 9.0 offers many benefits that make it a great alternative to the official Facebook app. Some of the pros of using Facebook APK 9.0 are:</p>
- <ul>
- <li>Faster and smoother performance than the official app, which means you can use Facebook without any lag or delay</li>
- <li>Access to beta features and updates before anyone else, which means you can enjoy the latest and greatest features of Facebook before they are released to the public</li>
- <li>No need to update the app manually every time, which means you can save time and hassle by not having to check for updates or download them</li>
- </ul>
- <h3>Cons</h3>
- <p>However, Facebook APK 9.0 also has some drawbacks that you should be aware of before downloading it. Some of the cons of using Facebook APK 9.0 are:</p>
- <ul>
- <li>May contain bugs and glitches that affect the user experience, which means you may encounter some errors or problems while using the app</li>
- <li>May not be compatible with some devices or Android versions, which means you may not be able to install or run the app on your device</li>
- <li>May pose security risks if downloaded from untrusted sources, which means you may expose your device or data to malware or hackers if you download the app from a shady website or link</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, Facebook APK 9.0 is a modified version of the official Facebook app that offers many advantages over the original app. It allows you to connect with your friends and family, discover new things, and communicate with ease on your Android device. It also gives you faster performance, access to beta features, and no need to update manually.</p>
- <p>However, Facebook APK 9.0 also has some disadvantages that you should consider before downloading it. It may contain bugs and glitches, may not be compatible with your device or Android version, and may pose security risks if downloaded from untrusted sources.</p>
- <p>Therefore, we recommend that you download Facebook APK 9.0 only from a trusted source (such as [APKCombo] or [Uptodown]), and only if you are willing to take the risk of using a beta version of the app. Otherwise, you may want to stick with the official Facebook app for a more stable and secure experience.</p>
- <h2>Frequently Asked Questions (FAQs)</h2>
- <h3>Q: What is the difference between Facebook APK 9.0 and Facebook Lite?</h3>
- <p>A: Facebook Lite is another version of the official Facebook app that is designed for low-end devices or slow internet connections. It has a smaller size, consumes less data, and works faster than the regular app. However, it also has fewer features and functions than the regular app. Facebook APK 9.0 is a modified version of the regular app that has more features and functions, but also requires more data and resources.</p>
- <h3>Q: Is Facebook APK 9.0 safe to use?</h3>
- <p>A: Facebook APK 9.0 is safe to use as long as you download it from a trusted source (such as [APKCombo] or [Uptodown]). However, since it is a beta version of the app, it may contain bugs and glitches that affect the user experience or security. Therefore, you should always be careful when using Facebook APK 9.0 and avoid sharing sensitive information or clicking on suspicious links.</p>
- <h3>Q: How do I update Facebook APK 9.0?</h3>
- <p>A: Unlike the official Facebook app, which updates automatically through the Google Play Store, Facebook APK 9.0 does not update automatically. You have to download the latest version of the app from a trusted source (such as [APKCombo] or [Uptodown]) and install it over the existing app.</p>
- <h3>Q: Can I use both Facebook APK 9.0 and the official Facebook app on my device?</h3>
- <p>A: No, you cannot use both Facebook APK 9.0 and the official Facebook app on your device at the same time. You have to uninstall one of them before installing the other. This is because they have the same package name and signature, which means they cannot coexist on the same device.</p>
- <h3>Q: What are some alternatives to Facebook APK 9.0?</h3>
- <p>A: If you are looking for some alternatives to Facebook APK 9.0, you may want to try some of these apps:</p>
- <ul>
- <li>[Friendly for Facebook]: This is a lightweight and customizable app that combines Facebook and Messenger in one place. You can also download videos, block ads, use multiple accounts, and more.</li>
- <li>[Swipe for Facebook]: This is a fast and smooth app that lets you access Facebook and Messenger with a swipe gesture. You can also customize the app's appearance, use night mode, download images, and more.</li>
- <li>[Folio for Facebook]: This is a secure and battery-saving app that wraps the Facebook website in a native app. You can also use multiple accounts, lock the app with a password, download media, and more.</li>
- </ul>

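The deleted article above repeatedly stresses fetching APKs only from trusted sources. One concrete check it never spells out is comparing the downloaded file's SHA-256 digest against one published by the source. A minimal sketch, not from the original page; the filename and expected digest are placeholders:

```python
# Verify a downloaded APK against a publisher-provided SHA-256 digest.
import hashlib

EXPECTED = "0" * 64  # placeholder: paste the digest published by the download source

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("facebook-9.0.apk")  # placeholder filename
print("OK" if digest == EXPECTED else f"MISMATCH: {digest}")
```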
spaces/4Taps/SadTalker/src/facerender/modules/discriminator.py DELETED
@@ -1,90 +0,0 @@
- from torch import nn
- import torch.nn.functional as F
- from facerender.modules.util import kp2gaussian
- import torch
-
-
- class DownBlock2d(nn.Module):
-     """
-     Simple block for processing video (encoder).
-     """
-
-     def __init__(self, in_features, out_features, norm=False, kernel_size=4, pool=False, sn=False):
-         super(DownBlock2d, self).__init__()
-         self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size)
-
-         if sn:
-             self.conv = nn.utils.spectral_norm(self.conv)
-
-         if norm:
-             self.norm = nn.InstanceNorm2d(out_features, affine=True)
-         else:
-             self.norm = None
-         self.pool = pool
-
-     def forward(self, x):
-         out = x
-         out = self.conv(out)
-         if self.norm:
-             out = self.norm(out)
-         out = F.leaky_relu(out, 0.2)
-         if self.pool:
-             out = F.avg_pool2d(out, (2, 2))
-         return out
-
-
- class Discriminator(nn.Module):
-     """
-     Discriminator similar to Pix2Pix
-     """
-
-     def __init__(self, num_channels=3, block_expansion=64, num_blocks=4, max_features=512,
-                  sn=False, **kwargs):
-         super(Discriminator, self).__init__()
-
-         down_blocks = []
-         for i in range(num_blocks):
-             down_blocks.append(
-                 DownBlock2d(num_channels if i == 0 else min(max_features, block_expansion * (2 ** i)),
-                             min(max_features, block_expansion * (2 ** (i + 1))),
-                             norm=(i != 0), kernel_size=4, pool=(i != num_blocks - 1), sn=sn))
-
-         self.down_blocks = nn.ModuleList(down_blocks)
-         self.conv = nn.Conv2d(self.down_blocks[-1].conv.out_channels, out_channels=1, kernel_size=1)
-         if sn:
-             self.conv = nn.utils.spectral_norm(self.conv)
-
-     def forward(self, x):
-         feature_maps = []
-         out = x
-
-         for down_block in self.down_blocks:
-             feature_maps.append(down_block(out))
-             out = feature_maps[-1]
-         prediction_map = self.conv(out)
-
-         return feature_maps, prediction_map
-
-
- class MultiScaleDiscriminator(nn.Module):
-     """
-     Multi-scale (scale) discriminator
-     """
-
-     def __init__(self, scales=(), **kwargs):
-         super(MultiScaleDiscriminator, self).__init__()
-         self.scales = scales
-         discs = {}
-         for scale in scales:
-             discs[str(scale).replace('.', '-')] = Discriminator(**kwargs)
-         self.discs = nn.ModuleDict(discs)
-
-     def forward(self, x):
-         out_dict = {}
-         for scale, disc in self.discs.items():
-             scale = str(scale).replace('-', '.')
-             key = 'prediction_' + scale
-             feature_maps, prediction_map = disc(x[key])
-             out_dict['feature_maps_' + scale] = feature_maps
-             out_dict['prediction_map_' + scale] = prediction_map
-         return out_dict

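For context on the module just removed: a minimal sketch of driving MultiScaleDiscriminator with dummy tensors. The import path and the scales/sizes are assumptions for illustration, not the Space's actual config, and note that the file imports kp2gaussian without using it. Input dict keys must follow the 'prediction_&lt;scale&gt;' convention the forward pass expects:

```python
import torch
# Hypothetical import path; in this Space the module lived under src/facerender/modules/.
from facerender.modules.discriminator import MultiScaleDiscriminator

# One Pix2Pix-style discriminator per scale; ModuleDict keys are derived from the scales.
disc = MultiScaleDiscriminator(scales=(1, 0.25), num_channels=3,
                               block_expansion=64, num_blocks=4,
                               max_features=512, sn=True)
x = {
    "prediction_1": torch.randn(2, 3, 256, 256),   # full-resolution images
    "prediction_0.25": torch.randn(2, 3, 64, 64),  # quarter-resolution copies
}
out = disc(x)
print(out["prediction_map_1"].shape)   # patch-level real/fake logits
print(len(out["feature_maps_0.25"]))   # per-block features, e.g. for feature matching
```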
spaces/801artistry/RVC801/infer/lib/train/mel_processing.py DELETED
@@ -1,132 +0,0 @@
- import torch
- import torch.utils.data
- from librosa.filters import mel as librosa_mel_fn
- import logging
-
- logger = logging.getLogger(__name__)
-
- MAX_WAV_VALUE = 32768.0
-
-
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-     """
-     PARAMS
-     ------
-     C: compression factor
-     """
-     return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
- def dynamic_range_decompression_torch(x, C=1):
-     """
-     PARAMS
-     ------
-     C: compression factor used to compress
-     """
-     return torch.exp(x) / C
-
-
- def spectral_normalize_torch(magnitudes):
-     return dynamic_range_compression_torch(magnitudes)
-
-
- def spectral_de_normalize_torch(magnitudes):
-     return dynamic_range_decompression_torch(magnitudes)
-
-
- # Reusable banks
- mel_basis = {}
- hann_window = {}
-
-
- def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-     """Convert waveform into Linear-frequency Linear-amplitude spectrogram.
-
-     Args:
-         y :: (B, T) - Audio waveforms
-         n_fft
-         sampling_rate
-         hop_size
-         win_size
-         center
-     Returns:
-         :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
-     """
-     # Validation
-     if torch.min(y) < -1.07:
-         logger.debug("min value is %s", str(torch.min(y)))
-     if torch.max(y) > 1.07:
-         logger.debug("max value is %s", str(torch.max(y)))
-
-     # Window - Cache if needed
-     global hann_window
-     dtype_device = str(y.dtype) + "_" + str(y.device)
-     wnsize_dtype_device = str(win_size) + "_" + dtype_device
-     if wnsize_dtype_device not in hann_window:
-         hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
-             dtype=y.dtype, device=y.device
-         )
-
-     # Padding
-     y = torch.nn.functional.pad(
-         y.unsqueeze(1),
-         (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
-         mode="reflect",
-     )
-     y = y.squeeze(1)
-
-     # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2)
-     spec = torch.stft(
-         y,
-         n_fft,
-         hop_length=hop_size,
-         win_length=win_size,
-         window=hann_window[wnsize_dtype_device],
-         center=center,
-         pad_mode="reflect",
-         normalized=False,
-         onesided=True,
-         return_complex=False,
-     )
-
-     # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
-     spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-     return spec
-
-
- def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-     # MelBasis - Cache if needed
-     global mel_basis
-     dtype_device = str(spec.dtype) + "_" + str(spec.device)
-     fmax_dtype_device = str(fmax) + "_" + dtype_device
-     if fmax_dtype_device not in mel_basis:
-         mel = librosa_mel_fn(
-             sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
-         )
-         mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
-             dtype=spec.dtype, device=spec.device
-         )
-
-     # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame)
-     melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-     melspec = spectral_normalize_torch(melspec)
-     return melspec
-
-
- def mel_spectrogram_torch(
-     y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
- ):
-     """Convert waveform into Mel-frequency Log-amplitude spectrogram.
-
-     Args:
-         y :: (B, T) - Waveforms
-     Returns:
-         melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
-     """
-     # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
-     spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
-
-     # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
-     melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
-
-     return melspec

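For context, a sketch of calling the deleted module's top-level entry point. The import path mirrors the file's location and the STFT/mel parameters are illustrative, not the repo's training config; note the module caches mel filter banks and Hann windows keyed by dtype and device, so repeated calls reuse them:

```python
import torch
# Import path assumed from the file's location in the Space.
from infer.lib.train.mel_processing import mel_spectrogram_torch

y = torch.randn(1, 32000).clamp(-1, 1)  # 2 s of audio at 16 kHz, roughly in [-1, 1]
mel = mel_spectrogram_torch(
    y, n_fft=1024, num_mels=80, sampling_rate=16000,
    hop_size=160, win_size=1024, fmin=0, fmax=8000,
)
print(mel.shape)  # (1, 80, n_frames) - log-compressed mel magnitudes
```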
spaces/A00001/bingothoo/src/components/chat-list.tsx DELETED
@@ -1,28 +0,0 @@
- import React from 'react'
-
- import { Separator } from '@/components/ui/separator'
- import { ChatMessage } from '@/components/chat-message'
- import { ChatMessageModel } from '@/lib/bots/bing/types'
-
- export interface ChatList {
-   messages: ChatMessageModel[]
- }
-
- export function ChatList({ messages }: ChatList) {
-   if (!messages.length) {
-     return null
-   }
-
-   return (
-     <div className="chat-container relative flex flex-col">
-       {messages.map((message, index) => (
-         <React.Fragment key={index}>
-           <ChatMessage message={message} />
-           {index < messages.length - 1 && (
-             <Separator className="my-2" />
-           )}
-         </React.Fragment>
-       ))}
-     </div>
-   )
- }

spaces/AI4PD/hexviz/hexviz/view.py DELETED
@@ -1,154 +0,0 @@
- from io import StringIO
-
- import streamlit as st
- from Bio.PDB import PDBParser
-
- from hexviz.attention import get_pdb_file, get_pdb_from_seq
-
- menu_items = {
-     "Get Help": "https://huggingface.co/spaces/aksell/hexviz/discussions/new",
-     "Report a bug": "https://huggingface.co/spaces/aksell/hexviz/discussions/new",
-     "About": "Created by [Aksel Lenes](https://github.com/aksell/) from Noelia Ferruz's group at the Institute of Molecular Biology of Barcelona. Read more at https://www.aiproteindesign.com/",
- }
-
-
- def get_selecte_model_index(models):
-     selected_model_name = st.session_state.get("selected_model_name", None)
-     if selected_model_name is None:
-         return 0
-     else:
-         return next(
-             (i for i, model in enumerate(models) if model.name.value == selected_model_name),
-             None,
-         )
-
-
- def clear_model_state():
-     if "plot_heads" in st.session_state:
-         del st.session_state.plot_heads
-     if "plot_layers" in st.session_state:
-         del st.session_state.plot_layers
-     if "selected_head" in st.session_state:
-         del st.session_state.selected_head
-     if "selected_layer" in st.session_state:
-         del st.session_state.selected_layer
-     if "plot_layers" in st.session_state:
-         del st.session_state.plot_layers
-     if "plot_heads" in st.session_state:
-         del st.session_state.plot_heads
-     if "label_tokens" in st.session_state:
-         del st.session_state.label_tokens
-
-
- def select_model(models):
-     if "selected_model_name" not in st.session_state:
-         st.session_state.selected_model_name = models[0].name.value
-     selected_model_name = st.selectbox(
-         "Select model",
-         [model.name.value for model in models],
-         key="selected_model_name",
-         on_change=clear_model_state,
-     )
-     select_model = next(
-         (model for model in models if model.name.value == selected_model_name), None
-     )
-     return select_model
-
-
- def clear_pdb_state():
-     if "selected_chains" in st.session_state:
-         del st.session_state.selected_chains
-     if "selected_chain" in st.session_state:
-         del st.session_state.selected_chain
-     if "sequence_slice" in st.session_state:
-         del st.session_state.sequence_slice
-     if "uploaded_pdb_str" in st.session_state:
-         del st.session_state.uploaded_pdb_str
-
-
- def select_pdb():
-     if "pdb_id" not in st.session_state:
-         st.session_state.pdb_id = "2FZ5"
-     pdb_id = st.text_input(label="1.PDB ID", key="pdb_id", on_change=clear_pdb_state)
-     return pdb_id
-
-
- def select_protein(pdb_code, uploaded_file, input_sequence):
-     # We get the pdb from 1 of 3 places:
-     # 1. Cached pdb from session storage
-     # 2. PDB file from uploaded file
-     # 3. PDB file fetched based on the pdb_code input
-     parser = PDBParser()
-     if uploaded_file is not None:
-         pdb_str = uploaded_file.read().decode("utf-8")
-         st.session_state["uploaded_pdb_str"] = pdb_str
-         source = f"uploaded pdb file {uploaded_file.name}"
-         structure = parser.get_structure("Userfile", StringIO(pdb_str))
-     elif input_sequence:
-         pdb_str = get_pdb_from_seq(str(input_sequence))
-         if not pdb_str:
-             st.error("ESMfold error, unable to fold sequence")
-             return None, None, None
-         else:
-             structure = parser.get_structure("ESMFold", StringIO(pdb_str))
-             if "selected_chains" in st.session_state:
-                 del st.session_state.selected_chains
-         source = "Input sequence + ESM-fold"
-     elif "uploaded_pdb_str" in st.session_state:
-         pdb_str = st.session_state.uploaded_pdb_str
-         source = "Uploaded file stored in cache"
-         structure = parser.get_structure("userfile", StringIO(pdb_str))
-     else:
-         file = get_pdb_file(pdb_code)
-         pdb_str = file.read()
-         source = f"PDB ID: {pdb_code}"
-         structure = parser.get_structure(pdb_code, StringIO(pdb_str))
-
-     return pdb_str, structure, source
-
-
- def select_heads_and_layers(sidebar, model):
-     sidebar.markdown(
-         """
-         Select Heads and Layers
-         ---
-         """
-     )
-     if "plot_heads" not in st.session_state:
-         st.session_state.plot_heads = (1, model.heads // 2)
-     head_range = sidebar.slider(
-         "Heads to plot", min_value=1, max_value=model.heads, key="plot_heads", step=1
-     )
-     if "plot_layers" not in st.session_state:
-         st.session_state.plot_layers = (1, model.layers // 2)
-     layer_range = sidebar.slider(
-         "Layers to plot", min_value=1, max_value=model.layers, key="plot_layers", step=1
-     )
-
-     if "plot_step_size" not in st.session_state:
-         st.session_state.plot_step_size = 1
-     step_size = sidebar.number_input(
-         "Optional step size to skip heads and layers",
-         key="plot_step_size",
-         min_value=1,
-         max_value=model.layers,
-     )
-     layer_sequence = list(range(layer_range[0] - 1, layer_range[1], step_size))
-     head_sequence = list(range(head_range[0] - 1, head_range[1], step_size))
-
-     return layer_sequence, head_sequence
-
-
- def select_sequence_slice(sequence_length):
-     st.sidebar.markdown(
-         """
-         Sequence segment to plot
-         ---
-         """
-     )
-     if "sequence_slice" not in st.session_state:
-         st.session_state.sequence_slice = (1, min(50, sequence_length))
-     slice = st.sidebar.slider(
-         "Sequence", key="sequence_slice", min_value=1, max_value=sequence_length, step=1
-     )
-     return slice

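For context, a sketch of how these Streamlit helpers compose into a page. The real `models` list comes from elsewhere in hexviz; here a SimpleNamespace stands in for any object exposing `.name.value`, `.heads`, and `.layers`, which is all the helpers assume:

```python
# Minimal page sketch wiring the deleted view helpers together.
from types import SimpleNamespace

import streamlit as st
from hexviz.view import (select_model, select_pdb,
                         select_heads_and_layers, select_sequence_slice)

# Stand-in for hexviz's model registry entries (hypothetical model name).
models = [SimpleNamespace(name=SimpleNamespace(value="TAPE-BERT"), heads=12, layers=12)]

model = select_model(models)                 # selectbox bound to session state
pdb_id = select_pdb()                        # text input, defaults to "2FZ5"
layers, heads = select_heads_and_layers(st.sidebar, model)  # returns index lists
start, end = select_sequence_slice(sequence_length=300)     # range slider tuple
```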
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/fvae.py DELETED
@@ -1,202 +0,0 @@
- import numpy as np
- import torch
- import torch.distributions as dist
- from torch import nn
-
- from modules.commons.conv import ConditionalConvBlocks
- from modules.commons.normalizing_flow.res_flow import ResFlow
- from modules.commons.wavenet import WN
-
-
- class FVAEEncoder(nn.Module):
-     def __init__(self, c_in, hidden_size, c_latent, kernel_size,
-                  n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
-         super().__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         if np.prod(strides) == 1:
-             self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1)
-         else:
-             self.pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
-                 if i == 0 else
-                 nn.Conv1d(hidden_size, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         if nn_type == 'wn':
-             self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
-         elif nn_type == 'conv':
-             self.nn = ConditionalConvBlocks(
-                 hidden_size, c_cond, hidden_size, None, kernel_size,
-                 layers_in_block=2, is_BTC=False, num_layers=n_layers)
-
-         self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1)
-         self.latent_channels = c_latent
-
-     def forward(self, x, nonpadding, cond):
-         x = self.pre_net(x)
-         nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]]
-         x = x * nonpadding
-         x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
-         x = self.out_proj(x)
-         m, logs = torch.split(x, self.latent_channels, dim=1)
-         z = (m + torch.randn_like(m) * torch.exp(logs))
-         return z, m, logs, nonpadding
-
-
- class FVAEDecoder(nn.Module):
-     def __init__(self, c_latent, hidden_size, out_channels, kernel_size,
-                  n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
-         super().__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.pre_net = nn.Sequential(*[
-             nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s)
-             if i == 0 else
-             nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s)
-             for i, s in enumerate(strides)
-         ])
-         if nn_type == 'wn':
-             self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
-         elif nn_type == 'conv':
-             self.nn = ConditionalConvBlocks(
-                 hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size,
-                 layers_in_block=2, is_BTC=False)
-         self.out_proj = nn.Conv1d(hidden_size, out_channels, 1)
-
-     def forward(self, x, nonpadding, cond):
-         x = self.pre_net(x)
-         x = x * nonpadding
-         x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
-         x = self.out_proj(x)
-         return x
-
-
- class FVAE(nn.Module):
-     def __init__(self,
-                  c_in_out, hidden_size, c_latent,
-                  kernel_size, enc_n_layers, dec_n_layers, c_cond, strides,
-                  use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None,
-                  encoder_type='wn', decoder_type='wn'):
-         super(FVAE, self).__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.latent_size = c_latent
-         self.use_prior_flow = use_prior_flow
-         if np.prod(strides) == 1:
-             self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1)
-         else:
-             self.g_pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size,
-                                    enc_n_layers, c_cond, strides=strides, nn_type=encoder_type)
-         if use_prior_flow:
-             self.prior_flow = ResFlow(
-                 c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond)
-         self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size,
-                                    dec_n_layers, c_cond, strides=strides, nn_type=decoder_type)
-         self.prior_dist = dist.Normal(0, 1)
-
-     def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, **kwargs):
-         """
-
-         :param x: [B, C_in_out, T]
-         :param nonpadding: [B, 1, T]
-         :param cond: [B, C_g, T]
-         :return:
-         """
-         if nonpadding is None:
-             nonpadding = 1
-         cond_sqz = self.g_pre_net(cond)
-         if not infer:
-             z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz)
-             q_dist = dist.Normal(m_q, logs_q.exp())
-             if self.use_prior_flow:
-                 logqx = q_dist.log_prob(z_q)
-                 z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz)
-                 logpx = self.prior_dist.log_prob(z_p)
-                 loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1]
-             else:
-                 loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist)
-                 loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1]
-                 z_p = None
-             return z_q, loss_kl, z_p, m_q, logs_q
-         else:
-             latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]]
-             z_p = torch.randn(latent_shape).to(cond.device) * noise_scale
-             if self.use_prior_flow:
-                 z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True)
-             return z_p
-
-
- class SyntaFVAE(nn.Module):
-     def __init__(self,
-                  c_in_out, hidden_size, c_latent,
-                  kernel_size, enc_n_layers, dec_n_layers, c_cond, strides,
-                  use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None,
-                  encoder_type='wn', decoder_type='wn'):
-         super(SyntaFVAE, self).__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.latent_size = c_latent
-         self.use_prior_flow = use_prior_flow
-         if np.prod(strides) == 1:
-             self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1)
-         else:
-             self.g_pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size,
-                                    enc_n_layers, c_cond, strides=strides, nn_type=encoder_type)
-         if use_prior_flow:
-             self.prior_flow = ResFlow(
-                 c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond)
-         self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size,
-                                    dec_n_layers, c_cond, strides=strides, nn_type=decoder_type)
-         self.prior_dist = dist.Normal(0, 1)
-         self.graph_encoder = GraphAuxEnc(in_dim=hidden_size, hid_dim=hidden_size, out_dim=hidden_size)
-
-     def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0,
-                 mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None):
-         """
-
-         :param x: target mel, [B, C_in_out, T]
-         :param nonpadding: [B, 1, T]
-         :param cond: phoneme encoding, [B, C_g, T]
-         :return:
-         """
-         word_len = ph2word.max(dim=1)[0]
-         ph_encoding_for_graph = cond.detach() + 0.1 * (cond - cond.detach())  # only 0.1x grad can pass through
-         _, ph_out_word_encoding_for_graph = GraphAuxEnc.ph_encoding_to_word_encoding(ph_encoding_for_graph.transpose(1, 2), mel2word, word_len)
-         t_m = mel2word.shape[-1]
-         g_graph = self.graph_encoder.word_forward(graph_lst=graph_lst, word_encoding=ph_out_word_encoding_for_graph, etypes_lst=etypes_lst)
-         g_graph = g_graph.transpose(1, 2)
-         g_graph = GraphAuxEnc._postprocess_word2ph(g_graph, mel2word, t_m)
-         g_graph = g_graph.transpose(1, 2)
-         cond = cond + g_graph * 1.
-
-         if nonpadding is None:
-             nonpadding = 1
-         cond_sqz = self.g_pre_net(cond)
-         if not infer:
-             z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz)
-             q_dist = dist.Normal(m_q, logs_q.exp())
-             if self.use_prior_flow:
-                 logqx = q_dist.log_prob(z_q)
-                 z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz)
-                 logpx = self.prior_dist.log_prob(z_p)
-                 loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1]
-             else:
-                 loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist)
-                 loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1]
-                 z_p = None
-             return z_q, loss_kl, z_p, m_q, logs_q
-         else:
-             latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]]
-             z_p = torch.randn(latent_shape).to(cond.device) * noise_scale
-             if self.use_prior_flow:
-                 z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True)
-             return z_p

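For context, a sketch of exercising the plain FVAE from the deleted file. (SyntaFVAE also references GraphAuxEnc, which this file never imports, so the sketch avoids it.) All hyperparameters below are illustrative, not the repo's config; the time axis must be divisible by the stride, and decoding happens outside the module, as in PortaSpeech:

```python
import torch
# Import path assumed from the file's location in the Space.
from modules.portaspeech.fvae import FVAE

fvae = FVAE(c_in_out=80, hidden_size=192, c_latent=16, kernel_size=5,
            enc_n_layers=8, dec_n_layers=4, c_cond=192, strides=[4],
            use_prior_flow=True, flow_hidden=64, flow_kernel_size=3,
            flow_n_steps=4)

mel = torch.randn(2, 80, 128)        # [B, C_in_out, T]; T divisible by the stride
nonpadding = torch.ones(2, 1, 128)   # [B, 1, T] mask
cond = torch.randn(2, 192, 128)      # [B, c_cond, T] conditioning

# Training path: posterior sample plus KL against the flow-transformed prior.
z_q, loss_kl, z_p, m_q, logs_q = fvae(mel, nonpadding, cond, infer=False)

# Inference path: sample the prior at the squeezed rate, then decode.
z = fvae(cond=cond, nonpadding=nonpadding, infer=True, noise_scale=0.8)
mel_hat = fvae.decoder(z, nonpadding=nonpadding, cond=cond)  # [2, 80, 128]
```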
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py DELETED
@@ -1,274 +0,0 @@
- import math
- import torch
- from torch import nn
- from torch.nn import Linear
- from utils.hparams import hparams
- from modules.commons.conv import ConvBlocks, ConditionalConvBlocks
- from modules.commons.common_layers import Embedding
- from modules.commons.rel_transformer import RelTransformerEncoder
- from modules.commons.transformer import MultiheadAttention, FFTBlocks
- from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word
- from modules.tts.fastspeech import FS_DECODERS, FastSpeech
- from modules.portaspeech.fvae import SyntaFVAE, FVAE
- from utils.nn.seq_utils import group_hidden_by_segs
- from modules.fastspeech.tts_modules import SyntaDurationPredictor
-
-
- class SinusoidalPosEmb(nn.Module):
-     def __init__(self, dim):
-         super().__init__()
-         self.dim = dim
-     def forward(self, x):
-         """
-
-         :param x: [B, T]
-         :return: [B, T, H]
-         """
-         device = x.device
-         half_dim = self.dim // 2
-         emb = math.log(10000) / (half_dim - 1)
-         emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
-         emb = x[:, :, None] * emb[None, :]
-         emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
-         return emb
-
-
- class SyntaSpeech(FastSpeech):
-     def __init__(self, ph_dict_size, word_dict_size, out_dims=None):
-         super().__init__(ph_dict_size, out_dims)
-         # build linguistic encoder
-         if hparams['num_spk'] > 1:
-             self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size)
-         if hparams['use_word_encoder']:
-             self.word_encoder = RelTransformerEncoder(
-                 word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2,
-                 hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
-         if hparams['dur_level'] == 'word':
-             if hparams['word_encoder_type'] == 'rel_fft':
-                 self.ph2word_encoder = RelTransformerEncoder(
-                     0, self.hidden_size, self.hidden_size, self.hidden_size, 2,
-                     hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
-             if hparams['word_encoder_type'] == 'fft':
-                 self.ph2word_encoder = FFTBlocks(
-                     self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads'])
-             self.sin_pos = SinusoidalPosEmb(self.hidden_size)
-             self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
-             self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
-             self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
-             self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False)
-             self.attn.enable_torch_version = False
-             if hparams['text_encoder_postnet']:
-                 self.text_encoder_postnet = ConvBlocks(
-                     self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2)
-         else:
-             self.sin_pos = SinusoidalPosEmb(self.hidden_size)
-
-         predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
-         self.dur_predictor = SyntaDurationPredictor(
-             self.hidden_size,
-             n_chans=predictor_hidden,
-             n_layers=hparams['dur_predictor_layers'],
-             dropout_rate=hparams['predictor_dropout'],
-             kernel_size=hparams['dur_predictor_kernel'])
-         # build VAE decoder
-         if hparams['use_fvae']:
-             del self.decoder
-             del self.mel_out
-             if hparams.get("use_gae_in_prior", True):
-                 self.fvae = SyntaFVAE(
-                     c_in_out=self.out_dims,
-                     hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
-                     kernel_size=hparams['fvae_kernel_size'],
-                     enc_n_layers=hparams['fvae_enc_n_layers'],
-                     dec_n_layers=hparams['fvae_dec_n_layers'],
-                     c_cond=self.hidden_size,
-                     use_prior_flow=hparams['use_prior_flow'],
-                     flow_hidden=hparams['prior_flow_hidden'],
-                     flow_kernel_size=hparams['prior_flow_kernel_size'],
-                     flow_n_steps=hparams['prior_flow_n_blocks'],
-                     strides=[hparams['fvae_strides']],
-                     encoder_type=hparams['fvae_encoder_type'],
-                     decoder_type=hparams['fvae_decoder_type'],
-                 )
-             else:
-                 self.fvae = FVAE(
-                     c_in_out=self.out_dims,
-                     hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
-                     kernel_size=hparams['fvae_kernel_size'],
-                     enc_n_layers=hparams['fvae_enc_n_layers'],
-                     dec_n_layers=hparams['fvae_dec_n_layers'],
-                     c_cond=self.hidden_size,
-                     use_prior_flow=hparams['use_prior_flow'],
-                     flow_hidden=hparams['prior_flow_hidden'],
-                     flow_kernel_size=hparams['prior_flow_kernel_size'],
-                     flow_n_steps=hparams['prior_flow_n_blocks'],
-                     strides=[hparams['fvae_strides']],
-                     encoder_type=hparams['fvae_encoder_type'],
-                     decoder_type=hparams['fvae_decoder_type'],
-                 )
-         else:
-             self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
-             self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
-         if hparams['use_pitch_embed']:
-             self.pitch_embed = Embedding(300, self.hidden_size, 0)
-         if hparams['add_word_pos']:
-             self.word_pos_proj = Linear(self.hidden_size, self.hidden_size)
-
-     def build_embedding(self, dictionary, embed_dim):
-         num_embeddings = len(dictionary)
-         emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
-         return emb
-
-     def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
-                 spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
-                 global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs):
-
-         if hparams['use_spk_embed']:
-             spk_embed = spk_embed
-         elif hparams['use_spk_id']:
-             spk_embed = self.spk_embed_proj(spk_id)[:, None, :]
-         else:
-             spk_embed = 0
-
-         ret = {}
-         style_embed = self.forward_style_embed(spk_embed, spk_id)  # speaker embedding, [B, 1, C]
-         x, tgt_nonpadding = self.run_text_encoder(
-             txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs)
-         x = x + style_embed  # may be necessary for multi-speaker synthesis
-         x = x * tgt_nonpadding
-         ret['nonpadding'] = tgt_nonpadding
-         if hparams['use_pitch_embed']:
-             x = x + self.pitch_embed(pitch)
-         ret['decoder_inp'] = x
-         if infer and (mel2ph is None or mel2word is None):
-             mel2word = ret['mel2word']
-         ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step,
-                                                                 mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
-         return ret
-
-     def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs):
-         word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1  # [B, T_mel, T_word]
-         src_nonpadding = (txt_tokens > 0).float()[:, :, None]
-         use_bert = hparams.get("use_bert") is True
-         if use_bert:
-             ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word,
-                                           graph_lst=graph_lst, etypes_lst=etypes_lst,
-                                           cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed
-         else:
-             ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed
-         if hparams['use_word_encoder']:
-             word_encoder_out = self.word_encoder(word_tokens) + style_embed
-             ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word)
-
-         dur_input = ph_encoder_out * src_nonpadding
-         if hparams['dur_level'] == 'word':
-             word_encoder_out = 0
-             h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0]
-             word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word)
-             if hparams['use_word_encoder']:
-                 word_encoder_out = word_encoder_out + self.word_encoder(word_tokens)
-             mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst)
-             mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple'])
-             ret['mel2word'] = mel2word
-             tgt_nonpadding = (mel2word > 0).float()[:, :, None]
-             enc_pos = self.get_pos_embed(word2word, ph2word)  # [B, T_ph, H]
-             dec_pos = self.get_pos_embed(word2word, mel2word)  # [B, T_mel, H]
-             dec_word_mask = build_word_mask(mel2word, ph2word)  # [B, T_mel, T_ph]
-             x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask)
-             if hparams['add_word_pos']:
-                 x = x + self.word_pos_proj(dec_pos)
-             ret['attn'] = weight
-         else:
-             mel2ph = self.forward_dur(dur_input, mel2ph, ret)
-             mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple'])
-             mel2word = mel2ph_to_mel2word(mel2ph, ph2word)
-             x = expand_states(ph_encoder_out, mel2ph)
-             if hparams['add_word_pos']:
-                 dec_pos = self.get_pos_embed(word2word, mel2word)  # [B, T_mel, H]
-                 x = x + self.word_pos_proj(dec_pos)
-             tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
-         if hparams['use_word_encoder']:
-             x = x + expand_states(word_encoder_out, mel2word)
-         return x, tgt_nonpadding
-
-     def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask):
-         ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1))
-         word_enc_out_expend = expand_states(word_encoder_out, mel2word)
-         word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1)
-         if hparams['text_encoder_postnet']:
-             word_enc_out_expend = self.dec_res_proj(word_enc_out_expend)
-             word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend)
-             dec_q = x_res = word_enc_out_expend
-         else:
-             dec_q = self.dec_query_proj(word_enc_out_expend)
-             x_res = self.dec_res_proj(word_enc_out_expend)
-         ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1)
-         x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9)
-         x = x.transpose(0, 1)
-         x = x + x_res
-         return x, weight
-
-     def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0,
-                     mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None):
-         if not hparams['use_fvae']:
-             x = self.decoder(x)
-             x = self.mel_out(x)
-             ret['kl'] = 0
-             return x * tgt_nonpadding
-         else:
-             # x is the phoneme encoding
-             x = x.transpose(1, 2)  # [B, H, T]
-             tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2)  # [B, H, T]
-             if infer:
-                 z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
-             else:
-                 tgt_mels = tgt_mels.transpose(1, 2)  # [B, 80, T]
-                 z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae(
-                     tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
-                 if global_step < hparams['posterior_start_steps']:
-                     z = torch.randn_like(z)
-             x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2)
-             ret['pre_mel_out'] = x_recon
-             return x_recon
-
-     def forward_dur(self, dur_input, mel2word, ret, **kwargs):
-         """
-
-         :param dur_input: [B, T_txt, H]
-         :param mel2ph: [B, T_mel]
-         :param txt_tokens: [B, T_txt]
-         :param ret:
-         :return:
-         """
-         word_len = kwargs['word_len']
-         ph2word = kwargs['ph2word']
-         graph_lst = kwargs['graph_lst']
-         etypes_lst = kwargs['etypes_lst']
-         src_padding = dur_input.data.abs().sum(-1) == 0
-         dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
-         dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst)
-
-         B, T_ph = ph2word.shape
-         dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur)
-         dur = dur[:, 1:]
-         ret['dur'] = dur
-         if mel2word is None:
-             mel2word = self.length_regulator(dur).detach()
-         return mel2word
-
-     def get_pos_embed(self, word2word, x2word):
-         x_pos = build_word_mask(word2word, x2word).float()  # [B, T_word, T_ph]
-         x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1)
-         x_pos = self.sin_pos(x_pos.float())  # [B, T_ph, H]
-         return x_pos
-
-     def store_inverse_all(self):
-         def remove_weight_norm(m):
-             try:
-                 if hasattr(m, 'store_inverse'):
-                     m.store_inverse()
-                 nn.utils.remove_weight_norm(m)
-             except ValueError:  # this module didn't have weight norm
-                 return
-
-         self.apply(remove_weight_norm)

spaces/AIWaves/Software_Company/src/agents/Component/ExtraComponent.py DELETED
@@ -1,128 +0,0 @@
- from .ToolComponent import ToolComponent
- import json
- from utils import flatten_dict, get_embedding, matching_category, search_with_api, limit_keys, limit_values
- import os
-
-
- class CategoryRequirementsComponent(ToolComponent):
-     def __init__(self, information_path):
-         super().__init__()
-         self.information_dataset = []
-         self.leaf_name = []
-         for toy_path in information_path:
-             with open(toy_path, encoding="utf-8") as json_file:
-                 data = json.load(json_file)
-                 for d in data:
-                     if "/" in d["cat_leaf_name"]:
-                         leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]]
-                     else:
-                         leaf_names = [d["cat_leaf_name"]]
-                     for name in leaf_names:
-                         self.leaf_name.append(name)
-                         new_d = d.copy()
-                         new_d["cat_leaf_name"] = name
-                         new_d["information"] = flatten_dict(new_d["information"])
-                         self.information_dataset.append(new_d)
-
-         self.target_embbeding = get_embedding(
-             self.leaf_name
-         )
-
-     def search_information(self, category, information_dataset):
-         knowledge = {}
-         for d in information_dataset:
-             if category == d["cat_leaf_name"]:
-                 knowledge = d["information"]
-                 knowledge = {
-                     key: value
-                     for key, value in knowledge.items()
-                     if (value and key != "相关分类")
-                 }
-                 break
-         return knowledge
-
-     def func(self, agent):
-         prompt = ""
-         messages = agent.long_term_memory
-         outputdict = {}
-         functions = [
-             {
-                 "name": "search_information",
-                 "description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品",
-                 "parameters": {
-                     "type": "object",
-                     "properties": {
-                         "category": {
-                             "type": "string",
-                             "description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个",
-                         },
-                         "requirements": {
-                             "type": "string",
-                             "description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔",
-                         },
-                     },
-                     "required": ["category", "requirements"],
-                 },
-             }
-         ]
-
-         response = agent.LLM.get_response(
-             messages,
-             None,
-             None,
-             functions=functions,
-             stream=False,
-             function_call={"name": "search_information"},
-         )
-         response_message = json.loads(response["function_call"]["arguments"])
-         category = (
-             response_message["category"] if response_message["category"] else None
-         )
-         requirements = (
-             response_message["requirements"]
-             if response_message["requirements"]
-             else category
-         )
-         if not (category or requirements):
-             return {}
-
-         topk_result = matching_category(
-             category, self.leaf_name, None, self.target_embbeding, top_k=3
-         )
-
-         top1_score = topk_result[1][0]
-         request_items, top_category = search_with_api(requirements, category)
-
-
-         MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"]
-                                 ) if "MIN_CATEGORY_SIM" in os.environ else 0.7
-
-         if top1_score > MIN_CATEGORY_SIM:
-             agent.environment.shared_memory["category"] = topk_result[0][0]
-             category = topk_result[0][0]
-             information = self.search_information(
-                 topk_result[0][0], self.information_dataset
-             )
-             information = limit_keys(information, 3)
-             information = limit_values(information, 2)
-             prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。"""
-             if category in top_category:
-                 top_category.remove(category)
-
-             recommend = "\n经过搜索后,推荐商品如下:\n"
-             prompt += "筛选出的商品如下:\n"
-
-             for i, request_item in enumerate(request_items):
-
-                 itemTitle = request_item["itemTitle"]
-                 itemPrice = request_item["itemPrice"]
-                 itemPicUrl = request_item["itemPicUrl"]
-                 recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n"
-                 prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n"
-             outputdict["recommend"] = recommend
-             print(recommend)
-         else:
-             prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买"""
-         outputdict["prompt"] = prompt
-         return outputdict
-

spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatgptLogin.py DELETED
@@ -1,96 +0,0 @@
- import os
- from ...typing import sha256, Dict, get_type_hints
- import requests
- import re
- import base64
-
- url = 'https://chatgptlogin.ac'
- model = ['gpt-3.5-turbo']
- supports_stream = False
- needs_auth = False
-
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     def get_nonce():
-         res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={
-             "Referer": "https://chatgptlogin.ac/use-chatgpt-free/",
-             "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
-         })
-
-         src = re.search(r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">', res.text).group(1)
-         decoded_string = base64.b64decode(src.split(",")[-1]).decode('utf-8')
-         return re.search(r"let restNonce = '(.*?)';", decoded_string).group(1)
-
-     def transform(messages: list) -> list:
-         def html_encode(string: str) -> str:
-             table = {
-                 '"': '&quot;',
-                 "'": '&#39;',
-                 '&': '&amp;',
-                 '>': '&gt;',
-                 '<': '&lt;',
-                 '\n': '<br>',
-                 '\t': '&nbsp;&nbsp;&nbsp;&nbsp;',
-                 ' ': '&nbsp;'
-             }
-
-             for key in table:
-                 string = string.replace(key, table[key])
-
-             return string
-
-         return [{
-             'id': os.urandom(6).hex(),
-             'role': message['role'],
-             'content': message['content'],
-             'who': 'AI: ' if message['role'] == 'assistant' else 'User: ',
-             'html': html_encode(message['content'])} for message in messages]
-
-     headers = {
-         'authority': 'chatgptlogin.ac',
-         'accept': '*/*',
-         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-         'content-type': 'application/json',
-         'origin': 'https://chatgptlogin.ac',
-         'referer': 'https://chatgptlogin.ac/use-chatgpt-free/',
-         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"Windows"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-         'x-wp-nonce': get_nonce()
-     }
-
-     conversation = transform(messages)
-
-     json_data = {
-         'env': 'chatbot',
-         'session': 'N/A',
-         'prompt': 'Converse as if you were an AI assistant. Be friendly, creative.',
-         'context': 'Converse as if you were an AI assistant. Be friendly, creative.',
-         'messages': conversation,
-         'newMessage': messages[-1]['content'],
-         'userName': '<div class="mwai-name-text">User:</div>',
-         'aiName': '<div class="mwai-name-text">AI:</div>',
-         'model': 'gpt-3.5-turbo',
-         'temperature': 0.8,
-         'maxTokens': 1024,
-         'maxResults': 1,
-         'apiKey': '',
-         'service': 'openai',
-         'embeddingsIndex': '',
-         'stop': '',
-         'clientId': os.urandom(6).hex()
-     }
-
-     response = requests.post('https://chatgptlogin.ac/wp-json/ai-chatbot/v1/chat',
-                              headers=headers, json=json_data)
-
-     return response.json()['reply']
-
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join(
-         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])

spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/mmpose_1_x/configs/fashion_2d_keypoint/topdown_heatmap/deepfashion2/td_hm_res50_4xb64-120e_deepfashion2_long_sleeved_shirt_256x192.py DELETED
@@ -1,172 +0,0 @@
- _base_ = [
-     '../../../_base_/default_runtime.py',
-     '../../../_base_/datasets/deepfashion2.py'
- ]
-
- default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
-
- resume = False  # resume training from a checkpoint
- load_from = None  # path of the model weights to load
- train_cfg = dict(by_epoch=True, max_epochs=120, val_interval=10)  # number of training epochs and validation interval
- param_scheduler = [
-     dict(  # warmup strategy
-         type='LinearLR',
-         begin=0,
-         end=500,
-         start_factor=0.001,
-         by_epoch=False),
-     dict(  # scheduler
-         type='MultiStepLR',
-         begin=0,
-         end=60,
-         milestones=[20, 40],
-         gamma=0.1,
-         by_epoch=True)
- ]
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))  # optimizer and learning rate
- auto_scale_lr = dict(base_batch_size=512)  # scale the learning rate automatically according to batch_size
-
- backend_args = dict(backend='local')  # data-loading backend; loads from the local disk by default
- dataset_type = 'DeepFashion2Dataset'  # dataset class name
- data_mode = 'topdown'  # algorithm paradigm, used to choose the annotation-loading strategy
- data_root = 'data/deepfashion2/'  # dataset root path
- # codec that generates the training targets and decodes predictions; it also defines the input image and output heatmap sizes
- codec = dict(
-     type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
-
- train_pipeline = [
-     dict(type='LoadImage'),
-     dict(type='GetBBoxCenterScale'),
-     dict(type='RandomFlip', direction='horizontal'),
-     dict(
-         type='RandomBBoxTransform',
-         shift_prob=0,
-         rotate_factor=60,
-         scale_factor=(0.75, 1.25)),
-     dict(type='TopdownAffine', input_size=codec['input_size']),
-     dict(type='GenerateTarget', encoder=codec),
-     dict(type='PackPoseInputs')
- ]
- val_pipeline = [  # test-time data transforms
-     dict(type='LoadImage', backend_args=backend_args),  # load the image
-     dict(type='GetBBoxCenterScale'),  # get the center and scale from the bbox
-     dict(type='TopdownAffine', input_size=codec['input_size']),  # update the targets with the affine matrix
-     dict(type='PackPoseInputs')  # pack the targets for training
- ]
- train_dataloader = dict(  # training data loader
-     batch_size=64,  # batch size
-     num_workers=6,  # number of data-loading worker processes
-     persistent_workers=True,  # keep idle workers alive to avoid the cost of restarting them
-     sampler=dict(type='DefaultSampler', shuffle=True),  # sampling strategy: shuffle the data
-     dataset=dict(
-         type=dataset_type,  # dataset class name
-         data_root=data_root,  # dataset path
-         data_mode=data_mode,  # algorithm paradigm
-         ann_file='train/deepfashion2_long_sleeved_shirt.json',  # annotation file path
-         data_prefix=dict(img='train/image/'),  # image path
-         pipeline=train_pipeline  # data pipeline
-     ))
- val_dataloader = dict(
-     batch_size=32,
-     num_workers=6,
-     persistent_workers=True,  # keep idle workers alive to avoid the cost of restarting them
-     drop_last=False,
-     sampler=dict(type='DefaultSampler', shuffle=False),  # sampling strategy: no shuffling
-     dataset=dict(
-         type=dataset_type,  # dataset class name
-         data_root=data_root,  # dataset path
-         data_mode=data_mode,  # algorithm paradigm
-         ann_file='validation/deepfashion2_long_sleeved_shirt.json',  # annotation file path
-         data_prefix=dict(img='validation/image/'),  # image path
-         test_mode=True,  # test-mode switch
-         pipeline=val_pipeline  # data pipeline
-     ))
- test_dataloader = val_dataloader  # validation and test sets are not distinguished by default; define them separately if needed
-
- channel_cfg = dict(
-     num_output_channels=294,
-     dataset_joints=294,
-     dataset_channel=[
-         [
-             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
-             19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
-             36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
-             53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
-             70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
-             87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
-             103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
-             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
-             129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
-             142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
-             155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
-             168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
-             181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
-             194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
-             207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
-             220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
-             233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245,
-             246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
-             259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
-             272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
-             285, 286, 287, 288, 289, 290, 291, 292, 293
-         ],
-     ],
-     inference_channel=[
-         0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-         20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
-         38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
-         56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
-         74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
-         92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
-         108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
-         122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
-         136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
-         150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
-         164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
-         178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
-         192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
-         206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
-         220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
-         234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
-         248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
-         262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
-         276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
-         290, 291, 292, 293
-     ])
-
- model = dict(
-     type='TopdownPoseEstimator',  # the model type determines the algorithm pipeline
-     data_preprocessor=dict(  # data normalization and channel-order conversion, run as part of the model
-         type='PoseDataPreprocessor',
-         mean=[123.675, 116.28, 103.53],
-         std=[58.395, 57.12, 57.375],
-         bgr_to_rgb=True),
-     backbone=dict(
-         type='ResNet',
-         depth=50,
-         init_cfg=dict(
-             type='Pretrained',  # load only the pretrained backbone weights, for transfer learning
-             checkpoint='torchvision://resnet50')),
-     head=dict(  # model head
-         type='HeatmapHead',
-         in_channels=2048,
-         out_channels=channel_cfg['num_output_channels'],
-         # deconv_out_channels=None,
-         loss=dict(type='KeypointMSELoss', use_target_weight=True),  # loss function
-         decoder=codec),  # decoder that converts heatmaps back into coordinates
-     test_cfg=dict(
-         flip_test=True,  # enable horizontal-flip test-time augmentation
-         flip_mode='heatmap',  # flip the heatmaps
-         shift_heatmap=True,  # shift the flipped heatmaps to improve accuracy
-     ))
-
- val_evaluator = [
-     dict(type='PCKAccuracy', thr=0.2),
-     dict(type='AUC'),
-     dict(type='EPE'),
- ]
- test_evaluator = val_evaluator  # validation and test sets are not distinguished by default; define them separately if needed
-
- visualizer = dict(
-     vis_backends=[dict(type='LocalVisBackend'),
-                   dict(type='WandbVisBackend')])

spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet18_8xb16_cifar10.py DELETED
@@ -1,4 +0,0 @@
- _base_ = [
-     '../_base_/models/resnet18_cifar.py', '../_base_/datasets/cifar10_bs16.py',
-     '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py'
- ]

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorcomponents/ColorComponents.js DELETED
@@ -1,187 +0,0 @@
- import Sizer from '../../sizer/Sizer.js';
- import IsGameObject from '../../../../plugins/utils/system/IsGameObject.js';
- import CreateLabel from '../../utils/build/CreateLabel.js';
- import CreateInputText from '../../utils/build/CreateInputText.js';
-
- const GetValue = Phaser.Utils.Objects.GetValue;
- const Color = Phaser.Display.Color;
- const ColorToRGBA = Phaser.Display.Color.ColorToRGBA;
- const HSVToRGB = Phaser.Display.Color.HSVToRGB;
- const Clamp = Phaser.Math.Clamp;
-
- class ColorComponents extends Sizer {
-     constructor(scene, config) {
-         if (config === undefined) {
-             config = {};
-         }
-         config.orientation = 0;
-         super(scene, config);
-         this.type = 'rexColorComponents';
-
-         this.colorObject = new Color();
-
-         // Add elements
-         var background = GetValue(config, 'background', undefined);
-
-         var formatLabel = GetValue(config, 'formatLabel', undefined);
-         if (!IsGameObject(formatLabel)) {
-             formatLabel = CreateLabel(scene, formatLabel)
-                 .resetDisplayContent();
-         }
-
-         var components = [];
-         if (config.inputText0 && config.inputText1 && config.inputText2) {
-             components.push(config.inputText0);
-             components.push(config.inputText1);
-             components.push(config.inputText2);
-         } else {
-             var inputTextConfig = GetValue(config, 'inputText');
-             for (var i = 0; i < 3; i++) {
-                 var inputText = CreateInputText(scene, inputTextConfig)
-                     .setMaxLength(3)
-                     .setNumberInput()
-
-                 components.push(inputText);
-             }
-         }
-
-         if (background) {
-             this.addBackground(background);
-         }
-
-         var proportion = GetValue(config, 'proportion.formatLabel', 0);
-         var defaultExpand = (formatLabel.isRexContainerLite) ? true : false;
-         var expand = GetValue(config, 'expand.formatLabel', defaultExpand);
-         this.add(
-             formatLabel,
-             { proportion: proportion, expand: expand }
-         );
-
-         var proportion = (GetValue(inputTextConfig, 'width') === undefined) ? 1 : 0;
-         var expand = (GetValue(inputTextConfig, 'height') === undefined) ? true : false;
-         for (var i = 0, cnt = components.length; i < cnt; i++) {
-             this.add(
-                 components[i],
-                 { proportion: proportion, expand: expand }
-             )
-         }
-
-         this.addChildrenMap('background', background);
-         this.addChildrenMap('formatLabel', formatLabel);
-         this.addChildrenMap('components', components);
-
-         this.onClick(formatLabel, this.toggleColorFormat, this);
-
-         for (var i = 0, cnt = components.length; i < cnt; i++) {
-             components[i].on('close', function () {
-                 this.updateColorObject();
-                 this.setValue(this.colorObject.color);
-             }, this);
-         }
-
-         var callback = GetValue(config, 'valuechangeCallback', null);
-         if (callback !== null) {
-             var scope = GetValue(config, 'valuechangeCallbackScope', undefined);
-             this.on('valuechange', callback, scope);
-         }
-
-         formatLabel.setText('RGB');
-         this.setValue(GetValue(config, 'value', 0xffffff));
-     }
-
-     get value() {
-         return this._value;
-     }
-
-     set value(value) {
-         value = Clamp(Math.floor(value), 0, 0xffffff);
-
-         if (this._value === value) {
-             return;
-         }
-
-         this._value = value;
-
-         this.colorObject.setFromRGB(ColorToRGBA(value));
-         this.updateComponents();
-
-         this.emit('valuechange', this._value);
-     }
-
-     setValue(value) {
-         this.value = value;
-         return this;
-     }
-
-     get color() {
-         return this._value;
-     }
-
-     set color(color) {
-         this.value = color;
-     }
-
-     setColor(color) {
-         this.color = color;
-         return this;
-     }
-
-     get colorFormat() {
-         return this.childrenMap.formatLabel.text;
-     }
-
-     set colorFormat(value) {
-         if (this.colorFormat === value) {
-             return;
-         }
-         this.childrenMap.formatLabel.setText(value);
-         this.updateComponents();
-     }
-
-     setColorFormat(colrType) {
-         this.colorFormat = colrType;
-         return this;
-     }
-
-     toggleColorFormat() {
-         this.colorFormat = (this.colorFormat === 'RGB') ? 'HSV' : 'RGB';
-         return this;
-     }
-
-     updateComponents() {
-         var components = this.childrenMap.components;
-         var value0, value1, value2
-         if (this.colorFormat === 'RGB') {
-             value0 = this.colorObject.red;
-             value1 = this.colorObject.green;
-             value2 = this.colorObject.blue;
-         } else { // colorFormat === 'HSV'
-             value0 = Math.floor(this.colorObject.h * 360);
-             value1 = Math.floor(this.colorObject.s * 100);
-             value2 = Math.floor(this.colorObject.v * 100);
-         }
-
-         components[0].setValue(value0);
-         components[1].setValue(value1);
-         components[2].setValue(value2);
-         return this;
-     }
-
-     updateColorObject() {
-         var components = this.childrenMap.components;
-         if (this.colorFormat === 'RGB') {
-             var red = Clamp(components[0].value, 0, 255);
-             var green = Clamp(components[1].value, 0, 255);
-             var blue = Clamp(components[2].value, 0, 255);
-             this.colorObject.setTo(red, green, blue);
-         } else {
-             var h = Clamp(components[0].value, 0, 359) / 360;
-             var s = Clamp(components[1].value, 0, 100) / 100;
-             var v = Clamp(components[2].value, 0, 100) / 100;
-             this.colorObject.setFromRGB(HSVToRGB(h, s, v));
-         }
-         return this;
-     }
- }
-
- export default ColorComponents;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/ScrollablePanel.js DELETED
@@ -1,71 +0,0 @@
- import Scrollable from '../utils/scrollable/Scrollable.js';
- import GetScrollMode from '../utils/GetScrollMode.js';
- import ScrollableBlock from './scrollableblock/ScrollableBlock.js';
- import SetChildrenInteractive from '../utils/setchildreninteractive/SetChildrenInteractive.js';
-
- const GetValue = Phaser.Utils.Objects.GetValue;
-
- class ScrollablePanel extends Scrollable {
-     constructor(scene, config) {
-         if (config === undefined) {
-             config = {};
-         }
-
-         // Create scrollable-block
-         var scrollMode = GetScrollMode(config);
-         var panelConfig = GetValue(config, 'panel', undefined);
-         if (panelConfig === undefined) {
-             panelConfig = {};
-         }
-         panelConfig.scrollMode = scrollMode;
-         panelConfig.clamplChildOY = GetValue(config, 'clamplChildOY', False);
-         var scrollableBlock = new ScrollableBlock(scene, panelConfig);
-         scene.add.existing(scrollableBlock); // Important: Add to display list for touch detecting
-         var panelWidth = GetValue(panelConfig, 'width', undefined);
-         var panelHeight = GetValue(panelConfig, 'height', undefined);
-         var proportion, expand;
-         if (scrollMode === 0) {
-             proportion = (panelWidth === undefined) ? 1 : 0;
-             expand = (panelHeight === undefined);
-         } else {
-             proportion = (panelHeight === undefined) ? 1 : 0;
-             expand = (panelWidth === undefined);
-         }
-
-         // Fill config of scrollable
-         config.type = 'rexScrollablePanel';
-         config.child = {
-             gameObject: scrollableBlock,
-             proportion: proportion,
-             expand: expand,
-         };
-         var spaceConfig = GetValue(config, 'space', undefined);
-         if (spaceConfig) {
-             spaceConfig.child = spaceConfig.panel;
-         }
-         super(scene, config);
-
-         this.addChildrenMap('panel', scrollableBlock.child);
-         this.addChildrenMap('panelLayer', scrollableBlock.maskLayer);
-         this.addChildrenMap('mask', scrollableBlock.maskGameObject);
-     }
-
-     setChildrenInteractive(config) {
-         if (config === undefined) {
-             config = {};
-         }
-
-         if (!config.hasOwnProperty('eventEmitter')) {
-             config.eventEmitter = this;
-         }
-
-         if (!config.hasOwnProperty('targets')) {
-             config.targets = [this.childrenMap.panel];
-         }
-
-         SetChildrenInteractive(this.childrenMap.child, config);
-         return this;
-     }
- }
-
- export default ScrollablePanel;

spaces/Akmyradov/TurkmenTTSweSTT/vits/text/symbols.py DELETED
@@ -1,16 +0,0 @@
- """ from https://github.com/keithito/tacotron """
-
- '''
- Defines the set of symbols used in text input to the model.
- '''
- _pad = '_'
- _punctuation = ';:,.!?¡¿—…"«»“” '
- _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
- _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
-
-
- # Export all symbols:
- symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
-
- # Special symbol ids
- SPACE_ID = symbols.index(" ")

spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py DELETED
@@ -1,111 +0,0 @@
- # -*- coding: utf-8 -*-
- # File   : test_sync_batchnorm.py
- # Author : Jiayuan Mao
- # Email  : [email protected]
- # Date   : 27/01/2018
- #
- # This file is part of Synchronized-BatchNorm-PyTorch.
-
- import unittest
-
- import torch
- import torch.nn as nn
- from torch.autograd import Variable
-
- from sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, DataParallelWithCallback
- from sync_batchnorm.unittest import TorchTestCase
-
-
- def handy_var(a, unbias=True):
-     n = a.size(0)
-     asum = a.sum(dim=0)
-     as_sum = (a ** 2).sum(dim=0)  # a square sum
-     sumvar = as_sum - asum * asum / n
-     if unbias:
-         return sumvar / (n - 1)
-     else:
-         return sumvar / n
-
-
- def _find_bn(module):
-     for m in module.modules():
-         if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)):
-             return m
-
-
- class SyncTestCase(TorchTestCase):
-     def _syncParameters(self, bn1, bn2):
-         bn1.reset_parameters()
-         bn2.reset_parameters()
-         if bn1.affine and bn2.affine:
-             bn2.weight.data.copy_(bn1.weight.data)
-             bn2.bias.data.copy_(bn1.bias.data)
-
-     def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False):
-         """Check the forward and backward for the customized batch normalization."""
-         bn1.train(mode=is_train)
-         bn2.train(mode=is_train)
-
-         if cuda:
-             input = input.cuda()
-
-         self._syncParameters(_find_bn(bn1), _find_bn(bn2))
-
-         input1 = Variable(input, requires_grad=True)
-         output1 = bn1(input1)
-         output1.sum().backward()
-         input2 = Variable(input, requires_grad=True)
-         output2 = bn2(input2)
-         output2.sum().backward()
-
-         self.assertTensorClose(input1.data, input2.data)
-         self.assertTensorClose(output1.data, output2.data)
-         self.assertTensorClose(input1.grad, input2.grad)
-         self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean)
-         self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var)
-
-     def testSyncBatchNormNormalTrain(self):
-         bn = nn.BatchNorm1d(10)
-         sync_bn = SynchronizedBatchNorm1d(10)
-
-         self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True)
-
-     def testSyncBatchNormNormalEval(self):
-         bn = nn.BatchNorm1d(10)
-         sync_bn = SynchronizedBatchNorm1d(10)
-
-         self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False)
-
-     def testSyncBatchNormSyncTrain(self):
-         bn = nn.BatchNorm1d(10, eps=1e-5, affine=False)
-         sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-         sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
-         bn.cuda()
-         sync_bn.cuda()
-
-         self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True)
-
-     def testSyncBatchNormSyncEval(self):
-         bn = nn.BatchNorm1d(10, eps=1e-5, affine=False)
-         sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
-         sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
-         bn.cuda()
-         sync_bn.cuda()
-
-         self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True)
-
-     def testSyncBatchNorm2DSyncTrain(self):
-         bn = nn.BatchNorm2d(10)
-         sync_bn = SynchronizedBatchNorm2d(10)
-         sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
-
-         bn.cuda()
-         sync_bn.cuda()
-
-         self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True)
-
-
- if __name__ == '__main__':
-     unittest.main()

spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_utils.py DELETED
@@ -1,19 +0,0 @@
- import torch
- import numpy as np
- import random
- import onnxruntime as ort
- def set_random_seed(seed=0):
-     ort.set_seed(seed)
-     torch.manual_seed(seed)
-     torch.cuda.manual_seed(seed)
-     torch.backends.cudnn.deterministic = True
-     random.seed(seed)
-     np.random.seed(seed)
-
- def runonnx(model_path, **kwargs):
-     ort_session = ort.InferenceSession(model_path)
-     outputs = ort_session.run(
-         None,
-         kwargs
-     )
-     return outputs

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/train_dreambooth.py DELETED
@@ -1,1377 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
-
16
- import argparse
17
- import gc
18
- import hashlib
19
- import itertools
20
- import logging
21
- import math
22
- import os
23
- import shutil
24
- import warnings
25
- from pathlib import Path
26
-
27
- import numpy as np
28
- import torch
29
- import torch.nn.functional as F
30
- import torch.utils.checkpoint
31
- import transformers
32
- from accelerate import Accelerator
33
- from accelerate.logging import get_logger
34
- from accelerate.utils import ProjectConfiguration, set_seed
35
- from huggingface_hub import create_repo, model_info, upload_folder
36
- from packaging import version
37
- from PIL import Image
38
- from PIL.ImageOps import exif_transpose
39
- from torch.utils.data import Dataset
40
- from torchvision import transforms
41
- from tqdm.auto import tqdm
42
- from transformers import AutoTokenizer, PretrainedConfig
43
-
44
- import diffusers
45
- from diffusers import (
46
- AutoencoderKL,
47
- DDPMScheduler,
48
- DiffusionPipeline,
49
- DPMSolverMultistepScheduler,
50
- StableDiffusionPipeline,
51
- UNet2DConditionModel,
52
- )
53
- from diffusers.optimization import get_scheduler
54
- from diffusers.utils import check_min_version, is_wandb_available
55
- from diffusers.utils.import_utils import is_xformers_available
56
-
57
-
58
- if is_wandb_available():
59
- import wandb
60
-
61
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
62
- check_min_version("0.19.0")
63
-
64
- logger = get_logger(__name__)
65
-
66
-
67
- def save_model_card(
68
- repo_id: str,
69
- images=None,
70
- base_model=str,
71
- train_text_encoder=False,
72
- prompt=str,
73
- repo_folder=None,
74
- pipeline: DiffusionPipeline = None,
75
- ):
76
- img_str = ""
77
- for i, image in enumerate(images):
78
- image.save(os.path.join(repo_folder, f"image_{i}.png"))
79
- img_str += f"![img_{i}](./image_{i}.png)\n"
80
-
81
- yaml = f"""
82
- ---
83
- license: creativeml-openrail-m
84
- base_model: {base_model}
85
- instance_prompt: {prompt}
86
- tags:
87
- - {'stable-diffusion' if isinstance(pipeline, StableDiffusionPipeline) else 'if'}
88
- - {'stable-diffusion-diffusers' if isinstance(pipeline, StableDiffusionPipeline) else 'if-diffusers'}
89
- - text-to-image
90
- - diffusers
91
- - dreambooth
92
- inference: true
93
- ---
94
- """
95
- model_card = f"""
96
- # DreamBooth - {repo_id}
97
-
98
- This is a dreambooth model derived from {base_model}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/).
99
- You can find some example images in the following. \n
100
- {img_str}
101
-
102
- DreamBooth for the text encoder was enabled: {train_text_encoder}.
103
- """
104
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
105
- f.write(yaml + model_card)
106
-
107
-
108
- def log_validation(
109
- text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype, epoch, prompt_embeds, negative_prompt_embeds
110
- ):
111
- logger.info(
112
- f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
113
- f" {args.validation_prompt}."
114
- )
115
-
116
- pipeline_args = {}
117
-
118
- if vae is not None:
119
- pipeline_args["vae"] = vae
120
-
121
- if text_encoder is not None:
122
- text_encoder = accelerator.unwrap_model(text_encoder)
123
-
124
- # create pipeline (note: unet and vae are loaded again in float32)
125
- pipeline = DiffusionPipeline.from_pretrained(
126
- args.pretrained_model_name_or_path,
127
- tokenizer=tokenizer,
128
- text_encoder=text_encoder,
129
- unet=accelerator.unwrap_model(unet),
130
- revision=args.revision,
131
- torch_dtype=weight_dtype,
132
- **pipeline_args,
133
- )
134
-
135
- # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
136
- scheduler_args = {}
137
-
138
- if "variance_type" in pipeline.scheduler.config:
139
- variance_type = pipeline.scheduler.config.variance_type
140
-
141
- if variance_type in ["learned", "learned_range"]:
142
- variance_type = "fixed_small"
143
-
144
- scheduler_args["variance_type"] = variance_type
145
-
146
- pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
147
- pipeline = pipeline.to(accelerator.device)
148
- pipeline.set_progress_bar_config(disable=True)
149
-
150
- if args.pre_compute_text_embeddings:
151
- pipeline_args = {
152
- "prompt_embeds": prompt_embeds,
153
- "negative_prompt_embeds": negative_prompt_embeds,
154
- }
155
- else:
156
- pipeline_args = {"prompt": args.validation_prompt}
157
-
158
- # run inference
159
- generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
160
- images = []
161
- if args.validation_images is None:
162
- for _ in range(args.num_validation_images):
163
- with torch.autocast("cuda"):
164
- image = pipeline(**pipeline_args, num_inference_steps=25, generator=generator).images[0]
165
- images.append(image)
166
- else:
167
- for image in args.validation_images:
168
- image = Image.open(image)
169
- image = pipeline(**pipeline_args, image=image, generator=generator).images[0]
170
- images.append(image)
171
-
172
- for tracker in accelerator.trackers:
173
- if tracker.name == "tensorboard":
174
- np_images = np.stack([np.asarray(img) for img in images])
175
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
176
- if tracker.name == "wandb":
177
- tracker.log(
178
- {
179
- "validation": [
180
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
181
- ]
182
- }
183
- )
184
-
185
- del pipeline
186
- torch.cuda.empty_cache()
187
-
188
- return images
189
-
190
-
191
- def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
192
- text_encoder_config = PretrainedConfig.from_pretrained(
193
- pretrained_model_name_or_path,
194
- subfolder="text_encoder",
195
- revision=revision,
196
- )
197
- model_class = text_encoder_config.architectures[0]
198
-
199
- if model_class == "CLIPTextModel":
200
- from transformers import CLIPTextModel
201
-
202
- return CLIPTextModel
203
- elif model_class == "RobertaSeriesModelWithTransformation":
204
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
205
-
206
- return RobertaSeriesModelWithTransformation
207
- elif model_class == "T5EncoderModel":
208
- from transformers import T5EncoderModel
209
-
210
- return T5EncoderModel
211
- else:
212
- raise ValueError(f"{model_class} is not supported.")
213
-
214
-
215
- def parse_args(input_args=None):
216
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
217
- parser.add_argument(
218
- "--pretrained_model_name_or_path",
219
- type=str,
220
- default=None,
221
- required=True,
222
- help="Path to pretrained model or model identifier from huggingface.co/models.",
223
- )
224
- parser.add_argument(
225
- "--revision",
226
- type=str,
227
- default=None,
228
- required=False,
229
- help=(
230
- "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be"
231
- " float32 precision."
232
- ),
233
- )
234
- parser.add_argument(
235
- "--tokenizer_name",
236
- type=str,
237
- default=None,
238
- help="Pretrained tokenizer name or path if not the same as model_name",
239
- )
240
- parser.add_argument(
241
- "--instance_data_dir",
242
- type=str,
243
- default=None,
244
- required=True,
245
- help="A folder containing the training data of instance images.",
246
- )
247
- parser.add_argument(
248
- "--class_data_dir",
249
- type=str,
250
- default=None,
251
- required=False,
252
- help="A folder containing the training data of class images.",
253
- )
254
- parser.add_argument(
255
- "--instance_prompt",
256
- type=str,
257
- default=None,
258
- required=True,
259
- help="The prompt with identifier specifying the instance",
260
- )
261
- parser.add_argument(
262
- "--class_prompt",
263
- type=str,
264
- default=None,
265
- help="The prompt to specify images in the same class as provided instance images.",
266
- )
267
- parser.add_argument(
268
- "--with_prior_preservation",
269
- default=False,
270
- action="store_true",
271
- help="Flag to add prior preservation loss.",
272
- )
273
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
274
- parser.add_argument(
275
- "--num_class_images",
276
- type=int,
277
- default=100,
278
- help=(
279
- "Minimal class images for prior preservation loss. If there are not enough images already present in"
280
- " class_data_dir, additional images will be sampled with class_prompt."
281
- ),
282
- )
283
- parser.add_argument(
284
- "--output_dir",
285
- type=str,
286
- default="text-inversion-model",
287
- help="The output directory where the model predictions and checkpoints will be written.",
288
- )
289
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
290
- parser.add_argument(
291
- "--resolution",
292
- type=int,
293
- default=512,
294
- help=(
295
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
296
- " resolution"
297
- ),
298
- )
299
- parser.add_argument(
300
- "--center_crop",
301
- default=False,
302
- action="store_true",
303
- help=(
304
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
305
- " cropped. The images will be resized to the resolution first before cropping."
306
- ),
307
- )
308
- parser.add_argument(
309
- "--train_text_encoder",
310
- action="store_true",
311
- help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
312
- )
313
- parser.add_argument(
314
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
315
- )
316
- parser.add_argument(
317
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
318
- )
319
- parser.add_argument("--num_train_epochs", type=int, default=1)
320
- parser.add_argument(
321
- "--max_train_steps",
322
- type=int,
323
- default=None,
324
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
325
- )
326
- parser.add_argument(
327
- "--checkpointing_steps",
328
- type=int,
329
- default=500,
330
- help=(
331
- "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
332
- "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
333
- "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
334
- "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
335
- "instructions."
336
- ),
337
- )
338
- parser.add_argument(
339
- "--checkpoints_total_limit",
340
- type=int,
341
- default=None,
342
- help=(
343
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
344
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
345
- " for more details"
346
- ),
347
- )
348
- parser.add_argument(
349
- "--resume_from_checkpoint",
350
- type=str,
351
- default=None,
352
- help=(
353
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
354
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
355
- ),
356
- )
357
- parser.add_argument(
358
- "--gradient_accumulation_steps",
359
- type=int,
360
- default=1,
361
- help="Number of updates steps to accumulate before performing a backward/update pass.",
362
- )
363
- parser.add_argument(
364
- "--gradient_checkpointing",
365
- action="store_true",
366
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
367
- )
368
- parser.add_argument(
369
- "--learning_rate",
370
- type=float,
371
- default=5e-6,
372
- help="Initial learning rate (after the potential warmup period) to use.",
373
- )
374
- parser.add_argument(
375
- "--scale_lr",
376
- action="store_true",
377
- default=False,
378
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
379
- )
380
- parser.add_argument(
381
- "--lr_scheduler",
382
- type=str,
383
- default="constant",
384
- help=(
385
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
386
- ' "constant", "constant_with_warmup"]'
387
- ),
388
- )
389
- parser.add_argument(
390
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
391
- )
392
- parser.add_argument(
393
- "--lr_num_cycles",
394
- type=int,
395
- default=1,
396
- help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
397
- )
398
- parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
399
- parser.add_argument(
400
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
401
- )
402
- parser.add_argument(
403
- "--dataloader_num_workers",
404
- type=int,
405
- default=0,
406
- help=(
407
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
408
- ),
409
- )
410
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
411
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
412
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
413
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
414
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
415
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
416
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
417
- parser.add_argument(
418
- "--hub_model_id",
419
- type=str,
420
- default=None,
421
- help="The name of the repository to keep in sync with the local `output_dir`.",
422
- )
423
- parser.add_argument(
424
- "--logging_dir",
425
- type=str,
426
- default="logs",
427
- help=(
428
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
429
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
430
- ),
431
- )
432
- parser.add_argument(
433
- "--allow_tf32",
434
- action="store_true",
435
- help=(
436
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
437
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
438
- ),
439
- )
440
- parser.add_argument(
441
- "--report_to",
442
- type=str,
443
- default="tensorboard",
444
- help=(
445
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
446
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
447
- ),
448
- )
449
- parser.add_argument(
450
- "--validation_prompt",
451
- type=str,
452
- default=None,
453
- help="A prompt that is used during validation to verify that the model is learning.",
454
- )
455
- parser.add_argument(
456
- "--num_validation_images",
457
- type=int,
458
- default=4,
459
- help="Number of images that should be generated during validation with `validation_prompt`.",
460
- )
461
- parser.add_argument(
462
- "--validation_steps",
463
- type=int,
464
- default=100,
465
- help=(
466
- "Run validation every X steps. Validation consists of running the prompt"
467
- " `args.validation_prompt` `args.num_validation_images` times"
468
- " and logging the images."
469
- ),
470
- )
471
- parser.add_argument(
472
- "--mixed_precision",
473
- type=str,
474
- default=None,
475
- choices=["no", "fp16", "bf16"],
476
- help=(
477
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
478
- " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
479
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
480
- ),
481
- )
482
- parser.add_argument(
483
- "--prior_generation_precision",
484
- type=str,
485
- default=None,
486
- choices=["no", "fp32", "fp16", "bf16"],
487
- help=(
488
- "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
489
- " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
490
- ),
491
- )
492
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
493
- parser.add_argument(
494
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
495
- )
496
- parser.add_argument(
497
- "--set_grads_to_none",
498
- action="store_true",
499
- help=(
500
- "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
501
- " behaviors, so disable this argument if it causes any problems. More info:"
502
- " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
503
- ),
504
- )
505
-
506
- parser.add_argument(
507
- "--offset_noise",
508
- action="store_true",
509
- default=False,
510
- help=(
511
- "Fine-tuning against a modified noise."
512
- " See: https://www.crosslabs.org//blog/diffusion-with-offset-noise for more information."
513
- ),
514
- )
515
- parser.add_argument(
516
- "--pre_compute_text_embeddings",
517
- action="store_true",
518
- help="Whether or not to pre-compute text embeddings. If text embeddings are pre-computed, the text encoder will not be kept in memory during training and will leave more GPU memory available for training the rest of the model. This is not compatible with `--train_text_encoder`.",
519
- )
520
- parser.add_argument(
521
- "--tokenizer_max_length",
522
- type=int,
523
- default=None,
524
- required=False,
525
- help="The maximum length of the tokenizer. If not set, will default to the tokenizer's max length.",
526
- )
527
- parser.add_argument(
528
- "--text_encoder_use_attention_mask",
529
- action="store_true",
530
- required=False,
531
- help="Whether to use attention mask for the text encoder",
532
- )
533
- parser.add_argument(
534
- "--skip_save_text_encoder", action="store_true", required=False, help="Set to not save text encoder"
535
- )
536
- parser.add_argument(
537
- "--validation_images",
538
- required=False,
539
- default=None,
540
- nargs="+",
541
- help="Optional set of images to use for validation. Used when the target pipeline takes an initial image as input, such as when training image variation or super-resolution.",
542
- )
543
- parser.add_argument(
544
- "--class_labels_conditioning",
545
- required=False,
546
- default=None,
547
- help="The optional `class_label` conditioning to pass to the unet, available values are `timesteps`.",
548
- )
549
-
550
- if input_args is not None:
551
- args = parser.parse_args(input_args)
552
- else:
553
- args = parser.parse_args()
554
-
555
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
556
- if env_local_rank != -1 and env_local_rank != args.local_rank:
557
- args.local_rank = env_local_rank
558
-
559
- if args.with_prior_preservation:
560
- if args.class_data_dir is None:
561
- raise ValueError("You must specify a data directory for class images.")
562
- if args.class_prompt is None:
563
- raise ValueError("You must specify a prompt for class images.")
564
- else:
565
- # logger is not available yet
566
- if args.class_data_dir is not None:
567
- warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
568
- if args.class_prompt is not None:
569
- warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
570
-
571
- if args.train_text_encoder and args.pre_compute_text_embeddings:
572
- raise ValueError("`--train_text_encoder` cannot be used with `--pre_compute_text_embeddings`")
573
-
574
- return args
575
-
576
-
577
- class DreamBoothDataset(Dataset):
578
- """
579
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
580
- It pre-processes the images and tokenizes the prompts.
581
- """
582
-
583
- def __init__(
584
- self,
585
- instance_data_root,
586
- instance_prompt,
587
- tokenizer,
588
- class_data_root=None,
589
- class_prompt=None,
590
- class_num=None,
591
- size=512,
592
- center_crop=False,
593
- encoder_hidden_states=None,
594
- instance_prompt_encoder_hidden_states=None,
595
- tokenizer_max_length=None,
596
- ):
597
- self.size = size
598
- self.center_crop = center_crop
599
- self.tokenizer = tokenizer
600
- self.encoder_hidden_states = encoder_hidden_states
601
- self.instance_prompt_encoder_hidden_states = instance_prompt_encoder_hidden_states
602
- self.tokenizer_max_length = tokenizer_max_length
603
-
604
- self.instance_data_root = Path(instance_data_root)
605
- if not self.instance_data_root.exists():
606
- raise ValueError(f"Instance images root {self.instance_data_root} doesn't exist.")
607
-
608
- self.instance_images_path = list(Path(instance_data_root).iterdir())
609
- self.num_instance_images = len(self.instance_images_path)
610
- self.instance_prompt = instance_prompt
611
- self._length = self.num_instance_images
612
-
613
- if class_data_root is not None:
614
- self.class_data_root = Path(class_data_root)
615
- self.class_data_root.mkdir(parents=True, exist_ok=True)
616
- self.class_images_path = list(self.class_data_root.iterdir())
617
- if class_num is not None:
618
- self.num_class_images = min(len(self.class_images_path), class_num)
619
- else:
620
- self.num_class_images = len(self.class_images_path)
621
- self._length = max(self.num_class_images, self.num_instance_images)
622
- self.class_prompt = class_prompt
623
- else:
624
- self.class_data_root = None
625
-
626
- self.image_transforms = transforms.Compose(
627
- [
628
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
629
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
630
- transforms.ToTensor(),
631
- transforms.Normalize([0.5], [0.5]),
632
- ]
633
- )
634
-
635
- def __len__(self):
636
- return self._length
637
-
638
- def __getitem__(self, index):
639
- example = {}
640
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
641
- instance_image = exif_transpose(instance_image)
642
-
643
- if not instance_image.mode == "RGB":
644
- instance_image = instance_image.convert("RGB")
645
- example["instance_images"] = self.image_transforms(instance_image)
646
-
647
- if self.encoder_hidden_states is not None:
648
- example["instance_prompt_ids"] = self.encoder_hidden_states
649
- else:
650
- text_inputs = tokenize_prompt(
651
- self.tokenizer, self.instance_prompt, tokenizer_max_length=self.tokenizer_max_length
652
- )
653
- example["instance_prompt_ids"] = text_inputs.input_ids
654
- example["instance_attention_mask"] = text_inputs.attention_mask
655
-
656
- if self.class_data_root:
657
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
658
- class_image = exif_transpose(class_image)
659
-
660
- if not class_image.mode == "RGB":
661
- class_image = class_image.convert("RGB")
662
- example["class_images"] = self.image_transforms(class_image)
663
-
664
- if self.instance_prompt_encoder_hidden_states is not None:
665
- example["class_prompt_ids"] = self.instance_prompt_encoder_hidden_states
666
- else:
667
- class_text_inputs = tokenize_prompt(
668
- self.tokenizer, self.class_prompt, tokenizer_max_length=self.tokenizer_max_length
669
- )
670
- example["class_prompt_ids"] = class_text_inputs.input_ids
671
- example["class_attention_mask"] = class_text_inputs.attention_mask
672
-
673
- return example
674
-
675
-
676
- def collate_fn(examples, with_prior_preservation=False):
677
- has_attention_mask = "instance_attention_mask" in examples[0]
678
-
679
- input_ids = [example["instance_prompt_ids"] for example in examples]
680
- pixel_values = [example["instance_images"] for example in examples]
681
-
682
- if has_attention_mask:
683
- attention_mask = [example["instance_attention_mask"] for example in examples]
684
-
685
- # Concat class and instance examples for prior preservation.
686
- # We do this to avoid doing two forward passes.
687
- if with_prior_preservation:
688
- input_ids += [example["class_prompt_ids"] for example in examples]
689
- pixel_values += [example["class_images"] for example in examples]
690
-
691
- if has_attention_mask:
692
- attention_mask += [example["class_attention_mask"] for example in examples]
693
-
694
- pixel_values = torch.stack(pixel_values)
695
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
696
-
697
- input_ids = torch.cat(input_ids, dim=0)
698
-
699
- batch = {
700
- "input_ids": input_ids,
701
- "pixel_values": pixel_values,
702
- }
703
-
704
- if has_attention_mask:
705
- attention_mask = torch.cat(attention_mask, dim=0)
706
- batch["attention_mask"] = attention_mask
707
-
708
- return batch
709
-
710
-
711
- class PromptDataset(Dataset):
712
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
713
-
714
- def __init__(self, prompt, num_samples):
715
- self.prompt = prompt
716
- self.num_samples = num_samples
717
-
718
- def __len__(self):
719
- return self.num_samples
720
-
721
- def __getitem__(self, index):
722
- example = {}
723
- example["prompt"] = self.prompt
724
- example["index"] = index
725
- return example
726
-
727
-
728
- def model_has_vae(args):
729
- config_file_name = os.path.join("vae", AutoencoderKL.config_name)
730
- if os.path.isdir(args.pretrained_model_name_or_path):
731
- config_file_name = os.path.join(args.pretrained_model_name_or_path, config_file_name)
732
- return os.path.isfile(config_file_name)
733
- else:
734
- files_in_repo = model_info(args.pretrained_model_name_or_path, revision=args.revision).siblings
735
- return any(file.rfilename == config_file_name for file in files_in_repo)
736
-
737
-
738
- def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
739
- if tokenizer_max_length is not None:
740
- max_length = tokenizer_max_length
741
- else:
742
- max_length = tokenizer.model_max_length
743
-
744
- text_inputs = tokenizer(
745
- prompt,
746
- truncation=True,
747
- padding="max_length",
748
- max_length=max_length,
749
- return_tensors="pt",
750
- )
751
-
752
- return text_inputs
753
-
754
-
755
- def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
756
- text_input_ids = input_ids.to(text_encoder.device)
757
-
758
- if text_encoder_use_attention_mask:
759
- attention_mask = attention_mask.to(text_encoder.device)
760
- else:
761
- attention_mask = None
762
-
763
- prompt_embeds = text_encoder(
764
- text_input_ids,
765
- attention_mask=attention_mask,
766
- )
767
- prompt_embeds = prompt_embeds[0]
768
-
769
- return prompt_embeds
770
-
771
-
772
- def main(args):
773
- logging_dir = Path(args.output_dir, args.logging_dir)
774
-
775
- accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
776
-
777
- accelerator = Accelerator(
778
- gradient_accumulation_steps=args.gradient_accumulation_steps,
779
- mixed_precision=args.mixed_precision,
780
- log_with=args.report_to,
781
- project_config=accelerator_project_config,
782
- )
783
-
784
- if args.report_to == "wandb":
785
- if not is_wandb_available():
786
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
787
-
788
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
789
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
790
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
791
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
792
- raise ValueError(
793
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
794
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
795
- )
796
-
797
- # Make one log on every process with the configuration for debugging.
798
- logging.basicConfig(
799
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
800
- datefmt="%m/%d/%Y %H:%M:%S",
801
- level=logging.INFO,
802
- )
803
- logger.info(accelerator.state, main_process_only=False)
804
- if accelerator.is_local_main_process:
805
- transformers.utils.logging.set_verbosity_warning()
806
- diffusers.utils.logging.set_verbosity_info()
807
- else:
808
- transformers.utils.logging.set_verbosity_error()
809
- diffusers.utils.logging.set_verbosity_error()
810
-
811
- # If passed along, set the training seed now.
812
- if args.seed is not None:
813
- set_seed(args.seed)
814
-
815
- # Generate class images if prior preservation is enabled.
816
- if args.with_prior_preservation:
817
- class_images_dir = Path(args.class_data_dir)
818
- if not class_images_dir.exists():
819
- class_images_dir.mkdir(parents=True)
820
- cur_class_images = len(list(class_images_dir.iterdir()))
821
-
822
- if cur_class_images < args.num_class_images:
823
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
824
- if args.prior_generation_precision == "fp32":
825
- torch_dtype = torch.float32
826
- elif args.prior_generation_precision == "fp16":
827
- torch_dtype = torch.float16
828
- elif args.prior_generation_precision == "bf16":
829
- torch_dtype = torch.bfloat16
830
- pipeline = DiffusionPipeline.from_pretrained(
831
- args.pretrained_model_name_or_path,
832
- torch_dtype=torch_dtype,
833
- safety_checker=None,
834
- revision=args.revision,
835
- )
836
- pipeline.set_progress_bar_config(disable=True)
837
-
838
- num_new_images = args.num_class_images - cur_class_images
839
- logger.info(f"Number of class images to sample: {num_new_images}.")
840
-
841
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
842
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
843
-
844
- sample_dataloader = accelerator.prepare(sample_dataloader)
845
- pipeline.to(accelerator.device)
846
-
847
- for example in tqdm(
848
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
849
- ):
850
- images = pipeline(example["prompt"]).images
851
-
852
- for i, image in enumerate(images):
853
- hash_image = hashlib.sha1(image.tobytes()).hexdigest()
854
- image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
855
- image.save(image_filename)
856
-
857
- del pipeline
858
- if torch.cuda.is_available():
859
- torch.cuda.empty_cache()
860
-
861
- # Handle the repository creation
862
- if accelerator.is_main_process:
863
- if args.output_dir is not None:
864
- os.makedirs(args.output_dir, exist_ok=True)
865
-
866
- if args.push_to_hub:
867
- repo_id = create_repo(
868
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
869
- ).repo_id
870
-
871
- # Load the tokenizer
872
- if args.tokenizer_name:
873
- tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
874
- elif args.pretrained_model_name_or_path:
875
- tokenizer = AutoTokenizer.from_pretrained(
876
- args.pretrained_model_name_or_path,
877
- subfolder="tokenizer",
878
- revision=args.revision,
879
- use_fast=False,
880
- )
881
-
882
- # import correct text encoder class
883
- text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
884
-
885
- # Load scheduler and models
886
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
887
- text_encoder = text_encoder_cls.from_pretrained(
888
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
889
- )
890
-
891
- if model_has_vae(args):
892
- vae = AutoencoderKL.from_pretrained(
893
- args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
894
- )
895
- else:
896
- vae = None
897
-
898
- unet = UNet2DConditionModel.from_pretrained(
899
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
900
- )
901
-
902
- # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
903
- def save_model_hook(models, weights, output_dir):
904
- for model in models:
905
- sub_dir = "unet" if isinstance(model, type(accelerator.unwrap_model(unet))) else "text_encoder"
906
- model.save_pretrained(os.path.join(output_dir, sub_dir))
907
-
908
- # make sure to pop the weight so that the corresponding model is not saved again
909
- weights.pop()
910
-
911
- def load_model_hook(models, input_dir):
912
- while len(models) > 0:
913
- # pop models so that they are not loaded again
914
- model = models.pop()
915
-
916
- if isinstance(model, type(accelerator.unwrap_model(text_encoder))):
917
- # load transformers style into model
918
- load_model = text_encoder_cls.from_pretrained(input_dir, subfolder="text_encoder")
919
- model.config = load_model.config
920
- else:
921
- # load diffusers style into model
922
- load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
923
- model.register_to_config(**load_model.config)
924
-
925
- model.load_state_dict(load_model.state_dict())
926
- del load_model
927
-
928
- accelerator.register_save_state_pre_hook(save_model_hook)
929
- accelerator.register_load_state_pre_hook(load_model_hook)
930
-
931
- if vae is not None:
932
- vae.requires_grad_(False)
933
-
934
- if not args.train_text_encoder:
935
- text_encoder.requires_grad_(False)
936
-
937
- if args.enable_xformers_memory_efficient_attention:
938
- if is_xformers_available():
939
- import xformers
940
-
941
- xformers_version = version.parse(xformers.__version__)
942
- if xformers_version == version.parse("0.0.16"):
943
- logger.warn(
944
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
945
- )
946
- unet.enable_xformers_memory_efficient_attention()
947
- else:
948
- raise ValueError("xformers is not available. Make sure it is installed correctly")
949
-
950
- if args.gradient_checkpointing:
951
- unet.enable_gradient_checkpointing()
952
- if args.train_text_encoder:
953
- text_encoder.gradient_checkpointing_enable()
954
-
955
- # Check that all trainable models are in full precision
956
- low_precision_error_string = (
957
- "Please make sure to always have all model weights in full float32 precision when starting training - even if"
958
- " doing mixed precision training. A copy of the weights should still be float32."
959
- )
960
-
961
- if accelerator.unwrap_model(unet).dtype != torch.float32:
962
- raise ValueError(
963
- f"Unet loaded as datatype {accelerator.unwrap_model(unet).dtype}. {low_precision_error_string}"
964
- )
965
-
966
- if args.train_text_encoder and accelerator.unwrap_model(text_encoder).dtype != torch.float32:
967
- raise ValueError(
968
- f"Text encoder loaded as datatype {accelerator.unwrap_model(text_encoder).dtype}."
969
- f" {low_precision_error_string}"
970
- )
971
-
972
- # Enable TF32 for faster training on Ampere GPUs,
973
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
974
- if args.allow_tf32:
975
- torch.backends.cuda.matmul.allow_tf32 = True
976
-
977
- if args.scale_lr:
978
- args.learning_rate = (
979
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
980
- )
981
-
982
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
983
- if args.use_8bit_adam:
984
- try:
985
- import bitsandbytes as bnb
986
- except ImportError:
987
- raise ImportError(
988
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
989
- )
990
-
991
- optimizer_class = bnb.optim.AdamW8bit
992
- else:
993
- optimizer_class = torch.optim.AdamW
994
-
995
- # Optimizer creation
996
- params_to_optimize = (
997
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
998
- )
999
- optimizer = optimizer_class(
1000
- params_to_optimize,
1001
- lr=args.learning_rate,
1002
- betas=(args.adam_beta1, args.adam_beta2),
1003
- weight_decay=args.adam_weight_decay,
1004
- eps=args.adam_epsilon,
1005
- )
1006
-
1007
- if args.pre_compute_text_embeddings:
1008
-
1009
- def compute_text_embeddings(prompt):
1010
- with torch.no_grad():
1011
- text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=args.tokenizer_max_length)
1012
- prompt_embeds = encode_prompt(
1013
- text_encoder,
1014
- text_inputs.input_ids,
1015
- text_inputs.attention_mask,
1016
- text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
1017
- )
1018
-
1019
- return prompt_embeds
1020
-
1021
- pre_computed_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
1022
- validation_prompt_negative_prompt_embeds = compute_text_embeddings("")
1023
-
1024
- if args.validation_prompt is not None:
1025
- validation_prompt_encoder_hidden_states = compute_text_embeddings(args.validation_prompt)
1026
- else:
1027
- validation_prompt_encoder_hidden_states = None
1028
-
1029
- if args.instance_prompt is not None:
1030
- pre_computed_instance_prompt_encoder_hidden_states = compute_text_embeddings(args.instance_prompt)
1031
- else:
1032
- pre_computed_instance_prompt_encoder_hidden_states = None
1033
-
1034
- text_encoder = None
1035
- tokenizer = None
1036
-
1037
- gc.collect()
1038
- torch.cuda.empty_cache()
1039
- else:
1040
- pre_computed_encoder_hidden_states = None
1041
- validation_prompt_encoder_hidden_states = None
1042
- validation_prompt_negative_prompt_embeds = None
1043
- pre_computed_instance_prompt_encoder_hidden_states = None
1044
-
1045
- # Dataset and DataLoaders creation:
1046
- train_dataset = DreamBoothDataset(
1047
- instance_data_root=args.instance_data_dir,
1048
- instance_prompt=args.instance_prompt,
1049
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1050
- class_prompt=args.class_prompt,
1051
- class_num=args.num_class_images,
1052
- tokenizer=tokenizer,
1053
- size=args.resolution,
1054
- center_crop=args.center_crop,
1055
- encoder_hidden_states=pre_computed_encoder_hidden_states,
1056
- instance_prompt_encoder_hidden_states=pre_computed_instance_prompt_encoder_hidden_states,
1057
- tokenizer_max_length=args.tokenizer_max_length,
1058
- )
1059
-
1060
- train_dataloader = torch.utils.data.DataLoader(
1061
- train_dataset,
1062
- batch_size=args.train_batch_size,
1063
- shuffle=True,
1064
- collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1065
- num_workers=args.dataloader_num_workers,
1066
- )
1067
-
1068
- # Scheduler and math around the number of training steps.
1069
- overrode_max_train_steps = False
1070
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1071
- if args.max_train_steps is None:
1072
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1073
- overrode_max_train_steps = True
1074
-
1075
- lr_scheduler = get_scheduler(
1076
- args.lr_scheduler,
1077
- optimizer=optimizer,
1078
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1079
- num_training_steps=args.max_train_steps * accelerator.num_processes,
1080
- num_cycles=args.lr_num_cycles,
1081
- power=args.lr_power,
1082
- )
1083
-
1084
- # Prepare everything with our `accelerator`.
1085
- if args.train_text_encoder:
1086
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1087
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
1088
- )
1089
- else:
1090
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1091
- unet, optimizer, train_dataloader, lr_scheduler
1092
- )
1093
-
1094
- # For mixed precision training, we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
1095
- # as these weights are only used for inference, keeping weights in full precision is not required.
1096
- weight_dtype = torch.float32
1097
- if accelerator.mixed_precision == "fp16":
1098
- weight_dtype = torch.float16
1099
- elif accelerator.mixed_precision == "bf16":
1100
- weight_dtype = torch.bfloat16
1101
-
1102
- # Move vae and text_encoder to device and cast to weight_dtype
1103
- if vae is not None:
1104
- vae.to(accelerator.device, dtype=weight_dtype)
1105
-
1106
- if not args.train_text_encoder and text_encoder is not None:
1107
- text_encoder.to(accelerator.device, dtype=weight_dtype)
1108
-
1109
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1110
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1111
- if overrode_max_train_steps:
1112
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1113
- # Afterwards we recalculate our number of training epochs
1114
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1115
-
1116
- # We need to initialize the trackers we use, and also store our configuration.
1117
- # The trackers initialize automatically on the main process.
1118
- if accelerator.is_main_process:
1119
- tracker_config = vars(args)
1120
- tracker_config.pop("validation_images")
1121
- accelerator.init_trackers("dreambooth", config=tracker_config)
1122
-
1123
- # Train!
1124
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1125
-
1126
- logger.info("***** Running training *****")
1127
- logger.info(f" Num examples = {len(train_dataset)}")
1128
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1129
- logger.info(f" Num Epochs = {args.num_train_epochs}")
1130
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1131
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1132
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1133
- logger.info(f" Total optimization steps = {args.max_train_steps}")
1134
- global_step = 0
1135
- first_epoch = 0
1136
-
1137
- # Potentially load in the weights and states from a previous save
1138
- if args.resume_from_checkpoint:
1139
- if args.resume_from_checkpoint != "latest":
1140
- path = os.path.basename(args.resume_from_checkpoint)
1141
- else:
1142
- # Get the most recent checkpoint
1143
- dirs = os.listdir(args.output_dir)
1144
- dirs = [d for d in dirs if d.startswith("checkpoint")]
1145
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1146
- path = dirs[-1] if len(dirs) > 0 else None
1147
-
1148
- if path is None:
1149
- accelerator.print(
1150
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1151
- )
1152
- args.resume_from_checkpoint = None
1153
- else:
1154
- accelerator.print(f"Resuming from checkpoint {path}")
1155
- accelerator.load_state(os.path.join(args.output_dir, path))
1156
- global_step = int(path.split("-")[1])
1157
-
1158
- resume_global_step = global_step * args.gradient_accumulation_steps
1159
- first_epoch = global_step // num_update_steps_per_epoch
1160
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
1161
-
1162
- # Only show the progress bar once on each machine.
1163
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
1164
- progress_bar.set_description("Steps")
1165
-
1166
- for epoch in range(first_epoch, args.num_train_epochs):
1167
- unet.train()
1168
- if args.train_text_encoder:
1169
- text_encoder.train()
1170
- for step, batch in enumerate(train_dataloader):
1171
- # Skip steps until we reach the resumed step
1172
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
1173
- if step % args.gradient_accumulation_steps == 0:
1174
- progress_bar.update(1)
1175
- continue
1176
-
1177
- with accelerator.accumulate(unet):
1178
- pixel_values = batch["pixel_values"].to(dtype=weight_dtype)
1179
-
1180
- if vae is not None:
1181
- # Convert images to latent space
1182
- model_input = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
1183
- model_input = model_input * vae.config.scaling_factor
1184
- else:
1185
- model_input = pixel_values
1186
-
1187
- # Sample noise that we'll add to the model input
1188
- if args.offset_noise:
1189
- noise = torch.randn_like(model_input) + 0.1 * torch.randn(
1190
- model_input.shape[0], model_input.shape[1], 1, 1, device=model_input.device
1191
- )
1192
- else:
1193
- noise = torch.randn_like(model_input)
1194
- bsz, channels, height, width = model_input.shape
1195
- # Sample a random timestep for each image
1196
- timesteps = torch.randint(
1197
- 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1198
- )
1199
- timesteps = timesteps.long()
1200
-
1201
- # Add noise to the model input according to the noise magnitude at each timestep
1202
- # (this is the forward diffusion process)
1203
- noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1204
-
1205
- # Get the text embedding for conditioning
1206
- if args.pre_compute_text_embeddings:
1207
- encoder_hidden_states = batch["input_ids"]
1208
- else:
1209
- encoder_hidden_states = encode_prompt(
1210
- text_encoder,
1211
- batch["input_ids"],
1212
- batch["attention_mask"],
1213
- text_encoder_use_attention_mask=args.text_encoder_use_attention_mask,
1214
- )
1215
-
1216
- if accelerator.unwrap_model(unet).config.in_channels == channels * 2:
1217
- noisy_model_input = torch.cat([noisy_model_input, noisy_model_input], dim=1)
1218
-
1219
- if args.class_labels_conditioning == "timesteps":
1220
- class_labels = timesteps
1221
- else:
1222
- class_labels = None
1223
-
1224
- # Predict the noise residual
1225
- model_pred = unet(
1226
- noisy_model_input, timesteps, encoder_hidden_states, class_labels=class_labels
1227
- ).sample
1228
-
1229
- if model_pred.shape[1] == 6:
1230
- model_pred, _ = torch.chunk(model_pred, 2, dim=1)
1231
-
1232
- # Get the target for loss depending on the prediction type
1233
- if noise_scheduler.config.prediction_type == "epsilon":
1234
- target = noise
1235
- elif noise_scheduler.config.prediction_type == "v_prediction":
1236
- target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1237
- else:
1238
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1239
-
1240
- if args.with_prior_preservation:
1241
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1242
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1243
- target, target_prior = torch.chunk(target, 2, dim=0)
1244
-
1245
- # Compute instance loss
1246
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1247
-
1248
- # Compute prior loss
1249
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
1250
-
1251
- # Add the prior loss to the instance loss.
1252
- loss = loss + args.prior_loss_weight * prior_loss
1253
- else:
1254
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1255
-
1256
- accelerator.backward(loss)
1257
- if accelerator.sync_gradients:
1258
- params_to_clip = (
1259
- itertools.chain(unet.parameters(), text_encoder.parameters())
1260
- if args.train_text_encoder
1261
- else unet.parameters()
1262
- )
1263
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1264
- optimizer.step()
1265
- lr_scheduler.step()
1266
- optimizer.zero_grad(set_to_none=args.set_grads_to_none)
1267
-
1268
- # Checks if the accelerator has performed an optimization step behind the scenes
1269
- if accelerator.sync_gradients:
1270
- progress_bar.update(1)
1271
- global_step += 1
1272
-
1273
- if accelerator.is_main_process:
1274
- if global_step % args.checkpointing_steps == 0:
1275
- # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1276
- if args.checkpoints_total_limit is not None:
1277
- checkpoints = os.listdir(args.output_dir)
1278
- checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1279
- checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1280
-
1281
- # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1282
- if len(checkpoints) >= args.checkpoints_total_limit:
1283
- num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1284
- removing_checkpoints = checkpoints[0:num_to_remove]
1285
-
1286
- logger.info(
1287
- f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1288
- )
1289
- logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1290
-
1291
- for removing_checkpoint in removing_checkpoints:
1292
- removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1293
- shutil.rmtree(removing_checkpoint)
1294
-
1295
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1296
- accelerator.save_state(save_path)
1297
- logger.info(f"Saved state to {save_path}")
1298
-
1299
- images = []
1300
-
1301
- if args.validation_prompt is not None and global_step % args.validation_steps == 0:
1302
- images = log_validation(
1303
- text_encoder,
1304
- tokenizer,
1305
- unet,
1306
- vae,
1307
- args,
1308
- accelerator,
1309
- weight_dtype,
1310
- epoch,
1311
- validation_prompt_encoder_hidden_states,
1312
- validation_prompt_negative_prompt_embeds,
1313
- )
1314
-
1315
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1316
- progress_bar.set_postfix(**logs)
1317
- accelerator.log(logs, step=global_step)
1318
-
1319
- if global_step >= args.max_train_steps:
1320
- break
1321
-
1322
- # Create the pipeline using the trained modules and save it.
1323
- accelerator.wait_for_everyone()
1324
- if accelerator.is_main_process:
1325
- pipeline_args = {}
1326
-
1327
- if text_encoder is not None:
1328
- pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder)
1329
-
1330
- if args.skip_save_text_encoder:
1331
- pipeline_args["text_encoder"] = None
1332
-
1333
- pipeline = DiffusionPipeline.from_pretrained(
1334
- args.pretrained_model_name_or_path,
1335
- unet=accelerator.unwrap_model(unet),
1336
- revision=args.revision,
1337
- **pipeline_args,
1338
- )
1339
-
1340
- # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1341
- scheduler_args = {}
1342
-
1343
- if "variance_type" in pipeline.scheduler.config:
1344
- variance_type = pipeline.scheduler.config.variance_type
1345
-
1346
- if variance_type in ["learned", "learned_range"]:
1347
- variance_type = "fixed_small"
1348
-
1349
- scheduler_args["variance_type"] = variance_type
1350
-
1351
- pipeline.scheduler = pipeline.scheduler.from_config(pipeline.scheduler.config, **scheduler_args)
1352
-
1353
- pipeline.save_pretrained(args.output_dir)
1354
-
1355
- if args.push_to_hub:
1356
- save_model_card(
1357
- repo_id,
1358
- images=images,
1359
- base_model=args.pretrained_model_name_or_path,
1360
- train_text_encoder=args.train_text_encoder,
1361
- prompt=args.instance_prompt,
1362
- repo_folder=args.output_dir,
1363
- pipeline=pipeline,
1364
- )
1365
- upload_folder(
1366
- repo_id=repo_id,
1367
- folder_path=args.output_dir,
1368
- commit_message="End of training",
1369
- ignore_patterns=["step_*", "epoch_*"],
1370
- )
1371
-
1372
- accelerator.end_training()
1373
-
1374
-
1375
- if __name__ == "__main__":
1376
- args = parse_args()
1377
- main(args)
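
For orientation: `parse_args` above accepts an optional `input_args` list, so the deleted script could be driven programmatically as well as from the command line. A minimal sketch under assumed values (the base model, paths, and prompt below are illustrative placeholders, not taken from this diff):

    # Hypothetical driver; every value here is a placeholder.
    from train_dreambooth import parse_args, main  # assumed module name

    args = parse_args(
        [
            "--pretrained_model_name_or_path", "runwayml/stable-diffusion-v1-5",
            "--instance_data_dir", "./instance_images",   # hypothetical path
            "--instance_prompt", "a photo of sks dog",    # hypothetical prompt
            "--output_dir", "./dreambooth-model",         # hypothetical path
            "--train_batch_size", "1",
            "--gradient_accumulation_steps", "1",
            "--max_train_steps", "400",
        ]
    )
    main(args)

Because `--max_train_steps` is set, it overrides `--num_train_epochs`, per the help text above.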
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_attention_processor.py DELETED
@@ -1,119 +0,0 @@
1
- import tempfile
2
- import unittest
3
-
4
- import numpy as np
5
- import torch
6
-
7
- from diffusers import DiffusionPipeline
8
- from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor
9
-
10
-
11
- class AttnAddedKVProcessorTests(unittest.TestCase):
12
- def get_constructor_arguments(self, only_cross_attention: bool = False):
13
- query_dim = 10
14
-
15
- if only_cross_attention:
16
- cross_attention_dim = 12
17
- else:
18
- # when `only_cross_attention` is not set, the cross attention dim must be the same as the query dim
19
- cross_attention_dim = query_dim
20
-
21
- return {
22
- "query_dim": query_dim,
23
- "cross_attention_dim": cross_attention_dim,
24
- "heads": 2,
25
- "dim_head": 4,
26
- "added_kv_proj_dim": 6,
27
- "norm_num_groups": 1,
28
- "only_cross_attention": only_cross_attention,
29
- "processor": AttnAddedKVProcessor(),
30
- }
31
-
32
- def get_forward_arguments(self, query_dim, added_kv_proj_dim):
33
- batch_size = 2
34
-
35
- hidden_states = torch.rand(batch_size, query_dim, 3, 2)
36
- encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim)
37
- attention_mask = None
38
-
39
- return {
40
- "hidden_states": hidden_states,
41
- "encoder_hidden_states": encoder_hidden_states,
42
- "attention_mask": attention_mask,
43
- }
44
-
45
- def test_only_cross_attention(self):
46
- # self and cross attention
47
-
48
- torch.manual_seed(0)
49
-
50
- constructor_args = self.get_constructor_arguments(only_cross_attention=False)
51
- attn = Attention(**constructor_args)
52
-
53
- self.assertTrue(attn.to_k is not None)
54
- self.assertTrue(attn.to_v is not None)
55
-
56
- forward_args = self.get_forward_arguments(
57
- query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
58
- )
59
-
60
- self_and_cross_attn_out = attn(**forward_args)
61
-
62
- # only self attention
63
-
64
- torch.manual_seed(0)
65
-
66
- constructor_args = self.get_constructor_arguments(only_cross_attention=True)
67
- attn = Attention(**constructor_args)
68
-
69
- self.assertTrue(attn.to_k is None)
70
- self.assertTrue(attn.to_v is None)
71
-
72
- forward_args = self.get_forward_arguments(
73
- query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"]
74
- )
75
-
76
- only_cross_attn_out = attn(**forward_args)
77
-
78
- self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all())
79
-
80
-
81
- class DeprecatedAttentionBlockTests(unittest.TestCase):
82
- def test_conversion_when_using_device_map(self):
83
- pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
84
-
85
- pre_conversion = pipe(
86
- "foo",
87
- num_inference_steps=2,
88
- generator=torch.Generator("cpu").manual_seed(0),
89
- output_type="np",
90
- ).images
91
-
92
- # the initial conversion succeeds
93
- pipe = DiffusionPipeline.from_pretrained(
94
- "hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None
95
- )
96
-
97
- conversion = pipe(
98
- "foo",
99
- num_inference_steps=2,
100
- generator=torch.Generator("cpu").manual_seed(0),
101
- output_type="np",
102
- ).images
103
-
104
- with tempfile.TemporaryDirectory() as tmpdir:
105
- # save the converted model
106
- pipe.save_pretrained(tmpdir)
107
-
108
- # can also load the converted weights
109
- pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None)
110
-
111
- after_conversion = pipe(
112
- "foo",
113
- num_inference_steps=2,
114
- generator=torch.Generator("cpu").manual_seed(0),
115
- output_type="np",
116
- ).images
117
-
118
- self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5))
119
- self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/pascal_context.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
4
- ]
5
- model = dict(
6
- decode_head=dict(num_classes=60),
7
- auxiliary_head=dict(num_classes=60),
8
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
9
- optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
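
For reference, this config composes the files listed in `_base_` and can be loaded and inspected directly. A minimal sketch, assuming the mmcv 1.x `Config` API used by this codebase and the config path implied by the file header above:

    # Hypothetical inspection of the deleted config.
    from mmcv import Config

    cfg = Config.fromfile("configs/fcn/fcn_r50-d8_480x480_80k_pascal_context.py")
    print(cfg.model.decode_head.num_classes)  # 60 (Pascal Context)
    print(cfg.optimizer)  # SGD with lr=0.004, momentum=0.9, weight_decay=0.0001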
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/carafe.py DELETED
@@ -1,287 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from torch.autograd import Function
6
- from torch.nn.modules.module import Module
7
-
8
- from ..cnn import UPSAMPLE_LAYERS, normal_init, xavier_init
9
- from ..utils import ext_loader
10
-
11
- ext_module = ext_loader.load_ext('_ext', [
12
- 'carafe_naive_forward', 'carafe_naive_backward', 'carafe_forward',
13
- 'carafe_backward'
14
- ])
15
-
16
-
17
- class CARAFENaiveFunction(Function):
18
-
19
- @staticmethod
20
- def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
21
- return g.op(
22
- 'mmcv::MMCVCARAFENaive',
23
- features,
24
- masks,
25
- kernel_size_i=kernel_size,
26
- group_size_i=group_size,
27
- scale_factor_f=scale_factor)
28
-
29
- @staticmethod
30
- def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
31
- assert scale_factor >= 1
32
- assert masks.size(1) == kernel_size * kernel_size * group_size
33
- assert masks.size(-1) == features.size(-1) * scale_factor
34
- assert masks.size(-2) == features.size(-2) * scale_factor
35
- assert features.size(1) % group_size == 0
36
- assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1
37
- ctx.kernel_size = kernel_size
38
- ctx.group_size = group_size
39
- ctx.scale_factor = scale_factor
40
- ctx.feature_size = features.size()
41
- ctx.mask_size = masks.size()
42
-
43
- n, c, h, w = features.size()
44
- output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))
45
- ext_module.carafe_naive_forward(
46
- features,
47
- masks,
48
- output,
49
- kernel_size=kernel_size,
50
- group_size=group_size,
51
- scale_factor=scale_factor)
52
-
53
- if features.requires_grad or masks.requires_grad:
54
- ctx.save_for_backward(features, masks)
55
- return output
56
-
57
- @staticmethod
58
- def backward(ctx, grad_output):
59
- assert grad_output.is_cuda
60
-
61
- features, masks = ctx.saved_tensors
62
- kernel_size = ctx.kernel_size
63
- group_size = ctx.group_size
64
- scale_factor = ctx.scale_factor
65
-
66
- grad_input = torch.zeros_like(features)
67
- grad_masks = torch.zeros_like(masks)
68
- ext_module.carafe_naive_backward(
69
- grad_output.contiguous(),
70
- features,
71
- masks,
72
- grad_input,
73
- grad_masks,
74
- kernel_size=kernel_size,
75
- group_size=group_size,
76
- scale_factor=scale_factor)
77
-
78
- return grad_input, grad_masks, None, None, None
79
-
80
-
81
- carafe_naive = CARAFENaiveFunction.apply
82
-
83
-
84
- class CARAFENaive(Module):
85
-
86
- def __init__(self, kernel_size, group_size, scale_factor):
87
- super(CARAFENaive, self).__init__()
88
-
89
- assert isinstance(kernel_size, int) and isinstance(
90
- group_size, int) and isinstance(scale_factor, int)
91
- self.kernel_size = kernel_size
92
- self.group_size = group_size
93
- self.scale_factor = scale_factor
94
-
95
- def forward(self, features, masks):
96
- return carafe_naive(features, masks, self.kernel_size, self.group_size,
97
- self.scale_factor)
98
-
99
-
100
- class CARAFEFunction(Function):
101
-
102
- @staticmethod
103
- def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
104
- return g.op(
105
- 'mmcv::MMCVCARAFE',
106
- features,
107
- masks,
108
- kernel_size_i=kernel_size,
109
- group_size_i=group_size,
110
- scale_factor_f=scale_factor)
111
-
112
- @staticmethod
113
- def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
114
- assert scale_factor >= 1
115
- assert masks.size(1) == kernel_size * kernel_size * group_size
116
- assert masks.size(-1) == features.size(-1) * scale_factor
117
- assert masks.size(-2) == features.size(-2) * scale_factor
118
- assert features.size(1) % group_size == 0
119
- assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1
120
- ctx.kernel_size = kernel_size
121
- ctx.group_size = group_size
122
- ctx.scale_factor = scale_factor
123
- ctx.feature_size = features.size()
124
- ctx.mask_size = masks.size()
125
-
126
- n, c, h, w = features.size()
127
- output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))
128
- routput = features.new_zeros(output.size(), requires_grad=False)
129
- rfeatures = features.new_zeros(features.size(), requires_grad=False)
130
- rmasks = masks.new_zeros(masks.size(), requires_grad=False)
131
- ext_module.carafe_forward(
132
- features,
133
- masks,
134
- rfeatures,
135
- routput,
136
- rmasks,
137
- output,
138
- kernel_size=kernel_size,
139
- group_size=group_size,
140
- scale_factor=scale_factor)
141
-
142
- if features.requires_grad or masks.requires_grad:
143
- ctx.save_for_backward(features, masks, rfeatures)
144
- return output
145
-
146
- @staticmethod
147
- def backward(ctx, grad_output):
148
- assert grad_output.is_cuda
149
-
150
- features, masks, rfeatures = ctx.saved_tensors
151
- kernel_size = ctx.kernel_size
152
- group_size = ctx.group_size
153
- scale_factor = ctx.scale_factor
154
-
155
- rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
156
- rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
157
- rgrad_input = torch.zeros_like(features, requires_grad=False)
158
- rgrad_masks = torch.zeros_like(masks, requires_grad=False)
159
- grad_input = torch.zeros_like(features, requires_grad=False)
160
- grad_masks = torch.zeros_like(masks, requires_grad=False)
161
- ext_module.carafe_backward(
162
- grad_output.contiguous(),
163
- rfeatures,
164
- masks,
165
- rgrad_output,
166
- rgrad_input_hs,
167
- rgrad_input,
168
- rgrad_masks,
169
- grad_input,
170
- grad_masks,
171
- kernel_size=kernel_size,
172
- group_size=group_size,
173
- scale_factor=scale_factor)
174
- return grad_input, grad_masks, None, None, None
175
-
176
-
177
- carafe = CARAFEFunction.apply
178
-
179
-
180
- class CARAFE(Module):
181
- """ CARAFE: Content-Aware ReAssembly of FEatures
182
-
183
- Please refer to https://arxiv.org/abs/1905.02188 for more details.
184
-
185
- Args:
186
- kernel_size (int): reassemble kernel size
187
- group_size (int): reassemble group size
188
- scale_factor (int): upsample ratio
189
-
190
- Returns:
191
- upsampled feature map
192
- """
193
-
194
- def __init__(self, kernel_size, group_size, scale_factor):
195
- super(CARAFE, self).__init__()
196
-
197
- assert isinstance(kernel_size, int) and isinstance(
198
- group_size, int) and isinstance(scale_factor, int)
199
- self.kernel_size = kernel_size
200
- self.group_size = group_size
201
- self.scale_factor = scale_factor
202
-
203
- def forward(self, features, masks):
204
- return carafe(features, masks, self.kernel_size, self.group_size,
205
- self.scale_factor)
206
-
207
-
208
- @UPSAMPLE_LAYERS.register_module(name='carafe')
209
- class CARAFEPack(nn.Module):
210
- """A unified package of CARAFE upsampler that contains: 1) channel
211
- compressor 2) content encoder 3) CARAFE op.
212
-
213
- Official implementation of ICCV 2019 paper
214
- CARAFE: Content-Aware ReAssembly of FEatures
215
- Please refer to https://arxiv.org/abs/1905.02188 for more details.
216
-
217
- Args:
218
- channels (int): input feature channels
219
- scale_factor (int): upsample ratio
220
- up_kernel (int): kernel size of CARAFE op
221
- up_group (int): group size of CARAFE op
222
- encoder_kernel (int): kernel size of content encoder
223
- encoder_dilation (int): dilation of content encoder
224
- compressed_channels (int): output channels of channels compressor
225
-
226
- Returns:
227
- upsampled feature map
228
- """
229
-
230
- def __init__(self,
231
- channels,
232
- scale_factor,
233
- up_kernel=5,
234
- up_group=1,
235
- encoder_kernel=3,
236
- encoder_dilation=1,
237
- compressed_channels=64):
238
- super(CARAFEPack, self).__init__()
239
- self.channels = channels
240
- self.scale_factor = scale_factor
241
- self.up_kernel = up_kernel
242
- self.up_group = up_group
243
- self.encoder_kernel = encoder_kernel
244
- self.encoder_dilation = encoder_dilation
245
- self.compressed_channels = compressed_channels
246
- self.channel_compressor = nn.Conv2d(channels, self.compressed_channels,
247
- 1)
248
- self.content_encoder = nn.Conv2d(
249
- self.compressed_channels,
250
- self.up_kernel * self.up_kernel * self.up_group *
251
- self.scale_factor * self.scale_factor,
252
- self.encoder_kernel,
253
- padding=int((self.encoder_kernel - 1) * self.encoder_dilation / 2),
254
- dilation=self.encoder_dilation,
255
- groups=1)
256
- self.init_weights()
257
-
258
- def init_weights(self):
259
- for m in self.modules():
260
- if isinstance(m, nn.Conv2d):
261
- xavier_init(m, distribution='uniform')
262
- normal_init(self.content_encoder, std=0.001)
263
-
264
- def kernel_normalizer(self, mask):
265
- mask = F.pixel_shuffle(mask, self.scale_factor)
266
- n, mask_c, h, w = mask.size()
267
- # use float division explicitly,
268
- # to avoid inconsistency while exporting to onnx
269
- mask_channel = int(mask_c / float(self.up_kernel**2))
270
- mask = mask.view(n, mask_channel, -1, h, w)
271
-
272
- mask = F.softmax(mask, dim=2, dtype=mask.dtype)
273
- mask = mask.view(n, mask_c, h, w).contiguous()
274
-
275
- return mask
276
-
277
- def feature_reassemble(self, x, mask):
278
- x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
279
- return x
280
-
281
- def forward(self, x):
282
- compressed_x = self.channel_compressor(x)
283
- mask = self.content_encoder(compressed_x)
284
- mask = self.kernel_normalizer(mask)
285
-
286
- x = self.feature_reassemble(x, mask)
287
- return x
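
To make the module's intent concrete, here is a hypothetical usage sketch of `CARAFEPack` as a learned 2x upsampler. Shapes are chosen to satisfy the asserts above, and a CUDA build of mmcv is assumed (the backward pass asserts `grad_output.is_cuda`):

    # Hypothetical example; channels and spatial sizes are illustrative.
    import torch

    upsampler = CARAFEPack(channels=256, scale_factor=2).cuda()
    x = torch.rand(1, 256, 24, 24, device="cuda")
    y = upsampler(x)  # content encoder predicts masks, CARAFE reassembles features
    assert y.shape == (1, 256, 48, 48)  # H and W scaled by scale_factor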
spaces/Artrajz/vits-simple-api/api_test.py DELETED
@@ -1,429 +0,0 @@
1
- import json
2
- import re
3
- import requests
4
- import os
5
- import time
6
- import random
7
- import string
8
- from requests_toolbelt.multipart.encoder import MultipartEncoder
9
-
10
- absolute_path = os.path.dirname(__file__)
11
- base_url = "http://127.0.0.1:23456"
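- # Base URL of a locally running vits-simple-api server; adjust the
- # host/port if your deployment differs.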
12
-
13
-
14
- # Speaker mapping table
15
- def voice_speakers():
16
- url = f"{base_url}/voice/speakers"
17
-
18
- res = requests.post(url=url)
19
- json = res.json()
20
- for i in json:
21
- print(i)
22
- for j in json[i]:
23
- print(j)
24
- return json
25
-
26
-
27
- # Speech synthesis: voice vits
28
- def voice_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, save_audio=True,
29
- save_path=None):
30
- fields = {
31
- "text": text,
32
- "id": str(id),
33
- "format": format,
34
- "lang": lang,
35
- "length": str(length),
36
- "noise": str(noise),
37
- "noisew": str(noisew),
38
- "max": str(max)
39
- }
40
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
41
-
42
- m = MultipartEncoder(fields=fields, boundary=boundary)
43
- headers = {"Content-Type": m.content_type}
44
- url = f"{base_url}/voice/vits"
45
-
46
- res = requests.post(url=url, data=m, headers=headers)
47
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
48
- if save_path is not None:
49
- path = os.path.join(save_path, fname)
50
- else:
51
- path = os.path.join(absolute_path, fname)
52
- if save_audio:
53
- with open(path, "wb") as f:
54
- f.write(res.content)
55
- print(path)
56
- return path
57
- return None
58
-
59
-
60
- # Non-chunked streaming client: posts with streaming enabled but reads the
- # whole response body at once (a chunk-aware variant is defined below).
- def voice_vits_streaming_simple(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
61
- save_audio=True, save_path=None):
62
- fields = {
63
- "text": text,
64
- "id": str(id),
65
- "format": format,
66
- "lang": lang,
67
- "length": str(length),
68
- "noise": str(noise),
69
- "noisew": str(noisew),
70
- "max": str(max),
71
- "streaming": 'True'
72
- }
73
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
74
-
75
- m = MultipartEncoder(fields=fields, boundary=boundary)
76
- headers = {"Content-Type": m.content_type}
77
- url = f"{base_url}/voice"
78
-
79
- res = requests.post(url=url, data=m, headers=headers)
80
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
81
- if save_path is not None:
82
- path = os.path.join(save_path, fname)
83
- else:
84
- path = os.path.join(absolute_path, fname)
85
- if save_audio:
86
- with open(path, "wb") as f:
87
- f.write(res.content)
88
- print(path)
89
- return path
90
- return None
91
-
92
-
93
- def voice_vits_streaming(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
94
- save_path=None):
95
- fields = {
96
- "text": text,
97
- "id": str(id),
98
- "format": format,
99
- "lang": lang,
100
- "length": str(length),
101
- "noise": str(noise),
102
- "noisew": str(noisew),
103
- "max": str(max),
104
- "streaming": 'True'
105
- }
106
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
107
-
108
- m = MultipartEncoder(fields=fields, boundary=boundary)
109
- headers = {"Content-Type": m.content_type}
110
- url = f"{base_url}/voice"
111
-
112
- res = requests.post(url=url, data=m, headers=headers, stream=True)
113
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
114
- if save_path is not None:
115
- path = os.path.join(save_path, fname)
116
- else:
117
- path = os.path.join(absolute_path, fname)
118
- # NOTE: res.content must not be read here, or the response stream would be consumed before iter_content below.
119
-
120
- def get_file_size_from_bytes(byte_data):
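- # A WAV file begins with "RIFF" (4 bytes) followed by a little-endian
- # 4-byte chunk size that excludes those 8 header bytes, hence the +8.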
121
- file_size_offset = 4
122
- file_size_length = 4
123
-
124
- try:
125
- file_size_bytes = byte_data[file_size_offset:file_size_offset + file_size_length]
126
- file_size = int.from_bytes(file_size_bytes, byteorder='little')
127
- return file_size + 8
128
- except IndexError:
129
- return None
130
-
131
- audio = None
132
- p = 0
133
- audio_size = None
134
- audios = []
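- # The streaming endpoint returns several complete WAV files back to back;
- # split them apart by parsing each RIFF header's declared size on the fly.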
135
-
136
- for chunk in res.iter_content(chunk_size=1024):
137
- if audio is None:
138
- audio = chunk
139
- else:
140
- audio += chunk
141
-
142
- p += len(chunk)
143
- if audio_size is not None:
144
- if p >= audio_size:
145
- p = p - audio_size
146
- audios.append(audio[:audio_size])
147
- audio = audio[audio_size:]
148
- audio_size = get_file_size_from_bytes(audio)
149
- else:
150
- audio_size = get_file_size_from_bytes(audio)
151
- for i, audio in enumerate(audios):
152
- with open(f"{path[:-4]}-{i}.wav", "wb") as f:
153
- f.write(audio)
154
-
155
- print(f"{path[:-4]}-{i}.wav")
156
- return path
157
-
158
-
159
- # Voice conversion: hubert-vits
160
- def voice_hubert_vits(upload_path, id, format="wav", length=1, noise=0.667, noisew=0.8, save_audio=True,
161
- save_path=None):
162
- upload_name = os.path.basename(upload_path)
163
- upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
164
-
165
- with open(upload_path, 'rb') as upload_file:
166
- fields = {
167
- "upload": (upload_name, upload_file, upload_type),
168
- "id": str(id),
169
- "format": format,
170
- "length": str(length),
171
- "noise": str(noise),
172
- "noisew": str(noisew),
173
- }
174
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
175
-
176
- m = MultipartEncoder(fields=fields, boundary=boundary)
177
- headers = {"Content-Type": m.content_type}
178
- url = f"{base_url}/voice/hubert-vits"
179
-
180
- res = requests.post(url=url, data=m, headers=headers)
181
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
182
- if save_path is not None:
183
- path = os.path.join(save_path, fname)
184
- else:
185
- path = os.path.join(absolute_path, fname)
186
- if save_audio:
187
- with open(path, "wb") as f:
188
- f.write(res.content)
189
- print(path)
190
- return path
191
- return None
192
-
193
-
194
- # Dimensional emotion model: w2v2-vits
195
- def voice_w2v2_vits(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, emotion=0,
196
- save_audio=True, save_path=None):
197
- fields = {
198
- "text": text,
199
- "id": str(id),
200
- "format": format,
201
- "lang": lang,
202
- "length": str(length),
203
- "noise": str(noise),
204
- "noisew": str(noisew),
205
- "max": str(max),
206
- "emotion": str(emotion)
207
- }
208
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
209
-
210
- m = MultipartEncoder(fields=fields, boundary=boundary)
211
- headers = {"Content-Type": m.content_type}
212
- url = f"{base_url}/voice/w2v2-vits"
213
-
214
- res = requests.post(url=url, data=m, headers=headers)
215
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
216
- if save_path is not None:
217
- path = os.path.join(save_path, fname)
218
- else:
219
- path = os.path.join(absolute_path, fname)
220
- if save_audio:
221
- with open(path, "wb") as f:
222
- f.write(res.content)
223
- print(path)
224
- return path
225
- return None
226
-
227
-
228
- # Voice conversion: timbre conversion between speakers of the same VITS model
229
- def voice_conversion(upload_path, original_id, target_id, save_audio=True, save_path=None):
230
- upload_name = os.path.basename(upload_path)
231
- upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
232
-
233
- with open(upload_path, 'rb') as upload_file:
234
- fields = {
235
- "upload": (upload_name, upload_file, upload_type),
236
- "original_id": str(original_id),
237
- "target_id": str(target_id),
238
- }
239
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
240
- m = MultipartEncoder(fields=fields, boundary=boundary)
241
-
242
- headers = {"Content-Type": m.content_type}
243
- url = f"{base_url}/voice/conversion"
244
-
245
- res = requests.post(url=url, data=m, headers=headers)
246
-
247
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
248
- if save_path is not None:
249
- path = os.path.join(save_path, fname)
250
- else:
251
- path = os.path.join(absolute_path, fname)
252
-
253
- if save_audio:
254
- with open(path, "wb") as f:
255
- f.write(res.content)
256
- print(path)
257
- return path
258
- return None
259
-
260
-
261
- def voice_ssml(ssml, save_audio=True, save_path=None):
262
- fields = {
263
- "ssml": ssml,
264
- }
265
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
266
-
267
- m = MultipartEncoder(fields=fields, boundary=boundary)
268
- headers = {"Content-Type": m.content_type}
269
- url = f"{base_url}/voice/ssml"
270
-
271
- res = requests.post(url=url, data=m, headers=headers)
272
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
273
- if save_path is not None:
274
- path = os.path.join(save_path, fname)
275
- else:
276
- path = os.path.join(absolute_path, fname)
277
-
278
- if save_audio:
279
- with open(path, "wb") as f:
280
- f.write(res.content)
281
- print(path)
282
- return path
283
- return None
284
-
285
-
286
- def voice_dimensional_emotion(upload_path, save_audio=True,
287
- save_path=None):
288
- upload_name = os.path.basename(upload_path)
289
- upload_type = f'audio/{upload_name.split(".")[1]}' # wav,ogg
290
-
291
- with open(upload_path, 'rb') as upload_file:
292
- fields = {
293
- "upload": (upload_name, upload_file, upload_type),
294
- }
295
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
296
-
297
- m = MultipartEncoder(fields=fields, boundary=boundary)
298
- headers = {"Content-Type": m.content_type}
299
- url = f"{base_url}/voice/dimension-emotion"
300
-
301
- res = requests.post(url=url, data=m, headers=headers)
302
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
303
- if save_path is not None:
304
- path = os.path.join(save_path, fname)
305
- else:
306
- path = os.path.join(absolute_path, fname)
307
- if save_audio:
308
- with open(path, "wb") as f:
309
- f.write(res.content)
310
- print(path)
311
- return path
312
- return None
313
-
314
-
315
- def vits_json(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50,
316
- save_path=None):
317
- fields = {
318
- "text": text,
319
- "id": str(id),
320
- "format": format,
321
- "lang": lang,
322
- "length": str(length),
323
- "noise": str(noise),
324
- "noisew": str(noisew),
325
- "max": str(max)
326
- }
327
- f = json.dumps(fields)
328
- url = f"{base_url}/voice"
329
- header = {"Content-Type": 'application/json'}
330
- res = requests.post(url=url, data=f, headers=header)
331
-
332
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
333
- if save_path is not None:
334
- path = os.path.join(save_path, fname)
335
- else:
336
- path = os.path.join(absolute_path, fname)
337
-
338
- with open(path, "wb") as f:
339
- f.write(res.content)
340
- print(path)
341
- return path
342
-
343
-
344
- # Bert_vits2
345
- def voice_bert_vits2(text, id=0, format="wav", lang="auto", length=1, noise=0.667, noisew=0.8, max=50, sdp_ratio=0.2,
346
- save_audio=True, save_path=None):
347
- fields = {
348
- "text": text,
349
- "id": str(id),
350
- "format": format,
351
- "lang": lang,
352
- "length": str(length),
353
- "noise": str(noise),
354
- "noisew": str(noisew),
355
- "max": str(max),
356
- "sdp_ratio": str(sdp_ratio)
357
- }
358
- boundary = '----VoiceConversionFormBoundary' + ''.join(random.sample(string.ascii_letters + string.digits, 16))
359
-
360
- m = MultipartEncoder(fields=fields, boundary=boundary)
361
- headers = {"Content-Type": m.content_type}
362
- url = f"{base_url}/voice/bert-vits2"
363
-
364
- res = requests.post(url=url, data=m, headers=headers)
365
- fname = re.findall("filename=(.+)", res.headers["Content-Disposition"])[0]
366
- if save_path is not None:
367
- path = os.path.join(save_path, fname)
368
- else:
369
- path = os.path.join(absolute_path, fname)
370
- if save_audio:
371
- with open(path, "wb") as f:
372
- f.write(res.content)
373
- print(path)
374
- return path
375
- return None
376
-
377
-
378
- def test_interface(text):
379
- error_num = 0
380
- for i in range(100):
381
- try:
382
- time.sleep(1)
383
- t1 = time.time()
384
- voice_vits(text, format="wav", lang="zh", save_audio=False)
385
- t2 = time.time()
386
- print(f"{i}: len: {len(text)} elapsed: {t2 - t1}")
387
- except Exception as e:
388
- error_num += 1
389
- print(e)
390
- print(f"error_num={error_num}")
391
-
392
-
393
- if __name__ == '__main__':
394
- text = "你好,こんにちは"
395
-
396
- ssml = """
397
- <speak lang="zh" format="mp3" length="1.2">
398
- <voice id="92" >这几天心里颇不宁静。</voice>
399
- <voice id="0" model_type="Bert-VITS2">今晚在院子里坐着乘凉,忽然想起日日走过的荷塘,在这满月的光里,总该另有一番样子吧。</voice>
400
- <voice id="142">月亮渐渐地升高了,墙外马路上孩子们的欢笑,已经听不见了;</voice>
401
- <voice id="0" model_type="Bert-VITS2">妻在屋里拍着闰儿,迷迷糊糊地哼着眠歌。</voice>
402
- <voice id="120">我悄悄地披了大衫,带上门出去。</voice><break time="2s"/>
403
- <voice id="121">沿着荷塘,是一条曲折的小煤屑路。</voice>
404
- <voice id="122">这是一条幽僻的路;白天也少人走,夜晚更加寂寞。</voice>
405
- <voice id="123">荷塘四面,长着许多树,蓊蓊郁郁的。</voice>
406
- <voice id="124">路的一旁,是些杨柳,和一些不知道名字的树。</voice>
407
- <voice id="125">没有月光的晚上,这路上阴森森的,有些怕人。</voice>
408
- <voice id="126">今晚却很好,虽然月光也还是淡淡的。</voice><break time="2s"/>
409
- <voice id="127">路上只我一个人,背着手踱着。</voice>
410
- <voice id="128">这一片天地好像是我的;我也像超出了平常的自己,到了另一个世界里。</voice>
411
- <voice id="129">我爱热闹,也爱冷静;<break strength="x-weak"/>爱群居,也爱独处。</voice>
412
- <voice id="130">像今晚上,一个人在这苍茫的月下,什么都可以想,什么都可以不想,便觉是个自由的人。</voice>
413
- <voice id="131">白天里一定要做的事,一定要说的话,现在都可不理。</voice>
414
- <voice id="132">这是独处的妙处,我且受用这无边的荷香月色好了。</voice>
415
- </speak>
416
- """
417
-
418
- from config import CACHE_PATH
419
-
420
- path = voice_vits(text, save_path=CACHE_PATH)
421
- voice_vits_streaming(text, save_path=CACHE_PATH)
422
- voice_w2v2_vits(text, save_path=CACHE_PATH)
423
- voice_conversion(path, 1, 3, save_path=CACHE_PATH)
424
- voice_hubert_vits(path, 0, save_path=CACHE_PATH)
425
- voice_dimensional_emotion(path, save_path=CACHE_PATH)
426
- voice_ssml(ssml, save_path=CACHE_PATH)
427
- voice_bert_vits2("你好", lang="zh", save_path=CACHE_PATH)
428
- voice_bert_vits2("こんにちは", lang="ja", save_path=CACHE_PATH)
429
- # os.system(path)
 
spaces/AsakuraMizu/moe-tts/commons.py DELETED
@@ -1,172 +0,0 @@
1
- import math
2
- import torch
3
- from torch.nn import functional as F
4
- import torch.jit
5
-
6
-
7
- def script_method(fn, _rcb=None):
8
- return fn
9
-
10
-
11
- def script(obj, optimize=True, _frames_up=0, _rcb=None):
12
- return obj
13
-
14
-
15
- torch.jit.script_method = script_method
16
- torch.jit.script = script
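- # The two no-op stubs above disable TorchScript compilation, so functions
- # decorated with @torch.jit.script below run as plain eager-mode Python.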
17
-
18
-
19
- def init_weights(m, mean=0.0, std=0.01):
20
- classname = m.__class__.__name__
21
- if classname.find("Conv") != -1:
22
- m.weight.data.normal_(mean, std)
23
-
24
-
25
- def get_padding(kernel_size, dilation=1):
26
- return int((kernel_size*dilation - dilation)/2)
27
-
28
-
29
- def convert_pad_shape(pad_shape):
30
- l = pad_shape[::-1]
31
- pad_shape = [item for sublist in l for item in sublist]
32
- return pad_shape
33
-
34
-
35
- def intersperse(lst, item):
36
- result = [item] * (len(lst) * 2 + 1)
37
- result[1::2] = lst
38
- return result
39
-
40
-
41
- def kl_divergence(m_p, logs_p, m_q, logs_q):
42
- """KL(P||Q)"""
43
- kl = (logs_q - logs_p) - 0.5
44
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
45
- return kl
46
-
47
-
48
- def rand_gumbel(shape):
49
- """Sample from the Gumbel distribution, protect from overflows."""
50
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
51
- return -torch.log(-torch.log(uniform_samples))
52
-
53
-
54
- def rand_gumbel_like(x):
55
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
56
- return g
57
-
58
-
59
- def slice_segments(x, ids_str, segment_size=4):
60
- ret = torch.zeros_like(x[:, :, :segment_size])
61
- for i in range(x.size(0)):
62
- idx_str = ids_str[i]
63
- idx_end = idx_str + segment_size
64
- ret[i] = x[i, :, idx_str:idx_end]
65
- return ret
66
-
67
-
68
- def rand_slice_segments(x, x_lengths=None, segment_size=4):
69
- b, d, t = x.size()
70
- if x_lengths is None:
71
- x_lengths = t
72
- ids_str_max = x_lengths - segment_size + 1
73
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
74
- ret = slice_segments(x, ids_str, segment_size)
75
- return ret, ids_str
76
-
77
-
78
- def get_timing_signal_1d(
79
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
80
- position = torch.arange(length, dtype=torch.float)
81
- num_timescales = channels // 2
82
- log_timescale_increment = (
83
- math.log(float(max_timescale) / float(min_timescale)) /
84
- (num_timescales - 1))
85
- inv_timescales = min_timescale * torch.exp(
86
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
87
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
88
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
89
- signal = F.pad(signal, [0, 0, 0, channels % 2])
90
- signal = signal.view(1, channels, length)
91
- return signal
92
-
93
-
94
- def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
95
- b, channels, length = x.size()
96
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
97
- return x + signal.to(dtype=x.dtype, device=x.device)
98
-
99
-
100
- def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
101
- b, channels, length = x.size()
102
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
103
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
104
-
105
-
106
- def subsequent_mask(length):
107
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
108
- return mask
109
-
110
-
111
- @torch.jit.script
112
- def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
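- # WaveNet-style gated activation: tanh over the first n_channels,
- # sigmoid over the remaining channels, multiplied element-wise.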
113
- n_channels_int = n_channels[0]
114
- in_act = input_a + input_b
115
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
116
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
117
- acts = t_act * s_act
118
- return acts
119
-
120
-
126
-
127
- def shift_1d(x):
128
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
129
- return x
130
-
131
-
132
- def sequence_mask(length, max_length=None):
133
- if max_length is None:
134
- max_length = length.max()
135
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
136
- return x.unsqueeze(0) < length.unsqueeze(1)
137
-
138
-
139
- def generate_path(duration, mask):
140
- """
141
- duration: [b, 1, t_x]
142
- mask: [b, 1, t_y, t_x]
143
- """
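- # Each output frame y is assigned to input token x while
- # y < cumsum(duration)[x], yielding a monotonic hard alignment path.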
144
- device = duration.device
145
-
146
- b, _, t_y, t_x = mask.shape
147
- cum_duration = torch.cumsum(duration, -1)
148
-
149
- cum_duration_flat = cum_duration.view(b * t_x)
150
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
151
- path = path.view(b, t_x, t_y)
152
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
153
- path = path.unsqueeze(1).transpose(2,3) * mask
154
- return path
155
-
156
-
157
- def clip_grad_value_(parameters, clip_value, norm_type=2):
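- # Clamps gradients element-wise to [-clip_value, clip_value] and returns
- # the total gradient norm (computed before clamping), useful for logging.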
158
- if isinstance(parameters, torch.Tensor):
159
- parameters = [parameters]
160
- parameters = list(filter(lambda p: p.grad is not None, parameters))
161
- norm_type = float(norm_type)
162
- if clip_value is not None:
163
- clip_value = float(clip_value)
164
-
165
- total_norm = 0
166
- for p in parameters:
167
- param_norm = p.grad.data.norm(norm_type)
168
- total_norm += param_norm.item() ** norm_type
169
- if clip_value is not None:
170
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
171
- total_norm = total_norm ** (1. / norm_type)
172
- return total_norm
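A small illustration of the masking helper above (not from the original file):

    import torch

    lengths = torch.tensor([3, 5])
    mask = sequence_mask(lengths)  # shape (2, 5), True where t < length
    # tensor([[ True,  True,  True, False, False],
    #         [ True,  True,  True,  True,  True]])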
 
spaces/Ashrafb/Imdf2/app.py DELETED
@@ -1,75 +0,0 @@
1
- import streamlit as st
2
- from io import BytesIO
3
- import base64
4
- import os
5
- from replicate import Client
6
- from PIL import Image
7
-
8
- illuse = Client(api_token=os.getenv('REPLICATE'))
9
- model_name = "andreasjansson/illusion:75d51a73fce3c00de31ed9ab4358c73e8fc0f627dc8ce975818e653317cb919b"
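- # Replicate model identifier in the form "owner/model:version_hash".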
10
- example_image = "https://replicate.delivery/pbxt/hHJNV9QteKX8DK2ckkUeXsqbEIKNGFXU1fN0MJoizz3iPlOjA/output-0.png"
11
-
12
- def generate(prompt, negative_prompt, qr_content, pattern_image, num_inference_steps, guidance_scale, width, height, seed, num_outputs, controlnet_conditioning_scale, border, qrcode_background):
13
- try:
14
- inputs = {
15
- 'prompt': prompt,
16
- 'negative_prompt': negative_prompt,
17
- 'qr_code_content': qr_content,
18
- 'num_inference_steps': num_inference_steps,
19
- 'guidance_scale': guidance_scale,
20
- 'width': width,
21
- 'height': height,
22
- 'seed': seed,
23
- 'num_outputs': num_outputs,
24
- 'controlnet_conditioning_scale': controlnet_conditioning_scale,
25
- 'border': border,
26
- 'qrcode_background': qrcode_background
27
- }
28
- if pattern_image is not None:
29
- image = Image.open(pattern_image)
30
- image_bytes = BytesIO()
31
- image.save(image_bytes, format='PNG')
32
- image_bytes.seek(0)  # rewind the buffer so the full PNG is uploaded
- inputs['image'] = image_bytes
33
-
34
- result_uris = illuse.run(
35
- model_name,
36
- input=inputs
37
- )
38
-
39
- return result_uris
40
-
41
- except Exception as e:
42
- print(e)
43
- st.error(str(e))
44
- return
45
-
46
- st.title("Illusion Diffusion by Aiconvert.online")
47
- st.markdown('<style>h1{color: #191970; text-align: center;}</style>', unsafe_allow_html=True)
48
-
49
- prompt = st.text_input("Prompt")
50
- negative_prompt = st.text_input("Negative")
51
-
52
- qr_content = st.text_input("QR Code Content", "https://youtube.com/")
53
- pattern_input = st.file_uploader("Pattern Image (if used, QR Code Content won't be used)", type=["jpg", "png", "jpeg"])
54
-
55
- st.sidebar.markdown("## Advanced Settings")
56
-
57
- with st.sidebar.expander("Advanced Settings", expanded=True):
58
- num_inference_steps = st.slider("num_inference_steps", min_value=20, max_value=100, step=1, value=42)
59
- guidance_scale = st.slider("guidance_scale", min_value=0.1, max_value=30.0, step=0.01, value=14.5)
60
- width = st.slider("width", min_value=128, max_value=1024, step=8, value=768)
61
- height = st.slider("height", min_value=128, max_value=1024, step=8, value=768)
62
- seed = st.number_input("seed", value=-1)
63
- num_outputs = st.slider("num_outputs", min_value=1, max_value=4, step=1, value=1)
64
- controlnet_conditioning_scale = st.slider("controlnet_conditioning_scale", min_value=0, max_value=4, step=1, value=1)
65
- border = st.slider("border", min_value=0, max_value=4, step=1, value=4)
66
- qrcode_background = st.selectbox("qrcode_background", options=['gray', 'white'], index=1)
67
-
68
- if st.button("Generate"):
69
- with st.spinner("Running..."):
70
- result_uris = generate(prompt, negative_prompt, qr_content, pattern_input, num_inference_steps, guidance_scale, width, height, seed, num_outputs, controlnet_conditioning_scale, border, qrcode_background)
71
- for uri in (result_uris or []):  # generate() returns None on failure
72
- st.image(uri)
73
-
74
- st.image(example_image, caption='Example Image', use_column_width=True)
75
- st.markdown("powered with ❤️ by Aiconvert.online")
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/prepare_ade20k_sem_seg.py DELETED
@@ -1,26 +0,0 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
- # Copyright (c) Facebook, Inc. and its affiliates.
4
- import numpy as np
5
- import os
6
- from pathlib import Path
7
- import tqdm
8
- from PIL import Image
9
-
10
-
11
- def convert(input, output):
12
- img = np.asarray(Image.open(input))
13
- assert img.dtype == np.uint8
14
- img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1
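- # uint8 arithmetic wraps around, so label 0 underflows to 255, the
- # ignore index expected by Detectron2's semantic segmentation tools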
15
- Image.fromarray(img).save(output)
16
-
17
-
18
- if __name__ == "__main__":
19
- dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
20
- for name in ["training", "validation"]:
21
- annotation_dir = dataset_dir / "annotations" / name
22
- output_dir = dataset_dir / "annotations_detectron2" / name
23
- output_dir.mkdir(parents=True, exist_ok=True)
24
- for file in tqdm.tqdm(list(annotation_dir.iterdir())):
25
- output_file = output_dir / file.name
26
- convert(file, output_file)
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/custom_dataset_dataloader.py DELETED
@@ -1,229 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import copy
3
- import logging
4
- import numpy as np
5
- import operator
6
- import torch
7
- import torch.utils.data
8
- import json
9
- from detectron2.utils.comm import get_world_size
10
-
11
- from detectron2.data import samplers
12
- from torch.utils.data.sampler import BatchSampler, Sampler
13
- from detectron2.data.common import DatasetFromList, MapDataset
14
- from detectron2.data.dataset_mapper import DatasetMapper
15
- from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
16
- from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
17
- from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
18
- from detectron2.data.build import filter_images_with_only_crowd_annotations
19
- from detectron2.data.build import filter_images_with_few_keypoints
20
- from detectron2.data.build import check_metadata_consistency
21
- from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
22
- from detectron2.utils import comm
23
- import itertools
24
- import math
25
- from collections import defaultdict
26
- from typing import Optional
27
-
28
- # from .custom_build_augmentation import build_custom_augmentation
29
-
30
- def build_custom_train_loader(cfg, mapper=None):
31
- """
32
- Modified from detectron2.data.build.build_detection_train_loader, but supports
33
- different samplers
34
- """
35
- source_aware = cfg.DATALOADER.SOURCE_AWARE
36
- if source_aware:
37
- dataset_dicts = get_detection_dataset_dicts_with_source(
38
- cfg.DATASETS.TRAIN,
39
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
40
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
41
- if cfg.MODEL.KEYPOINT_ON
42
- else 0,
43
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
44
- )
45
- sizes = [0 for _ in range(len(cfg.DATASETS.TRAIN))]
46
- for d in dataset_dicts:
47
- sizes[d['dataset_source']] += 1
48
- print('dataset sizes', sizes)
49
- else:
50
- dataset_dicts = get_detection_dataset_dicts(
51
- cfg.DATASETS.TRAIN,
52
- filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
53
- min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
54
- if cfg.MODEL.KEYPOINT_ON
55
- else 0,
56
- proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
57
- )
58
- dataset = DatasetFromList(dataset_dicts, copy=False)
59
-
60
- if mapper is None:
61
- assert 0
62
- # mapper = DatasetMapper(cfg, True)
63
- dataset = MapDataset(dataset, mapper)
64
-
65
- sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
66
- logger = logging.getLogger(__name__)
67
- logger.info("Using training sampler {}".format(sampler_name))
68
- # TODO avoid if-else?
69
- if sampler_name == "TrainingSampler":
70
- sampler = TrainingSampler(len(dataset))
71
- elif sampler_name == "MultiDatasetSampler":
72
- assert source_aware
73
- sampler = MultiDatasetSampler(cfg, sizes, dataset_dicts)
74
- elif sampler_name == "RepeatFactorTrainingSampler":
75
- repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
76
- dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
77
- )
78
- sampler = RepeatFactorTrainingSampler(repeat_factors)
79
- elif sampler_name == "ClassAwareSampler":
80
- sampler = ClassAwareSampler(dataset_dicts)
81
- else:
82
- raise ValueError("Unknown training sampler: {}".format(sampler_name))
83
-
84
- return build_batch_data_loader(
85
- dataset,
86
- sampler,
87
- cfg.SOLVER.IMS_PER_BATCH,
88
- aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
89
- num_workers=cfg.DATALOADER.NUM_WORKERS,
90
- )
91
-
92
-
93
- class ClassAwareSampler(Sampler):
94
- def __init__(self, dataset_dicts, seed: Optional[int] = None):
95
- """
96
- Args:
97
- dataset_dicts (list[dict]): dataset dicts used to derive per-class sampling weights
98
- seed (int): the initial seed of the shuffle. Must be the same
99
- across all workers. If None, will use a random seed shared
100
- among workers (require synchronization among all workers).
101
- """
102
- self._size = len(dataset_dicts)
103
- assert self._size > 0
104
- if seed is None:
105
- seed = comm.shared_random_seed()
106
- self._seed = int(seed)
107
-
108
- self._rank = comm.get_rank()
109
- self._world_size = comm.get_world_size()
110
- self.weights = self._get_class_balance_factor(dataset_dicts)
111
-
112
-
113
- def __iter__(self):
114
- start = self._rank
115
- yield from itertools.islice(
116
- self._infinite_indices(), start, None, self._world_size)
117
-
118
-
119
- def _infinite_indices(self):
120
- g = torch.Generator()
121
- g.manual_seed(self._seed)
122
- while True:
123
- ids = torch.multinomial(
124
- self.weights, self._size, generator=g,
125
- replacement=True)
126
- yield from ids
127
-
128
-
129
- def _get_class_balance_factor(self, dataset_dicts, l=1.):
130
- # 1. For each category c, count the number of images that contain it: f(c)
131
- ret = []
132
- category_freq = defaultdict(int)
133
- for dataset_dict in dataset_dicts: # For each image (without repeats)
134
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
135
- for cat_id in cat_ids:
136
- category_freq[cat_id] += 1
137
- for i, dataset_dict in enumerate(dataset_dicts):
138
- cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
139
- ret.append(sum(
140
- [1. / (category_freq[cat_id] ** l) for cat_id in cat_ids]))
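- # An image's weight is the sum of inverse frequencies of the categories
- # it contains, so images featuring rare classes are sampled more often.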
141
- return torch.tensor(ret).float()
142
-
143
-
144
- def get_detection_dataset_dicts_with_source(
145
- dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
146
- ):
147
- assert len(dataset_names)
148
- dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
149
- for dataset_name, dicts in zip(dataset_names, dataset_dicts):
150
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
151
-
152
- for source_id, (dataset_name, dicts) in \
153
- enumerate(zip(dataset_names, dataset_dicts)):
154
- assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
155
- for d in dicts:
156
- d['dataset_source'] = source_id
157
-
158
- if "annotations" in dicts[0]:
159
- try:
160
- class_names = MetadataCatalog.get(dataset_name).thing_classes
161
- check_metadata_consistency("thing_classes", dataset_name)
162
- print_instances_class_histogram(dicts, class_names)
163
- except AttributeError: # class names are not available for this dataset
164
- pass
165
-
166
- assert proposal_files is None
167
-
168
- dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
169
-
170
- has_instances = "annotations" in dataset_dicts[0]
171
- if filter_empty and has_instances:
172
- dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
173
- if min_keypoints > 0 and has_instances:
174
- dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
175
-
176
- return dataset_dicts
177
-
178
- class MultiDatasetSampler(Sampler):
179
- def __init__(self, cfg, sizes, dataset_dicts, seed: Optional[int] = None):
180
- """
181
- Args:
182
- sizes (list[int]): the number of images in each source dataset
183
- seed (int): the initial seed of the shuffle. Must be the same
184
- across all workers. If None, will use a random seed shared
185
- among workers (require synchronization among all workers).
186
- """
187
- self.sizes = sizes
188
- dataset_ratio = cfg.DATALOADER.DATASET_RATIO
189
- self._batch_size = cfg.SOLVER.IMS_PER_BATCH
190
- assert len(dataset_ratio) == len(sizes), \
191
- 'length of dataset ratio {} should be equal to the number of datasets {}'.format(
192
- len(dataset_ratio), len(sizes)
193
- )
194
- if seed is None:
195
- seed = comm.shared_random_seed()
196
- self._seed = int(seed)
197
- self._rank = comm.get_rank()
198
- self._world_size = comm.get_world_size()
199
-
200
- self._ims_per_gpu = self._batch_size // self._world_size
201
- self.dataset_ids = torch.tensor(
202
- [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
203
-
204
- dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
205
- for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
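- # Each image in dataset i gets weight max(sizes)/s_i * r_i/sum(r), so the
- # expected share of drawn samples follows DATALOADER.DATASET_RATIO
- # regardless of the raw dataset sizes.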
206
- dataset_weight = torch.cat(dataset_weight)
207
- self.weights = dataset_weight
208
- self.sample_epoch_size = len(self.weights)
209
-
210
- def __iter__(self):
211
- start = self._rank
212
- yield from itertools.islice(
213
- self._infinite_indices(), start, None, self._world_size)
214
-
215
-
216
- def _infinite_indices(self):
217
- g = torch.Generator()
218
- g.manual_seed(self._seed)
219
- while True:
220
- ids = torch.multinomial(
221
- self.weights, self.sample_epoch_size, generator=g,
222
- replacement=True)
223
- nums = [(self.dataset_ids[ids] == i).sum().int().item() \
224
- for i in range(len(self.sizes))]
225
- print('_rank, len, nums', self._rank, len(ids), nums, flush=True)
226
- # print('_rank, len, nums, self.dataset_ids[ids[:10]], ',
227
- # self._rank, len(ids), nums, self.dataset_ids[ids[:10]],
228
- # flush=True)
229
- yield from ids
 
spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Apk Hz Hilesi.md DELETED
@@ -1,63 +0,0 @@
1
-
2
- <p>Car Parking Multiplayer is a remarkable game with many features. Here are some features of the game: </p>
3
- <h2>car parking multiplayer apk speed trick</h2><br /><p><b><b>Download Zip</b> - <a href="https://bltlly.com/2v6JEI">https://bltlly.com/2v6JEI</a></b></p><br /><br />
4
- <ul>
5
- <li><b> Vehicle Customization: </b> There are more than 100 vehicles in the game. You can customize these tools as you like, add accessories such as color, rim, glass film, plate, exhaust, spoiler, neon light. </li>
6
- <li><b>Open World: </b> There are 16 different maps in the game. You can freely navigate these maps, explore various locations, comply with traffic rules or not. </li>
7
- <li><b> Multiplayer: </b> You can play with other players online in the game. You can park, chat, be a police or a thief with friends or random players. </li>
8
- <li><b>Police Mode: </b> You can be a police officer or escape from the police. In police mode, you try to catch players who exceed speed limits or violate traffic rules. In leak mode, you try to get rid of the police. </li>
9
- <li><b>Rol Making Mode: </b> There is a role-playing mode that simulates real life in the game. In this mode, you can choose a character you want, get a profession, buy a house, buy a car, buy gasoline, eat, sleep and interact with other players. </li>
10
- </ul>
11
- <h3> Game Modes </h3>
12
- <p>Car Parking Multiplayer offers many game modes that appeal to different tastes and skills. Here are some modes of the game: </p>
13
- <ul>
14
- <li><b>Park Mode: </b> It is the basic mode of the game. In this mode, there are more than 150 parking tasks with different difficulty levels. To complete these tasks, you must park your vehicle without damaging the designated area. </li>
15
- <li><b> Race Mode: </b> It is one of the most exciting modes of the game. In this mode, you can race parking with other players or against artificial intelligence. In races, you need to park as soon as possible and pass your opponents. </li>
16
-
17
- <li><b>Career Mode: </b> It is one of the longest running modes of the game. In this mode, you can build your own car parking empire. In career mode, you can buy parking spaces in different cities, earn income and grow your business. </li>
18
- </ul>
19
- <h2>How to Use the Car Parking Multiplayer APK Speed Trick?</h2>
20
- <li> After entering the speed trick value, click the save button and exit the settings menu. </li>
21
- </ol>
22
- <p> You can now start playing the game with the speed trick. The speed trick works in every mode of the game. If you wish, you can turn the speed trick on or off at any time or change its value. </p>
23
- <h2>Car Parking Multiplayer What are the APK Speed Cheat Advantages and Disadvantages?</h2>
24
- <p>Using the Car Parking Multiplayer APK speed trick has both advantages and disadvantages. In this section, we will list them. </p>
25
- <p></p>
26
- <h3> Advantages </h3>
27
- <p>Some advantages of using the speed trick are: </p>
28
- <ul>
29
- <li><b>Faster Game: </b> Thanks to the speed trick, you can increase the speed of your vehicle and play the game faster. So you can have more fun, earn more points and skip more levels. </li>
30
- <li><b>More Fun Game: </b> Thanks to the speed trick, you can skip the boring or difficult parts of the game and focus on more fun parts. For example, you can easily complete parking tasks, leave your opponents behind in races, experience excitement while escaping from the police, or hang out as you like in role-playing mode. </li>
31
- <li><b>More Easy Game: </b> Thanks to the speed trick, you can lower the difficulty level of the game and play the game more easily. For example, you can park without damage to parking tasks, pass your opponents in races, get rid of the police or choose the profession you want in role-playing mode. </li>
32
- </ul>
33
- <h3> Disadvantages </h3>
34
- <p>Some disadvantages of using the speed trick are: </p>
35
- <ul>
36
-
37
- <li><b>Possible Ban: </b> If your speed trick is noticed by the developer of the game, there is a risk that your account will be banned. Olzhass, the developer of the game, tries to block tricks by constantly updating the game. If your account is banned, you may not play the game again or lose your progress. </li>
38
- <li><b> Possible Errors: </b> Using a speed trick can cause some errors in the game's operation. For example, you may lose control of your vehicle, get off the map, be kicked out or the game may freeze. In this case, you may need to restart or install the game. </li>
39
- </ul>
40
- <h2>Car Parking Multiplayer APK Speed Cheat Alternatives Are There?</h2>
41
- <p>Car Parking Multiplayer If you don't want or can't use APK speed trick, there are some alternatives to play the game faster and fun. Here are some alternatives to the game: </p>
42
- <h3> In-Game Purchases </h3>
43
- <p> There are many things you can buy with real money in the game. These include options such as game money, diamond, gold, VIP membership, vehicle packages, ad removal. Some of these options can help you play the game faster and fun. For example, with game money, you can buy better vehicles, gain more speed with diamonds, and get more benefits with VIP membership. However, the disadvantage of in-game purchases is that you have to spend real money. This can push your budget or miss the taste of the game. </p>
44
- <h3> Out of Game Applications </h3>
45
-
46
- <h3> Game Tips and Tricks </h3>
47
- <p> The best and safest option you can use to play the game faster and fun is to learn and apply game tips and tricks. These include options such as parking techniques, racing tactics, police chase strategies, role-playing mode tips. Some of these options can help you play the game faster and fun. For example, with parking techniques, you can easily complete parking tasks, pass your opponents with racing tactics, get rid of the police with police chase strategies, or live as you wish with role-playing mode tips. However, the disadvantage of game tips and tricks can take time or be difficult. This can test your patience or skill. </p>
48
- <h1>Result </h1>
49
- <p>Car Parking Multiplayer APK speed trick is an option that allows you to play the game faster and more fun. To use this option, you must first uninstall the original version of the game and download and install the fraudulent APK file. Then you can change the speed settings by opening the game and play as you wish. However, this option has some advantages and disadvantages. Its advantages include faster, more fun and easier gaming experience. Its disadvantages include unfair advantage, possible prohibition and possible errors. Therefore, when using this option, you need to be careful and take responsibility. </p>
50
-
51
- <p>Car Parking Multiplayer is a car parking simulation game. But there is much more than just parking. Open world mode offers many features and modes such as car customization, multiplayer modes, police chase and role playing. You can use car parking multiplayer apk speed trick or alternatives to make this game more fun. However, no matter what option you use, we recommend that you respect the rules and tag of the game and get along well with other players. Click <a href=" > here </a> to download the game. Good luck playing the game!</p>
52
- <h2> Frequently Asked Questions </h2>
53
- <p>In this section you will find answers to frequently asked questions about car parking multiplayer apk speed trick. </p>
54
- <ol>
55
- <li><b> Is the speed trick safe?</b> Speed trick is not safe. Cheating can damage your device or account or cause you to be banned. Therefore, you should be careful when cheating and do not forget that you are at your own risk. </li>
56
- <li><b> Is the speed trick paid?</b> Speed trick is free. You do not have to pay any fees to download the fraudulent APK file. However, some resources may request membership or subscription from you. We recommend that you stay away from such sources. </li>
57
-
58
- <li><b> In which game version does the speed trick work?</b> Speed trick works in version 4.8.2 of the game. This version was released in June 2023. It may not work in newer or older versions or cause problems. </li>
59
- <li><b> Where to download the speed trick?</b> You can click <a href=" > here </a> to download the speed trick. This link is taken from a reliable source and does not contain viruses. If you want to download from other sources, we recommend that you be careful and check its reliability. </li>
60
- </ol>
61
- <p> In this article, we explained everything you need to know about car parking multiplayer apk speed trick. We hope our article has been useful to you. If you like our post, please share and comment. If there is missing or incorrect information in our article, please let us know. If you have any other questions about the game, please ask us. Let's try to answer for you. Click <a href=" > here </a> to download the game. Good luck playing the game!</p> 64aa2da5cf<br />
62
- <br />
63
- <br />
 
spaces/Benson/text-generation/Examples/Casos Criminales Misterios Del Pasado Mod Apk Modyolo.md DELETED
@@ -1,102 +0,0 @@
1
- <br />
2
- <h1>Caso Criminal: Misterios del Pasado Mod APK Modyolo - Una revisión</h1>
3
- <p>Si eres un fan de los juegos de detectives, es posible que hayas oído hablar de Criminal Case: Mysteries of the Past, un popular juego móvil que te permite resolver crímenes en la Europa del siglo XIX. ¿Pero sabías que hay una versión modificada de este juego que te da estrellas ilimitadas, energía, monedas y más? En este artículo, vamos a revisar Criminal Case: Mysteries of the Past Mod APK Modyolo, un sitio web que ofrece esta versión modificada de forma gratuita. También te mostraremos cómo descargar, instalar y jugar a este juego en tu dispositivo Android. ¡Empecemos! </p>
4
- <h2>¿Qué es el Caso Criminal: Misterios del Pasado? </h2>
5
- <p>Criminal Case: Mysteries of the Past es un juego de aventura de objetos ocultos desarrollado por Pretty Simple. Es la cuarta entrega de la serie Criminal Case, siguiendo a Criminal Case, Criminal Case: Pacific Bay y Criminal Case: Save the World. En este juego, te unes a la Concordian Flying Squad, un equipo de detectives de élite que investigan asesinatos y otros crímenes en varios lugares de Europa a finales del siglo XIX. Tendrás que encontrar pistas, interrogar sospechosos, analizar pruebas y atrapar a los asesinos. </p>
6
- <h2>casos criminales misterios del pasado mod apk modyolo</h2><br /><p><b><b>Download Zip</b> &#10027;&#10027;&#10027; <a href="https://bltlly.com/2v6JWs">https://bltlly.com/2v6JWs</a></b></p><br /><br />
7
- <p>El juego cuenta con más de 60 casos para resolver, cada uno con su propia historia, personajes y ubicaciones. También puedes personalizar tu avatar, coleccionar atuendos y mascotas, formar equipo con amigos y competir con otros jugadores en las tablas de clasificación. El juego es gratis, pero también puedes comprar objetos y divisas con dinero real. </p>
8
- <h2>¿Qué es Modyolo? </h2>
9
-
10
- <h2>¿Qué es el caso penal: Misterios del pasado Mod APK Modyolo? </h2>
11
- <p>Caso Penal: Misterios del Pasado Mod APK Modyolo es una versión modificada de Caso Penal: Misterios del Pasado que está disponible en el sitio web de Modyolo. Esta versión modificada tiene varios beneficios que la hacen más agradable y cómoda de jugar que la versión original. Algunos de estos beneficios son:</p>
12
- <ul>
13
- <li>Estrellas ilimitadas: Las estrellas se utilizan para desbloquear nuevas escenas, realizar tareas y progresar en el juego. Normalmente, tienes que ganar estrellas completando escenas o viendo anuncios. Pero con esta versión modificada, tendrás estrellas ilimitadas a tu disposición. </li>
14
- <li>Energía ilimitada: La energía se utiliza para reproducir escenas y realizar tareas. Normalmente, usted tiene una cantidad limitada de energía que se repone con el tiempo o mediante la compra de dinero real. Pero con esta versión modificada, tendrá energía ilimitada que nunca se agota. </li>
15
- <li>Monedas ilimitadas: Las monedas se utilizan para comprar artículos, trajes, mascotas y sugerencias. Normalmente, tienes que ganar monedas completando escenas, tareas o logros. Pero con esta versión modificada, tendrá monedas ilimitadas para gastar como desee. </li>
16
- <li>Sin anuncios: Los anuncios son molestos y distraen, especialmente cuando interrumpen tu juego. Normalmente, tienes que ver anuncios para ganar estrellas, energía o monedas. Pero con esta versión modificada, no verás ningún anuncio en absoluto. </li>
17
- </ul>
18
- <p>Con estos beneficios, puedes disfrutar de Criminal Case: Mysteries of the Past sin limitaciones ni interrupciones. Puedes reproducir tantas escenas como quieras, desbloquear todo el contenido y divertirte más resolviendo crímenes. </p>
19
- <h2>¿Cómo descargar e instalar Criminal Case: Mysteries of the Past Mod APK Modyolo? </h2>
20
- <p>Descargar e instalar Criminal Case: Mysteries of the Past Mod APK Modyolo es muy fácil y rápido. Solo tienes que seguir estos sencillos pasos:</p>
21
- <ol>
22
-
23
- <li>Haga clic en el botón de descarga y espere a que el archivo se descargue en su dispositivo. El tamaño del archivo es de aproximadamente 70 MB.</li>
24
- <li>Una vez completada la descarga, vaya a su administrador de archivos y localice el archivo descargado. Toque en él para iniciar el proceso de instalación. </li>
25
- <li>Si ves un mensaje emergente que dice "Instalar bloqueado", ve a la configuración del dispositivo y habilita la opción "Fuentes desconocidas". Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store.</li>
26
- <li>Después de habilitar la opción, vuelva al administrador de archivos y toque en el archivo de nuevo. Siga las instrucciones de la pantalla para completar la instalación. </li>
27
- <li>Una vez que se hace la instalación, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Disfrute! </li>
28
- </ol>
29
- <p>Aquí hay algunas capturas de pantalla del proceso de descarga e instalación:</p>
30
- <img src="" alt="Captura de pantalla 1" width="300" height="500">
31
- <img src="" alt="Captura de pantalla 2" width="300" height="500">
32
- <img src="" alt="Captura de pantalla 3" width="300" height="500">
33
- <img src="" alt="Captura de pantalla 4" width="300" height="500">
34
- <img src="" alt="Screenshot 5" width="300" height="500"> <h2>Cómo jugar a Criminal Case: Mysteries of the Past Mod APK Modyolo? </h2>
35
- <p>Jugando caso criminal: Misterios del pasado Mod APK Modyolo es muy similar a jugar la versión original. Solo tienes que seguir estos sencillos pasos:</p>
36
- <ol>
37
- <li>Cuando inicie el juego, verá el menú principal con varias opciones. Puede tocar en "Jugar" para iniciar un nuevo caso o continuar uno existente. También puedes tocar en "Perfil" para personalizar tu avatar, "Tienda" para comprar artículos, trajes y mascotas, "Equipo" para unirte o crear un equipo con tus amigos, y "Rankings" para ver tu posición en las tablas de clasificación. </li>
38
- <li>Cuando toque en "Play", verá el mapa de Europa con diferentes distritos y casos. Puedes elegir cualquier caso que quieras jugar, ya que todos están desbloqueados para ti. También puedes ver el número de estrellas que has ganado para cada caso. </li>
39
-
40
- <li>Al iniciar una escena, verá una escena de objetos ocultos con una lista de elementos para encontrar. Tienes que encontrar todos los artículos lo más rápido posible para ganar más puntos y estrellas. También puede utilizar sugerencias, lupas y otras herramientas para ayudarle a encontrar los elementos. También puede hacer zoom hacia dentro y hacia fuera pellizcando la pantalla. </li>
41
- <li>Cuando termines una escena, verás tu puntuación y rango. También ganarás estrellas, monedas y puntos de experiencia. Puedes usar las estrellas para desbloquear nuevas escenas, realizar tareas y progresar en el caso. Puedes usar las monedas para comprar objetos, trajes, mascotas y sugerencias. Puedes usar los puntos de experiencia para subir de nivel y desbloquear nuevas funciones. </li>
42
- Cuando completes todas las escenas de un caso, tendrás que interrogar a los sospechosos, analizar las pruebas y arrestar al asesino. También tendrá que tomar decisiones que afecten el resultado del caso y su reputación. Puede reproducir cualquier escena o caso si desea mejorar su puntuación o cambiar sus opciones. </li>
43
- </ol>
44
- <p>Aquí hay algunas capturas de pantalla del juego:</p>
45
- <p></p>
46
- <img src="" alt="Captura de pantalla 6" width="300" height="500">
47
- <img src="" alt="Captura de pantalla 7" width="300" height="500">
48
- <img src="" alt="Captura de pantalla 8" width="300" height="500">
49
- <img src="" alt="Captura de pantalla 9" width="300" height="500">
50
- <img src="" alt="Screenshot 10" width="300" height="500"> <h2>Pros y contras de Criminal Case: Mysteries of the Past Mod APK Modyolo</h2>
51
- <p>Como con cualquier versión modificada de un juego, Criminal Case: Mysteries of the Past Mod APK Modyolo tiene sus pros y contras. Aquí están algunos de ellos:</p>
52
- <tabla>
53
- <tr>
54
- <th>Pros</th>
55
- <th>Contras</th>
56
- </tr>
57
- <tr>
58
- <td>- Las estrellas ilimitadas, la energía, las monedas, y ningún anuncio hacen el juego más agradable y conveniente jugar. </td>
59
- <td>- La versión modificada podría no ser compatible con algunos dispositivos o las últimas actualizaciones del juego. </td>
60
- </tr>
61
- <tr>
62
- <td>- Puedes desbloquear y acceder a todo el contenido y características del juego sin gastar dinero real. </td>
63
-
64
- </tr>
65
- <tr>
66
- <td>- Puedes jugar el juego sin conexión a Internet. </td>
67
- <td>- La versión modded podría no ser segura de usar, ya que podría contener virus, malware o spyware que pueden dañar su dispositivo o sus datos. </td>
68
- </tr>
69
- <tr>
70
- <td>- Usted puede tener más diversión y desafiarse jugando el juego a su propio ritmo y estilo. </td>
71
- <td>- La versión modificada podría no ser justa o ética de usar, ya que le da una ventaja injusta sobre otros jugadores que juegan la versión original del juego. </td>
72
- </tr>
73
- </tabla>
74
- <h2>Consejos y trucos para el caso penal: Misterios del pasado Mod APK Modyolo</h2>
75
- <p>Si quieres aprovechar al máximo Caso Penal: Misterios del Pasado Mod APK Modyolo, aquí hay algunos consejos y trucos que puede utilizar:</p>
76
- <ul>
77
- <li>Usa sabiamente tus estrellas ilimitadas. No tienes que gastarlas en cada escena o tarea. Puede guardarlos para casos posteriores o para comprar artículos, trajes y mascotas que le gusten. </li>
78
- <li>Usa tu energía ilimitada sabiamente. No tienes que reproducir cada escena o tarea de inmediato. Puedes tomar descansos y volver más tarde cuando quieras jugar. También puedes usar tu energía para reproducir escenas o casos que hayas disfrutado o en los que quieras mejorar tu puntuación. </li>
79
- <li>Usa tus monedas ilimitadas sabiamente. No tienes que comprar cada artículo, atuendo o mascota que veas. Puede comparar y elegir los que se adapten a sus preferencias y estilo. También puede utilizar sus monedas para comprar pistas, lupas y otras herramientas que pueden ayudarle a encontrar artículos más rápido y más fácil. </li>
80
- <li>Usa tus pistas con moderación. Aunque tengas monedas ilimitadas para comprar pistas, no tienes que usarlas todo el tiempo. Puede desafiarse a sí mismo encontrando objetos sin pistas. También puede guardar sus sugerencias para escenas o tareas que sean más difíciles o que consuman más tiempo. </li>
81
-
82
- <li>Haz equipo con tus amigos. Tienes energía ilimitada para jugar con tus amigos. Puede unirse o crear un equipo con sus amigos y cooperar con ellos para resolver los casos más rápido y más fácil. También puedes chatear con ellos, enviarles regalos y competir con ellos en las tablas de clasificación. </li>
83
- <li>Explora el mapa. Tienes estrellas ilimitadas para desbloquear nuevos distritos y casos. Puedes explorar diferentes lugares de Europa y descubrir su historia, cultura y secretos. También puedes aprender más sobre los personajes, sus antecedentes y sus relaciones. </li>
84
- </ul>
85
- <h2>Frequently Asked Questions About Criminal Case: Mysteries of the Past Mod APK Modyolo</h2>
- <p>Here are some frequently asked questions about Criminal Case: Mysteries of the Past Mod APK Modyolo:</p>
- <h3>Q: Is Criminal Case: Mysteries of the Past Mod APK Modyolo free?</h3>
- <p>A: Yes, Criminal Case: Mysteries of the Past Mod APK Modyolo is free to download and play. You do not have to pay anything to use this modded version of the game.</p>
- <h3>Q: Is Criminal Case: Mysteries of the Past Mod APK Modyolo safe?</h3>
- <p>A: We cannot guarantee that Criminal Case: Mysteries of the Past Mod APK Modyolo is safe or secure to use. This modded version of the game may contain viruses, malware, or spyware that can harm your device or your data. We recommend scanning the file before installing it, and using it at your own risk.</p>
- <h3>Q: Is Criminal Case: Mysteries of the Past Mod APK Modyolo legal?</h3>
- <p>A: We cannot guarantee that Criminal Case: Mysteries of the Past Mod APK Modyolo is legal or ethical to use. This modded version of the game may violate the terms and conditions of the original game or the Google Play Store. It may also give you an unfair advantage over other players who play the original version of the game. We recommend using this modded version of the game with caution and respect.</p>
- <h3>Q: Is Criminal Case: Mysteries of the Past Mod APK Modyolo compatible with my device?</h3>
- <p>A: We cannot guarantee that Criminal Case: Mysteries of the Past Mod APK Modyolo is compatible with your device or with the latest updates of the game. This modded version of the game may not work properly, or may cause errors or crashes on your device. We recommend checking the compatibility and requirements of this modded version of the game before installing it, and using it at your discretion.</p>
- <h3>Q: Is Criminal Case: Mysteries of the Past Mod APK Modyolo updated regularly?</h3>
- <p>A: We cannot guarantee that Criminal Case: Mysteries of the Past Mod APK Modyolo is updated regularly or in sync with the original version of the game. This modded version of the game may become outdated or obsolete as the original game receives new features, content, or fixes. We recommend checking for the latest version and updates of this modded version of the game on the Modyolo website, and using it according to your preferences.</p>
- <h2>Conclusion</h2>
- <p>Criminal Case: Mysteries of the Past Mod APK Modyolo is a modded version of Criminal Case: Mysteries of the Past that offers unlimited stars, energy, and coins, and removes ads, all for free. It lets you enjoy and explore the game without limitations or interruptions. However, it also has some drawbacks and risks you should be aware of before using it, and some tips and tricks worth following to get the most out of it. If you are interested in trying this modded version of the game, you can download and install it from the Modyolo website by following our guide. But remember: use it at your own risk and responsibility.</p>
- <p>We hope you found this article helpful and informative. If you have any questions, comments, or feedback, feel free to leave them below. Thanks for reading!</p>
spaces/Benson/text-generation/Examples/Deriva Cazadores Descargar Chromebook.md DELETED
@@ -1,67 +0,0 @@
- <br />
- <h1>How to Download and Play Drift Hunters on Your Chromebook</h1>
- <p>If you are looking for an exciting, realistic drifting game you can play on your Chromebook, look no further than <strong>Drift Hunters</strong>. This game lets you unleash your inner racer and drift around various tracks in different cars. In this article, we will show you how to download and play Drift Hunters on your Chromebook, along with some tips and tricks to help you master the art of drifting.</p>
- <h2>What Is Drift Hunters?</h2>
- <p>Drift Hunters is a free 3D drifting game developed by Studionum43. It is one of the most popular games on the Chrome Web Store, with more than 70,000 users and positive reviews. Here are some of the features that make Drift Hunters stand out:</p>
- <h2>drift hunters download chromebook</h2><br /><p><b><b>DOWNLOAD</b> &#9733; <a href="https://bltlly.com/2v6MdC">https://bltlly.com/2v6MdC</a></b></p><br /><br />
- <h3>A free 3D game with more than 25 cars and 10 locations</h3>
- <p>Drift Hunters offers a wide selection of cars to choose from, ranging from classic muscle cars to modern sports cars. Each car has its own characteristics, such as speed, handling, weight, and drifting ability. You can also customize your car's appearance with different paint jobs, rims, decals, and spoilers, and you can unlock more cars by earning money from drifting.</p>
- <p>Drift Hunters also has 10 unique locations where you can drift, such as city streets, racetracks, forests, deserts, and airports. Each location has its own layout, obstacles, weather, and atmosphere. You can explore different routes and find hidden spots to drift.</p>
- <h3>A browser-based game with great graphics and tuning options</h3>
- <p>Drift Hunters is a browser-based game that runs on HTML5 technology. This means you do not need to download or install anything to play it. Just visit the Chrome Web Store and add it to your Chrome browser. The game also supports full-screen mode, so you can enjoy it on your Chromebook's screen without distractions.</p>
-
- <h3>Launch the game from your Chrome Apps or New Tab page</h3>
- <p>The final step is to launch the game and start playing. You can do this by clicking the Drift Hunters icon, which should appear on your Chrome Apps page or your New Tab page. You can also access the game by typing chrome://apps into the address bar and pressing Enter.</p>
- <p>Once you launch the game, you will see the main menu, where you can choose your car, track, and settings. You can also view your achievements, leaderboards, and credits. Click Play to start drifting.</p>
- <h2>How to Play Drift Hunters on Your Chromebook</h2>
- <p>Playing Drift Hunters on your Chromebook is easy and fun. Here are some basic instructions on how to play the game:</p>
- <h3>Choose your car and track from the menu</h3>
- <p>The first thing you need to do is choose your car and track from the menu. You can scroll through the available cars and tracks using the left and right arrows, and click on a car or track name to see more details about it. You can view a car's stats, such as speed, handling, weight, and drift, as well as a track's layout, weather, and difficulty.</p>
- <p>You can unlock more cars and tracks by earning money from drifting. You can also customize your car's appearance and performance in the tuning menu. To access the tuning menu, click the wrench icon in the bottom-right corner of the screen. You can change your car's color, rims, decals, spoilers, engine, turbo, gearbox, suspension, brakes, tires, and systems.</p>
- <h3>Use the arrow keys or WASD to steer, accelerate, brake, and handbrake</h3>
-
- <p>You can also use a few other keys for additional functions. The C key changes the camera angle, which can help you see better or enjoy different views of your car and the track. The M key mutes or unmutes the sound effects and music. The P key pauses or resumes the game.</p>
- <h3>Drift around corners and earn points and money</h3>
- <p>The main objective of Drift Hunters is to drift around corners and earn points and money. Drifting is a driving technique in which you slide your car sideways while maintaining control and speed. To drift successfully, you need to use a combination of steering, throttle, braking, and the handbrake.</p>
- <p>To start a drift, approach a corner at high speed and use the handbrake to make your rear wheels lose traction. Then countersteer to keep your car balanced and prevent it from spinning out. You also need to modulate the throttle and brake to adjust your speed and angle.</p>
- <p>The longer and smoother your drift, the more points you earn. You can also earn bonus points for drifting close to walls or obstacles, drifting in opposite directions without stopping (known as tandem drifting), or drifting alongside other cars (known as multiplayer drifting). You can see your score in the top-left corner of the screen.</p>
- <p>You also earn money based on your score and performance. You can use this money to unlock more cars and tracks, or to upgrade your existing car. You can see your money in the top-right corner of the screen.</p>
- <h2>Tips and Tricks for Drifting Like a Pro</h2>
- <p>Drifting in Drift Hunters can be challenging at first, but with a little practice and a few tips, you can become a pro in no time. Here are some tips and tricks that can help you improve your drifting skills:</p>
- <h3>Adjust your car's settings to suit your driving style and track conditions</h3>
-
- <p>For example, if you want more power and speed, you can increase the engine and turbo settings. If you want more stability and grip, you can lower the ride height and increase tire pressure. If you want more agility and responsiveness, you can reduce your weight distribution and camber. You can also experiment with different combinations of settings to find your optimal setup.</p>
- <h3>Use the handbrake to start a drift and countersteer to maintain it</h3>
- <p>The handbrake is your best friend when it comes to drifting. It is the key to making your rear wheels lose traction and slide sideways. You need to use the handbrake at the right moment and for the right duration to start a drift, and release it at the right time to avoid oversteering or understeering.</p>
- <p>To start a drift, approach a corner at high speed and press the spacebar to use the handbrake. This will make your rear wheels lock and slide. Hold the spacebar for a short time, just long enough for your car to start sliding. If you hold it too long, you will lose too much speed and momentum.</p>
- <p>To maintain a drift, countersteer to keep your car balanced and prevent it from spinning out. Steer in the direction opposite to your drift, meaning steer left if you are drifting right, and vice versa. Steer smoothly and precisely, without jerking or oversteering. If you steer too much, you will spin out; if you steer too little, you will straighten up.</p>
- <h3>Drift for as long as possible without hitting walls or obstacles</h3>
- <p>The longer and smoother your drift, the more points you earn. You also earn bonus points for drifting close to walls or obstacles, which shows your skill and courage. However, be careful not to hit the walls or obstacles, as this will damage your car and reduce your score.</p>
-
- <p>You also need to plan ahead and anticipate the next corner. Position your car correctly before entering a corner, meaning line it up with the apex, and exit the corner smoothly to prepare for the next one.</p>
- <h3>Try different cars and tracks to find your favorites</h3>
- <p>Drift Hunters has a lot of variety when it comes to cars and tracks. Each car has its own characteristics, such as speed, handling, weight, and drifting ability, and each track has its own layout, obstacles, weather, and atmosphere. You can try different cars and tracks to find your favorites.</p>
- <p>Some cars are easier to drift than others, and some tracks are harder than others. You can choose a car that suits your driving style and skill level, or a track that matches your mood and preference. You can also challenge yourself by trying new cars and tracks that are harder or different from what you are used to.</p>
- <p>You can also compare your results and performance with other players on the leaderboards. You can see where you stand among other drifters around the world or in your region, and you can watch how other players drift in different cars and on different tracks by viewing their replays.</p>
- <h2>Conclusion</h2>
- <p>Drift Hunters is a fun and addictive game you can play on your Chromebook. It offers a realistic, immersive drifting experience with impressive graphics and sound effects. It is easy to download and play, you can customize your car to your liking, and you can compete with other players while improving your drifting skills. Drift Hunters is a game that will keep you entertained and challenged for hours.</p>
- <h2>Frequently Asked Questions</h2>
- <h4>What are the system requirements for Drift Hunters?</h4>
-
- <h4>Can I play Drift Hunters offline?</h4>
- <p>No, you cannot play Drift Hunters offline. The game requires an Internet connection to load the game assets, save your progress, and sync your data. If you lose your Internet connection while playing, you may experience glitches or errors.</p>
- <h4>Can I use a controller or a mouse to play Drift Hunters?</h4>
- <p>No, you cannot use a controller or a mouse to play Drift Hunters. The game only supports keyboard controls, which are more convenient and responsive than other input devices. You can use the arrow keys or WASD to steer, accelerate, brake, and handbrake, plus other keys for additional functions.</p>
- <h4>What are some other games like Drift Hunters that I can play on my Chromebook?</h4>
- <p>If you like Drift Hunters, you may also enjoy other games that are similar or related to it. Here are some examples of other games you can play on your Chromebook:</p>
- <ul>
- <li><a href="">Madalin Stunt Cars 2</a>: A 3D racing game where you can perform incredible stunts with different cars on various tracks.</li>
- <li><a href="">Burnout Drift</a>: A 3D drifting game where you can drift around different tracks and environments with realistic physics and damage effects.</li>
- <li><a href="">Drift Runner 3D</a>: A 3D drifting game where you can drift through the streets of Tokyo with different cars and customizations.</li>
- <li><a href="">Drift Max Pro</a>: A 3D drifting game where you can drift around different tracks and locations with various cars and modes.</li>
- <li><a href="">Drift Legends</a>: A 3D drifting game where you can drift around different tracks and scenarios with more than 40 cars and challenges.</li>
- </ul>
- <h4>How can I contact the developer of Drift Hunters?</h4>

spaces/Benson/text-generation/Examples/Descargar Coches Rpidos Como El Rayo Mod Apk Terbaru.md DELETED
@@ -1,92 +0,0 @@
- <br />
- <h1>Download Cars Fast as Lightning Mod APK Terbaru</h1>
- <p>If you are a fan of Disney Pixar's Cars movies, you will love Cars Fast as Lightning, a fun and colorful racing game that lets you build your own Radiator Springs and race as your favorite characters. But what if you want to unlock all the cars, tracks, and stunts without spending money or waiting a long time? That is where the Cars Fast as Lightning mod APK comes in handy. In this article, we will tell you what Cars Fast as Lightning is, how to download and install the mod APK, why you might use it, and some tips and tricks for playing the game. We will also give you a brief review of the game and answer some frequently asked questions.</p>
- <h2>download cars fast as lightning mod apk terbaru</h2><br /><p><b><b>Download File</b> >>> <a href="https://bltlly.com/2v6MJL">https://bltlly.com/2v6MJL</a></b></p><br /><br />
- <h2>What Is Cars Fast as Lightning?</h2>
- <p>Cars Fast as Lightning is a mobile game developed by Gameloft and based on the popular Cars franchise. The game combines racing and city-building elements, letting you create your own version of Radiator Springs with buildings, decorations, and attractions. You can also race as various characters from the movies, such as Lightning McQueen, Mater, Chick Hicks, and more. The game features simple one-touch controls, stunning graphics, and original voice clips by Owen Wilson as Lightning McQueen.</p>
- <h3>Game features</h3>
- <p>Some of the features of Cars Fast as Lightning are:</p>
- <ul>
- <li>Customizable racing: You can build your own tracks with different stunts and tricks, such as loops, jumps, bumps, and more. You can also customize your cars with paint jobs, stickers, and accessories.</li>
- <li>Radiator Springs: You can explore the town of Radiator Springs and interact with its residents. You can also add buildings and attractions from the movies, such as Flo's V8 Café, Luigi's Casa Della Tires, Ramone's House of Body Art, and more.</li>
-
- <li>A unique racing experience: You can enjoy fast-paced racing action with simple tap-and-swipe controls. You can also perform stunts on blue spots to boost your speed, and avoid obstacles by swiping in the right direction.</li>
- </ul>
- <h3>How to download and install the mod APK</h3>
- <p>If you want to download the Cars Fast as Lightning mod APK terbaru, you will need to follow these steps:</p>
- <ol>
- <li>Go to <a href="https://apkdone.com/cars-fast-as-lightning/">https://apkdone.com/cars-fast-as-lightning/</a>, a trusted website that offers free mod APK downloads.</li>
- <li>Click the green "Download APK" button to start downloading the file.</li>
- <li>Once the download is complete, locate the file on your device and tap it to install it. You may need to enable "Unknown Sources" in your settings to allow installation from third-party sources.</li>
- <li>After the installation is done, launch the game and enjoy unlimited money, gems, cars, tracks, stunts, and more.</li>
- </ol>
- <h2>Why use the Cars Fast as Lightning mod APK?</h2>
- <p>The Cars Fast as Lightning mod APK is a modified version of the original game that gives you access to unlimited resources and features that are otherwise locked or require real money to obtain. By using the mod APK, you can enjoy the game without limitations or restrictions.</p>
- <h3>Benefits of the mod APK</h3>
- <p>Some of the benefits of using the Cars Fast as Lightning mod APK are:</p>
- <ul>
- <li>Unlimited money and gems: You can use these currencies to buy new cars, tracks, buildings, and decorations. You can also upgrade your car and track with better parts and features.</li>
- <li>All cars and tracks unlocked: You can choose from more than 20 cars and 10 tracks to race on, and switch between different cars and tracks whenever you want.</li>
- <li>All stunts unlocked: You can perform amazing stunts on your track, such as loops, jumps, corkscrews, and more. You can also customize your track with different props and obstacles.</li>
-
- </ul>
- <h3>Drawbacks of the mod APK</h3>
- <p>Some of the drawbacks of using the Cars Fast as Lightning mod APK are:</p>
- <ul>
- <li>Potential risks: Since the mod APK is not an official version of the game, it may contain viruses, malware, or spyware that can harm your device or steal your personal information. You should always download the mod APK from a trusted source and scan it with an antivirus before installing it.</li>
- <li>No updates: The mod APK may not be compatible with the latest version of the game or your device's operating system. You may miss out on new features, bug fixes, or security patches released by the developers.</li>
- <li>No online features: The mod APK may not let you access the game's online features, such as multiplayer mode, leaderboards, achievements, or social media integration. You may also be banned from the game if the developers detect you using the mod APK.</li>
- </ul>
- <h2>Tips and Tricks for Playing Cars Fast as Lightning</h2>
- <p>If you want to improve your skills and have more fun playing Cars Fast as Lightning, here are some tips and tricks you can use:</p>
- <h3>Boost your speed with stunts</h3>
- <p>One of the most important aspects of racing in Cars Fast as Lightning is performing stunts on the blue spots on your track. These stunts give you a speed boost that can help you win the race. To perform a stunt, tap the screen when you reach a blue spot. The stunt varies depending on the type of spot and the car you use. For example, some spots will make you do a loop, a jump, or a spin. Some cars also have unique stunts, such as Lightning McQueen's drift or Mater's tow-hook. Try to perform as many stunts as possible to boost your speed and your score.</p>
- <h3>Upgrade your car and track</h3>
-
- <h3>Challenge other cars and collect stickers</h3>
- <p>To unlock new cars and tracks in Cars Fast as Lightning, you need to challenge other cars from the movies and win races against them. Each car has its own personality and difficulty level. You can also collect stickers from each car by completing certain tasks or achievements. For example, to get Mater's sticker, you need to win a race against him using a tow-hook stunt. Stickers help you unlock new cars and tracks faster.</p>
- <h3>Complete missions and collect coins</h3>
- <p>Besides racing, you can also complete missions in Cars Fast as Lightning to earn extra coins and gems. Missions are tasks you need to do in the game, such as building a certain attraction, performing a certain stunt, or winning a certain number of races. You can check your missions by tapping the clipboard icon in the top-left corner of the screen. Completing missions also raises your level and unlocks new features in the game.</p>
- <h3>Wake up sleepy cars and send them to attractions</h3>
- <p>In Cars Fast as Lightning, you can also build your own Radiator Springs with various buildings and attractions from the movies. These buildings and attractions generate coins for you over time. However, some of them require sleepy cars to operate them. Sleepy cars are cars that are not active in races or construction. You can wake up sleepy cars by tapping them, and send them to an attraction that matches their color. For example, you can send Red the fire truck to Flo's V8 Café, or Luigi to Luigi's Casa Della Tires. This makes them happy and earns you more coins.</p>
- <h2>Cars Fast as Lightning Game Review</h2>
-
- <h3>Pros and cons of the game</h3>
- <p>Some of the pros and cons of Cars Fast as Lightning are:</p>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Fun and colorful racing game</td>
- <td>Requires an Internet connection</td>
- </tr>
- <tr>
- <td>Customizable cars and tracks</td>
- <td>Can become repetitive and boring</td>
- </tr>
- <tr>
- <td>Radiator Springs city building</td>
- <td>Has in-app purchases and ads</td>
- </tr>
- <tr>
- <td>Cars characters and voice clips</td>
- <td>Takes up a lot of storage space</td>
- </tr>
- <tr>
- <td>Simple and intuitive controls</td>
- <td>Lacks an online multiplayer mode</td>
- </tr>
- </table>
- <h3>User ratings and reviews</h3>
- <p>Cars Fast as Lightning has received positive ratings and reviews from users on the Google Play Store and the App Store. The game has an average rating of 4.3 out of 5 stars on the Google Play Store, based on more than 1.5 million reviews, and an average rating of 4.6 out of 5 stars on the App Store, based on more than 17 thousand reviews. Some of the user comments are:</p>
-
- <p>Cars Fast as Lightning is a mobile game that lets you race as your favorite characters from the Cars movies and build your own Radiator Springs. The game has plenty of features and content, such as customizable cars and tracks, Radiator Springs city building, Cars characters and voice clips, simple and intuitive controls, and fun, colorful racing action. The game also has some drawbacks, such as requiring an Internet connection, becoming repetitive and boring, having in-app purchases and ads, taking up a lot of storage space, and lacking an online multiplayer mode.</p>
- <h3>Summary of the article</h3>
- <p>In this article, we have told you what Cars Fast as Lightning is, how to download and install the mod APK terbaru, why you might use it, and some tips and tricks for playing the game. We have also given you a brief review of the game and answered some frequently asked questions.</p>
- <h3>Frequently asked questions</h3>
- <p>Here are some of the most frequently asked questions about Cars Fast as Lightning:</p>
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/rrule.py DELETED
@@ -1,1737 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- The rrule module offers a small, complete, and very fast, implementation of
4
- the recurrence rules documented in the
5
- `iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
6
- including support for caching of results.
7
- """
8
- import calendar
9
- import datetime
10
- import heapq
11
- import itertools
12
- import re
13
- import sys
14
- from functools import wraps
15
- # For warning about deprecation of until and count
16
- from warnings import warn
17
-
18
- from six import advance_iterator, integer_types
19
-
20
- from six.moves import _thread, range
21
-
22
- from ._common import weekday as weekdaybase
23
-
24
- try:
25
- from math import gcd
26
- except ImportError:
27
- from fractions import gcd
28
-
29
- __all__ = ["rrule", "rruleset", "rrulestr",
30
- "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
31
- "HOURLY", "MINUTELY", "SECONDLY",
32
- "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
33
-
34
- # Every mask is 7 days longer to handle cross-year weekly periods.
35
- M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
36
- [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
37
- M365MASK = list(M366MASK)
38
- M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
39
- MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
40
- MDAY365MASK = list(MDAY366MASK)
41
- M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
42
- NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
43
- NMDAY365MASK = list(NMDAY366MASK)
44
- M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
45
- M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
46
- WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
47
- del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
48
- MDAY365MASK = tuple(MDAY365MASK)
49
- M365MASK = tuple(M365MASK)
50
-
51
- FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
52
-
53
- (YEARLY,
54
- MONTHLY,
55
- WEEKLY,
56
- DAILY,
57
- HOURLY,
58
- MINUTELY,
59
- SECONDLY) = list(range(7))
60
-
61
- # Imported on demand.
62
- easter = None
63
- parser = None
64
-
65
-
66
- class weekday(weekdaybase):
67
- """
68
- This version of weekday does not allow n = 0.
69
- """
70
- def __init__(self, wkday, n=None):
71
- if n == 0:
72
- raise ValueError("Can't create weekday with n==0")
73
-
74
- super(weekday, self).__init__(wkday, n)
75
-
76
-
77
- MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
78
-
79
-
80
- def _invalidates_cache(f):
81
- """
82
- Decorator for rruleset methods which may invalidate the
83
- cached length.
84
- """
85
- @wraps(f)
86
- def inner_func(self, *args, **kwargs):
87
- rv = f(self, *args, **kwargs)
88
- self._invalidate_cache()
89
- return rv
90
-
91
- return inner_func
92
-
93
-
94
- class rrulebase(object):
95
- def __init__(self, cache=False):
96
- if cache:
97
- self._cache = []
98
- self._cache_lock = _thread.allocate_lock()
99
- self._invalidate_cache()
100
- else:
101
- self._cache = None
102
- self._cache_complete = False
103
- self._len = None
104
-
105
- def __iter__(self):
106
- if self._cache_complete:
107
- return iter(self._cache)
108
- elif self._cache is None:
109
- return self._iter()
110
- else:
111
- return self._iter_cached()
112
-
113
- def _invalidate_cache(self):
114
- if self._cache is not None:
115
- self._cache = []
116
- self._cache_complete = False
117
- self._cache_gen = self._iter()
118
-
119
- if self._cache_lock.locked():
120
- self._cache_lock.release()
121
-
122
- self._len = None
123
-
124
- def _iter_cached(self):
125
- i = 0
126
- gen = self._cache_gen
127
- cache = self._cache
128
- acquire = self._cache_lock.acquire
129
- release = self._cache_lock.release
130
- while gen:
131
- if i == len(cache):
132
- acquire()
133
- if self._cache_complete:
134
- break
135
- try:
136
- for j in range(10):
137
- cache.append(advance_iterator(gen))
138
- except StopIteration:
139
- self._cache_gen = gen = None
140
- self._cache_complete = True
141
- break
142
- release()
143
- yield cache[i]
144
- i += 1
145
- while i < self._len:
146
- yield cache[i]
147
- i += 1
148
-
149
- def __getitem__(self, item):
150
- if self._cache_complete:
151
- return self._cache[item]
152
- elif isinstance(item, slice):
153
- if item.step and item.step < 0:
154
- return list(iter(self))[item]
155
- else:
156
- return list(itertools.islice(self,
157
- item.start or 0,
158
- item.stop or sys.maxsize,
159
- item.step or 1))
160
- elif item >= 0:
161
- gen = iter(self)
162
- try:
163
- for i in range(item+1):
164
- res = advance_iterator(gen)
165
- except StopIteration:
166
- raise IndexError
167
- return res
168
- else:
169
- return list(iter(self))[item]
170
-
171
- def __contains__(self, item):
172
- if self._cache_complete:
173
- return item in self._cache
174
- else:
175
- for i in self:
176
- if i == item:
177
- return True
178
- elif i > item:
179
- return False
180
- return False
181
-
182
- # __len__() introduces a large performance penalty.
183
- def count(self):
184
- """ Returns the number of recurrences in this set. It will have go
185
- trough the whole recurrence, if this hasn't been done before. """
186
- if self._len is None:
187
- for x in self:
188
- pass
189
- return self._len
190
-
191
- def before(self, dt, inc=False):
192
- """ Returns the last recurrence before the given datetime instance. The
193
- inc keyword defines what happens if dt is an occurrence. With
194
- inc=True, if dt itself is an occurrence, it will be returned. """
195
- if self._cache_complete:
196
- gen = self._cache
197
- else:
198
- gen = self
199
- last = None
200
- if inc:
201
- for i in gen:
202
- if i > dt:
203
- break
204
- last = i
205
- else:
206
- for i in gen:
207
- if i >= dt:
208
- break
209
- last = i
210
- return last
211
-
212
- def after(self, dt, inc=False):
213
- """ Returns the first recurrence after the given datetime instance. The
214
- inc keyword defines what happens if dt is an occurrence. With
215
- inc=True, if dt itself is an occurrence, it will be returned. """
216
- if self._cache_complete:
217
- gen = self._cache
218
- else:
219
- gen = self
220
- if inc:
221
- for i in gen:
222
- if i >= dt:
223
- return i
224
- else:
225
- for i in gen:
226
- if i > dt:
227
- return i
228
- return None
229
-
230
- def xafter(self, dt, count=None, inc=False):
231
- """
232
- Generator which yields up to `count` recurrences after the given
233
- datetime instance, equivalent to `after`.
234
-
235
- :param dt:
236
- The datetime at which to start generating recurrences.
237
-
238
- :param count:
239
- The maximum number of recurrences to generate. If `None` (default),
240
- dates are generated until the recurrence rule is exhausted.
241
-
242
- :param inc:
243
- If `dt` is an instance of the rule and `inc` is `True`, it is
244
- included in the output.
245
-
246
- :yields: Yields a sequence of `datetime` objects.
247
- """
248
-
249
- if self._cache_complete:
250
- gen = self._cache
251
- else:
252
- gen = self
253
-
254
- # Select the comparison function
255
- if inc:
256
- comp = lambda dc, dtc: dc >= dtc
257
- else:
258
- comp = lambda dc, dtc: dc > dtc
259
-
260
- # Generate dates
261
- n = 0
262
- for d in gen:
263
- if comp(d, dt):
264
- if count is not None:
265
- n += 1
266
- if n > count:
267
- break
268
-
269
- yield d
270
-
271
- def between(self, after, before, inc=False, count=1):
272
- """ Returns all the occurrences of the rrule between after and before.
273
- The inc keyword defines what happens if after and/or before are
274
- themselves occurrences. With inc=True, they will be included in the
275
- list, if they are found in the recurrence set. """
276
- if self._cache_complete:
277
- gen = self._cache
278
- else:
279
- gen = self
280
- started = False
281
- l = []
282
- if inc:
283
- for i in gen:
284
- if i > before:
285
- break
286
- elif not started:
287
- if i >= after:
288
- started = True
289
- l.append(i)
290
- else:
291
- l.append(i)
292
- else:
293
- for i in gen:
294
- if i >= before:
295
- break
296
- elif not started:
297
- if i > after:
298
- started = True
299
- l.append(i)
300
- else:
301
- l.append(i)
302
- return l
303
-
304
-
305
- class rrule(rrulebase):
306
- """
307
- That's the base of the rrule operation. It accepts all the keywords
308
- defined in the RFC as its constructor parameters (except byday,
309
- which was renamed to byweekday) and more. The constructor prototype is::
310
-
311
- rrule(freq)
312
-
313
- Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
314
- or SECONDLY.
315
-
316
- .. note::
317
- Per RFC section 3.3.10, recurrence instances falling on invalid dates
318
- and times are ignored rather than coerced:
319
-
320
- Recurrence rules may generate recurrence instances with an invalid
321
- date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
322
- on a day where the local time is moved forward by an hour at 1:00
323
- AM). Such recurrence instances MUST be ignored and MUST NOT be
324
- counted as part of the recurrence set.
325
-
326
- This can lead to possibly surprising behavior when, for example, the
327
- start date occurs at the end of the month:
328
-
329
- >>> from dateutil.rrule import rrule, MONTHLY
330
- >>> from datetime import datetime
331
- >>> start_date = datetime(2014, 12, 31)
332
- >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
333
- ... # doctest: +NORMALIZE_WHITESPACE
334
- [datetime.datetime(2014, 12, 31, 0, 0),
335
- datetime.datetime(2015, 1, 31, 0, 0),
336
- datetime.datetime(2015, 3, 31, 0, 0),
337
- datetime.datetime(2015, 5, 31, 0, 0)]
338
-
339
- Additionally, it supports the following keyword arguments:
340
-
341
- :param dtstart:
342
- The recurrence start. Besides being the base for the recurrence,
343
- missing parameters in the final recurrence instances will also be
344
- extracted from this date. If not given, datetime.now() will be used
345
- instead.
346
- :param interval:
347
- The interval between each freq iteration. For example, when using
348
- YEARLY, an interval of 2 means once every two years, but with HOURLY,
349
- it means once every two hours. The default interval is 1.
350
- :param wkst:
351
- The week start day. Must be one of the MO, TU, WE constants, or an
352
- integer, specifying the first day of the week. This will affect
353
- recurrences based on weekly periods. The default week start is got
354
- from calendar.firstweekday(), and may be modified by
355
- calendar.setfirstweekday().
356
- :param count:
357
- If given, this determines how many occurrences will be generated.
358
-
359
- .. note::
360
- As of version 2.5.0, the use of the keyword ``until`` in conjunction
361
- with ``count`` is deprecated, to make sure ``dateutil`` is fully
362
- compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/
363
- html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
364
- **must not** occur in the same call to ``rrule``.
365
- :param until:
366
- If given, this must be a datetime instance specifying the upper-bound
367
- limit of the recurrence. The last recurrence in the rule is the greatest
368
- datetime that is less than or equal to the value specified in the
369
- ``until`` parameter.
370
-
371
- .. note::
372
- As of version 2.5.0, the use of the keyword ``until`` in conjunction
373
- with ``count`` is deprecated, to make sure ``dateutil`` is fully
374
- compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/
375
- html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count``
376
- **must not** occur in the same call to ``rrule``.
377
- :param bysetpos:
378
- If given, it must be either an integer, or a sequence of integers,
379
- positive or negative. Each given integer will specify an occurrence
380
- number, corresponding to the nth occurrence of the rule inside the
381
- frequency period. For example, a bysetpos of -1 if combined with a
382
- MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
383
- result in the last work day of every month.
384
- :param bymonth:
385
- If given, it must be either an integer, or a sequence of integers,
386
- meaning the months to apply the recurrence to.
387
- :param bymonthday:
388
- If given, it must be either an integer, or a sequence of integers,
389
- meaning the month days to apply the recurrence to.
390
- :param byyearday:
391
- If given, it must be either an integer, or a sequence of integers,
392
- meaning the year days to apply the recurrence to.
393
- :param byeaster:
394
- If given, it must be either an integer, or a sequence of integers,
395
- positive or negative. Each integer will define an offset from the
396
- Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
397
- Sunday itself. This is an extension to the RFC specification.
398
- :param byweekno:
399
- If given, it must be either an integer, or a sequence of integers,
400
- meaning the week numbers to apply the recurrence to. Week numbers
401
- have the meaning described in ISO8601, that is, the first week of
402
- the year is that containing at least four days of the new year.
403
- :param byweekday:
404
- If given, it must be either an integer (0 == MO), a sequence of
405
- integers, one of the weekday constants (MO, TU, etc), or a sequence
406
- of these constants. When given, these variables will define the
407
- weekdays where the recurrence will be applied. It's also possible to
408
- use an argument n for the weekday instances, which will mean the nth
409
- occurrence of this weekday in the period. For example, with MONTHLY,
410
- or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
411
- first friday of the month where the recurrence happens. Notice that in
412
- the RFC documentation, this is specified as BYDAY, but was renamed to
413
- avoid the ambiguity of that keyword.
414
- :param byhour:
415
- If given, it must be either an integer, or a sequence of integers,
416
- meaning the hours to apply the recurrence to.
417
- :param byminute:
418
- If given, it must be either an integer, or a sequence of integers,
419
- meaning the minutes to apply the recurrence to.
420
- :param bysecond:
421
- If given, it must be either an integer, or a sequence of integers,
422
- meaning the seconds to apply the recurrence to.
423
- :param cache:
424
- If given, it must be a boolean value specifying to enable or disable
425
- caching of results. If you will use the same rrule instance multiple
426
- times, enabling caching will improve the performance considerably.
427
- """
428
- def __init__(self, freq, dtstart=None,
429
- interval=1, wkst=None, count=None, until=None, bysetpos=None,
430
- bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
431
- byweekno=None, byweekday=None,
432
- byhour=None, byminute=None, bysecond=None,
433
- cache=False):
434
- super(rrule, self).__init__(cache)
435
- global easter
436
- if not dtstart:
437
- if until and until.tzinfo:
438
- dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0)
439
- else:
440
- dtstart = datetime.datetime.now().replace(microsecond=0)
441
- elif not isinstance(dtstart, datetime.datetime):
442
- dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
443
- else:
444
- dtstart = dtstart.replace(microsecond=0)
445
- self._dtstart = dtstart
446
- self._tzinfo = dtstart.tzinfo
447
- self._freq = freq
448
- self._interval = interval
449
- self._count = count
450
-
451
- # Cache the original byxxx rules, if they are provided, as the _byxxx
452
- # attributes do not necessarily map to the inputs, and this can be
453
- # a problem in generating the strings. Only store things if they've
454
- # been supplied (the string retrieval will just use .get())
455
- self._original_rule = {}
456
-
457
- if until and not isinstance(until, datetime.datetime):
458
- until = datetime.datetime.fromordinal(until.toordinal())
459
- self._until = until
460
-
461
- if self._dtstart and self._until:
462
- if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
463
- # According to RFC5545 Section 3.3.10:
464
- # https://tools.ietf.org/html/rfc5545#section-3.3.10
465
- #
466
- # > If the "DTSTART" property is specified as a date with UTC
467
- # > time or a date with local time and time zone reference,
468
- # > then the UNTIL rule part MUST be specified as a date with
469
- # > UTC time.
470
- raise ValueError(
471
- 'RRULE UNTIL values must be specified in UTC when DTSTART '
472
- 'is timezone-aware'
473
- )
474
-
475
- if count is not None and until:
476
- warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
477
- " and has been deprecated in dateutil. Future versions will "
478
- "raise an error.", DeprecationWarning)
479
-
480
- if wkst is None:
481
- self._wkst = calendar.firstweekday()
482
- elif isinstance(wkst, integer_types):
483
- self._wkst = wkst
484
- else:
485
- self._wkst = wkst.weekday
486
-
487
- if bysetpos is None:
488
- self._bysetpos = None
489
- elif isinstance(bysetpos, integer_types):
490
- if bysetpos == 0 or not (-366 <= bysetpos <= 366):
491
- raise ValueError("bysetpos must be between 1 and 366, "
492
- "or between -366 and -1")
493
- self._bysetpos = (bysetpos,)
494
- else:
495
- self._bysetpos = tuple(bysetpos)
496
- for pos in self._bysetpos:
497
- if pos == 0 or not (-366 <= pos <= 366):
498
- raise ValueError("bysetpos must be between 1 and 366, "
499
- "or between -366 and -1")
500
-
501
- if self._bysetpos:
502
- self._original_rule['bysetpos'] = self._bysetpos
503
-
504
- if (byweekno is None and byyearday is None and bymonthday is None and
505
- byweekday is None and byeaster is None):
506
- if freq == YEARLY:
507
- if bymonth is None:
508
- bymonth = dtstart.month
509
- self._original_rule['bymonth'] = None
510
- bymonthday = dtstart.day
511
- self._original_rule['bymonthday'] = None
512
- elif freq == MONTHLY:
513
- bymonthday = dtstart.day
514
- self._original_rule['bymonthday'] = None
515
- elif freq == WEEKLY:
516
- byweekday = dtstart.weekday()
517
- self._original_rule['byweekday'] = None
518
-
519
- # bymonth
520
- if bymonth is None:
521
- self._bymonth = None
522
- else:
523
- if isinstance(bymonth, integer_types):
524
- bymonth = (bymonth,)
525
-
526
- self._bymonth = tuple(sorted(set(bymonth)))
527
-
528
- if 'bymonth' not in self._original_rule:
529
- self._original_rule['bymonth'] = self._bymonth
530
-
531
- # byyearday
532
- if byyearday is None:
533
- self._byyearday = None
534
- else:
535
- if isinstance(byyearday, integer_types):
536
- byyearday = (byyearday,)
537
-
538
- self._byyearday = tuple(sorted(set(byyearday)))
539
- self._original_rule['byyearday'] = self._byyearday
540
-
541
- # byeaster
542
- if byeaster is not None:
543
- if not easter:
544
- from dateutil import easter
545
- if isinstance(byeaster, integer_types):
546
- self._byeaster = (byeaster,)
547
- else:
548
- self._byeaster = tuple(sorted(byeaster))
549
-
550
- self._original_rule['byeaster'] = self._byeaster
551
- else:
552
- self._byeaster = None
553
-
554
- # bymonthday
555
- if bymonthday is None:
556
- self._bymonthday = ()
557
- self._bynmonthday = ()
558
- else:
559
- if isinstance(bymonthday, integer_types):
560
- bymonthday = (bymonthday,)
561
-
562
- bymonthday = set(bymonthday) # Ensure it's unique
563
-
564
- self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
565
- self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
566
-
567
- # Storing positive numbers first, then negative numbers
568
- if 'bymonthday' not in self._original_rule:
569
- self._original_rule['bymonthday'] = tuple(
570
- itertools.chain(self._bymonthday, self._bynmonthday))
571
-
572
- # byweekno
573
- if byweekno is None:
574
- self._byweekno = None
575
- else:
576
- if isinstance(byweekno, integer_types):
577
- byweekno = (byweekno,)
578
-
579
- self._byweekno = tuple(sorted(set(byweekno)))
580
-
581
- self._original_rule['byweekno'] = self._byweekno
582
-
583
- # byweekday / bynweekday
584
- if byweekday is None:
585
- self._byweekday = None
586
- self._bynweekday = None
587
- else:
588
- # If it's one of the valid non-sequence types, convert to a
589
- # single-element sequence before the iterator that builds the
590
- # byweekday set.
591
- if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
592
- byweekday = (byweekday,)
593
-
594
- self._byweekday = set()
595
- self._bynweekday = set()
596
- for wday in byweekday:
597
- if isinstance(wday, integer_types):
598
- self._byweekday.add(wday)
599
- elif not wday.n or freq > MONTHLY:
600
- self._byweekday.add(wday.weekday)
601
- else:
602
- self._bynweekday.add((wday.weekday, wday.n))
603
-
604
- if not self._byweekday:
605
- self._byweekday = None
606
- elif not self._bynweekday:
607
- self._bynweekday = None
608
-
609
- if self._byweekday is not None:
610
- self._byweekday = tuple(sorted(self._byweekday))
611
- orig_byweekday = [weekday(x) for x in self._byweekday]
612
- else:
613
- orig_byweekday = ()
614
-
615
- if self._bynweekday is not None:
616
- self._bynweekday = tuple(sorted(self._bynweekday))
617
- orig_bynweekday = [weekday(*x) for x in self._bynweekday]
618
- else:
619
- orig_bynweekday = ()
620
-
621
- if 'byweekday' not in self._original_rule:
622
- self._original_rule['byweekday'] = tuple(itertools.chain(
623
- orig_byweekday, orig_bynweekday))
624
-
625
- # byhour
626
- if byhour is None:
627
- if freq < HOURLY:
628
- self._byhour = {dtstart.hour}
629
- else:
630
- self._byhour = None
631
- else:
632
- if isinstance(byhour, integer_types):
633
- byhour = (byhour,)
634
-
635
- if freq == HOURLY:
636
- self._byhour = self.__construct_byset(start=dtstart.hour,
637
- byxxx=byhour,
638
- base=24)
639
- else:
640
- self._byhour = set(byhour)
641
-
642
- self._byhour = tuple(sorted(self._byhour))
643
- self._original_rule['byhour'] = self._byhour
644
-
645
- # byminute
646
- if byminute is None:
647
- if freq < MINUTELY:
648
- self._byminute = {dtstart.minute}
649
- else:
650
- self._byminute = None
651
- else:
652
- if isinstance(byminute, integer_types):
653
- byminute = (byminute,)
654
-
655
- if freq == MINUTELY:
656
- self._byminute = self.__construct_byset(start=dtstart.minute,
657
- byxxx=byminute,
658
- base=60)
659
- else:
660
- self._byminute = set(byminute)
661
-
662
- self._byminute = tuple(sorted(self._byminute))
663
- self._original_rule['byminute'] = self._byminute
664
-
665
- # bysecond
666
- if bysecond is None:
667
- if freq < SECONDLY:
668
- self._bysecond = ((dtstart.second,))
669
- else:
670
- self._bysecond = None
671
- else:
672
- if isinstance(bysecond, integer_types):
673
- bysecond = (bysecond,)
674
-
675
- self._bysecond = set(bysecond)
676
-
677
- if freq == SECONDLY:
678
- self._bysecond = self.__construct_byset(start=dtstart.second,
679
- byxxx=bysecond,
680
- base=60)
681
- else:
682
- self._bysecond = set(bysecond)
683
-
684
- self._bysecond = tuple(sorted(self._bysecond))
685
- self._original_rule['bysecond'] = self._bysecond
686
-
687
- if self._freq >= HOURLY:
688
- self._timeset = None
689
- else:
690
- self._timeset = []
691
- for hour in self._byhour:
692
- for minute in self._byminute:
693
- for second in self._bysecond:
694
- self._timeset.append(
695
- datetime.time(hour, minute, second,
696
- tzinfo=self._tzinfo))
697
- self._timeset.sort()
698
- self._timeset = tuple(self._timeset)
699
-
700
- def __str__(self):
701
- """
702
- Output a string that would generate this RRULE if passed to rrulestr.
703
- This is mostly compatible with RFC5545, except for the
704
- dateutil-specific extension BYEASTER.
705
- """
706
-
707
- output = []
708
- h, m, s = [None] * 3
709
- if self._dtstart:
710
- output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
711
- h, m, s = self._dtstart.timetuple()[3:6]
712
-
713
- parts = ['FREQ=' + FREQNAMES[self._freq]]
714
- if self._interval != 1:
715
- parts.append('INTERVAL=' + str(self._interval))
716
-
717
- if self._wkst:
718
- parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
719
-
720
- if self._count is not None:
721
- parts.append('COUNT=' + str(self._count))
722
-
723
- if self._until:
724
- parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
725
-
726
- if self._original_rule.get('byweekday') is not None:
727
- # The str() method on weekday objects doesn't generate
728
- # RFC5545-compliant strings, so we should modify that.
729
- original_rule = dict(self._original_rule)
730
- wday_strings = []
731
- for wday in original_rule['byweekday']:
732
- if wday.n:
733
- wday_strings.append('{n:+d}{wday}'.format(
734
- n=wday.n,
735
- wday=repr(wday)[0:2]))
736
- else:
737
- wday_strings.append(repr(wday))
738
-
739
- original_rule['byweekday'] = wday_strings
740
- else:
741
- original_rule = self._original_rule
742
-
743
- partfmt = '{name}={vals}'
744
- for name, key in [('BYSETPOS', 'bysetpos'),
745
- ('BYMONTH', 'bymonth'),
746
- ('BYMONTHDAY', 'bymonthday'),
747
- ('BYYEARDAY', 'byyearday'),
748
- ('BYWEEKNO', 'byweekno'),
749
- ('BYDAY', 'byweekday'),
750
- ('BYHOUR', 'byhour'),
751
- ('BYMINUTE', 'byminute'),
752
- ('BYSECOND', 'bysecond'),
753
- ('BYEASTER', 'byeaster')]:
754
- value = original_rule.get(key)
755
- if value:
756
- parts.append(partfmt.format(name=name, vals=(','.join(str(v)
757
- for v in value))))
758
-
759
- output.append('RRULE:' + ';'.join(parts))
760
- return '\n'.join(output)
761
-
762
- def replace(self, **kwargs):
763
- """Return new rrule with same attributes except for those attributes given new
764
- values by whichever keyword arguments are specified."""
765
- new_kwargs = {"interval": self._interval,
766
- "count": self._count,
767
- "dtstart": self._dtstart,
768
- "freq": self._freq,
769
- "until": self._until,
770
- "wkst": self._wkst,
771
- "cache": False if self._cache is None else True }
772
- new_kwargs.update(self._original_rule)
773
- new_kwargs.update(kwargs)
774
- return rrule(**new_kwargs)
775
-
776
- def _iter(self):
777
- year, month, day, hour, minute, second, weekday, yearday, _ = \
778
- self._dtstart.timetuple()
779
-
-        # Some local variables to speed things up a bit
-        freq = self._freq
-        interval = self._interval
-        wkst = self._wkst
-        until = self._until
-        bymonth = self._bymonth
-        byweekno = self._byweekno
-        byyearday = self._byyearday
-        byweekday = self._byweekday
-        byeaster = self._byeaster
-        bymonthday = self._bymonthday
-        bynmonthday = self._bynmonthday
-        bysetpos = self._bysetpos
-        byhour = self._byhour
-        byminute = self._byminute
-        bysecond = self._bysecond
-
-        ii = _iterinfo(self)
-        ii.rebuild(year, month)
-
-        getdayset = {YEARLY: ii.ydayset,
-                     MONTHLY: ii.mdayset,
-                     WEEKLY: ii.wdayset,
-                     DAILY: ii.ddayset,
-                     HOURLY: ii.ddayset,
-                     MINUTELY: ii.ddayset,
-                     SECONDLY: ii.ddayset}[freq]
-
-        if freq < HOURLY:
-            timeset = self._timeset
-        else:
-            gettimeset = {HOURLY: ii.htimeset,
-                          MINUTELY: ii.mtimeset,
-                          SECONDLY: ii.stimeset}[freq]
-            if ((freq >= HOURLY and
-                 self._byhour and hour not in self._byhour) or
-                    (freq >= MINUTELY and
-                     self._byminute and minute not in self._byminute) or
-                    (freq >= SECONDLY and
-                     self._bysecond and second not in self._bysecond)):
-                timeset = ()
-            else:
-                timeset = gettimeset(hour, minute, second)
-
-        total = 0
-        count = self._count
-        while True:
-            # Get dayset with the right frequency
-            dayset, start, end = getdayset(year, month, day)
-
-            # Do the "hard" work ;-)
-            filtered = False
-            for i in dayset[start:end]:
-                if ((bymonth and ii.mmask[i] not in bymonth) or
-                        (byweekno and not ii.wnomask[i]) or
-                        (byweekday and ii.wdaymask[i] not in byweekday) or
-                        (ii.nwdaymask and not ii.nwdaymask[i]) or
-                        (byeaster and not ii.eastermask[i]) or
-                        ((bymonthday or bynmonthday) and
-                         ii.mdaymask[i] not in bymonthday and
-                         ii.nmdaymask[i] not in bynmonthday) or
-                        (byyearday and
-                         ((i < ii.yearlen and i+1 not in byyearday and
-                           -ii.yearlen+i not in byyearday) or
-                          (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-                           -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
-                    dayset[i] = None
-                    filtered = True
-
-            # Output results
-            if bysetpos and timeset:
-                poslist = []
-                for pos in bysetpos:
-                    if pos < 0:
-                        daypos, timepos = divmod(pos, len(timeset))
-                    else:
-                        daypos, timepos = divmod(pos-1, len(timeset))
-                    try:
-                        i = [x for x in dayset[start:end]
-                             if x is not None][daypos]
-                        time = timeset[timepos]
-                    except IndexError:
-                        pass
-                    else:
-                        date = datetime.date.fromordinal(ii.yearordinal+i)
-                        res = datetime.datetime.combine(date, time)
-                        if res not in poslist:
-                            poslist.append(res)
-                poslist.sort()
-                for res in poslist:
-                    if until and res > until:
-                        self._len = total
-                        return
-                    elif res >= self._dtstart:
-                        if count is not None:
-                            count -= 1
-                            if count < 0:
-                                self._len = total
-                                return
-                        total += 1
-                        yield res
-            else:
-                for i in dayset[start:end]:
-                    if i is not None:
-                        date = datetime.date.fromordinal(ii.yearordinal + i)
-                        for time in timeset:
-                            res = datetime.datetime.combine(date, time)
-                            if until and res > until:
-                                self._len = total
-                                return
-                            elif res >= self._dtstart:
-                                if count is not None:
-                                    count -= 1
-                                    if count < 0:
-                                        self._len = total
-                                        return
-
-                                total += 1
-                                yield res
-
-            # Handle frequency and interval
-            fixday = False
-            if freq == YEARLY:
-                year += interval
-                if year > datetime.MAXYEAR:
-                    self._len = total
-                    return
-                ii.rebuild(year, month)
-            elif freq == MONTHLY:
-                month += interval
-                if month > 12:
-                    div, mod = divmod(month, 12)
-                    month = mod
-                    year += div
-                    if month == 0:
-                        month = 12
-                        year -= 1
-                    if year > datetime.MAXYEAR:
-                        self._len = total
-                        return
-                ii.rebuild(year, month)
-            elif freq == WEEKLY:
-                if wkst > weekday:
-                    day += -(weekday+1+(6-wkst))+self._interval*7
-                else:
-                    day += -(weekday-wkst)+self._interval*7
-                weekday = wkst
-                fixday = True
-            elif freq == DAILY:
-                day += interval
-                fixday = True
-            elif freq == HOURLY:
-                if filtered:
-                    # Jump to one iteration before next day
-                    hour += ((23-hour)//interval)*interval
-
-                if byhour:
-                    ndays, hour = self.__mod_distance(value=hour,
-                                                      byxxx=self._byhour,
-                                                      base=24)
-                else:
-                    ndays, hour = divmod(hour+interval, 24)
-
-                if ndays:
-                    day += ndays
-                    fixday = True
-
-                timeset = gettimeset(hour, minute, second)
-            elif freq == MINUTELY:
-                if filtered:
-                    # Jump to one iteration before next day
-                    minute += ((1439-(hour*60+minute))//interval)*interval
-
-                valid = False
-                rep_rate = (24*60)
-                for j in range(rep_rate // gcd(interval, rep_rate)):
-                    if byminute:
-                        nhours, minute = \
-                            self.__mod_distance(value=minute,
-                                                byxxx=self._byminute,
-                                                base=60)
-                    else:
-                        nhours, minute = divmod(minute+interval, 60)
-
-                    div, hour = divmod(hour+nhours, 24)
-                    if div:
-                        day += div
-                        fixday = True
-                        filtered = False
-
-                    if not byhour or hour in byhour:
-                        valid = True
-                        break
-
-                if not valid:
-                    raise ValueError('Invalid combination of interval and ' +
-                                     'byhour resulting in empty rule.')
-
-                timeset = gettimeset(hour, minute, second)
-            elif freq == SECONDLY:
-                if filtered:
-                    # Jump to one iteration before next day
-                    second += (((86399 - (hour * 3600 + minute * 60 + second))
-                                // interval) * interval)
-
-                rep_rate = (24 * 3600)
-                valid = False
-                for j in range(0, rep_rate // gcd(interval, rep_rate)):
-                    if bysecond:
-                        nminutes, second = \
-                            self.__mod_distance(value=second,
-                                                byxxx=self._bysecond,
-                                                base=60)
-                    else:
-                        nminutes, second = divmod(second+interval, 60)
-
-                    div, minute = divmod(minute+nminutes, 60)
-                    if div:
-                        hour += div
-                        div, hour = divmod(hour, 24)
-                        if div:
-                            day += div
-                            fixday = True
-
-                    if ((not byhour or hour in byhour) and
-                            (not byminute or minute in byminute) and
-                            (not bysecond or second in bysecond)):
-                        valid = True
-                        break
-
-                if not valid:
-                    raise ValueError('Invalid combination of interval, ' +
-                                     'byhour and byminute resulting in empty' +
-                                     ' rule.')
-
-                timeset = gettimeset(hour, minute, second)
-
-            if fixday and day > 28:
-                daysinmonth = calendar.monthrange(year, month)[1]
-                if day > daysinmonth:
-                    while day > daysinmonth:
-                        day -= daysinmonth
-                        month += 1
-                        if month == 13:
-                            month = 1
-                            year += 1
-                            if year > datetime.MAXYEAR:
-                                self._len = total
-                                return
-                        daysinmonth = calendar.monthrange(year, month)[1]
-                    ii.rebuild(year, month)
-
-    def __construct_byset(self, start, byxxx, base):
-        """
-        If a `BYXXX` sequence is passed to the constructor at the same level as
-        `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
-        specifications which cannot be reached given some starting conditions.
-
-        This occurs whenever the interval is not coprime with the base of a
-        given unit and the difference between the starting position and the
-        ending position is not coprime with the greatest common denominator
-        between the interval and the base. For example, with a FREQ of hourly
-        starting at 17:00 and an interval of 4, the only valid values for
-        BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
-        coprime.
-
-        :param start:
-            Specifies the starting position.
-        :param byxxx:
-            An iterable containing the list of allowed values.
-        :param base:
-            The largest allowable value for the specified frequency (e.g.
-            24 hours, 60 minutes).
-
-        This does not preserve the type of the iterable, returning a set, since
-        the values should be unique and the order is irrelevant, this will
-        speed up later lookups.
-
-        In the event of an empty set, raises a :exception:`ValueError`, as this
-        results in an empty rrule.
-        """
-
-        cset = set()
-
-        # Support a single byxxx value.
-        if isinstance(byxxx, integer_types):
-            byxxx = (byxxx, )
-
-        for num in byxxx:
-            i_gcd = gcd(self._interval, base)
-            # Use divmod rather than % because we need to wrap negative nums.
-            if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
-                cset.add(num)
-
-        if len(cset) == 0:
-            raise ValueError("Invalid rrule byxxx generates an empty set.")
-
-        return cset
-
-    def __mod_distance(self, value, byxxx, base):
-        """
-        Calculates the next value in a sequence where the `FREQ` parameter is
-        specified along with a `BYXXX` parameter at the same "level"
-        (e.g. `HOURLY` specified with `BYHOUR`).
-
-        :param value:
-            The old value of the component.
-        :param byxxx:
-            The `BYXXX` set, which should have been generated by
-            `rrule._construct_byset`, or something else which checks that a
-            valid rule is present.
-        :param base:
-            The largest allowable value for the specified frequency (e.g.
-            24 hours, 60 minutes).
-
-        If a valid value is not found after `base` iterations (the maximum
-        number before the sequence would start to repeat), this raises a
-        :exception:`ValueError`, as no valid values were found.
-
-        This returns a tuple of `divmod(n*interval, base)`, where `n` is the
-        smallest number of `interval` repetitions until the next specified
-        value in `byxxx` is found.
-        """
-        accumulator = 0
-        for ii in range(1, base + 1):
-            # Using divmod() over % to account for negative intervals
-            div, value = divmod(value + self._interval, base)
-            accumulator += div
-            if value in byxxx:
-                return (accumulator, value)
-
-
- class _iterinfo(object):
-     __slots__ = ["rrule", "lastyear", "lastmonth",
-                  "yearlen", "nextyearlen", "yearordinal", "yearweekday",
-                  "mmask", "mrange", "mdaymask", "nmdaymask",
-                  "wdaymask", "wnomask", "nwdaymask", "eastermask"]
-
-     def __init__(self, rrule):
-         for attr in self.__slots__:
-             setattr(self, attr, None)
-         self.rrule = rrule
-
-     def rebuild(self, year, month):
-         # Every mask is 7 days longer to handle cross-year weekly periods.
-         rr = self.rrule
-         if year != self.lastyear:
-             self.yearlen = 365 + calendar.isleap(year)
-             self.nextyearlen = 365 + calendar.isleap(year + 1)
-             firstyday = datetime.date(year, 1, 1)
-             self.yearordinal = firstyday.toordinal()
-             self.yearweekday = firstyday.weekday()
-
-             wday = datetime.date(year, 1, 1).weekday()
-             if self.yearlen == 365:
-                 self.mmask = M365MASK
-                 self.mdaymask = MDAY365MASK
-                 self.nmdaymask = NMDAY365MASK
-                 self.wdaymask = WDAYMASK[wday:]
-                 self.mrange = M365RANGE
-             else:
-                 self.mmask = M366MASK
-                 self.mdaymask = MDAY366MASK
-                 self.nmdaymask = NMDAY366MASK
-                 self.wdaymask = WDAYMASK[wday:]
-                 self.mrange = M366RANGE
-
-             if not rr._byweekno:
-                 self.wnomask = None
-             else:
-                 self.wnomask = [0]*(self.yearlen+7)
-                 # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
-                 no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
-                 if no1wkst >= 4:
-                     no1wkst = 0
-                     # Number of days in the year, plus the days we got
-                     # from last year.
-                     wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
-                 else:
-                     # Number of days in the year, minus the days we
-                     # left in last year.
-                     wyearlen = self.yearlen-no1wkst
-                 div, mod = divmod(wyearlen, 7)
-                 numweeks = div+mod//4
-                 for n in rr._byweekno:
-                     if n < 0:
-                         n += numweeks+1
-                     if not (0 < n <= numweeks):
-                         continue
-                     if n > 1:
-                         i = no1wkst+(n-1)*7
-                         if no1wkst != firstwkst:
-                             i -= 7-firstwkst
-                     else:
-                         i = no1wkst
-                     for j in range(7):
-                         self.wnomask[i] = 1
-                         i += 1
-                         if self.wdaymask[i] == rr._wkst:
-                             break
-                 if 1 in rr._byweekno:
-                     # Check week number 1 of next year as well
-                     # TODO: Check -numweeks for next year.
-                     i = no1wkst+numweeks*7
-                     if no1wkst != firstwkst:
-                         i -= 7-firstwkst
-                     if i < self.yearlen:
-                         # If week starts in next year, we
-                         # don't care about it.
-                         for j in range(7):
-                             self.wnomask[i] = 1
-                             i += 1
-                             if self.wdaymask[i] == rr._wkst:
-                                 break
-                 if no1wkst:
-                     # Check last week number of last year as
-                     # well. If no1wkst is 0, either the year
-                     # started on week start, or week number 1
-                     # got days from last year, so there are no
-                     # days from last year's last week number in
-                     # this year.
-                     if -1 not in rr._byweekno:
-                         lyearweekday = datetime.date(year-1, 1, 1).weekday()
-                         lno1wkst = (7-lyearweekday+rr._wkst) % 7
-                         lyearlen = 365+calendar.isleap(year-1)
-                         if lno1wkst >= 4:
-                             lno1wkst = 0
-                             lnumweeks = 52+(lyearlen +
-                                             (lyearweekday-rr._wkst) % 7) % 7//4
-                         else:
-                             lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
-                     else:
-                         lnumweeks = -1
-                     if lnumweeks in rr._byweekno:
-                         for i in range(no1wkst):
-                             self.wnomask[i] = 1
-
-         if (rr._bynweekday and (month != self.lastmonth or
-                                 year != self.lastyear)):
-             ranges = []
-             if rr._freq == YEARLY:
-                 if rr._bymonth:
-                     for month in rr._bymonth:
-                         ranges.append(self.mrange[month-1:month+1])
-                 else:
-                     ranges = [(0, self.yearlen)]
-             elif rr._freq == MONTHLY:
-                 ranges = [self.mrange[month-1:month+1]]
-             if ranges:
-                 # Weekly frequency won't get here, so we may not
-                 # care about cross-year weekly periods.
-                 self.nwdaymask = [0]*self.yearlen
-                 for first, last in ranges:
-                     last -= 1
-                     for wday, n in rr._bynweekday:
-                         if n < 0:
-                             i = last+(n+1)*7
-                             i -= (self.wdaymask[i]-wday) % 7
-                         else:
-                             i = first+(n-1)*7
-                             i += (7-self.wdaymask[i]+wday) % 7
-                         if first <= i <= last:
-                             self.nwdaymask[i] = 1
-
-         if rr._byeaster:
-             self.eastermask = [0]*(self.yearlen+7)
-             eyday = easter.easter(year).toordinal()-self.yearordinal
-             for offset in rr._byeaster:
-                 self.eastermask[eyday+offset] = 1
-
-         self.lastyear = year
-         self.lastmonth = month
-
-     def ydayset(self, year, month, day):
-         return list(range(self.yearlen)), 0, self.yearlen
-
-     def mdayset(self, year, month, day):
-         dset = [None]*self.yearlen
-         start, end = self.mrange[month-1:month+1]
-         for i in range(start, end):
-             dset[i] = i
-         return dset, start, end
-
-     def wdayset(self, year, month, day):
-         # We need to handle cross-year weeks here.
-         dset = [None]*(self.yearlen+7)
-         i = datetime.date(year, month, day).toordinal()-self.yearordinal
-         start = i
-         for j in range(7):
-             dset[i] = i
-             i += 1
-             # if (not (0 <= i < self.yearlen) or
-             #     self.wdaymask[i] == self.rrule._wkst):
-             # This will cross the year boundary, if necessary.
-             if self.wdaymask[i] == self.rrule._wkst:
-                 break
-         return dset, start, i
-
-     def ddayset(self, year, month, day):
-         dset = [None] * self.yearlen
-         i = datetime.date(year, month, day).toordinal() - self.yearordinal
-         dset[i] = i
-         return dset, i, i + 1
-
-     def htimeset(self, hour, minute, second):
-         tset = []
-         rr = self.rrule
-         for minute in rr._byminute:
-             for second in rr._bysecond:
-                 tset.append(datetime.time(hour, minute, second,
-                                           tzinfo=rr._tzinfo))
-         tset.sort()
-         return tset
-
-     def mtimeset(self, hour, minute, second):
-         tset = []
-         rr = self.rrule
-         for second in rr._bysecond:
-             tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
-         tset.sort()
-         return tset
-
-     def stimeset(self, hour, minute, second):
-         return (datetime.time(hour, minute, second,
-                               tzinfo=self.rrule._tzinfo),)
-
-
- class rruleset(rrulebase):
-     """ The rruleset type allows more complex recurrence setups, mixing
-     multiple rules, dates, exclusion rules, and exclusion dates. The type
-     constructor takes the following keyword arguments:
-
-     :param cache: If True, caching of results will be enabled, improving
-                   performance of multiple queries considerably. """
-
-     class _genitem(object):
-         def __init__(self, genlist, gen):
-             try:
-                 self.dt = advance_iterator(gen)
-                 genlist.append(self)
-             except StopIteration:
-                 pass
-             self.genlist = genlist
-             self.gen = gen
-
-         def __next__(self):
-             try:
-                 self.dt = advance_iterator(self.gen)
-             except StopIteration:
-                 if self.genlist[0] is self:
-                     heapq.heappop(self.genlist)
-                 else:
-                     self.genlist.remove(self)
-                     heapq.heapify(self.genlist)
-
-         next = __next__
-
-         def __lt__(self, other):
-             return self.dt < other.dt
-
-         def __gt__(self, other):
-             return self.dt > other.dt
-
-         def __eq__(self, other):
-             return self.dt == other.dt
-
-         def __ne__(self, other):
-             return self.dt != other.dt
-
-     def __init__(self, cache=False):
-         super(rruleset, self).__init__(cache)
-         self._rrule = []
-         self._rdate = []
-         self._exrule = []
-         self._exdate = []
-
-     @_invalidates_cache
-     def rrule(self, rrule):
-         """ Include the given :py:class:`rrule` instance in the recurrence set
-         generation. """
-         self._rrule.append(rrule)
-
-     @_invalidates_cache
-     def rdate(self, rdate):
-         """ Include the given :py:class:`datetime` instance in the recurrence
-         set generation. """
-         self._rdate.append(rdate)
-
-     @_invalidates_cache
-     def exrule(self, exrule):
-         """ Include the given rrule instance in the recurrence set exclusion
-         list. Dates which are part of the given recurrence rules will not
-         be generated, even if some inclusive rrule or rdate matches them.
-         """
-         self._exrule.append(exrule)
-
-     @_invalidates_cache
-     def exdate(self, exdate):
-         """ Include the given datetime instance in the recurrence set
-         exclusion list. Dates included that way will not be generated,
-         even if some inclusive rrule or rdate matches them. """
-         self._exdate.append(exdate)
-
-     def _iter(self):
-         rlist = []
-         self._rdate.sort()
-         self._genitem(rlist, iter(self._rdate))
-         for gen in [iter(x) for x in self._rrule]:
-             self._genitem(rlist, gen)
-         exlist = []
-         self._exdate.sort()
-         self._genitem(exlist, iter(self._exdate))
-         for gen in [iter(x) for x in self._exrule]:
-             self._genitem(exlist, gen)
-         lastdt = None
-         total = 0
-         heapq.heapify(rlist)
-         heapq.heapify(exlist)
-         while rlist:
-             ritem = rlist[0]
-             if not lastdt or lastdt != ritem.dt:
-                 while exlist and exlist[0] < ritem:
-                     exitem = exlist[0]
-                     advance_iterator(exitem)
-                     if exlist and exlist[0] is exitem:
-                         heapq.heapreplace(exlist, exitem)
-                 if not exlist or ritem != exlist[0]:
-                     total += 1
-                     yield ritem.dt
-                 lastdt = ritem.dt
-             advance_iterator(ritem)
-             if rlist and rlist[0] is ritem:
-                 heapq.heapreplace(rlist, ritem)
-         self._len = total
-
-
- class _rrulestr(object):
-     """ Parses a string representation of a recurrence rule or set of
-     recurrence rules.
-
-     :param s:
-         Required, a string defining one or more recurrence rules.
-
-     :param dtstart:
-         If given, used as the default recurrence start if not specified in the
-         rule string.
-
-     :param cache:
-         If set ``True`` caching of results will be enabled, improving
-         performance of multiple queries considerably.
-
-     :param unfold:
-         If set ``True`` indicates that a rule string is split over more
-         than one line and should be joined before processing.
-
-     :param forceset:
-         If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
-         be returned.
-
-     :param compatible:
-         If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.
-
-     :param ignoretz:
-         If set ``True``, time zones in parsed strings are ignored and a naive
-         :class:`datetime.datetime` object is returned.
-
-     :param tzids:
-         If given, a callable or mapping used to retrieve a
-         :class:`datetime.tzinfo` from a string representation.
-         Defaults to :func:`dateutil.tz.gettz`.
-
-     :param tzinfos:
-         Additional time zone names / aliases which may be present in a string
-         representation. See :func:`dateutil.parser.parse` for more
-         information.
-
-     :return:
-         Returns a :class:`dateutil.rrule.rruleset` or
-         :class:`dateutil.rrule.rrule`
-     """
-
-     _freq_map = {"YEARLY": YEARLY,
-                  "MONTHLY": MONTHLY,
-                  "WEEKLY": WEEKLY,
-                  "DAILY": DAILY,
-                  "HOURLY": HOURLY,
-                  "MINUTELY": MINUTELY,
-                  "SECONDLY": SECONDLY}
-
-     _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
-                     "FR": 4, "SA": 5, "SU": 6}
-
-     def _handle_int(self, rrkwargs, name, value, **kwargs):
-         rrkwargs[name.lower()] = int(value)
-
-     def _handle_int_list(self, rrkwargs, name, value, **kwargs):
-         rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
-
-     _handle_INTERVAL = _handle_int
-     _handle_COUNT = _handle_int
-     _handle_BYSETPOS = _handle_int_list
-     _handle_BYMONTH = _handle_int_list
-     _handle_BYMONTHDAY = _handle_int_list
-     _handle_BYYEARDAY = _handle_int_list
-     _handle_BYEASTER = _handle_int_list
-     _handle_BYWEEKNO = _handle_int_list
-     _handle_BYHOUR = _handle_int_list
-     _handle_BYMINUTE = _handle_int_list
-     _handle_BYSECOND = _handle_int_list
-
-     def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
-         rrkwargs["freq"] = self._freq_map[value]
-
-     def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
-         global parser
-         if not parser:
-             from dateutil import parser
-         try:
-             rrkwargs["until"] = parser.parse(value,
-                                              ignoretz=kwargs.get("ignoretz"),
-                                              tzinfos=kwargs.get("tzinfos"))
-         except ValueError:
-             raise ValueError("invalid until date")
-
-     def _handle_WKST(self, rrkwargs, name, value, **kwargs):
-         rrkwargs["wkst"] = self._weekday_map[value]
-
-     def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
-         """
-         Two ways to specify this: +1MO or MO(+1)
-         """
-         l = []
-         for wday in value.split(','):
-             if '(' in wday:
-                 # If it's of the form TH(+1), etc.
-                 splt = wday.split('(')
-                 w = splt[0]
-                 n = int(splt[1][:-1])
-             elif len(wday):
-                 # If it's of the form +1MO
-                 for i in range(len(wday)):
-                     if wday[i] not in '+-0123456789':
-                         break
-                 n = wday[:i] or None
-                 w = wday[i:]
-                 if n:
-                     n = int(n)
-             else:
-                 raise ValueError("Invalid (empty) BYDAY specification.")
-
-             l.append(weekdays[self._weekday_map[w]](n))
-         rrkwargs["byweekday"] = l
-
-     _handle_BYDAY = _handle_BYWEEKDAY
-
-     def _parse_rfc_rrule(self, line,
-                          dtstart=None,
-                          cache=False,
-                          ignoretz=False,
-                          tzinfos=None):
-         if line.find(':') != -1:
-             name, value = line.split(':')
-             if name != "RRULE":
-                 raise ValueError("unknown parameter name")
-         else:
-             value = line
-         rrkwargs = {}
-         for pair in value.split(';'):
-             name, value = pair.split('=')
-             name = name.upper()
-             value = value.upper()
-             try:
-                 getattr(self, "_handle_"+name)(rrkwargs, name, value,
-                                                ignoretz=ignoretz,
-                                                tzinfos=tzinfos)
-             except AttributeError:
-                 raise ValueError("unknown parameter '%s'" % name)
-             except (KeyError, ValueError):
-                 raise ValueError("invalid '%s': %s" % (name, value))
-         return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
-
-     def _parse_date_value(self, date_value, parms, rule_tzids,
-                           ignoretz, tzids, tzinfos):
-         global parser
-         if not parser:
-             from dateutil import parser
-
-         datevals = []
-         value_found = False
-         TZID = None
-
-         for parm in parms:
-             if parm.startswith("TZID="):
-                 try:
-                     tzkey = rule_tzids[parm.split('TZID=')[-1]]
-                 except KeyError:
-                     continue
-                 if tzids is None:
-                     from . import tz
-                     tzlookup = tz.gettz
-                 elif callable(tzids):
-                     tzlookup = tzids
-                 else:
-                     tzlookup = getattr(tzids, 'get', None)
-                     if tzlookup is None:
-                         msg = ('tzids must be a callable, mapping, or None, '
-                                'not %s' % tzids)
-                         raise ValueError(msg)
-
-                 TZID = tzlookup(tzkey)
-                 continue
-
-             # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found
-             # only once.
-             if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
-                 raise ValueError("unsupported parm: " + parm)
-             else:
-                 if value_found:
-                     msg = ("Duplicate value parameter found in: " + parm)
-                     raise ValueError(msg)
-                 value_found = True
-
-         for datestr in date_value.split(','):
-             date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
-             if TZID is not None:
-                 if date.tzinfo is None:
-                     date = date.replace(tzinfo=TZID)
-                 else:
-                     raise ValueError('DTSTART/EXDATE specifies multiple timezone')
-             datevals.append(date)
-
-         return datevals
-
-     def _parse_rfc(self, s,
-                    dtstart=None,
-                    cache=False,
-                    unfold=False,
-                    forceset=False,
-                    compatible=False,
-                    ignoretz=False,
-                    tzids=None,
-                    tzinfos=None):
-         global parser
-         if compatible:
-             forceset = True
-             unfold = True
-
-         TZID_NAMES = dict(map(
-             lambda x: (x.upper(), x),
-             re.findall('TZID=(?P<name>[^:]+):', s)
-         ))
-         s = s.upper()
-         if not s.strip():
-             raise ValueError("empty string")
-         if unfold:
-             lines = s.splitlines()
-             i = 0
-             while i < len(lines):
-                 line = lines[i].rstrip()
-                 if not line:
-                     del lines[i]
-                 elif i > 0 and line[0] == " ":
-                     lines[i-1] += line[1:]
-                     del lines[i]
-                 else:
-                     i += 1
-         else:
-             lines = s.split()
-         if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
-                                                   s.startswith('RRULE:'))):
-             return self._parse_rfc_rrule(lines[0], cache=cache,
-                                          dtstart=dtstart, ignoretz=ignoretz,
-                                          tzinfos=tzinfos)
-         else:
-             rrulevals = []
-             rdatevals = []
-             exrulevals = []
-             exdatevals = []
-             for line in lines:
-                 if not line:
-                     continue
-                 if line.find(':') == -1:
-                     name = "RRULE"
-                     value = line
-                 else:
-                     name, value = line.split(':', 1)
-                 parms = name.split(';')
-                 if not parms:
-                     raise ValueError("empty property name")
-                 name = parms[0]
-                 parms = parms[1:]
-                 if name == "RRULE":
-                     for parm in parms:
-                         raise ValueError("unsupported RRULE parm: "+parm)
-                     rrulevals.append(value)
-                 elif name == "RDATE":
-                     for parm in parms:
-                         if parm != "VALUE=DATE-TIME":
-                             raise ValueError("unsupported RDATE parm: "+parm)
-                     rdatevals.append(value)
-                 elif name == "EXRULE":
-                     for parm in parms:
-                         raise ValueError("unsupported EXRULE parm: "+parm)
-                     exrulevals.append(value)
-                 elif name == "EXDATE":
-                     exdatevals.extend(
-                         self._parse_date_value(value, parms,
-                                                TZID_NAMES, ignoretz,
-                                                tzids, tzinfos)
-                     )
-                 elif name == "DTSTART":
-                     dtvals = self._parse_date_value(value, parms, TZID_NAMES,
-                                                     ignoretz, tzids, tzinfos)
-                     if len(dtvals) != 1:
-                         raise ValueError("Multiple DTSTART values specified:" +
-                                          value)
-                     dtstart = dtvals[0]
-                 else:
-                     raise ValueError("unsupported property: "+name)
-             if (forceset or len(rrulevals) > 1 or rdatevals
-                     or exrulevals or exdatevals):
-                 if not parser and (rdatevals or exdatevals):
-                     from dateutil import parser
-                 rset = rruleset(cache=cache)
-                 for value in rrulevals:
-                     rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
-                                                      ignoretz=ignoretz,
-                                                      tzinfos=tzinfos))
-                 for value in rdatevals:
-                     for datestr in value.split(','):
-                         rset.rdate(parser.parse(datestr,
-                                                 ignoretz=ignoretz,
-                                                 tzinfos=tzinfos))
-                 for value in exrulevals:
-                     rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
-                                                       ignoretz=ignoretz,
-                                                       tzinfos=tzinfos))
-                 for value in exdatevals:
-                     rset.exdate(value)
-                 if compatible and dtstart:
-                     rset.rdate(dtstart)
-                 return rset
-             else:
-                 return self._parse_rfc_rrule(rrulevals[0],
-                                              dtstart=dtstart,
-                                              cache=cache,
-                                              ignoretz=ignoretz,
-                                              tzinfos=tzinfos)
-
-     def __call__(self, s, **kwargs):
-         return self._parse_rfc(s, **kwargs)
-
-
- rrulestr = _rrulestr()
-
- # vim:ts=4:sw=4:et
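
Note: the file removed above is a vendored copy of dateutil's rrule module. As a minimal usage sketch of the `rrulestr` parser and the `rrule` iterator deleted here (assuming the upstream `python-dateutil` package is installed; the rule string below is illustrative):

    from dateutil.rrule import rrulestr

    # DTSTART anchors the recurrence; COUNT=3 bounds the expansion.
    rule = rrulestr("DTSTART:20240101T090000\nRRULE:FREQ=DAILY;COUNT=3")
    print(list(rule))
    # [datetime.datetime(2024, 1, 1, 9, 0), datetime.datetime(2024, 1, 2, 9, 0),
    #  datetime.datetime(2024, 1, 3, 9, 0)]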
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/__init__.py DELETED
@@ -1,4 +0,0 @@
- """Subpackage containing all of pip's command line interface related code
- """
-
- # This file intentionally does not import submodules
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/__init__.py DELETED
@@ -1,7 +0,0 @@
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
- from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console
- from .ansi import Fore, Back, Style, Cursor
- from .ansitowin32 import AnsiToWin32
-
- __version__ = '0.4.6'
-
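
Note: this `__init__` is colorama's whole public surface. A minimal sketch of the API imported above (assuming colorama 0.4.x is installed):

    from colorama import init, Fore, Style

    init()  # on Windows, wraps stdout so ANSI escape codes render as colors
    print(Fore.RED + "error" + Style.RESET_ALL + ": something went wrong")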
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/compat.py DELETED
@@ -1,13 +0,0 @@
- from .core import *
- from .codec import *
- from typing import Any, Union
-
- def ToASCII(label: str) -> bytes:
-     return encode(label)
-
- def ToUnicode(label: Union[bytes, bytearray]) -> str:
-     return decode(label)
-
- def nameprep(s: Any) -> None:
-     raise NotImplementedError('IDNA 2008 does not utilise nameprep protocol')
-
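
Note: this `compat` shim simply forwards the IDNA 2003-style names to `idna.encode`/`idna.decode`. A minimal sketch (assuming the upstream `idna` package is installed):

    from idna.compat import ToASCII, ToUnicode

    print(ToASCII("bücher.example"))            # b'xn--bcher-kva.example'
    print(ToUnicode(b"xn--bcher-kva.example"))  # 'bücher.example'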
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/tensormask/arch.py DELETED
@@ -1,904 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import copy
- import logging
- import math
- from typing import List
- import torch
- import torch.nn.functional as F
- from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss
- from torch import nn
-
- from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image
- from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
- from detectron2.modeling.backbone import build_backbone
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
- from detectron2.modeling.meta_arch.retinanet import (
-     permute_all_cls_and_box_to_N_HWA_K_and_concat,
-     permute_to_N_HWA_K,
- )
- from detectron2.structures import Boxes, ImageList, Instances
- from detectron2.utils.logger import log_first_n
-
- from tensormask.layers import SwapAlign2Nat
-
- __all__ = ["TensorMask"]
-
-
- def _assignment_rule(
-     gt_boxes,
-     anchor_boxes,
-     unit_lengths,
-     min_anchor_size,
-     scale_thresh=2.0,
-     spatial_thresh=1.0,
-     uniqueness_on=True,
- ):
-     """
-     Given two lists of boxes of N ground truth boxes and M anchor boxes,
-     compute the assignment between the two, following the assignment rules in
-     https://arxiv.org/abs/1903.12174.
-     The box order must be (xmin, ymin, xmax, ymax), so please make sure to convert
-     to BoxMode.XYXY_ABS before calling this function.
-
-     Args:
-         gt_boxes, anchor_boxes (Boxes): two Boxes. Contains N & M boxes/anchors, respectively.
-         unit_lengths (Tensor): Contains the unit lengths of M anchor boxes.
-         min_anchor_size (float): Minimum size of the anchor, in pixels
-         scale_thresh (float): The `scale` threshold: the maximum size of the anchor
-             should not be greater than scale_thresh x max(h, w) of
-             the ground truth box.
-         spatial_thresh (float): The `spatial` threshold: the l2 distance between the
-             center of the anchor and the ground truth box should not
-             be greater than spatial_thresh x u where u is the unit length.
-
-     Returns:
-         matches (Tensor[int64]): a vector of length M, where matches[i] is a matched
-             ground-truth index in [0, N)
-         match_labels (Tensor[int8]): a vector of length M, where pred_labels[i] indicates
-             whether a prediction is a true or false positive or ignored
-     """
-     gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor
-     N = gt_boxes.shape[0]
-     M = anchor_boxes.shape[0]
-     if N == 0 or M == 0:
-         return (
-             gt_boxes.new_full((N,), 0, dtype=torch.int64),
-             gt_boxes.new_full((N,), -1, dtype=torch.int8),
-         )
-
-     # Containment rule
-     lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2])  # [N,M,2]
-     rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:])  # [N,M,2]
-     union = cat([lt, rb], dim=2)  # [N,M,4]
-
-     dummy_gt_boxes = torch.zeros_like(gt_boxes)
-     anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :]  # [N,M,4]
-
-     contain_matrix = torch.all(union == anchor, dim=2)  # [N,M]
-
-     # Centrality rule, scale
-     gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0]  # [N]
-     gt_size_upper = gt_size_lower * scale_thresh  # [N]
-     # Fall back for small objects
-     gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size
-     # Due to sampling of locations, the anchor sizes are deducted with sampling strides
-     anchor_size = (
-         torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths
-     )  # [M]
-
-     size_diff_upper = gt_size_upper[:, None] - anchor_size  # [N,M]
-     scale_matrix = size_diff_upper >= 0  # [N,M]
-
-     # Centrality rule, spatial
-     gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2  # [N,2]
-     anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2  # [M,2]
-     offset_center = gt_center[:, None, :] - anchor_center[:, :]  # [N,M,2]
-     offset_center /= unit_lengths[:, None]  # [N,M,2]
-     spatial_square = spatial_thresh * spatial_thresh
-     spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square
-
-     assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int()
-
-     # assign_matrix is N (gt) x M (predicted)
-     # Max over gt elements (dim 0) to find best gt candidate for each prediction
-     matched_vals, matches = assign_matrix.max(dim=0)
-     match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
-
-     match_labels[matched_vals == 0] = 0
-     match_labels[matched_vals == 1] = 1
-
-     # find all the elements that match to ground truths multiple times
-     not_unique_idxs = assign_matrix.sum(dim=0) > 1
-     if uniqueness_on:
-         match_labels[not_unique_idxs] = 0
-     else:
-         match_labels[not_unique_idxs] = -1
-
-     return matches, match_labels
-
-
- # TODO make the paste_mask function in d2 core support mask list
- def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5):
-     """
-     Paste a list of masks that are of various resolutions (e.g., 28 x 28) into an image.
-     The location, height, and width for pasting each mask is determined by their
-     corresponding bounding boxes in boxes.
-
-     Args:
-         masks (list(Tensor)): A list of Tensor of shape (1, Hmask_i, Wmask_i).
-             Values are in [0, 1]. The list length, Bimg, is the
-             number of detected object instances in the image.
-         boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond
-             to the same object instance.
-         image_shape (tuple): height, width
-         threshold (float): A threshold in [0, 1] for converting the (soft) masks to
-             binary masks.
-
-     Returns:
-         img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the
-             number of detected object instances and Himage, Wimage are the image width
-             and height. img_masks[i] is a binary mask for object instance i.
-     """
-     if len(masks) == 0:
-         return torch.empty((0, 1) + image_shape, dtype=torch.uint8)
-
-     # Loop over masks groups. Each group has the same mask prediction size.
-     img_masks = []
-     ind_masks = []
-     mask_sizes = torch.tensor([m.shape[-1] for m in masks])
-     unique_sizes = torch.unique(mask_sizes)
-     for msize in unique_sizes.tolist():
-         cur_ind = torch.where(mask_sizes == msize)[0]
-         ind_masks.append(cur_ind)
-
-         cur_masks = cat([masks[i] for i in cur_ind])
-         cur_boxes = boxes[cur_ind]
-         img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold))
-
-     img_masks = cat(img_masks)
-     ind_masks = cat(ind_masks)
-
-     img_masks_out = torch.empty_like(img_masks)
-     img_masks_out[ind_masks, :, :] = img_masks
-
-     return img_masks_out
-
-
- def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5):
-     """
-     Post-process the output boxes for TensorMask.
-     The input images are often resized when entering an object detector.
-     As a result, we often need the outputs of the detector in a different
-     resolution from its inputs.
-
-     This function will postprocess the raw outputs of TensorMask
-     to produce outputs according to the desired output resolution.
-
-     Args:
-         results (Instances): the raw outputs from the detector.
-             `results.image_size` contains the input image resolution the detector sees.
-             This object might be modified in-place. Note that it does not contain the field
-             `pred_masks`, which is provided by another input `result_masks`.
-         result_mask_info (list[Tensor], Boxes): a pair of two items for mask related results.
-             The first item is a list of #detection tensors, each is the predicted masks.
-             The second item is the anchors corresponding to the predicted masks.
-         output_height, output_width: the desired output resolution.
-
-     Returns:
-         Instances: the postprocessed output from the model, based on the output resolution
-     """
-     scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
-     results = Instances((output_height, output_width), **results.get_fields())
-
-     output_boxes = results.pred_boxes
-     output_boxes.tensor[:, 0::2] *= scale_x
-     output_boxes.tensor[:, 1::2] *= scale_y
-     output_boxes.clip(results.image_size)
-
-     inds_nonempty = output_boxes.nonempty()
-     results = results[inds_nonempty]
-     result_masks, result_anchors = result_mask_info
-     if result_masks:
-         result_anchors.tensor[:, 0::2] *= scale_x
-         result_anchors.tensor[:, 1::2] *= scale_y
-         result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i]
-         results.pred_masks = _paste_mask_lists_in_image(
-             result_masks,
-             result_anchors[inds_nonempty],
-             results.image_size,
-             threshold=mask_threshold,
-         )
-     return results
-
-
- class TensorMaskAnchorGenerator(DefaultAnchorGenerator):
-     """
-     For a set of image sizes and feature maps, computes a set of anchors for TensorMask.
-     It also computes the unit lengths and indexes for each anchor box.
-     """
-
-     def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes):
-         anchors = []
-         unit_lengths = []
-         indexes = []
-         for lvl, (size, stride, base_anchors) in enumerate(
-             zip(grid_sizes, self.strides, self.cell_anchors)
-         ):
-             grid_height, grid_width = size
-             device = base_anchors.device
-             shifts_x = torch.arange(
-                 0, grid_width * stride, step=stride, dtype=torch.float32, device=device
-             )
-             shifts_y = torch.arange(
-                 0, grid_height * stride, step=stride, dtype=torch.float32, device=device
-             )
-             shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
-             shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2)
-             # Stack anchors in shapes of (HWA, 4)
-             cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4)
-             anchors.append(cur_anchor)
-             unit_lengths.append(
-                 torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device)
-             )
-             # create mask indexes using mesh grid
-             shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device)
-             shifts_i = torch.zeros((1,), dtype=torch.int64, device=device)
-             shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device)
-             shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device)
-             shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device)
-             grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a)
-
-             indexes.append(torch.stack(grids, dim=5).view(-1, 5))
-
-         return anchors, unit_lengths, indexes
-
-     def forward(self, features):
-         """
-         Returns:
-             list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes.
-                 The Boxes contains anchors of this image on the specific feature level.
-             list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
-                 The tensor contains strides, or unit lengths for the anchors.
-             list[list[Tensor]]: a list of #image elements. Each is a list of #feature level tensors.
-                 The Tensor contains indexes for the anchors, with the last dimension meaning
-                 (L, N, H, W, A), where L is level, I is image (not set yet), H is height,
-                 W is width, and A is anchor.
-         """
-         num_images = len(features[0])
-         grid_sizes = [feature_map.shape[-2:] for feature_map in features]
-         anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes(
-             grid_sizes
-         )
-
-         # Convert anchors from Tensor to Boxes
-         anchors_per_im = [Boxes(x) for x in anchors_list]
-
-         anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)]
-         unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)]
-         indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)]
-
-         return anchors, unit_lengths, indexes
-
-
284
- @META_ARCH_REGISTRY.register()
285
- class TensorMask(nn.Module):
286
- """
287
- TensorMask model. Creates FPN backbone, anchors and a head for classification
288
- and box regression. Calculates and applies proper losses to class, box, and
289
- masks.
290
- """
291
-
292
- def __init__(self, cfg):
293
- super().__init__()
294
-
295
- # get the deice of the model
296
- self.device = torch.device(cfg.MODEL.DEVICE)
297
-
298
- # fmt: off
299
- self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
300
- self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
301
- self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES
302
- self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES)
303
- # Loss parameters:
304
- self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA
305
- self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA
306
- # Inference parameters:
307
- self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST
308
- self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST
309
- self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST
310
- self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE
311
- # Mask parameters:
312
- self.mask_on = cfg.MODEL.MASK_ON
313
- self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT
314
- self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT,
315
- dtype=torch.float32,
316
- device=self.device)
317
- self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
318
- # fmt: on
319
-
320
- # build the backbone
321
- self.backbone = build_backbone(cfg)
322
-
323
- backbone_shape = self.backbone.output_shape()
324
- feature_shapes = [backbone_shape[f] for f in self.in_features]
325
- feature_strides = [x.stride for x in feature_shapes]
326
- # build anchors
327
- self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes)
328
- self.num_anchors = self.anchor_generator.num_cell_anchors[0]
329
- anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0]
330
- self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level]
331
- self.min_anchor_size = min(anchors_min_level) - feature_strides[0]
332
-
333
- # head of the TensorMask
334
- self.head = TensorMaskHead(
335
- cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes
336
- )
337
- # box transform
338
- self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS)
339
- pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
340
- pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
341
- self.normalizer = lambda x: (x - pixel_mean) / pixel_std
342
- self.to(self.device)
343
-
344
- def forward(self, batched_inputs):
345
- """
346
- Args:
347
- batched_inputs: a list, batched outputs of :class:`DetectionTransform` .
348
- Each item in the list contains the inputs for one image.
349
- For now, each item in the list is a dict that contains:
350
- image: Tensor, image in (C, H, W) format.
351
- instances: Instances
352
- Other information that's included in the original dicts, such as:
353
- "height", "width" (int): the output resolution of the model, used in inference.
354
- See :meth:`postprocess` for details.
355
- Returns:
356
- losses (dict[str: Tensor]): mapping from a named loss to a tensor
357
- storing the loss. Used during training only.
358
- """
359
- images = self.preprocess_image(batched_inputs)
360
- if "instances" in batched_inputs[0]:
361
- gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
362
- elif "targets" in batched_inputs[0]:
363
- log_first_n(
364
- logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
365
- )
366
- gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
367
- else:
368
- gt_instances = None
369
-
370
- features = self.backbone(images.tensor)
371
- features = [features[f] for f in self.in_features]
372
- # apply the TensorMask head
373
- pred_logits, pred_deltas, pred_masks = self.head(features)
374
- # generate anchors based on features, is it image specific?
375
- anchors, unit_lengths, indexes = self.anchor_generator(features)
376
-
377
- if self.training:
378
- # get ground truths for class labels and box targets, it will label each anchor
379
- gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth(
380
- anchors, unit_lengths, indexes, gt_instances
381
- )
382
- # compute the loss
383
- return self.losses(
384
- gt_class_info,
385
- gt_delta_info,
386
- gt_mask_info,
387
- num_fg,
388
- pred_logits,
389
- pred_deltas,
390
- pred_masks,
391
- )
392
- else:
393
- # do inference to get the output
394
- results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images)
395
- processed_results = []
396
- for results_im, input_im, image_size in zip(
397
- results, batched_inputs, images.image_sizes
398
- ):
399
- height = input_im.get("height", image_size[0])
400
- width = input_im.get("width", image_size[1])
401
- # this is to do post-processing with the image size
402
- result_box, result_mask = results_im
403
- r = _postprocess(result_box, result_mask, height, width)
404
- processed_results.append({"instances": r})
405
- return processed_results
406
-
407
- def losses(
408
- self,
409
- gt_class_info,
410
- gt_delta_info,
411
- gt_mask_info,
412
- num_fg,
413
- pred_logits,
414
- pred_deltas,
415
- pred_masks,
416
- ):
417
- """
418
- Args:
419
- For `gt_class_info`, `gt_delta_info`, `gt_mask_info` and `num_fg` parameters, see
420
- :meth:`TensorMask.get_ground_truth`.
421
- For `pred_logits`, `pred_deltas` and `pred_masks`, see
422
- :meth:`TensorMaskHead.forward`.
423
-
424
- Returns:
425
- losses (dict[str: Tensor]): mapping from a named loss to a scalar tensor
426
- storing the loss. Used during training only. The potential dict keys are:
427
- "loss_cls", "loss_box_reg" and "loss_mask".
428
- """
429
- gt_classes_target, gt_valid_inds = gt_class_info
430
- gt_deltas, gt_fg_inds = gt_delta_info
431
- gt_masks, gt_mask_inds = gt_mask_info
432
- loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device)
433
-
434
- # classification and regression
435
- pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat(
436
- pred_logits, pred_deltas, self.num_classes
437
- )
438
- loss_cls = (
439
- sigmoid_focal_loss_star_jit(
440
- pred_logits[gt_valid_inds],
441
- gt_classes_target[gt_valid_inds],
442
- alpha=self.focal_loss_alpha,
443
- gamma=self.focal_loss_gamma,
444
- reduction="sum",
445
- )
446
- / loss_normalizer
447
- )
448
-
449
- if num_fg == 0:
450
- loss_box_reg = pred_deltas.sum() * 0
451
- else:
452
- loss_box_reg = (
453
- smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum")
454
- / loss_normalizer
455
- )
456
- losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
457
-
458
- # mask prediction
459
- if self.mask_on:
460
- loss_mask = 0
461
- for lvl in range(self.num_levels):
462
- cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
463
- for anc in range(self.num_anchors):
464
- cur_gt_mask_inds = gt_mask_inds[lvl][anc]
465
- if cur_gt_mask_inds is None:
466
- loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0
467
- else:
468
- cur_mask_size = self.mask_sizes[anc] * cur_level_factor
469
- # TODO maybe there are numerical issues when mask sizes are large
470
- cur_size_divider = torch.tensor(
471
- self.mask_loss_weight / (cur_mask_size ** 2),
472
- dtype=torch.float32,
473
- device=self.device,
474
- )
475
-
476
- cur_pred_masks = pred_masks[lvl][anc][
477
- cur_gt_mask_inds[:, 0], # N
478
- :, # V x U
479
- cur_gt_mask_inds[:, 1], # H
480
- cur_gt_mask_inds[:, 2], # W
481
- ]
482
-
483
- loss_mask += F.binary_cross_entropy_with_logits(
484
- cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), # V, U
485
- gt_masks[lvl][anc].to(dtype=torch.float32),
486
- reduction="sum",
487
- weight=cur_size_divider,
488
- pos_weight=self.mask_pos_weight,
489
- )
490
- losses["loss_mask"] = loss_mask / loss_normalizer
491
- return losses
492
-
493
- @torch.no_grad()
494
- def get_ground_truth(self, anchors, unit_lengths, indexes, targets):
495
- """
496
- Args:
497
- anchors (list[list[Boxes]]): a list of N=#image elements. Each is a
498
- list of #feature level Boxes. The Boxes contains anchors of
499
- this image on the specific feature level.
500
- unit_lengths (list[list[Tensor]]): a list of N=#image elements. Each is a
501
- list of #feature level Tensor. The tensor contains unit lengths for anchors of
502
- this image on the specific feature level.
503
- indexes (list[list[Tensor]]): a list of N=#image elements. Each is a
504
- list of #feature level Tensor. The tensor contains the 5D index of
505
- each anchor, the second dimension means (L, N, H, W, A), where L
506
- is level, I is image, H is height, W is width, and A is anchor.
507
- targets (list[Instances]): a list of N `Instances`s. The i-th
508
- `Instances` contains the ground-truth per-instance annotations
509
- for the i-th input image. Specify `targets` during training only.
510
-
511
- Returns:
512
- gt_class_info (Tensor, Tensor): A pair of two tensors for classification.
513
- The first one is an integer tensor of shape (R, #classes) storing ground-truth
514
- labels for each anchor. R is the total number of anchors in the batch.
515
- The second one is an integer tensor of shape (R,), to indicate which
516
- anchors are valid for loss computation, which anchors are not.
517
- gt_delta_info (Tensor, Tensor): A pair of two tensors for boxes.
518
- The first one, of shape (F, 4). F=#foreground anchors.
519
- The last dimension represents ground-truth box2box transform
520
- targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box.
521
- Only foreground anchors have values in this tensor. Could be `None` if F=0.
522
- The second one, of shape (R,), is an integer tensor indicating which anchors
523
- are foreground ones used for box regression. Could be `None` if F=0.
524
- gt_mask_info (list[list[Tensor]], list[list[Tensor]]): A pair of two lists for masks.
525
- The first one is a list of P=#feature level elements. Each is a
526
- list of A=#anchor tensors. Each tensor contains the ground truth
527
- masks of the same size and for the same feature level. Could be `None`.
528
- The second one is a list of P=#feature level elements. Each is a
529
- list of A=#anchor tensors. Each tensor contains the location of the ground truth
530
- masks of the same size and for the same feature level. The second dimension means
531
- (N, H, W), where N is image, H is height, and W is width. Could be `None`.
532
- num_fg (int): F=#foreground anchors, used later for loss normalization.
533
- """
534
- gt_classes = []
535
- gt_deltas = []
536
- gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
537
- gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)]
538
-
539
- anchors = [Boxes.cat(anchors_i) for anchors_i in anchors]
540
- unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths]
541
- indexes = [cat(indexes_i) for indexes_i in indexes]
542
-
543
- num_fg = 0
544
- for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate(
545
- zip(anchors, unit_lengths, indexes, targets)
546
- ):
547
- # Initialize all
548
- gt_classes_i = torch.full_like(
549
- unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device
550
- )
551
- # Ground truth classes
552
- has_gt = len(targets_im) > 0
553
- if has_gt:
554
- # Compute the pairwise matrix
555
- gt_matched_inds, anchor_labels = _assignment_rule(
556
- targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size
557
- )
558
- # Find the foreground instances
559
- fg_inds = anchor_labels == 1
560
- fg_anchors = anchors_im[fg_inds]
561
- num_fg += len(fg_anchors)
562
- # Find the ground truths for foreground instances
563
- gt_fg_matched_inds = gt_matched_inds[fg_inds]
564
- # Assign labels for foreground instances
565
- gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds]
566
- # Anchors with label -1 are ignored, others are left as negative
567
- gt_classes_i[anchor_labels == -1] = -1
568
-
569
- # Boxes
570
- # Ground truth box regression, only for foregrounds
571
- matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes
572
- # Compute box regression offsets for foregrounds only
573
- gt_deltas_i = self.box2box_transform.get_deltas(
574
- fg_anchors.tensor, matched_gt_boxes.tensor
575
- )
576
- gt_deltas.append(gt_deltas_i)
577
-
578
- # Masks
579
- if self.mask_on:
580
- # Compute masks for each level and each anchor
581
- matched_indexes = indexes_im[fg_inds, :]
582
- for lvl in range(self.num_levels):
583
- ids_lvl = matched_indexes[:, 0] == lvl
584
- if torch.any(ids_lvl):
585
- cur_level_factor = 2 ** lvl if self.bipyramid_on else 1
586
- for anc in range(self.num_anchors):
587
- ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc)
588
- if torch.any(ids_lvl_anchor):
589
- gt_masks[lvl][anc].append(
590
- targets_im[
591
- gt_fg_matched_inds[ids_lvl_anchor]
592
- ].gt_masks.crop_and_resize(
593
- fg_anchors[ids_lvl_anchor].tensor,
594
- self.mask_sizes[anc] * cur_level_factor,
595
- )
596
- )
597
- # Select (N, H, W) dimensions
598
- gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4]
599
- # Set the image index to the current image
600
- gt_mask_inds_lvl_anc[:, 0] = i
601
- gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc)
602
- gt_classes.append(gt_classes_i)
603
-
604
- # Classes and boxes
605
- gt_classes = cat(gt_classes)
606
- gt_valid_inds = gt_classes >= 0
607
- gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes)
608
- gt_classes_target = torch.zeros(
609
- (gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device
610
- )
611
- gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1
612
- gt_deltas = cat(gt_deltas) if gt_deltas else None
613
-
614
- # Masks
615
- gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks]
616
- gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds]
617
- return (
618
- (gt_classes_target, gt_valid_inds),
619
- (gt_deltas, gt_fg_inds),
620
- (gt_masks, gt_mask_inds),
621
- num_fg,
622
- )
623
-
624
- def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images):
625
- """
626
- Arguments:
627
- pred_logits, pred_deltas, pred_masks: Same as the output of:
628
- meth:`TensorMaskHead.forward`
629
- anchors, indexes: Same as the input of meth:`TensorMask.get_ground_truth`
630
- images (ImageList): the input images
631
-
632
- Returns:
633
- results (List[Instances]): a list of #images elements.
634
- """
635
- assert len(anchors) == len(images)
636
- results = []
637
-
638
- pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits]
639
- pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas]
640
-
641
- pred_logits = cat(pred_logits, dim=1)
642
- pred_deltas = cat(pred_deltas, dim=1)
643
-
644
- for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)):
645
- # Get the size of the current image
646
- image_size = images.image_sizes[img_idx]
647
-
648
- logits_im = pred_logits[img_idx]
649
- deltas_im = pred_deltas[img_idx]
650
-
651
- if self.mask_on:
652
- masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks]
653
- else:
654
- masks_im = [None] * self.num_levels
655
- results_im = self.inference_single_image(
656
- logits_im,
657
- deltas_im,
658
- masks_im,
659
- Boxes.cat(anchors_im),
660
- cat(indexes_im),
661
- tuple(image_size),
662
- )
663
- results.append(results_im)
664
- return results
665
-
666
- def inference_single_image(
667
- self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size
668
- ):
669
- """
670
- Single-image inference. Return bounding-box detection results by thresholding
671
- on scores and applying non-maximum suppression (NMS).
672
-
673
- Arguments:
674
- pred_logits (list[Tensor]): list of #feature levels. Each entry contains
675
- tensor of size (AxHxW, K)
676
- pred_deltas (list[Tensor]): Same shape as 'pred_logits' except that K becomes 4.
677
- pred_masks (list[list[Tensor]]): List of #feature levels, each is a list of #anchors.
678
- Each entry contains tensor of size (M_i*M_i, H, W). `None` if mask_on=False.
679
- anchors (list[Boxes]): list of #feature levels. Each entry contains
680
- a Boxes object, which contains all the anchors for that
681
- image in that feature level.
682
- image_size (tuple(H, W)): a tuple of the image height and width.
683
-
684
- Returns:
685
- Same as `inference`, but for only one image.
686
- """
687
- pred_logits = pred_logits.flatten().sigmoid_()
688
- # We get top locations across all levels to accelerate the inference speed,
689
- # which does not seem to affect the accuracy.
690
- # First select values above the threshold
691
- logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0]
692
- # Then get the top values
693
- num_topk = min(self.topk_candidates, logits_top_idxs.shape[0])
694
- pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True)
695
- # Keep top k scoring values
696
- pred_prob = pred_prob[:num_topk]
697
- # Keep top k values
698
- top_idxs = logits_top_idxs[topk_idxs[:num_topk]]
699
-
700
- # class index
701
- cls_idxs = top_idxs % self.num_classes
702
- # HWA index
703
- top_idxs //= self.num_classes
704
- # predict boxes
705
- pred_boxes = self.box2box_transform.apply_deltas(
706
- pred_deltas[top_idxs], anchors[top_idxs].tensor
707
- )
708
- # apply nms
709
- keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold)
710
- # pick the top ones
711
- keep = keep[: self.detections_im]
712
-
713
- results = Instances(image_size)
714
- results.pred_boxes = Boxes(pred_boxes[keep])
715
- results.scores = pred_prob[keep]
716
- results.pred_classes = cls_idxs[keep]
717
-
718
- # deal with masks
719
- result_masks, result_anchors = [], None
720
- if self.mask_on:
721
- # index and anchors, useful for masks
722
- top_indexes = indexes[top_idxs]
723
- top_anchors = anchors[top_idxs]
724
- result_indexes = top_indexes[keep]
725
- result_anchors = top_anchors[keep]
726
- # Get masks and do sigmoid
727
- for lvl, _, h, w, anc in result_indexes.tolist():
728
- cur_size = self.mask_sizes[anc] * (2 ** lvl if self.bipyramid_on else 1)
729
- result_masks.append(
730
- torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size))
731
- )
732
-
733
- return results, (result_masks, result_anchors)
734
-
735
- def preprocess_image(self, batched_inputs):
736
- """
737
- Normalize, pad and batch the input images.
738
- """
739
- images = [x["image"].to(self.device) for x in batched_inputs]
740
- images = [self.normalizer(x) for x in images]
741
- images = ImageList.from_tensors(images, self.backbone.size_divisibility)
742
- return images
743
-
744
-
745
- class TensorMaskHead(nn.Module):
746
- def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]):
747
- """
748
- TensorMask head.
749
- """
750
- super().__init__()
751
- # fmt: off
752
- self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES
753
- in_channels = input_shape[0].channels
754
- num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES
755
- cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS
756
- num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS
757
- # box parameters
758
- bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS
759
- # mask parameters
760
- self.mask_on = cfg.MODEL.MASK_ON
761
- self.mask_sizes = mask_sizes
762
- mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS
763
- self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON
764
- self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON
765
- # fmt: on
766
-
767
- # class subnet
768
- cls_subnet = []
769
- cur_channels = in_channels
770
- for _ in range(num_convs):
771
- cls_subnet.append(
772
- nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1)
773
- )
774
- cur_channels = cls_channels
775
- cls_subnet.append(nn.ReLU())
776
-
777
- self.cls_subnet = nn.Sequential(*cls_subnet)
778
- self.cls_score = nn.Conv2d(
779
- cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
780
- )
781
- modules_list = [self.cls_subnet, self.cls_score]
782
-
783
- # box subnet
784
- bbox_subnet = []
785
- cur_channels = in_channels
786
- for _ in range(num_convs):
787
- bbox_subnet.append(
788
- nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1)
789
- )
790
- cur_channels = bbox_channels
791
- bbox_subnet.append(nn.ReLU())
792
-
793
- self.bbox_subnet = nn.Sequential(*bbox_subnet)
794
- self.bbox_pred = nn.Conv2d(
795
- cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
796
- )
797
- modules_list.extend([self.bbox_subnet, self.bbox_pred])
798
-
799
- # mask subnet
800
- if self.mask_on:
801
- mask_subnet = []
802
- cur_channels = in_channels
803
- for _ in range(num_convs):
804
- mask_subnet.append(
805
- nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1)
806
- )
807
- cur_channels = mask_channels
808
- mask_subnet.append(nn.ReLU())
809
-
810
- self.mask_subnet = nn.Sequential(*mask_subnet)
811
- modules_list.append(self.mask_subnet)
812
- for mask_size in self.mask_sizes:
813
- cur_mask_module = "mask_pred_%02d" % mask_size
814
- self.add_module(
815
- cur_mask_module,
816
- nn.Conv2d(
817
- cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0
818
- ),
819
- )
820
- modules_list.append(getattr(self, cur_mask_module))
821
- if self.align_on:
822
- if self.bipyramid_on:
823
- for lvl in range(num_levels):
824
- cur_mask_module = "align2nat_%02d" % lvl
825
- lambda_val = 2 ** lvl
826
- setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val))
827
- # Also the fusing layer, stay at the same channel size
828
- mask_fuse = [
829
- nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1),
830
- nn.ReLU(),
831
- ]
832
- self.mask_fuse = nn.Sequential(*mask_fuse)
833
- modules_list.append(self.mask_fuse)
834
- else:
835
- self.align2nat = SwapAlign2Nat(1)
836
-
837
- # Initialization
838
- for modules in modules_list:
839
- for layer in modules.modules():
840
- if isinstance(layer, nn.Conv2d):
841
- torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
842
- torch.nn.init.constant_(layer.bias, 0)
843
-
844
- # Use prior in model initialization to improve stability
845
- bias_value = -math.log((1 - 0.01) / 0.01)
846
- torch.nn.init.constant_(self.cls_score.bias, bias_value)
847
-
848
- def forward(self, features):
849
- """
850
- Arguments:
851
- features (list[Tensor]): FPN feature map tensors in high to low resolution.
852
- Each tensor in the list correspond to different feature levels.
853
-
854
- Returns:
855
- pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi).
856
- The tensor predicts the classification probability
857
- at each spatial position for each of the A anchors and K object
858
- classes.
859
- pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi).
860
- The tensor predicts 4-vector (dx,dy,dw,dh) box
861
- regression values for every anchor. These values are the
862
- relative offset between the anchor and the ground truth box.
863
- pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of
864
- A tensors of shape (N, M_{i,a}, Hi, Wi).
865
- The tensor predicts a dense set of M_ixM_i masks at every location.
866
- """
867
- pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features]
868
- pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features]
869
-
870
- pred_masks = None
871
- if self.mask_on:
872
- mask_feats = [self.mask_subnet(x) for x in features]
873
-
874
- if self.bipyramid_on:
875
- mask_feat_high_res = mask_feats[0]
876
- H, W = mask_feat_high_res.shape[-2:]
877
- mask_feats_up = []
878
- for lvl, mask_feat in enumerate(mask_feats):
879
- lambda_val = 2.0 ** lvl
880
- mask_feat_up = mask_feat
881
- if lvl > 0:
882
- mask_feat_up = F.interpolate(
883
- mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False
884
- )
885
- mask_feats_up.append(
886
- self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res)
887
- )
888
- mask_feats = mask_feats_up
889
-
890
- pred_masks = []
891
- for lvl, mask_feat in enumerate(mask_feats):
892
- cur_masks = []
893
- for mask_size in self.mask_sizes:
894
- cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size)
895
- cur_mask = cur_mask_module(mask_feat)
896
- if self.align_on:
897
- if self.bipyramid_on:
898
- cur_mask_module = getattr(self, "align2nat_%02d" % lvl)
899
- cur_mask = cur_mask_module(cur_mask)
900
- else:
901
- cur_mask = self.align2nat(cur_mask)
902
- cur_masks.append(cur_mask)
903
- pred_masks.append(cur_masks)
904
- return pred_logits, pred_deltas, pred_masks
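The subtle step in the deleted `inference_single_image` above is the flattened-index bookkeeping: per-image scores are laid out as (H*W*A, K) and flattened, so every top-scoring index encodes both a location/anchor slot and a class. A minimal standalone sketch of that decode (tensor sizes fabricated for illustration; not part of the original file):

import torch

num_classes = 80                          # K, assumed for the example
scores = torch.rand(900, num_classes)     # (HWA, K) per-image scores, random stand-in

flat = scores.flatten()                   # length HWA * K; flat index = hwa * K + k
top_vals, top_idxs = flat.topk(5)
cls_idxs = top_idxs % num_classes         # recover the class index k
hwa_idxs = top_idxs // num_classes        # recover the (position, anchor) slot
# hwa_idxs can then index the per-anchor boxes, exactly as the method above
# does before handing (boxes, scores, classes) to batched_nms.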
 
spaces/CVPR/LIVE/pybind11/tests/test_eval_call.py DELETED
@@ -1,5 +0,0 @@
- # -*- coding: utf-8 -*-
- # This file is called from 'test_eval.py'
-
- if 'call_test2' in locals():
-     call_test2(y)  # noqa: F821 undefined name
 
spaces/CVPR/LIVE/thrust/testing/unittest/assertions.h DELETED
@@ -1,593 +0,0 @@
- #pragma once
-
- #include <thrust/complex.h>
- #include <thrust/host_vector.h>
- #include <thrust/device_vector.h>
- #include <thrust/iterator/iterator_traits.h>
- #include <thrust/detail/type_traits.h>
-
- #include <unittest/exceptions.h>
- #include <unittest/util.h>
-
- #define ASSERT_EQUAL_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_equal((X),(Y), FILE_, LINE_)
- #define ASSERT_EQUAL_QUIET_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_equal_quiet((X),(Y), FILE_, LINE_)
- #define ASSERT_NOT_EQUAL_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_not_equal((X),(Y), FILE_, LINE_)
- #define ASSERT_NOT_EQUAL_QUIET_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_not_equal_quiet((X),(Y), FILE_, LINE_)
- #define ASSERT_LEQUAL_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_lequal((X),(Y), FILE_, LINE_)
- #define ASSERT_GEQUAL_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_gequal((X),(Y), FILE_, LINE_)
- #define ASSERT_LESS_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_less((X),(Y), FILE_, LINE_)
- #define ASSERT_GREATER_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_greater((X),(Y), FILE_, LINE_)
- #define ASSERT_ALMOST_EQUAL_WITH_FILE_AND_LINE(X,Y,FILE_,LINE_) unittest::assert_almost_equal((X),(Y), FILE_, LINE_)
- #define ASSERT_EQUAL_RANGES_WITH_FILE_AND_LINE(X,Y,Z,FILE_,LINE_) unittest::assert_equal((X),(Y),(Z), FILE_, LINE_)
-
- #define ASSERT_THROWS_WITH_FILE_AND_LINE( \
-   EXPR, EXCEPTION_TYPE, FILE_, LINE_ \
- ) \
- { \
-   unittest::threw_status THRUST_PP_CAT2(__s, LINE_) \
-     = unittest::did_not_throw; \
-   try { EXPR; } \
-   catch (EXCEPTION_TYPE const&) \
-   { THRUST_PP_CAT2(__s, LINE_) = unittest::threw_right_type; } \
-   catch (...) \
-   { THRUST_PP_CAT2(__s, LINE_) = unittest::threw_wrong_type; } \
-   unittest::check_assert_throws( \
-     THRUST_PP_CAT2(__s, LINE_), THRUST_PP_STRINGIZE(EXCEPTION_TYPE) \
-   , FILE_, LINE_ \
-   ); \
- } \
- /**/
-
- #define ASSERT_THROWS_EQUAL_WITH_FILE_AND_LINE( \
-   EXPR, EXCEPTION_TYPE, VALUE, FILE_, LINE_ \
- ) \
- { \
-   unittest::threw_status THRUST_PP_CAT2(__s, LINE_) \
-     = unittest::did_not_throw; \
-   try { EXPR; } \
-   catch (EXCEPTION_TYPE const& THRUST_PP_CAT2(__e, LINE_)) \
-   { \
-     if (VALUE == THRUST_PP_CAT2(__e, LINE_)) \
-       THRUST_PP_CAT2(__s, LINE_) \
-         = unittest::threw_right_type; \
-     else \
-       THRUST_PP_CAT2(__s, LINE_) \
-         = unittest::threw_right_type_but_wrong_value; \
-   } \
-   catch (...) { THRUST_PP_CAT2(__s, LINE_) = unittest::threw_wrong_type; } \
-   unittest::check_assert_throws( \
-     THRUST_PP_CAT2(__s, LINE_), THRUST_PP_STRINGIZE(EXCEPTION_TYPE) \
-   , FILE_, LINE_ \
-   ); \
- } \
- /**/
-
- #define KNOWN_FAILURE_WITH_FILE_AND_LINE(FILE_, LINE_) \
-   { unittest::UnitTestKnownFailure f; f << "[" << FILE_ ":" << LINE_ << "]"; throw f; } \
- /**/
-
- #define ASSERT_EQUAL(X,Y) ASSERT_EQUAL_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_EQUAL_QUIET(X,Y) ASSERT_EQUAL_QUIET_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_NOT_EQUAL(X,Y) ASSERT_NOT_EQUAL_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_NOT_EQUAL_QUIET(X,Y) ASSERT_NOT_EQUAL_QUIET_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_LEQUAL(X,Y) ASSERT_LEQUAL_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_GEQUAL(X,Y) ASSERT_GEQUAL_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_LESS(X,Y) ASSERT_LESS_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_GREATER(X,Y) ASSERT_GREATER_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_ALMOST_EQUAL(X,Y) ASSERT_ALMOST_EQUAL_WITH_FILE_AND_LINE((X),(Y), __FILE__, __LINE__)
- #define ASSERT_EQUAL_RANGES(X,Y,Z) ASSERT_EQUAL_WITH_FILE_AND_LINE((X),(Y),(Z), __FILE__, __LINE__)
-
- #define ASSERT_THROWS(EXPR, EXCEPTION_TYPE) \
-   ASSERT_THROWS_WITH_FILE_AND_LINE(EXPR, EXCEPTION_TYPE, __FILE__, __LINE__) \
- /**/
-
- #define ASSERT_THROWS_EQUAL(EXPR, EXCEPTION_TYPE, VALUE) \
-   ASSERT_THROWS_EQUAL_WITH_FILE_AND_LINE(EXPR, EXCEPTION_TYPE, VALUE, __FILE__, __LINE__) \
- /**/
-
- #define KNOWN_FAILURE KNOWN_FAILURE_WITH_FILE_AND_LINE(__FILE__, __LINE__)
-
- namespace unittest
- {
-
- size_t const MAX_OUTPUT_LINES = 10;
-
- double const DEFAULT_RELATIVE_TOL = 1e-4;
- double const DEFAULT_ABSOLUTE_TOL = 1e-4;
-
- template<typename T>
- struct value_type
- {
-     typedef typename thrust::detail::remove_const<
-         typename thrust::detail::remove_reference<
-             T
-         >::type
-     >::type type;
- };
-
- template<typename T>
- struct value_type< thrust::device_reference<T> >
- {
-     typedef typename value_type<T>::type type;
- };
-
- ////
- // check scalar values
- template <typename T1, typename T2>
- void assert_equal(T1 a, T2 b,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a == b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not equal: " << a << " " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_equal(char a, char b,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a == b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not equal: " << int(a) << " " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- // sometimes its not possible to << a type
- template <typename T1, typename T2>
- void assert_equal_quiet(const T1& a, const T2& b,
-                         const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a == b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not equal";
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- ////
- // check scalar values
- template <typename T1, typename T2>
- void assert_not_equal(T1 a, T2 b,
-                       const std::string& filename = "unknown", int lineno = -1)
- {
-     if(a == b){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are equal: " << a << " " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_not_equal(char a, char b,
-                       const std::string& filename = "unknown", int lineno = -1)
- {
-     if(a == b){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are equal: " << int(a) << " " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- // sometimes its not possible to << a type
- template <typename T1, typename T2>
- void assert_not_equal_quiet(const T1& a, const T2& b,
-                             const std::string& filename = "unknown", int lineno = -1)
- {
-     if(a == b){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are equal";
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- template <typename T1, typename T2>
- void assert_less(T1 a, T2 b,
-                  const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a < b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << a << " is greater or equal to " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_less(char a, char b,
-                  const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a < b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << int(a) << " is greater than or equal to " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- template <typename T1, typename T2>
- void assert_greater(T1 a, T2 b,
-                     const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a > b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << a << " is less than or equal to " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_greater(char a, char b,
-                     const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a > b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << int(a) << " is less than or equal to " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- template <typename T1, typename T2>
- void assert_lequal(T1 a, T2 b,
-                    const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a <= b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << a << " is greater than " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_lequal(char a, char b,
-                    const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a <= b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << int(a) << " is greater than " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- template <typename T1, typename T2>
- void assert_gequal(T1 a, T2 b,
-                    const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a >= b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << a << " is less than " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- void assert_gequal(char a, char b,
-                    const std::string& filename = "unknown", int lineno = -1)
- {
-     if(!(a >= b)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << int(a) << " is less than " << int(b);
-         f << " [type='" << type_name<char>() << "']";
-         throw f;
-     }
- }
-
- // define our own abs() because std::abs() isn't portable for all types for some reason
- template<typename T>
- T abs(const T &x)
- {
-     return x > 0 ? x : -x;
- }
-
-
- inline
- bool almost_equal(const double& a, const double& b, const double& a_tol, const double& r_tol)
- {
-     if(abs(a - b) > r_tol * (abs(a) + abs(b)) + a_tol)
-         return false;
-     else
-         return true;
- }
-
- template <typename T1, typename T2>
- void assert_almost_equal(T1 a, T2 b,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          double a_tol = DEFAULT_ABSOLUTE_TOL, double r_tol = DEFAULT_RELATIVE_TOL)
-
- {
-     if(!almost_equal(a, b, a_tol, r_tol)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not approximately equal: " << (double) a << " " << (double) b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
-
- template <typename T1, typename T2>
- void assert_almost_equal(thrust::complex<T1> a, thrust::complex<T2> b,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          double a_tol = DEFAULT_ABSOLUTE_TOL, double r_tol = DEFAULT_RELATIVE_TOL)
-
- {
-     if(!almost_equal(a.real(), b.real(), a_tol, r_tol)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not approximately equal: " << a << " " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
-
- template <typename T1, typename T2>
- void assert_almost_equal(const thrust::complex<T1>& a, const std::complex<T2>& b,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          double a_tol = DEFAULT_ABSOLUTE_TOL, double r_tol = DEFAULT_RELATIVE_TOL)
-
- {
-     if(!almost_equal(a.real(), b.real(), a_tol, r_tol)){
-         unittest::UnitTestFailure f;
-         f << "[" << filename << ":" << lineno << "] ";
-         f << "values are not approximately equal: " << a << " " << b;
-         f << " [type='" << type_name<T1>() << "']";
-         throw f;
-     }
- }
-
- template <typename T>
- class almost_equal_to
- {
-     public:
-         double a_tol, r_tol;
-         almost_equal_to(double _a_tol = DEFAULT_ABSOLUTE_TOL, double _r_tol = DEFAULT_RELATIVE_TOL) : a_tol(_a_tol), r_tol(_r_tol) {}
-         bool operator()(const T& a, const T& b) const {
-             return almost_equal((double) a, (double) b, a_tol, r_tol);
-         }
- };
-
-
- template <typename T>
- class almost_equal_to<thrust::complex<T> >
- {
-     public:
-         double a_tol, r_tol;
-         almost_equal_to(double _a_tol = DEFAULT_ABSOLUTE_TOL, double _r_tol = DEFAULT_RELATIVE_TOL) : a_tol(_a_tol), r_tol(_r_tol) {}
-         bool operator()(const thrust::complex<T>& a, const thrust::complex<T>& b) const {
-             return almost_equal((double) a.real(), (double) b.real(), a_tol, r_tol)
-                 && almost_equal((double) a.imag(), (double) b.imag(), a_tol, r_tol);
-         }
- };
-
- ////
- // check sequences
-
- template <typename ForwardIterator1, typename ForwardIterator2, typename BinaryPredicate>
- void assert_equal(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2, BinaryPredicate op,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     typedef typename thrust::iterator_difference<ForwardIterator1>::type difference_type;
-     typedef typename thrust::iterator_value<ForwardIterator1>::type InputType;
-
-     bool failure = false;
-
-     difference_type length1 = thrust::distance(first1, last1);
-     difference_type length2 = thrust::distance(first2, last2);
-
-     difference_type min_length = thrust::min(length1, length2);
-
-     unittest::UnitTestFailure f;
-     f << "[" << filename << ":" << lineno << "] ";
-
-     // check lengths
-     if (length1 != length2)
-     {
-         failure = true;
-         f << "Sequences have different sizes (" << length1 << " != " << length2 << ")\n";
-     }
-
-     // check values
-
-     size_t mismatches = 0;
-
-     for (difference_type i = 0; i < min_length; i++)
-     {
-         if(!op(*first1, *first2))
-         {
-             if (mismatches == 0)
-             {
-                 failure = true;
-                 f << "Sequences are not equal [type='" << type_name<InputType>() << "']\n";
-                 f << "--------------------------------\n";
-             }
-
-             mismatches++;
-
-             if(mismatches <= MAX_OUTPUT_LINES)
-             {
-                 if (sizeof(InputType) == 1)
-                     f << " [" << i << "] " << *first1 + InputType() << " " << *first2 + InputType() << "\n"; // unprintable chars are a problem
-                 else
-                     f << " [" << i << "] " << *first1 << " " << *first2 << "\n";
-             }
-         }
-
-         first1++;
-         first2++;
-     }
-
-     if (mismatches > 0)
-     {
-         if(mismatches > MAX_OUTPUT_LINES)
-             f << " (output limit reached)\n";
-         f << "--------------------------------\n";
-         f << "Sequences differ at " << mismatches << " of " << min_length << " positions" << "\n";
-     }
-     else if (length1 != length2)
-     {
-         f << "Sequences agree through " << min_length << " positions [type='" << type_name<InputType>() << "']\n";
-     }
-
-     if (failure)
-         throw f;
- }
-
- template <typename ForwardIterator1, typename ForwardIterator2>
- void assert_equal(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     typedef typename thrust::iterator_traits<ForwardIterator1>::value_type InputType;
-     assert_equal(first1, last1, first2, last2, thrust::equal_to<InputType>(), filename, lineno);
- }
-
-
- template <typename ForwardIterator1, typename ForwardIterator2>
- void assert_almost_equal(ForwardIterator1 first1, ForwardIterator1 last1, ForwardIterator2 first2, ForwardIterator2 last2,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          const double a_tol = DEFAULT_ABSOLUTE_TOL, const double r_tol = DEFAULT_RELATIVE_TOL)
- {
-     typedef typename thrust::iterator_traits<ForwardIterator1>::value_type InputType;
-     assert_equal(first1, last1, first2, last2, almost_equal_to<InputType>(a_tol, r_tol), filename, lineno);
- }
-
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_equal(const thrust::host_vector<T,Alloc1>& A, const thrust::host_vector<T,Alloc2>& B,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     assert_equal(A.begin(), A.end(), B.begin(), B.end(), filename, lineno);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_almost_equal(const thrust::host_vector<T,Alloc1>& A, const thrust::host_vector<T,Alloc2>& B,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          const double a_tol = DEFAULT_ABSOLUTE_TOL, const double r_tol = DEFAULT_RELATIVE_TOL)
- {
-     assert_almost_equal(A.begin(), A.end(), B.begin(), B.end(), filename, lineno, a_tol, r_tol);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_equal(const thrust::host_vector<T,Alloc1>& A, const thrust::device_vector<T,Alloc2>& B,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     thrust::host_vector<T,Alloc1> B_host = B;
-     assert_equal(A, B_host, filename, lineno);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_equal(const thrust::device_vector<T,Alloc1>& A, const thrust::host_vector<T,Alloc2>& B,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     thrust::host_vector<T,Alloc2> A_host = A;
-     assert_equal(A_host, B, filename, lineno);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_equal(const thrust::device_vector<T,Alloc1>& A, const thrust::device_vector<T,Alloc2>& B,
-                   const std::string& filename = "unknown", int lineno = -1)
- {
-     thrust::host_vector<T> A_host = A;
-     thrust::host_vector<T> B_host = B;
-     assert_equal(A_host, B_host, filename, lineno);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_almost_equal(const thrust::host_vector<T,Alloc1>& A, const thrust::device_vector<T,Alloc2>& B,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          const double a_tol = DEFAULT_ABSOLUTE_TOL, const double r_tol = DEFAULT_RELATIVE_TOL)
- {
-     thrust::host_vector<T,Alloc1> B_host = B;
-     assert_almost_equal(A, B_host, filename, lineno, a_tol, r_tol);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_almost_equal(const thrust::device_vector<T,Alloc1>& A, const thrust::host_vector<T,Alloc2>& B,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          const double a_tol = DEFAULT_ABSOLUTE_TOL, const double r_tol = DEFAULT_RELATIVE_TOL)
- {
-     thrust::host_vector<T,Alloc2> A_host = A;
-     assert_almost_equal(A_host, B, filename, lineno, a_tol, r_tol);
- }
-
- template <typename T, typename Alloc1, typename Alloc2>
- void assert_almost_equal(const thrust::device_vector<T,Alloc1>& A, const thrust::device_vector<T,Alloc2>& B,
-                          const std::string& filename = "unknown", int lineno = -1,
-                          const double a_tol = DEFAULT_ABSOLUTE_TOL, const double r_tol = DEFAULT_RELATIVE_TOL)
- {
-     thrust::host_vector<T> A_host = A;
-     thrust::host_vector<T> B_host = B;
-     assert_almost_equal(A_host, B_host, filename, lineno, a_tol, r_tol);
- }
-
- enum threw_status
- {
-   did_not_throw
- , threw_wrong_type
- , threw_right_type_but_wrong_value
- , threw_right_type
- };
-
- void check_assert_throws(
-   threw_status s
- , std::string const& exception_name
- , std::string const& file_name = "unknown"
- , int line_number = -1
- )
- {
-   switch (s)
-   {
-     case did_not_throw:
-     {
-       unittest::UnitTestFailure f;
-       f << "[" << file_name << ":" << line_number << "] did not throw anything";
-       throw f;
-     }
-     case threw_wrong_type:
-     {
-       unittest::UnitTestFailure f;
-       f << "[" << file_name << ":" << line_number << "] did not throw an "
-         << "object of type " << exception_name;
-       throw f;
-     }
-     case threw_right_type_but_wrong_value:
-     {
-       unittest::UnitTestFailure f;
-       f << "[" << file_name << ":" << line_number << "] threw an object of the "
-         << "correct type (" << exception_name << ") but wrong value";
-       throw f;
-     }
-     case threw_right_type:
-       break;
-     default:
-     {
-       unittest::UnitTestFailure f;
-       f << "[" << file_name << ":" << line_number << "] encountered an "
-         << "unknown error";
-       throw f;
-     }
-   }
- }
-
- }; //end namespace unittest
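The tolerance rule used by `almost_equal` above treats two values as equal when |a - b| <= r_tol * (|a| + |b|) + a_tol, i.e. a relative term with an absolute floor. A quick Python rendering of the same predicate for experimentation (names ours; the defaults mirror DEFAULT_ABSOLUTE_TOL and DEFAULT_RELATIVE_TOL above):

def almost_equal(a: float, b: float, a_tol: float = 1e-4, r_tol: float = 1e-4) -> bool:
    # Pass when the gap stays within the relative term plus the absolute floor.
    return abs(a - b) <= r_tol * (abs(a) + abs(b)) + a_tol

assert almost_equal(1.0, 1.00009)   # within the default tolerances
assert not almost_equal(0.0, 1.0)   # clearly outside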
 
spaces/CVPR/WALT/configs/_base_/schedules/schedule_1x.py DELETED
@@ -1,11 +0,0 @@
- # optimizer
- optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
- optimizer_config = dict(grad_clip=None)
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=500,
-     warmup_ratio=0.001,
-     step=[8, 11])
- runner = dict(type='EpochBasedRunner', max_epochs=12)
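The config above pairs a linear warmup with a step decay. Under mmcv's usual linear-warmup convention (an assumption here, since the runner code is not part of this diff), the rate at warmup iteration t is lr * (1 - (1 - t / warmup_iters) * (1 - warmup_ratio)), after which it drops by the default gamma of 0.1 at epochs 8 and 11. A small sketch of the resulting schedule:

def lr_at(epoch: int, it: int, base_lr: float = 0.02,
          warmup_iters: int = 500, warmup_ratio: float = 0.001) -> float:
    if it < warmup_iters:
        # Linear ramp from base_lr * warmup_ratio up to base_lr.
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)
        return base_lr * (1 - k)
    # Step policy: one 10x drop per milestone passed (epochs 8 and 11).
    return base_lr * 0.1 ** sum(epoch >= s for s in (8, 11))

print(lr_at(0, 0))       # 2e-05, i.e. 0.02 * 0.001
print(lr_at(5, 10_000))  # 0.02
print(lr_at(9, 10_000))  # 0.002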
 
spaces/ChenyangSi/FreeU/__init__.py DELETED
@@ -1 +0,0 @@
- from free_lunch_utils import register_upblock2d, register_free_upblock2d, register_crossattn_upblock2d, register_free_crossattn_upblock2d
 
spaces/ChrisCaviar/ControlNet-v1-1/app_softedge.py DELETED
@@ -1,110 +0,0 @@
- #!/usr/bin/env python
-
- import gradio as gr
-
- from utils import randomize_seed_fn
-
-
- def create_demo(process, max_images=12, default_num_images=3):
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column():
-                 image = gr.Image()
-                 prompt = gr.Textbox(label='Prompt')
-                 run_button = gr.Button('Run')
-                 with gr.Accordion('Advanced options', open=False):
-                     preprocessor_name = gr.Radio(label='Preprocessor',
-                                                  choices=[
-                                                      'HED',
-                                                      'PidiNet',
-                                                      'HED safe',
-                                                      'PidiNet safe',
-                                                      'None',
-                                                  ],
-                                                  type='value',
-                                                  value='PidiNet')
-                     num_samples = gr.Slider(label='Number of images',
-                                             minimum=1,
-                                             maximum=max_images,
-                                             value=default_num_images,
-                                             step=1)
-                     image_resolution = gr.Slider(label='Image resolution',
-                                                  minimum=256,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=256)
-                     preprocess_resolution = gr.Slider(
-                         label='Preprocess resolution',
-                         minimum=128,
-                         maximum=512,
-                         value=512,
-                         step=1)
-                     num_steps = gr.Slider(label='Number of steps',
-                                           minimum=1,
-                                           maximum=100,
-                                           value=20,
-                                           step=1)
-                     guidance_scale = gr.Slider(label='Guidance scale',
-                                                minimum=0.1,
-                                                maximum=30.0,
-                                                value=9.0,
-                                                step=0.1)
-                     seed = gr.Slider(label='Seed',
-                                      minimum=0,
-                                      maximum=1000000,
-                                      step=1,
-                                      value=0,
-                                      randomize=True)
-                     randomize_seed = gr.Checkbox(label='Randomize seed',
-                                                  value=True)
-                     a_prompt = gr.Textbox(
-                         label='Additional prompt',
-                         value='best quality, extremely detailed')
-                     n_prompt = gr.Textbox(
-                         label='Negative prompt',
-                         value=
-                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                     )
-             with gr.Column():
-                 result = gr.Gallery(label='Output', show_label=False).style(
-                     columns=2, object_fit='scale-down')
-         inputs = [
-             image,
-             prompt,
-             a_prompt,
-             n_prompt,
-             num_samples,
-             image_resolution,
-             preprocess_resolution,
-             num_steps,
-             guidance_scale,
-             seed,
-             preprocessor_name,
-         ]
-         prompt.submit(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-         )
-         run_button.click(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-             api_name='scribble',
-         )
-     return demo
-
-
- if __name__ == '__main__':
-     from model import Model
-     model = Model(task_name='softedge')
-     demo = create_demo(model.process_softedge)
-     demo.queue().launch()
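Both event chains above first route the seed through `randomize_seed_fn`, imported from `utils`, which is not included in this diff. Judging only from how it is wired (a seed and a randomize flag in, a seed out feeding the slider), a plausible reconstruction would be:

import random

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Hypothetical: draw a fresh seed when the "Randomize seed" checkbox is
    # ticked, otherwise pass the slider value through unchanged.
    if randomize_seed:
        seed = random.randint(0, 1000000)
    return seed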