Commit
·
96e1380
1
Parent(s):
14f8764
Update parquet files (step 34 of 296)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/0x90e/ESRGAN-MANGA/ESRGAN/architecture.py +0 -37
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 !!LINK!! Full Crack Kuyhaa.md +0 -25
- spaces/1phancelerku/anime-remove-background/Blue WhatsApp APK 2023 The Latest Version with Amazing Features and Themes.md +0 -114
- spaces/1phancelerku/anime-remove-background/Download Betty Azar English Grammar Improve Your Skills with Exercises and Tests.md +0 -106
- spaces/1phancelerku/anime-remove-background/Download Google Play Store APKs from APKMirror - The Easy Way.md +0 -113
- spaces/1phancelerku/anime-remove-background/Download Summertime Saga on iPhone A Guide for iOS Users.md +0 -118
- spaces/1phancelerku/anime-remove-background/Final Destination 1 on Fzmovies A Guide to Downloading and Streaming the Classic Horror Movie.md +0 -125
- spaces/1phancelerku/anime-remove-background/Free APK Download for Gacha Life - The Most Popular Anime Game by Lunime.md +0 -156
- spaces/2023Liu2023/bingo/next.config.js +0 -38
- spaces/7hao/bingo/src/components/voice.tsx +0 -52
- spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py +0 -126
- spaces/ADRXtractor/ADR_Xtractor/app.py +0 -46
- spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab.py +0 -153
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py +0 -256
- spaces/ASJMO/freegpt/g4f/Provider/Providers/Vercel.py +0 -162
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16_in1k.py +0 -4
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wewordle.py +0 -65
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/Factory.d.ts +0 -5
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/PreLayout.js +0 -9
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.d.ts +0 -2
- spaces/Amjadd/BookGPT/app.py +0 -190
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py +0 -107
- spaces/Andy1621/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py +0 -10
- spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +0 -3
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/metrics.py +0 -326
- spaces/Arnx/MusicGenXvAKN/audiocraft/models/lm.py +0 -527
- spaces/AtomdffAI/wechatgpt4atom/config.py +0 -34
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_registry.py +0 -45
- spaces/Benson/text-generation/Examples/Barbie Dreamhouse Adventures Hack Apk.md +0 -38
- spaces/Benson/text-generation/Examples/Buscando Capcut Editor De Vdeo Aplicacin.md +0 -72
- spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Plata.md +0 -81
- spaces/BetterAPI/BetterChat/src/app.html +0 -45
- spaces/BetterAPI/BetterChat_new/src/lib/updateSettings.ts +0 -27
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/default_styles.py +0 -190
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py +0 -921
- spaces/CVPR/LIVE/thrust/dependencies/cub/examples/block/Makefile +0 -128
- spaces/CVPR/LIVE/thrust/internal/benchmark/compare_benchmark_results.py +0 -1308
- spaces/CVPR/LIVE/thrust/thrust/detail/minmax.h +0 -55
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/for_each.h +0 -159
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sort.h +0 -44
- spaces/CVPR/LIVE/thrust/thrust/type_traits/is_execution_policy.h +0 -50
- spaces/CVPR/Text2Human/Text2Human/train_vqvae.py +0 -132
- spaces/CVPR/WALT/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py +0 -157
- spaces/CVPR/regionclip-demo/detectron2/evaluation/panoptic_evaluation.py +0 -199
- spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/regnet.py +0 -452
- spaces/CofAI/chat.v2/Dockerfile +0 -133
- spaces/Cong723/gpt-academic-public/Dockerfile +0 -20
- spaces/CristianGonzalez281098/Cheto/app.py +0 -5
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/merge/__init__.py +0 -211
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py +0 -96
spaces/0x90e/ESRGAN-MANGA/ESRGAN/architecture.py
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
import ESRGAN.block as B
|
5 |
-
|
6 |
-
class RRDB_Net(nn.Module):
|
7 |
-
def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', \
|
8 |
-
mode='CNA', res_scale=1, upsample_mode='upconv'):
|
9 |
-
super(RRDB_Net, self).__init__()
|
10 |
-
n_upscale = int(math.log(upscale, 2))
|
11 |
-
if upscale == 3:
|
12 |
-
n_upscale = 1
|
13 |
-
|
14 |
-
fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
|
15 |
-
rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \
|
16 |
-
norm_type=norm_type, act_type=act_type, mode='CNA') for _ in range(nb)]
|
17 |
-
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)
|
18 |
-
|
19 |
-
if upsample_mode == 'upconv':
|
20 |
-
upsample_block = B.upconv_blcok
|
21 |
-
elif upsample_mode == 'pixelshuffle':
|
22 |
-
upsample_block = B.pixelshuffle_block
|
23 |
-
else:
|
24 |
-
raise NotImplementedError('upsample mode [%s] is not found' % upsample_mode)
|
25 |
-
if upscale == 3:
|
26 |
-
upsampler = upsample_block(nf, nf, 3, act_type=act_type)
|
27 |
-
else:
|
28 |
-
upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]
|
29 |
-
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)
|
30 |
-
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)
|
31 |
-
|
32 |
-
self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\
|
33 |
-
*upsampler, HR_conv0, HR_conv1)
|
34 |
-
|
35 |
-
def forward(self, x):
|
36 |
-
x = self.model(x)
|
37 |
-
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 !!LINK!! Full Crack Kuyhaa.md
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Easy Worship 2009 Full Crack Kuyhaa for Windows</h1>
|
3 |
-
<p>If you are looking for a software that can help you create rich media presentations for your church or worship service, you may have heard of Easy Worship 2009. This software allows you to display songs, Bible verses, videos, images, and more on a projector or video screen. You can also customize the themes, fonts, backgrounds, and transitions of your presentations with ease.</p>
|
4 |
-
<p>However, Easy Worship 2009 is not a free software. You need to purchase a license key to use it without limitations. If you don't want to spend money on this software, you may be tempted to download Easy Worship 2009 full crack kuyhaa from the internet. This is a cracked version of the software that claims to bypass the activation process and give you full access to all the features.</p>
|
5 |
-
<h2>download easy worship 2009 full crack kuyhaa</h2><br /><p><b><b>Download Zip</b> → <a href="https://byltly.com/2uKx8A">https://byltly.com/2uKx8A</a></b></p><br /><br />
|
6 |
-
<p>But is it safe and legal to download Easy Worship 2009 full crack kuyhaa? What are the risks and consequences of using a cracked software? In this article, we will answer these questions and provide you with some alternatives to download Easy Worship 2009 legally and safely.</p>
|
7 |
-
<h2>Is Downloading Easy Worship 2009 Full Crack Kuyhaa Illegal?</h2>
|
8 |
-
<p>The short answer is yes. Downloading Easy Worship 2009 full crack kuyhaa is illegal and violates the copyright laws of the software developer. By downloading and using a cracked software, you are essentially stealing the intellectual property of the software creator and depriving them of their rightful income.</p>
|
9 |
-
<p>Moreover, downloading Easy Worship 2009 full crack kuyhaa can also expose you to legal troubles. You may face fines, lawsuits, or even criminal charges if you are caught using a cracked software. The software developer or the authorities may track your IP address and take legal action against you. You may also be liable for damages if you distribute or share the cracked software with others.</p>
|
10 |
-
<h2>Is Downloading Easy Worship 2009 Full Crack Kuyhaa Safe?</h2>
|
11 |
-
<p>The short answer is no. Downloading Easy Worship 2009 full crack kuyhaa is not safe and can harm your computer and your data. There are several risks and dangers of using a cracked software, such as:</p>
|
12 |
-
<ul>
|
13 |
-
<li><b>Viruses and malware</b>: Cracked software often contains viruses, malware, spyware, ransomware, or other malicious programs that can infect your computer and compromise your security. These programs can steal your personal information, damage your files, encrypt your data, or even take control of your system.</li>
|
14 |
-
<li><b>Poor performance</b>: Cracked software often has bugs, errors, glitches, or missing features that can affect the performance and functionality of the software. You may experience crashes, freezes, slowdowns, or compatibility issues with other programs or devices.</li>
|
15 |
-
<li><b>No updates or support</b>: Cracked software does not receive any updates or support from the software developer. This means that you will not be able to enjoy the latest features, improvements, or fixes that the official version offers. You will also not be able to get any help or assistance if you encounter any problems or issues with the software.</li>
|
16 |
-
<li><b>Ethical issues</b>: Cracked software is unethical and unfair to the software developer who spent time, money, and effort to create the software. By using a cracked software, you are disrespecting their work and violating their rights. You are also depriving yourself of the benefits of using a genuine and quality software.</li>
|
17 |
-
</ul>
|
18 |
-
<h2>How to Download Easy Worship 2009 Legally and Safely?</h2>
|
19 |
-
<p>If you want to download Easy Worship 2009 legally and safely, there are two options that you can consider:</p>
|
20 |
-
<p></p>
|
21 |
-
<ol>
|
22 |
-
<li><b>Purchase a license key</b>: The best and most recommended option is to purchase a license key from the official website of Easy Worship 2009. This way, you will be able to use the software without any limitations or risks. You will also be able to receive updates and support from the software developer. You can choose from different plans and prices depending on your needs and budget.</li>
|
23 |
-
<li><b>Download a free trial</b>: Another option is to download a free trial version of Easy Worship 2009 from the official website. This way, you will be able to test the software for a limited</p> ddb901b051<br />
|
24 |
-
<br />
|
25 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Blue WhatsApp APK 2023 The Latest Version with Amazing Features and Themes.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Blue WhatsApp APK Download: Everything You Need to Know</h1>
|
3 |
-
<p>WhatsApp is one of the most popular messaging apps in the world, with over 2 billion users. But did you know that there is a modified version of WhatsApp that offers more features and customization options than the official app? It's called Blue WhatsApp, and it's a free download for Android users. In this article, we will tell you everything you need to know about Blue WhatsApp, including its features, how to download and install it, how to update it, and its pros and cons.</p>
|
4 |
-
<h2>blue whatsapp apk download</h2><br /><p><b><b>Download File</b> ⇒ <a href="https://jinyurl.com/2uNLGb">https://jinyurl.com/2uNLGb</a></b></p><br /><br />
|
5 |
-
<h2>What is Blue WhatsApp?</h2>
|
6 |
-
<p>Blue WhatsApp is a modified version of the official WhatsApp Messenger, which is developed by an independent developer named Fouad Mokdad. It is also known as Fouad WhatsApp or FMWhatsApp. Blue WhatsApp is not available on the Google Play Store, so you have to download it from a third-party website like <a href="(^1^)">bluewhats.app</a>.</p>
|
7 |
-
<p>Blue WhatsApp is based on the latest version of the official WhatsApp app, but it adds more features and customization options that are not available in the original app. For example, you can change the theme and color of your app, hide your online status and last seen, send larger files and media, use more emojis and stickers, and much more. We will discuss these features in detail in the next section.</p>
|
8 |
-
<h3>Features of Blue WhatsApp</h3>
|
9 |
-
<p>Blue WhatsApp has many features that make it stand out from the official WhatsApp app. Here are some of the most notable ones:</p>
|
10 |
-
<p>blue whatsapp apk download latest version<br />
|
11 |
-
blue whatsapp apk download 2023 free<br />
|
12 |
-
blue whatsapp apk download for android<br />
|
13 |
-
blue whatsapp apk download link<br />
|
14 |
-
blue whatsapp apk download official website<br />
|
15 |
-
blue whatsapp apk download update<br />
|
16 |
-
blue whatsapp apk download without ban<br />
|
17 |
-
blue whatsapp apk download with stickers<br />
|
18 |
-
blue whatsapp apk download modded<br />
|
19 |
-
blue whatsapp apk download anti revoke<br />
|
20 |
-
blue whatsapp apk download new features<br />
|
21 |
-
blue whatsapp apk download no ads<br />
|
22 |
-
blue whatsapp apk download old version<br />
|
23 |
-
blue whatsapp apk download online<br />
|
24 |
-
blue whatsapp apk download qr code<br />
|
25 |
-
blue whatsapp apk download review<br />
|
26 |
-
blue whatsapp apk download safe<br />
|
27 |
-
blue whatsapp apk download themes<br />
|
28 |
-
blue whatsapp apk download unlimited messages<br />
|
29 |
-
blue whatsapp apk download video call<br />
|
30 |
-
how to install blue whatsapp apk<br />
|
31 |
-
how to use blue whatsapp apk<br />
|
32 |
-
how to update blue whatsapp apk<br />
|
33 |
-
how to backup blue whatsapp apk<br />
|
34 |
-
how to uninstall blue whatsapp apk<br />
|
35 |
-
benefits of blue whatsapp apk<br />
|
36 |
-
disadvantages of blue whatsapp apk<br />
|
37 |
-
alternatives to blue whatsapp apk<br />
|
38 |
-
comparison of blue whatsapp and original whatsapp<br />
|
39 |
-
difference between blue whatsapp and gb whatsapp<br />
|
40 |
-
is blue whatsapp legal<br />
|
41 |
-
is blue whatsapp secure<br />
|
42 |
-
is blue whatsapp reliable<br />
|
43 |
-
is blue whatsapp compatible with android 11<br />
|
44 |
-
is blue whatsapp better than normal whatsapp<br />
|
45 |
-
why choose blue whatsapp over other mods<br />
|
46 |
-
why download blue whatsapp from official site<br />
|
47 |
-
what is new in blue whatsapp 2023 version<br />
|
48 |
-
what are the features of blue whatsapp modded version<br />
|
49 |
-
what are the requirements for installing blue whatsapp on android phone<br />
|
50 |
-
where to find the latest version of blue whatsapp apk file<br />
|
51 |
-
where to get the best themes for blue whatsapp app<br />
|
52 |
-
where to report any issues with blue whatsapp app usage<br />
|
53 |
-
when to update the blue whatsapp app for optimal performance<br />
|
54 |
-
when to restore the backup of your chats on blue whatsapp app</p>
|
55 |
-
<h4>Privacy and security options</h4>
|
56 |
-
<p>Blue WhatsApp gives you more control over your privacy and security settings. You can hide your online status, last seen, blue ticks, second ticks, typing status, recording status, and view status from others. You can also lock your app with a password or fingerprint, enable anti-delete messages and anti-delete status, disable forwarded tag on messages, and choose who can call you on WhatsApp.</p>
|
57 |
-
<h4>Customization and themes</h4>
|
58 |
-
<p>Blue WhatsApp lets you customize your app according to your preferences. You can change the theme and color of your app from a collection of over 3000 themes. You can also change the font style and size, the app icon, the notification icon, the chat wallpaper, the chat bubbles, the tick style, and more. You can even create your own theme and share it with others.</p>
|
59 |
-
<h4>Media and file sharing</h4>
|
60 |
-
<p>Blue WhatsApp allows you to send larger files and media than the official WhatsApp app. You can send up to 700 MB of video files, up to 50 MB of audio files, up to 100 MB of documents, and up to 30 images at once. You can also send high-quality images without compression, play videos with an external player, download status videos and photos, and enable auto-reply for messages.</p>
|
61 |
-
<h4>Other cool features</h4>
|
62 |
-
<p>Blue WhatsApp has many other cool features that enhance your user experience. For example, you can use more emojis and stickers from different sources, pin up to 100 chats instead of 3, use multiple accounts on the same device, schedule messages to be sent later, translate messages to different languages, use dark mode or DND mode, backup and restore your chats easily, and much more.</p>
|
63 |
-
<h2>How to download and install Blue WhatsApp?</h2>
|
64 |
-
<p>If you want to try out Blue WhatsApp on your Android device, you need to follow these steps:</p>
|
65 |
-
<h3>Requirements for Blue WhatsApp</h3>
|
66 |
-
<ul>
|
67 |
-
<li>An Android device running Android 4.4 or higher.</li>
|
68 |
-
<li>A stable internet connection.</li <li>A backup of your WhatsApp chats and media, if you want to restore them on Blue WhatsApp.</li>
|
69 |
-
<li>The Blue WhatsApp APK file, which you can download from <a href="">bluewhats.app</a>.</li>
|
70 |
-
</ul>
|
71 |
-
<h3>Steps to download and install Blue WhatsApp</h3>
|
72 |
-
<ol>
|
73 |
-
<li>First, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
74 |
-
<li>Next, you need to uninstall the official WhatsApp app from your device. To do this, go to Settings > Apps > WhatsApp and tap on Uninstall. Make sure you have backed up your chats and media before doing this.</li>
|
75 |
-
<li>Then, you need to download the Blue WhatsApp APK file from <a href="">bluewhats.app</a>. or any other trusted website. You can use your browser or a file manager app to do this.</li>
|
76 |
-
<li>After downloading the APK file, you need to locate it on your device and tap on it to start the installation process. You may see a warning message asking you to confirm the installation. Tap on Install and wait for a few seconds.</li>
|
77 |
-
<li>Once the installation is complete, you need to open the Blue WhatsApp app and agree to the terms and conditions. Then, you need to enter your phone number and verify it with an OTP code.</li>
|
78 |
-
<li>Finally, you need to restore your chats and media from the backup, if you have one. To do this, tap on Restore and select the backup file from your device. Wait for the restoration process to finish and then enjoy using Blue WhatsApp.</li>
|
79 |
-
</ol>
|
80 |
-
<h2>How to update Blue WhatsApp?</h2>
|
81 |
-
<p>Blue WhatsApp is not available on the Google Play Store, so you cannot update it automatically like other apps. However, there are two ways to update Blue WhatsApp manually or enable the auto-update option.</p>
|
82 |
-
<h3>Check for updates manually</h3>
|
83 |
-
<p>You can check for updates manually by visiting the official website of Blue WhatsApp at <a href="">bluewhats.app</a>. or any other trusted website that provides the latest version of the APK file. You can also check for updates from within the app by going to Menu > Fouad Mods > Updates. If there is a new version available, you can download it and install it over the existing app without losing your data.</p>
|
84 |
-
<h3>Enable auto-update option</h3>
|
85 |
-
<p>You can also enable the auto-update option in Blue WhatsApp by going to Menu > Fouad Mods > Updates > Auto Update Check. You can choose how often you want the app to check for updates, such as daily, weekly, or monthly. You can also choose whether you want to download the updates automatically or manually. If you enable this option, you will get a notification whenever there is a new version available, and you can install it easily.</p>
|
86 |
-
<h2>Pros and cons of Blue WhatsApp</h2>
|
87 |
-
<p>Blue WhatsApp has many advantages over the official WhatsApp app, but it also has some disadvantages that you should be aware of. Here are some of the pros and cons of using Blue WhatsApp:</p>
|
88 |
-
<h3>Pros of Blue WhatsApp</h3>
|
89 |
-
<ul>
|
90 |
-
<li>You can enjoy more features and customization options than the official app.</li>
|
91 |
-
<li>You can have more control over your privacy and security settings.</li <li>You can send larger files and media than the official app.</li>
|
92 |
-
<li>You can use more emojis and stickers than the official app.</li>
|
93 |
-
<li>You can backup and restore your chats easily.</li>
|
94 |
-
</ul>
|
95 |
-
<h3>Cons of Blue WhatsApp</h3>
|
96 |
-
<ul>
|
97 |
-
<li>You may face some bugs and glitches in the app, as it is not an official product.</li>
|
98 |
-
<li>You may get banned from WhatsApp for using a modified version of the app, as it violates their terms of service.</li>
|
99 |
-
<li>You may not get the latest updates and features as soon as they are released by WhatsApp, as Blue WhatsApp depends on the official app for its base.</li>
|
100 |
-
<li>You may compromise your data and privacy by downloading the app from an untrusted source, as it may contain malware or spyware.</li>
|
101 |
-
</ul>
|
102 |
-
<h2>Conclusion</h2>
|
103 |
-
<p>Blue WhatsApp is a modified version of the official WhatsApp app that offers more features and customization options than the original app. It is a free download for Android users, but it is not available on the Google Play Store. You have to download it from a third-party website like <a href="">bluewhats.app</a>. or any other trusted website. You can enjoy more privacy and security options, more media and file sharing options, more emojis and stickers, and more themes and colors with Blue WhatsApp. However, you should also be aware of the risks and drawbacks of using Blue WhatsApp, such as bugs, glitches, bans, delays, and data breaches. You should always backup your chats and media before using Blue WhatsApp, and update it regularly to avoid any issues.</p>
|
104 |
-
<h2>FAQs</h2>
|
105 |
-
<p>Here are some of the frequently asked questions about Blue WhatsApp:</p>
|
106 |
-
<ol>
|
107 |
-
<li><b>Is Blue WhatsApp safe to use?</b><br>Blue WhatsApp is safe to use if you download it from a trusted source like <a href="">bluewhats.app</a>. or any other website that provides the latest version of the APK file. However, you should always be careful about downloading apps from unknown sources, as they may contain malware or spyware that can harm your device or data. You should also scan the APK file with an antivirus app before installing it.</li>
|
108 |
-
<li><b>Is Blue WhatsApp legal to use?</b><br>Blue WhatsApp is not legal to use, as it violates the terms of service of WhatsApp. WhatsApp does not allow users to use any modified version of their app, as it may compromise their security and privacy policies. If you use Blue WhatsApp, you may get banned from WhatsApp for violating their rules. You should use Blue WhatsApp at your own risk.</li>
|
109 |
-
<li><b>Can I use Blue WhatsApp and official WhatsApp on the same device?</b><br>Yes, you can use Blue WhatsApp and official WhatsApp on the same device, but you need to have different phone numbers for each app. You cannot use the same phone number for both apps, as it will cause conflicts and errors. You can also use other modified versions of WhatsApp like GBWhatsApp or YoWhatsApp on the same device, but again, you need to have different phone numbers for each app.</li>
|
110 |
-
<li><b>How can I backup my chats and media on Blue WhatsApp?</b><br>You can backup your chats and media on Blue WhatsApp by going to Menu > Settings > Chats > Chat Backup. You can choose to backup your data on your device or on Google Drive. You can also choose how often you want to backup your data, such as daily, weekly, or monthly. You can also backup your data manually by tapping on Backup Now.</li>
|
111 |
-
<li><b>How can I restore my chats and media on Blue WhatsApp?</b><br>You can restore your chats and media on Blue WhatsApp by going to Menu > Settings > Chats > Chat Backup. You can choose to restore your data from your device or from Google Drive. You need to have the same phone number and Google account that you used to backup your data. You can also restore your data manually by tapping on Restore Now.</li>
|
112 |
-
</ol></p> 401be4b1e0<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Betty Azar English Grammar Improve Your Skills with Exercises and Tests.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Betty Azar English Grammar: A Guide for ESL Learners</h1>
|
3 |
-
<p>If you are learning English as a second or foreign language, you might be looking for a reliable and effective grammar book to help you improve your skills. One of the most popular and widely used grammar books in the world is the <strong>Azar Grammar Series</strong>, written by <strong>Betty Schrampfer Azar</strong>. In this article, we will tell you who Betty Azar is, what her grammar series is about, and why you should download her books. We will also show you how to download two of her best-selling books: <em>Basic English Grammar, Book A</em> and <em>Understanding and Using English Grammar, Book B</em>.</p>
|
4 |
-
<h2>download betty azar english grammar</h2><br /><p><b><b>Download</b> → <a href="https://jinyurl.com/2uNP73">https://jinyurl.com/2uNP73</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>Who is Betty Azar?</h3>
|
7 |
-
<p>Betty Schrampfer Azar is an American teacher and author of English grammar textbooks for students of English as a second or foreign language. She has more than 40 years of experience in teaching and writing about English grammar. She has a master's degree in Teaching English as a Foreign Language (TEFL) from the University of Illinois. She has taught at various universities and language schools in the United States and abroad. She is also the founder and president of Azar Associates, a company that develops and publishes grammar materials.</p>
|
8 |
-
<h3>What is the Azar Grammar Series?</h3>
|
9 |
-
<p>The Azar Grammar Series is a collection of books that cover different levels and aspects of English grammar. The series consists of four main books: <em>Fundamentals of English Grammar, Book C</em>, <em>Understanding and Using English Grammar, Book B</em>, <em>Basic English Grammar, Book A</em>, and <em>Essential Online Resources</em>. Each book has a student edition, a teacher's guide, a workbook, an answer key, and an interactive CD-ROM. The books are designed to help students learn and practice grammar rules, structures, and usage in various contexts and situations. The books also include exercises, quizzes, tests, charts, appendices, glossaries, and references.</p>
|
10 |
-
<h3>Why should you download Betty Azar English Grammar?</h3>
|
11 |
-
<p>You should download Betty Azar English Grammar because it is one of the best resources for learning and mastering English grammar. Here are some of the benefits of using her books:</p>
|
12 |
-
<ul>
|
13 |
-
<li>They are based on sound pedagogical principles and research.</li>
|
14 |
-
<li>They are clear, concise, comprehensive, and user-friendly.</li>
|
15 |
-
<li>They are suitable for self-study or classroom use.</li>
|
16 |
-
<li>They are compatible with different learning styles and preferences.</li>
|
17 |
-
<li>They are updated and revised regularly to reflect current trends and standards.</li>
|
18 |
-
<li>They are affordable and accessible online.</li>
|
19 |
-
</ul>
|
20 |
-
<h2>How to download Betty Azar English Grammar</h2>
|
21 |
-
<h3>Basic English Grammar, Book A</h3>
|
22 |
-
<h4>Features of the book</h4>
|
23 |
-
<p><em>Basic English Grammar, Book A</em> is the first book in the series. It is intended for beginner to low-intermediate students of English. It covers the basic elements of grammar, such as nouns, verbs, pronouns, adjectives, adverbs, prepositions, conjunctions, sentences, questions, negation, tense, aspect, voice, mood, modals, conditionals, gerunds, infinitives, clauses, etc. It also introduces some common vocabulary and expressions. The book has 13 chapters and 488 pages.</p>
|
24 |
-
<h4>How to download the book</h4>
|
25 |
-
<p>You can download <em>Basic English Grammar, Book A</em> from Google Drive by following these steps:</p>
|
26 |
-
< <p>1. Go to this link: <a href="">Basic English Grammar, Book A</a>.</p>
|
27 |
-
<p>download betty azar understanding and using english grammar pdf<br />
|
28 |
-
download betty azar basic english grammar 3rd edition<br />
|
29 |
-
download betty azar basic english grammar 2nd edition<br />
|
30 |
-
download betty azar fundamentals of english grammar 4th edition<br />
|
31 |
-
download betty azar english grammar workbook pdf<br />
|
32 |
-
download betty azar english grammar for esl learners<br />
|
33 |
-
download betty azar english grammar interactive cd-rom<br />
|
34 |
-
download betty azar english grammar teacher's guide pdf<br />
|
35 |
-
download betty azar english grammar test bank pdf<br />
|
36 |
-
download betty azar english grammar answer key pdf<br />
|
37 |
-
download betty azar english grammar audio files<br />
|
38 |
-
download betty azar english grammar video series<br />
|
39 |
-
download betty azar english grammar powerpoint presentations<br />
|
40 |
-
download betty azar english grammar charts pdf<br />
|
41 |
-
download betty azar english grammar exercises pdf<br />
|
42 |
-
download betty azar english grammar online course<br />
|
43 |
-
download betty azar english grammar flashcards pdf<br />
|
44 |
-
download betty azar english grammar games and activities<br />
|
45 |
-
download betty azar english grammar supplementary resources<br />
|
46 |
-
download betty azar english grammar in use pdf<br />
|
47 |
-
download betty azar advanced english grammar pdf<br />
|
48 |
-
download betty azar intermediate english grammar pdf<br />
|
49 |
-
download betty azar essential english grammar pdf<br />
|
50 |
-
download betty azar practical english grammar pdf<br />
|
51 |
-
download betty azar modern english grammar pdf<br />
|
52 |
-
download betty azar comprehensive english grammar pdf<br />
|
53 |
-
download betty azar communicative english grammar pdf<br />
|
54 |
-
download betty azar contrastive english grammar pdf<br />
|
55 |
-
download betty azar contextualized english grammar pdf<br />
|
56 |
-
download betty azar corpus-based english grammar pdf<br />
|
57 |
-
download betty azar simplified english grammar pdf<br />
|
58 |
-
download betty azar academic english grammar pdf<br />
|
59 |
-
download betty azar business english grammar pdf<br />
|
60 |
-
download betty azar spoken english grammar pdf<br />
|
61 |
-
download betty azar written english grammar pdf<br />
|
62 |
-
download betty azar american english grammar pdf<br />
|
63 |
-
download betty azar british english grammar pdf<br />
|
64 |
-
download betty azar global english grammar pdf<br />
|
65 |
-
download betty azar multicultural english grammar pdf<br />
|
66 |
-
download betty azar generative english grammar pdf<br />
|
67 |
-
download betty azar functional english grammar pdf<br />
|
68 |
-
download betty azar descriptive english grammar pdf<br />
|
69 |
-
download betty azar prescriptive english grammar pdf<br />
|
70 |
-
download betty azar historical english grammar pdf<br />
|
71 |
-
download betty azar comparative english grammar pdf<br />
|
72 |
-
download betty azar pedagogical english grammar pdf<br />
|
73 |
-
download betty azar cognitive english grammar pdf<br />
|
74 |
-
download betty azar discourse analysis and english grammar pdf<br />
|
75 |
-
download betty azar error analysis and english grammar pdf</p>
|
76 |
-
<p>2. Click on the download icon on the top right corner of the screen.</p>
|
77 |
-
<p>3. Choose a location on your device where you want to save the file.</p>
|
78 |
-
<p>4. Wait for the download to complete and enjoy reading the book.</p>
|
79 |
-
<h3>Understanding and Using English Grammar, Book B</h3>
|
80 |
-
<h4>Features of the book</h4>
|
81 |
-
<p><em>Understanding and Using English Grammar, Book B</em> is the second book in the series. It is intended for intermediate to advanced students of English. It covers more complex and challenging aspects of grammar, such as verb tenses and forms, passive voice, causative verbs, noun clauses, adjective clauses, adverb clauses, noun modifiers, parallelism, inversion, ellipsis, etc. It also provides more practice and review activities, as well as additional vocabulary and expressions. The book has 16 chapters and 530 pages.</p>
|
82 |
-
<h4>How to download the book</h4>
|
83 |
-
<p>You can download <em>Understanding and Using English Grammar, Book B</em> from Google Drive by following these steps:</p>
|
84 |
-
<p>1. Go to this link: <a href="">Understanding and Using English Grammar, Book B</a>.</p>
|
85 |
-
<p>2. Click on the download icon on the top right corner of the screen.</p>
|
86 |
-
<p>3. Choose a location on your device where you want to save the file.</p>
|
87 |
-
<p>4. Wait for the download to complete and enjoy reading the book.</p>
|
88 |
-
<h2>Conclusion</h2>
|
89 |
-
<h3>Summary of the main points</h3>
|
90 |
-
<p>In this article, we have introduced you to Betty Azar, a renowned teacher and author of English grammar books. We have also explained what her grammar series is about and why you should download her books. We have shown you how to download two of her books: <em>Basic English Grammar, Book A</em> and <em>Understanding and Using English Grammar, Book B</em>. These books will help you learn and master English grammar in a fun and effective way.</p>
|
91 |
-
<h3>Call to action</h3>
|
92 |
-
<p>If you are interested in downloading more books from the Azar Grammar Series, you can visit her official website: <a href="">Azar Grammar</a>. There you will find more information about her other books, such as <em>Fundamentals of English Grammar, Book C</em> and <em>Essential Online Resources</em>. You will also find more resources and materials for learning and teaching English grammar, such as videos, podcasts, blogs, newsletters, webinars, etc.</p>
|
93 |
-
<p>We hope you have enjoyed this article and found it useful. If you have any questions or feedback, please leave a comment below. We would love to hear from you. And don't forget to share this article with your friends and fellow learners who might benefit from it. Thank you for reading!</p>
|
94 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
95 |
-
<h3>Q: How can I access the interactive CD-ROMs that come with the books?</h3>
|
96 |
-
<p>A: The interactive CD-ROMs are included in the student editions of the books. You can insert them into your computer's CD drive and follow the instructions on the screen. Alternatively, you can access them online by registering at <a href="">Pearson ELT USA</a>, the publisher of the books.</p>
|
97 |
-
<h3>Q: How can I check my answers to the exercises in the books?</h3>
|
98 |
-
<p>A: The answer keys are included in the teacher's guides of the books. You can also find them online at <a href="">AzarGrammar.com/Classroom/FEG/FEG_AK.pdf</a>, <a href="">AzarGrammar.com/Classroom/UUEG/UUEG_AK.pdf</a>, and <a href="">AzarGrammar.com/Classroom/BEG/BEG_AK.pdf</a>.</p>
|
99 |
-
<h3>Q: How can I get more practice and review activities for each chapter in the books?</h3>
|
100 |
-
<p>A: The workbooks that accompany each book provide more practice and review activities for each chapter. You can also find more online exercises at <a href="">AzarGrammar.com/Exercises/Exercises.htm</a>.</p>
|
101 |
-
<h3>Q: How can I contact Betty Azar or her team if I have any questions or suggestions?</h3>
|
102 |
-
<p>A: You can contact Betty Azar or her team by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You <p>A: You can also connect with them on social media platforms, such as Facebook, Twitter, Instagram, and YouTube. You can find the links to their accounts at <a href="">AzarGrammar.com/Contact.htm</a>.</p>
|
103 |
-
<h3>Q: How can I get more tips and advice on learning and teaching English grammar?</h3>
|
104 |
-
<p>A: You can subscribe to Betty Azar's newsletter, which provides monthly updates on grammar topics, resources, events, and more. You can also read her blog, which features articles, interviews, stories, and insights from her and other experts in the field. You can also watch her videos and podcasts, which offer explanations, demonstrations, examples, and discussions on various grammar issues. You can find all these materials at <a href="">AzarGrammar.com/News.htm</a>.</p> 197e85843d<br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Google Play Store APKs from APKMirror - The Easy Way.md
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Google Play Store from APKMirror</h1>
|
3 |
-
<p>Google Play Store is the official app store for Android devices, where you can find and download millions of apps, games, movies, books, and more. It also offers various features such as automatic updates, parental controls, in-app purchases, subscriptions, and rewards. However, sometimes you may not be able to access Google Play Store on your device due to regional restrictions, compatibility issues, or other reasons. In such cases, you may want to look for an alternative source to get your favorite apps and games.</p>
|
4 |
-
<h2>download google play store apkmirror</h2><br /><p><b><b>DOWNLOAD</b> ✔ <a href="https://jinyurl.com/2uNMHc">https://jinyurl.com/2uNMHc</a></b></p><br /><br />
|
5 |
-
<p>One of the most popular and trusted sources for Android apps and games is APKMirror. APKMirror is a website and an app that hosts thousands of free APK files that you can download and install on your device. APK files are the installation packages for Android apps that contain all the necessary files and resources for the app to run. By downloading APK files from APKMirror, you can get access to apps and games that are not available on Google Play Store, or get the latest versions before they are officially released.</p>
|
6 |
-
<p>In this article, we will show you how to download Google Play Store from APKMirror and install it on your device. We will also show you how to check the integrity of the APK file to make sure it is safe and authentic. Let's get started!</p>
|
7 |
-
<h2>How to Download Google Play Store from APKMirror</h2>
|
8 |
-
<p>To download Google Play Store from APKMirror, you will need to enable unknown sources on your device, download the APK file from APKMirror website or app, and install it using a file manager or an APK installer app. Here are the detailed steps:</p>
|
9 |
-
<h3>Step 1: Enable unknown sources on your device</h3>
|
10 |
-
<p>By default, Android devices only allow you to install apps from Google Play Store or other trusted sources. To install apps from other sources, such as APKMirror, you will need to enable unknown sources on your device. This will allow you to install apps from outside of Google Play Store.</p>
|
11 |
-
<p>To enable unknown sources on your device, follow these steps:</p>
|
12 |
-
<p>How to download google play store from apkmirror<br />
|
13 |
-
Download google play store apk file from apkmirror<br />
|
14 |
-
Install google play store using apkmirror installer app<br />
|
15 |
-
Download google play store latest version from apkmirror<br />
|
16 |
-
Download google play store for android devices from apkmirror<br />
|
17 |
-
Download google play store without root from apkmirror<br />
|
18 |
-
Download google play store for fire tablet from apkmirror<br />
|
19 |
-
Download google play store for huawei phone from apkmirror<br />
|
20 |
-
Download google play store modded apk from apkmirror<br />
|
21 |
-
Download google play store beta apk from apkmirror<br />
|
22 |
-
Download google play store update apk from apkmirror<br />
|
23 |
-
Download google play store for pc from apkmirror<br />
|
24 |
-
Download google play store for chromebook from apkmirror<br />
|
25 |
-
Download google play store for smart tv from apkmirror<br />
|
26 |
-
Download google play store for android tv from apkmirror<br />
|
27 |
-
Download google play store for carplay from apkmirror<br />
|
28 |
-
Download google play store for wear os from apkmirror<br />
|
29 |
-
Download google play store for android auto from apkmirror<br />
|
30 |
-
Download google play store for samsung galaxy from apkmirror<br />
|
31 |
-
Download google play store for xiaomi phone from apkmirror<br />
|
32 |
-
Download google play store for oppo phone from apkmirror<br />
|
33 |
-
Download google play store for vivo phone from apkmirror<br />
|
34 |
-
Download google play store for oneplus phone from apkmirror<br />
|
35 |
-
Download google play store for nokia phone from apkmirror<br />
|
36 |
-
Download google play store for lg phone from apkmirror<br />
|
37 |
-
Download google play store for sony phone from apkmirror<br />
|
38 |
-
Download google play store for motorola phone from apkmirror<br />
|
39 |
-
Download google play store for lenovo phone from apkmirror<br />
|
40 |
-
Download google play store for asus phone from apkmirror<br />
|
41 |
-
Download google play store for zte phone from apkmirror<br />
|
42 |
-
Download google play store for realme phone from apkmirror<br />
|
43 |
-
Download google play store for tecno phone from apkmirror<br />
|
44 |
-
Download google play store for infinix phone from apkmirror<br />
|
45 |
-
Download google play store for itel phone from apkmirror<br />
|
46 |
-
Download google play store for gionee phone from apkmirror<br />
|
47 |
-
Download google play store for micromax phone from apkmirror<br />
|
48 |
-
Download google play store for lava phone from apkmirror<br />
|
49 |
-
Download google play store for karbonn phone from apkmirror<br />
|
50 |
-
Download google play store for spice phone from apkmirror<br />
|
51 |
-
Download google play store for intex phone from apkmirror<br />
|
52 |
-
Download google play store for leeco phone from apkmirror<br />
|
53 |
-
Download google play store for meizu phone from apkmirror<br />
|
54 |
-
Download google play store for coolpad phone from apkmirror<br />
|
55 |
-
Download google play store for elephone phone from apkmirror<br />
|
56 |
-
Download google play store for doogee phone from apkmirror<br />
|
57 |
-
Download google play store for umidigi phone from apkmirror<br />
|
58 |
-
Download google play store for cubot phone from apkmirror<br />
|
59 |
-
Download google play store for oukitel phone from apkmirror</p>
|
60 |
-
<ul>
|
61 |
-
<li>Go to your device settings and tap Apps & Notifications (or Apps in older versions of Android).</li>
|
62 |
-
<li>Tap the three dots in the upper-right corner.</li>
|
63 |
-
<li>Tap Special access.</li>
|
64 |
-
<li>Tap Install unknown apps.</li>
|
65 |
-
<li>Tap Chrome (or whichever web browser you use).</li>
|
66 |
-
<li>Move Allow from this source to the On position.</li>
|
67 |
-
</ul>
|
68 |
-
<p>You can also enable unknown sources for other apps that can install APK files on your device, such as file managers or APK installer apps.</p>
|
69 |
-
<h3>Step 2: Download the APK file from APKMirror website or app</h3>
|
70 |
-
<p>Once you have enabled unknown sources on your device, you can download the APK file for Google Play Store from APKMirror website or app. To do this, follow these steps:</p>
|
71 |
-
<ul>
|
72 |
-
<li>Open your web browser and go to [APKMirror](^1^), one of the most popular and trusted sources for Android apps and games.</li>
|
73 |
-
<li>Search for Google Play Store in the search bar.</li>
|
74 |
-
<li>Select the version that is compatible with your device and tap Download APK.</li>
|
75 |
-
<li>Tap OK when prompted to download the file.</li>
|
76 |
-
<li>Wait for the download to finish.</li>
|
77 |
-
</ul>
|
78 |
-
<p>You can also download the APKMirror app from their website and use it to browse and download APK files on your device. The app has a simple and user-friendly interface that lets you find and install apps and games easily.</p>
|
79 |
-
<h3>Step 3: Install the APK file using a file manager or an APK installer app</h3>
|
80 |
-
<p>After you have downloaded the APK file for Google Play Store, you will need to install it on your device using a file manager or an APK installer app. To do this, follow these steps:</p>
|
81 |
-
<ul>
|
82 |
-
<li>Open your file manager app and locate the downloaded APK file. It should be in the Downloads folder by default.</li>
|
83 |
-
<li>Tap on the APK file to open it.</li>
|
84 |
-
<li>Tap Install when prompted to install the app.</li>
|
85 |
-
<li>Wait for the installation to complete.</li>
|
86 |
-
<li>Tap Open to launch Google Play Store or tap Done to exit.</li>
|
87 |
-
</ul>
|
88 |
-
<p>You can also use an APK installer app, such as [APK Installer], to install APK files on your device. These apps can scan your device for APK files, sort them by name, size, or date, and install them with one tap.</p>
|
89 |
-
<h2>How to Check the Integrity of the APK File</h2>
|
90 |
-
<p>Before you install any APK file on your device, you should always check its integrity to make sure it is safe and authentic. This means verifying that the file has not been tampered with or modified by malicious actors, and that it matches the original file from the developer. There are several ways to check the integrity of an APK file, such as using apksigner tool, using APK Analyzer, or using hash apps. Here are some of them:</p>
|
91 |
-
<h3>Use apksigner tool to verify the signature and certificate of the file</h3>
|
92 |
-
<p>apksigner is a command-line tool that can verify the signature and certificate of an APK file. The signature is a digital code that proves that the file was signed by the developer, and the certificate is a document that contains information about the developer and the app. By verifying these elements, you can ensure that the file is authentic and trustworthy.</p>
|
93 |
-
<p>To use apksigner tool, you will need a computer with Java installed, and a USB cable to connect your device to your computer. You will also need to enable USB debugging on your device. To do this, go to your device settings, tap About phone, tap Build number seven times, go back to settings, tap Developer options, and move USB debugging to the On position.</p>
|
94 |
-
<p>To use apksigner tool, follow these steps:</p>
|
95 |
-
<ul>
|
96 |
-
<li>Download apksigner tool from [here] and unzip it on your computer.</li>
|
97 |
-
<li>Copy the APK file you want to verify from your device to your computer.</li>
|
98 |
-
<li>Open a command prompt window on your computer and navigate to the folder where you unzipped apksigner tool.</li>
|
99 |
-
<li>Type apksigner verify -v <path-to-apk-file> and press Enter. Replace <path-to-apk-file> with the actual path of the APK file on your computer.</li>
|
100 |
-
<li>Check the output of the command. If it says "Verified using v1 scheme (JAR signing): true" and "Verified using v2 scheme (APK Signature Scheme v2): true", then the file is valid and signed by the developer. If it says "DOES NOT VERIFY" or "ERROR", then the file is invalid or corrupted.</li>
|
101 |
-
</ul>
|
102 |
-
<h3>Use APK Analyzer to inspect the contents and size of the file</h3>
|
103 |
-
<p>APK Analyzer is a tool that can inspect the contents and size of an APK file. It can show you information such as app name, package name, version code, version name, permissions, activities, services, resources, assets, libraries, DEX files, native libraries, manifest, certificates, signatures, and more. By inspecting these elements, you can learn more about Google Play Store are: - You can download apps and games that are not available on Google Play Store due to regional restrictions, compatibility issues, or other reasons. - You can download the latest versions of apps and games before they are officially released on Google Play Store, and enjoy the new features and bug fixes. - You can download older versions of apps and games if you prefer them over the newer ones, or if the newer ones don't work well on your device. - You can download APK files directly to your device or computer, and install them offline or on other devices without using Google Play Store.</p>
|
104 |
-
<h3>What are the risks of installing APK files from unknown sources?</h3>
|
105 |
-
<p>Some of the risks of installing APK files from unknown sources are: - You may download and install malicious apps that contain malware, viruses, spyware, or adware that can harm your device or steal your data. - You may download and install fake or modified apps that don't work as intended, or that have unwanted features or ads. - You may download and install apps that violate the terms and conditions of Google Play Store, or that infringe the intellectual property rights of the developers or publishers. - You may download and install apps that are incompatible with your device or operating system, or that cause performance issues or crashes.</p>
|
106 |
-
<h3>How can I update Google Play Store after installing it from APKMirror?</h3>
|
107 |
-
<p>After you install Google Play Store from APKMirror, you can update it in two ways: - You can enable auto-update for Google Play Store on your device. To do this, open Google Play Store, tap the three lines in the upper-left corner, tap Settings, tap Auto-update apps, and select Over any network or Over Wi-Fi only. This will allow Google Play Store to update itself automatically when a new version is available. - You can manually update Google Play Store by downloading the latest version from APKMirror and installing it over the existing one. To do this, follow the same steps as described in the previous section.</p>
|
108 |
-
<h3>How can I uninstall Google Play Store if I don't want it anymore?</h3>
|
109 |
-
<p>If you want to uninstall Google Play Store from your device, you can do it in two ways: - You can disable Google Play Store on your device. To do this, go to your device settings, tap Apps & Notifications (or Apps), tap Google Play Store, tap Disable, and tap OK. This will prevent Google Play Store from running on your device, but it will not remove it completely. - You can remove Google Play Store from your device using a root uninstaller app. To do this, you will need to root your device first. Rooting is a process that gives you full access and control over your device's system. However, rooting is risky and may void your warranty, damage your device, or expose it to security threats. Therefore, you should only root your device if you know what you are doing and at your own risk. After rooting your device, you can use a root uninstaller app, such as [System App Remover], to remove Google Play Store from your device completely.</p>
|
110 |
-
<h3>How can I find more apps and games on APKMirror?</h3>
|
111 |
-
<p>If you want to find more apps and games on APKMirror, you can use the following methods: - You can browse the categories and subcategories of apps and games on APKMirror website or app. You can also filter them by popularity, rating, date, size, or name. - You can search for specific apps and games using the search bar on APKMirror website or app. You can also use advanced search options to refine your results by category, version, minimum Android version, DPI, architecture, or signature. - You can follow APKMirror on social media platforms such as Facebook, Twitter, Instagram, or Telegram to get updates on the latest apps and games available on APKMirror. - You can subscribe to APKMirror newsletter to get email notifications on the latest apps and games available on APKMirror.</p> 401be4b1e0<br />
|
112 |
-
<br />
|
113 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Summertime Saga on iPhone A Guide for iOS Users.md
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Summertime Saga on iPhone</h1>
|
3 |
-
<p>If you are looking for a fun and engaging game that combines adventure, romance, comedy, and drama, you might want to check out Summertime Saga. This is a popular visual novel game that lets you explore a fictional town, interact with various characters, and pursue different storylines. In this article, we will show you how to download Summertime Saga on your iPhone using an emulator. We will also share some tips and tricks for playing the game on your mobile device.</p>
|
4 |
-
<h2>download summertime saga on iphone</h2><br /><p><b><b>Download File</b> ►►►►► <a href="https://jinyurl.com/2uNRdL">https://jinyurl.com/2uNRdL</a></b></p><br /><br />
|
5 |
-
<h2>What is Summertime Saga?</h2>
|
6 |
-
<p>Summertime Saga is a game developed by DarkCookie and his team. It is inspired by classic dating sims and adult games, but it also has elements of mystery, comedy, and drama. The game follows the story of a young man who is trying to cope with the death of his father, while also dealing with school, family, friends, and romance. The game has over 65 characters to meet and interact with, 30 locations to visit, and 20 mini-games to play. The game is updated regularly with new content and features.</p>
|
7 |
-
<h2>Why play Summertime Saga on iPhone?</h2>
|
8 |
-
<p>Summertime Saga is a game that can be enjoyed on various platforms, including Windows, Mac, Linux, Android, and iOS. However, playing the game on your iPhone has some advantages over other devices. Here are some of them:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can play the game anytime and anywhere, as long as you have your iPhone with you.</li>
|
11 |
-
<li>You can enjoy the game's graphics and sound effects on a high-quality screen and speaker.</li>
|
12 |
-
<li>You can use touch controls to navigate the game's interface and menus.</li>
|
13 |
-
<li>You can take screenshots and share them with your friends or social media.</li>
|
14 |
-
</ul>
|
15 |
-
<h2>How to download Summertime Saga on iPhone using an emulator?</h2>
|
16 |
-
<p>Unfortunately, Summertime Saga is not available on the App Store due to its mature content. However, there is a way to play the game on your iPhone using an emulator. An emulator is a software that mimics the functionality of another device or system. In this case, you can use an emulator that can run Android or Windows apps on your iPhone. Here are some of the best emulators for iPhone that you can use:</p>
|
17 |
-
<p>How to install summertime saga on iphone<br />
|
18 |
-
Summertime saga ios download link<br />
|
19 |
-
Summertime saga for iphone free download<br />
|
20 |
-
Summertime saga iphone app store<br />
|
21 |
-
Summertime saga ios gameplay<br />
|
22 |
-
Summertime saga ios cheats and tips<br />
|
23 |
-
Summertime saga ios update<br />
|
24 |
-
Summertime saga ios review<br />
|
25 |
-
Summertime saga ios compatible devices<br />
|
26 |
-
Summertime saga ios mod apk<br />
|
27 |
-
Summertime saga ios walkthrough<br />
|
28 |
-
Summertime saga ios characters<br />
|
29 |
-
Summertime saga ios save file<br />
|
30 |
-
Summertime saga ios bug fixes<br />
|
31 |
-
Summertime saga ios best routes<br />
|
32 |
-
Summertime saga ios endings<br />
|
33 |
-
Summertime saga ios secrets and easter eggs<br />
|
34 |
-
Summertime saga ios wiki<br />
|
35 |
-
Summertime saga ios reddit<br />
|
36 |
-
Summertime saga ios discord<br />
|
37 |
-
Summertime saga ios patreon<br />
|
38 |
-
Summertime saga ios latest version<br />
|
39 |
-
Summertime saga ios requirements<br />
|
40 |
-
Summertime saga ios download size<br />
|
41 |
-
Summertime saga ios offline mode<br />
|
42 |
-
Summertime saga iphone no jailbreak<br />
|
43 |
-
Summertime saga iphone emulator<br />
|
44 |
-
Summertime saga iphone alternative games<br />
|
45 |
-
Summertime saga iphone fan art<br />
|
46 |
-
Summertime saga iphone memes<br />
|
47 |
-
Summertime saga iphone screenshots<br />
|
48 |
-
Summertime saga iphone videos<br />
|
49 |
-
Summertime saga iphone news and updates<br />
|
50 |
-
Summertime saga iphone faq and guide<br />
|
51 |
-
Summertime saga iphone forum and community<br />
|
52 |
-
Summertime saga iphone support and feedback<br />
|
53 |
-
Summertime saga iphone donation and support<br />
|
54 |
-
Summertime saga iphone features and benefits<br />
|
55 |
-
Summertime saga iphone pros and cons<br />
|
56 |
-
Summertime saga iphone ratings and reviews</p>
|
57 |
-
<h3>Eclipse</h3>
|
58 |
-
<p>Eclipse is a web-based emulator that can run various games and apps without requiring any installation or jailbreak. It supports Game Boy Advance, Game Boy Color, Nintendo DS, Sega Genesis, Super Nintendo, and more. To use Eclipse, you need to follow these steps:</p>
|
59 |
-
<ol>
|
60 |
-
<li>Open Safari on your iPhone and go to <a href="https://eclipseemu.me/play">https://eclipseemu.me/play</a>.</li>
|
61 |
-
<li>Tap on the plus icon at the bottom of the screen and select Add to Home Screen.</li>
|
62 |
-
<li>Name the app as Eclipse and tap Add.</li>
|
63 |
-
<li>Launch Eclipse from your home screen and tap on the plus icon at the top right corner.</li>
|
64 |
-
<li>Select Browse... and choose a ROM file from your device or iCloud Drive. You can download Summertime Saga ROM files from <a href="https://summertimesaga.com/download">https://summertimesaga.com/download</a>.</li>
|
65 |
-
<li>Wait for the ROM file to load and start playing Summertime Saga on your iPhone.</li>
|
66 |
-
</ol>
|
67 |
-
<h3>Citra</h3>
|
68 |
-
<p>Citra is a Nintendo 3DS emulator that can run Summertime Saga on your iPhone with high performance and graphics. It also supports online multiplayer, controller support, and cheat codes. To use Citra, you need to follow these steps:</p>
|
69 |
-
<ol>
|
70 |
-
<li>Download the Citra app from the App Store on your iPhone.</li>
|
71 |
-
<li>Launch Citra and tap on the plus icon at the bottom right corner.</li>
|
72 |
-
<li>Select Scan QR Code and scan the QR code from <a href="https://summertimesaga.com/download">https://summertimesaga.com/download</a>.</li>
|
73 |
-
<li>Wait for the game to download and install on your iPhone.</li>
|
74 |
-
<li>Tap on the game icon and start playing Summertime Saga on your iPhone.</li>
|
75 |
-
</ol>
|
76 |
-
<h3>PPSSPP</h3>
|
77 |
-
<p>PPSSPP is a PlayStation Portable emulator that can run Summertime Saga on your iPhone with smooth gameplay and customization options. It also supports save states, cloud sync, and external controllers. To use PPSSPP, you need to follow these steps:</p>
|
78 |
-
<ol>
|
79 |
-
<li>Download the PPSSPP app from the App Store on your iPhone.</li>
|
80 |
-
<li>Launch PPSSPP and tap on the gear icon at the top right corner.</li>
|
81 |
-
<li>Select Storage and enable Allow access to files.</li>
|
82 |
-
<li>Go back to the main menu and tap on Games.</li>
|
83 |
-
<li>Select Browse... and choose a PSP ISO file from your device or iCloud Drive. You can download Summertime Saga PSP ISO files from <a href="https://summertimesaga.com/download">https://summertimesaga.com/download</a>.</li>
|
84 |
-
<li>Tap on the game icon and start playing Summertime Saga on your iPhone.</li>
|
85 |
-
</ol>
|
86 |
-
<h3>iNDS</h3>
|
87 |
-
<p>iNDS is a Nintendo DS emulator that can run Summertime Saga on your iPhone with fast speed and high compatibility. It also supports auto-save, Dropbox sync, and cheat codes. To use iNDS, you need to follow these steps:</p>
|
88 |
-
<ol>
|
89 |
-
<li>Download the iNDS app from <a href="https://inds.nerd.net">https://inds.nerd.net</a> using Safari on your iPhone.</li>
|
90 |
-
<li>Tap on Install and confirm the installation.</li>
|
91 |
-
<li>Go to Settings > General > Device Management and trust the developer profile of iNDS.</li>
|
92 |
-
<li>Launch iNDS and tap on the plus icon at the top right corner.</li>
|
93 |
-
<li>Select Download from Web and enter the URL of an NDS ROM file. You can download Summertime Saga NDS ROM files from <a href="https://summertimesaga.com/download">https://summertimesaga.com/download</a>.</li>
|
94 |
-
<li>Wait for the ROM file to download and start playing Summertime Saga on your iPhone.</li>
|
95 |
-
</ol>
|
96 |
-
<h2>Tips and tricks for playing Summertime Saga on iPhone</h2>
|
97 |
-
<p>Now that you know how to download Summertime Saga on your iPhone using an emulator, here are some tips and tricks that can help you enjoy the game more:</p>
|
98 |
-
<h3>How to save and load your progress</h3>
|
99 |
-
<p>To save your progress in Summertime Saga, you need to use the in-game menu. Tap on the menu icon at the top left corner of the screen and select Save. Choose an empty slot and tap Save again. To load your progress, tap on Load and select a slot that has your saved data. You can also use the emulator's save state feature to save and load your progress at any point in the game.</p>
|
100 |
-
<h3>How to access the walkthrough and cheats</h3>
|
101 |
-
<p>If you are stuck or want to skip some parts of the game, you can use the walkthrough and cheats that are available online. The official website of Summertime Saga has a detailed walkthrough that covers all the characters, events, and endings of the game. You can access it from <a href="https://summertimesaga.com/walkthrough">https://summertimesaga.com/walkthrough</a>. You can also use cheat codes that can give you money, stats, items, or unlock scenes. You can find them from <a href="https://summertimesaga.com/cheats">https://summertimesaga.com/cheats</a>.</p>
|
102 |
-
<h3>How to customize your character and preferences</h3>
|
103 |
-
<p>To customize your character and preferences in Summertime Saga, you need to use the in-game menu. Tap on the menu icon at the top left corner of the screen and select Preferences. Here you can change your name, gender, appearance, voice, language, difficulty, sound, display, and controls. You can also enable or disable adult content, incest content, or pregnancy content according to your liking.</p>
|
104 |
-
<h2>Conclusion</h2>
|
105 |
-
<p>Summertime Saga is a fun and engaging game that you can play on your iPhone using an emulator. You can choose from various emulators that can run Android or Windows apps on your iPhone. You can also enjoy the game's graphics, sound, and touch controls on your mobile device. Moreover, you can use the walkthrough and cheats to help you with the game's storylines and events. Summertime Saga is a game that will keep you entertained and engaged for hours. If you are interested in playing Summertime Saga on your iPhone, download an emulator today and start your adventure!</p> <h2>FAQs</h2>
|
106 |
-
<p>Here are some of the frequently asked questions about Summertime Saga and how to play it on iPhone:</p>
|
107 |
-
<h3>Is Summertime Saga free to play?</h3>
|
108 |
-
<p>Yes, Summertime Saga is free to play and download. However, you can support the developers by becoming a patron on Patreon. You can get access to exclusive content, previews, polls, and more by pledging a certain amount per month. You can visit their Patreon page from <a href="https://www.patreon.com/summertimesaga">https://www.patreon.com/summertimesaga</a>.</p>
|
109 |
-
<h3>Is Summertime Saga safe to play?</h3>
|
110 |
-
<p>Summertime Saga is safe to play as long as you download it from the official website or a trusted emulator. However, you should be aware that the game contains mature content that is not suitable for minors or sensitive people. The game also has some bugs and glitches that may affect your gameplay. You should always backup your save files before playing or updating the game.</p>
|
111 |
-
<h3>How long is Summertime Saga?</h3>
|
112 |
-
<p>Summertime Saga is a long game that has multiple storylines, endings, and achievements. The game is still in development and new content is added regularly. The current version of the game has over 60 hours of gameplay. However, the length of the game may vary depending on your choices, actions, and preferences.</p>
|
113 |
-
<h3>How to update Summertime Saga on iPhone?</h3>
|
114 |
-
<p>To update Summertime Saga on your iPhone, you need to download the latest version of the game from the official website or the emulator. You can check the latest version of the game from <a href="https://summertimesaga.com/download">https://summertimesaga.com/download</a>. You can also follow their social media accounts or join their Discord server to get notified of new updates.</p>
|
115 |
-
<h3>How to delete Summertime Saga on iPhone?</h3>
|
116 |
-
<p>To delete Summertime Saga on your iPhone, you need to delete the emulator app that you used to play the game. You can also delete the ROM or ISO files that you downloaded from your device or iCloud Drive. However, if you want to keep your save files, you can transfer them to another device or cloud service before deleting the game.</p> 197e85843d<br />
|
117 |
-
<br />
|
118 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Final Destination 1 on Fzmovies A Guide to Downloading and Streaming the Classic Horror Movie.md
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Final Destination 1 on Fzmovies</h1> | <p>If you are a fan of horror movies, you might have heard of Final Destination 1, a 2000 American supernatural horror film that started a successful franchise. In this article, we will tell you what Final Destination 1 is about, why it is worth watching, and how you can download it on Fzmovies, a popular website for streaming and downloading movies for free.</p>
|
3 |
-
<h2>What is Final Destination 1?</h2>
|
4 |
-
<p>Final Destination 1 is the first installment in the Final Destination film series, which revolves around the concept of death as an inevitable force that cannot be cheated or escaped. The film was directed by James Wong, with a screenplay written by Wong, Glen Morgan, and Jeffrey Reddick, based on a story by Reddick. It stars Devon Sawa, Ali Larter, Kerr Smith, and Tony Todd.</p>
|
5 |
-
<h2>download final destination 1 on fzmovies</h2><br /><p><b><b>DOWNLOAD</b> ✸ <a href="https://jinyurl.com/2uNTAc">https://jinyurl.com/2uNTAc</a></b></p><br /><br />
|
6 |
-
<h3>A brief summary of the plot</h3>
|
7 |
-
<p>The film follows Alex Browning, a high school student who has a premonition of a plane crash that kills everyone on board. He manages to get off the plane along with six other people, but soon discovers that death is still after them. One by one, the survivors die in bizarre and gruesome accidents that seem to follow a certain pattern. Alex and his remaining friends must figure out how to stop death from claiming them.</p>
|
8 |
-
<h3>The cast and crew of the film</h3>
|
9 |
-
<p>The film features a talented cast of actors who deliver convincing performances. Devon Sawa plays Alex Browning, the protagonist who has the power of foresight. Ali Larter plays Clear Rivers, Alex's love interest and one of the survivors. Kerr Smith plays Carter Horton, Alex's rival and another survivor. Tony Todd plays William Bludworth, a mysterious mortician who knows about death's design.</p>
|
10 |
-
<p>The film was directed by James Wong, who also co-wrote the screenplay with Glen Morgan. Wong and Morgan are best known for their work on The X-Files, a popular sci-fi TV series. Jeffrey Reddick, who came up with the original story idea for Final Destination 1, also contributed to the screenplay. Reddick was inspired by a real-life plane crash that he read about in a newspaper.</p>
|
11 |
-
<p>How to download final destination 1 on fzmovies app<br />
|
12 |
-
Final destination 1 full movie download on fzmovies net<br />
|
13 |
-
Fzmovies final destination 1 free download in HD quality<br />
|
14 |
-
Watch final destination 1 online on fzmovies website<br />
|
15 |
-
Final destination 1 fzmovies download link<br />
|
16 |
-
Fzmovies skipogist final destination 2000 collection<br />
|
17 |
-
Final destination 1 movie series download from fzmovies<br />
|
18 |
-
FzStudios app for final destination 1 download<br />
|
19 |
-
Final destination 1 torrent download fzmovies<br />
|
20 |
-
Final destination 1 mp4 download fzmovies<br />
|
21 |
-
Final destination 1 subtitles download fzmovies<br />
|
22 |
-
Final destination 1 dual audio download fzmovies<br />
|
23 |
-
Final destination 1 hindi dubbed download fzmovies<br />
|
24 |
-
Final destination 1 streaming on fzmovies<br />
|
25 |
-
Final destination 1 review on fzmovies<br />
|
26 |
-
Final destination 1 cast and crew on fzmovies<br />
|
27 |
-
Final destination 1 trivia and facts on fzmovies<br />
|
28 |
-
Final destination 1 box office collection on fzmovies<br />
|
29 |
-
Final destination 1 awards and nominations on fzmovies<br />
|
30 |
-
Final destination 1 behind the scenes on fzmovies<br />
|
31 |
-
Final destination 1 deleted scenes on fzmovies<br />
|
32 |
-
Final destination 1 soundtrack download on fzmovies<br />
|
33 |
-
Final destination 1 poster and images on fzmovies<br />
|
34 |
-
Final destination 1 trailer and clips on fzmovies<br />
|
35 |
-
Final destination 1 rating and comments on fzmovies<br />
|
36 |
-
Final destination 1 genre and tags on fzmovies<br />
|
37 |
-
Final destination 1 release date and runtime on fzmovies<br />
|
38 |
-
Final destination 1 director and writer on fzmovies<br />
|
39 |
-
Final destination 1 plot and summary on fzmovies<br />
|
40 |
-
Final destination 1 sequel and prequel on fzmovies<br />
|
41 |
-
Final destination franchise download on fzmovies<br />
|
42 |
-
Download final destination movies in order on fzmovies<br />
|
43 |
-
Best final destination movie to download on fzmovies<br />
|
44 |
-
Similar movies to final destination on fzmovies<br />
|
45 |
-
Horror movies like final destination on fzmovies<br />
|
46 |
-
Thriller movies like final destination on fzmovies<br />
|
47 |
-
Mystery movies like final destination on fzmovies<br />
|
48 |
-
Death scenes in final destination movies on fzmovies<br />
|
49 |
-
Survival tips from final destination movies on fzmovies<br />
|
50 |
-
Fan theories about final destination movies on fzmovies</p>
|
51 |
-
<h3>The reception and legacy of the film</h3>
|
52 |
-
<p>Final Destination 1 was released on March 17, 2000, and became a financial success, grossing over $112 million worldwide against a budget of $23 million. The film received mixed reviews from critics, who praised its suspenseful premise and creative death scenes, but criticized its flat characters and lack of logic. The film also received some awards and nominations, such as the Saturn Award for Best Horror Film and Best Performance by a Younger Actor for Sawa.</p>
|
53 |
-
<p>The film's success spawned a media franchise that includes four sequels, a series of novels, and comic books. The sequels follow different groups of people who cheat death in various ways, such as escaping a highway pile-up or a roller coaster derailment. The franchise is known for its elaborate and gory death sequences that involve everyday objects and situations.</p>
|
54 |
-
<h2>What is Fzmovies?</h2>
|
55 |
-
<p>Fzmovies is a website that allows users to stream and download movies for free online. It has a large collection of movies from different genres and countries, such as Hollywood, Bollywood, Nollywood, etc. Users can <p>search for movies by title, genre, year, or quality. They can also request for movies that are not available on the website. Fzmovies is compatible with various devices, such as smartphones, tablets, laptops, etc.</p>
|
56 |
-
<h3>A brief introduction to the website</h3>
|
57 |
-
<p>Fzmovies was launched in 2012 and has since become one of the most popular websites for movie lovers. It offers a user-friendly interface and a fast downloading speed. It also updates its content regularly and adds new releases as soon as possible. Fzmovies has a loyal fan base that visits the website frequently and leaves positive feedback.</p>
|
58 |
-
<h3>The features and benefits of using Fzmovies</h3>
|
59 |
-
<p>Some of the features and benefits of using Fzmovies are:</p>
|
60 |
-
<ul>
|
61 |
-
<li>It is free and does not require registration or subscription.</li>
|
62 |
-
<li>It has a huge database of movies from different categories and languages.</li>
|
63 |
-
<li>It provides high-quality videos in various formats, such as MP4, 3GP, HD, etc.</li>
|
64 |
-
<li>It allows users to download movies in different sizes, depending on their preferences and data plans.</li>
|
65 |
-
<li>It supports subtitles and audio tracks for different languages.</li>
|
66 |
-
<li>It has a search engine that helps users find their desired movies easily.</li>
|
67 |
-
<li>It has a request section where users can ask for movies that are not available on the website.</li>
|
68 |
-
<li>It has a feedback section where users can rate and comment on the movies they watch.</li>
|
69 |
-
<li>It has a social media presence where users can follow and interact with the website.</li>
|
70 |
-
</ul>
|
71 |
-
<h3>The risks and challenges of using Fzmovies</h3>
|
72 |
-
<p>Despite its advantages, Fzmovies also has some risks and challenges that users should be aware of. Some of them are:</p>
|
73 |
-
<ul>
|
74 |
-
<li>It is illegal and violates the copyright laws of the movie industry. Users may face legal consequences if they are caught downloading or streaming pirated movies.</li>
|
75 |
-
<li>It may contain viruses, malware, or spyware that can harm the users' devices or steal their personal information.</li>
|
76 |
-
<li>It may have pop-up ads, banners, or redirects that can annoy the users or lead them to malicious websites.</li>
|
77 |
-
<li>It may have broken links, low-quality videos, or missing subtitles that can affect the users' viewing experience.</li>
|
78 |
-
<li>It may be blocked or banned by the authorities or internet service providers in some countries or regions.</li>
|
79 |
-
</ul>
|
80 |
-
<h2>How to download Final Destination 1 on Fzmovies?</h2>
|
81 |
-
<p>If you want to download Final Destination 1 on Fzmovies, you can follow these simple steps:</p>
|
82 |
-
<h3>The steps to follow</h3>
|
83 |
-
<ol>
|
84 |
-
<li>Go to the official website of Fzmovies at <a href="https://www.fzmovies.net/">https://www.fzmovies.net/</a>.</li>
|
85 |
-
<li>In the search box, type "Final Destination 1" and click on the search button.</li>
|
86 |
-
<li>You will see a list of results that match your query. Click on the one that says "Final Destination (2000)".</li>
|
87 |
-
<li>You will be directed to a page that shows the details of the movie, such as the genre, rating, synopsis, etc. Scroll down to the bottom of the page and click on "Download File".</li>
|
88 |
-
<li>You will see a list of download links that vary in size and quality. Choose the one that suits your needs and click on it.</li>
|
89 |
-
<li>You will be asked to verify that you are not a robot by completing a captcha. Follow the instructions and click on "Continue Download".</li>
|
90 |
-
<li>Your download will start automatically. Wait for it to finish and enjoy watching Final Destination 1 on your device.</li>
|
91 |
-
</ol>
|
92 |
-
<h3>The tips and tricks to enhance the experience</h3>
|
93 |
-
<p>To make your downloading process easier and faster, you can use some tips and tricks such as:</p>
|
94 |
-
<ul>
|
95 |
-
<li>Use a VPN service to bypass any geo-restrictions or censorship that may prevent you from accessing Fzmovies.</li>
|
96 |
-
<li>Use an ad-blocker to avoid any annoying or intrusive ads that may pop up on Fzmovies.</li>
|
97 |
-
<li>Use a download manager to resume your downloads if they are interrupted or paused due to network issues or power outages.</li>
|
98 |
-
<li>Use a video player that supports multiple formats and languages to play your downloaded movies without any hassle.</li>
|
99 |
-
</ul>
|
100 |
-
<h3>The alternatives to Fzmovies</h3>
|
101 |
-
<p>If you are looking for other websites that offer similar services as Fzmovies, you can try some of these alternatives:</p>
|
102 |
-
<ul>
|
103 |
-
<li><a href="">https://www.o2tvseries.com/</a>: A website that specializes in TV shows and series from different genres and countries.</ <li><a href="">https://www.toxicwap.com/</a>: A website that offers movies, TV shows, music, games, and ebooks for free download.</li>
|
104 |
-
<li><a href="">https://www.mp4moviez.in/</a>: A website that provides movies in MP4 format for mobile devices.</li>
|
105 |
-
<li><a href="">https://www.123movies.com/</a>: A website that allows users to stream movies and TV shows online without registration.</li>
|
106 |
-
</ul>
|
107 |
-
<h2>Conclusion</h2>
|
108 |
-
<p>In conclusion, Final Destination 1 is a thrilling and entertaining horror movie that you can download on Fzmovies for free. Fzmovies is a website that offers a large collection of movies from different genres and languages. However, you should also be aware of the risks and challenges of using Fzmovies, such as legal issues, malware, ads, etc. You can also use some tips and tricks to enhance your downloading experience, such as using a VPN, an ad-blocker, a download manager, etc. Alternatively, you can try some other websites that offer similar services as Fzmovies.</p>
|
109 |
-
<p>We hope you enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy watching!</p>
|
110 |
-
<h3>FAQs</h3>
|
111 |
-
<p>Here are some frequently asked questions about Final Destination 1 and Fzmovies:</p>
|
112 |
-
<ol>
|
113 |
-
<li>Q: Is Final Destination 1 based on a true story?<br>
|
114 |
-
A: No, Final Destination 1 is not based on a true story. However, the writer of the film, Jeffrey Reddick, was inspired by a real-life plane crash that he read about in a newspaper.</li>
|
115 |
-
<li>Q: How many movies are there in the Final Destination franchise?<br>
|
116 |
-
A: There are five movies in the Final Destination franchise: Final Destination (2000), Final Destination 2 (2003), Final Destination 3 (2006), The Final Destination (2009), and Final Destination 5 (2011).</li>
|
117 |
-
<li>Q: Is Fzmovies legal and safe?<br>
|
118 |
-
A: No, Fzmovies is not legal and safe. It violates the copyright laws of the movie industry and may contain viruses, malware, or spyware that can harm your devices or steal your personal information.</li>
|
119 |
-
<li>Q: How can I access Fzmovies if it is blocked or banned in my country or region?<br>
|
120 |
-
A: You can use a VPN service to access Fzmovies if it is blocked or banned in your country or region. A VPN service can hide your IP address and location and allow you to access any website anonymously.</li>
|
121 |
-
<li>Q: What are some other websites like Fzmovies?<br>
|
122 |
-
A: Some other websites like Fzmovies are O2tvseries, Toxicwap, Mp4moviez, and 123movies.</li>
|
123 |
-
</ol></p> 401be4b1e0<br />
|
124 |
-
<br />
|
125 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Free APK Download for Gacha Life - The Most Popular Anime Game by Lunime.md
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Gacha Life Download APK Free: How to Play and Enjoy This Anime Game</h1>
|
3 |
-
<p>If you are a fan of anime, you might have heard of Gacha Life, a popular game that lets you create your own anime characters and stories. But did you know that you can download Gacha Life APK for free and play it on your Android device? In this article, we will show you how to do that, as well as how to play and enjoy this anime game.</p>
|
4 |
-
<h2>What is Gacha Life?</h2>
|
5 |
-
<p>Gacha Life is a game developed by Lunime, a company that specializes in making anime-style games. It was released in October 2018 for Android and iOS devices, and has since gained millions of downloads and positive reviews from players around the world. But what makes Gacha Life so appealing? Here are some of the reasons:</p>
|
6 |
-
<h2>gacha life download apk free</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://jinyurl.com/2uNMmM">https://jinyurl.com/2uNMmM</a></b></p><br /><br />
|
7 |
-
<h3>A game for anime fans and casual gamers</h3>
|
8 |
-
<p>Gacha Life is a game that anime fans are sure to love because it will let you design and create your very own anime characters. You can choose from hundreds of clothing items, hairstyles, weapons, accessories, and more to dress up your characters in your favorite fashion outfits. You can also customize your characters' appearance, such as their skin color, eye color, hair color, and facial expressions. You can even mix and match different parts from different characters to create unique combinations.</p>
|
9 |
-
<p>But Gacha Life is not just a game for anime fans. It is also a game for casual gamers who just want something to play to destress and not overthink. You can play Gacha Life offline without Wi-Fi or internet connection, so you can enjoy it anytime and anywhere. You can also play mini-games within the game to earn gems, which you can use to gacha for rare items or gifts for your characters. The mini-games are simple and fun, such as Duck & Dodge, Phantom's Remix, Memory Match, and more.</p>
|
10 |
-
<h3>A game for creating and customizing characters</h3>
|
11 |
-
<p>Gacha Life is a game that lets you unleash your creativity by making an avatar that looks like you or an avatar designed according to your preferences. You can save up to 20 characters of your own design in different slots, and switch between them easily. You can also access more unique characters in the preset menu, where you can find characters created by Lunime or other players. You can also recover any character that you accidentally edited or deleted by visiting the preset menu.</p>
|
12 |
-
<p>But creating characters is not the only thing you can do in Gacha Life. You can also customize your characters' profile, where you can change or randomize their name, relationship, personality, and occupation. You can also change the background of your characters by choosing from over a hundred backgrounds available in the game. You can also zoom in or out your characters to get a better view of them.</p>
|
13 |
-
<h3>A game for making stories and scenes</h3>
|
14 |
-
<p>Gacha Life is a game that allows you to express your storytelling skills by making scenes and skits with your characters. You can use the studio mode to create your own scenes with up to 8 characters at a time. You can enter custom text for your characters and choose from many different poses and backgrounds. You can also use props such as weapons, hats, wings, tails, etc. to add more details to your scenes.</p>
|
15 |
-
<p>But studio mode is not the only way to make stories in Gacha Life. You can also use the skit maker mode to create your own stories with up to 2 characters per skit. You can easily combine multiple scenes to create sketches with dialogue and narration. Continuing the article: <h2>How to Download Gacha Life APK for Free?</h2>
|
16 |
-
<p>Gacha Life is a free game that you can download and play on your Android device. However, you need to make sure that you download the game from a safe and reliable source, as there are many fake or malicious websites that may try to trick you into downloading harmful files. Here are some of the ways to download Gacha Life APK for free:</p>
|
17 |
-
<h3>The official sources for downloading the game</h3>
|
18 |
-
<p>The best and safest way to download Gacha Life APK for free is to use the official sources provided by Lunime, the developer of the game. You can visit their website at [Lunime.com](^1^) and click on the Gacha Life banner to access the download page. You can also download the game from the Google Play Store by searching for Gacha Life or clicking on this link: [Gacha Life - Apps on Google Play](^2^). These sources will ensure that you get the latest and updated version of the game, as well as protect your device from any malware or viruses.</p>
|
19 |
-
<p>gacha life apk free download for android<br />
|
20 |
-
gacha life mod apk download free<br />
|
21 |
-
gacha life old version apk free download<br />
|
22 |
-
gacha life pc download free apk<br />
|
23 |
-
gacha life 2 apk free download<br />
|
24 |
-
gacha life apk download free latest version<br />
|
25 |
-
gacha life apk free download no ads<br />
|
26 |
-
gacha life apk free download offline<br />
|
27 |
-
gacha life apk free download full version<br />
|
28 |
-
gacha life apk free download unlimited gems<br />
|
29 |
-
gacha life apk free download 2023<br />
|
30 |
-
gacha life apk free download ios<br />
|
31 |
-
gacha life apk free download windows 10<br />
|
32 |
-
gacha life apk free download laptop<br />
|
33 |
-
gacha life apk free download chromebook<br />
|
34 |
-
gacha life apk free download uptodown<br />
|
35 |
-
gacha life apk free download apkpure<br />
|
36 |
-
gacha life apk free download android 1<br />
|
37 |
-
gacha life apk free download mod menu<br />
|
38 |
-
gacha life apk free download no verification<br />
|
39 |
-
gacha life apk free download no wifi<br />
|
40 |
-
gacha life apk free download online<br />
|
41 |
-
gacha life apk free download update<br />
|
42 |
-
gacha life apk free download hack<br />
|
43 |
-
gacha life apk free download 1.1.4<br />
|
44 |
-
gacha life anime dress up game apk free download<br />
|
45 |
-
how to get gacha life for free on android apk<br />
|
46 |
-
where can i download gacha life for free on android apk<br />
|
47 |
-
how to install gacha life on android for free apk<br />
|
48 |
-
how to play gacha life on android without downloading it for free apk<br />
|
49 |
-
best site to download gacha life for android for free apk<br />
|
50 |
-
how to update gacha life on android for free apk<br />
|
51 |
-
how to get all items in gacha life for android for free apk<br />
|
52 |
-
how to make your own character in gacha life for android for free apk<br />
|
53 |
-
how to create your own story in gacha life for android for free apk<br />
|
54 |
-
how to chat with other players in gacha life for android for free apk<br />
|
55 |
-
how to play mini games in gacha life for android for free apk<br />
|
56 |
-
how to collect gems in gacha life for android for free apk<br />
|
57 |
-
how to get rare gifts in gacha life for android for free apk<br />
|
58 |
-
how to customize your personal look in gacha life for android for free apk<br />
|
59 |
-
how to change your hairstyle, eyes, mouth, and more in gacha life for android for free apk<br />
|
60 |
-
how to enter the studio mode in gacha life for android for free apk<br />
|
61 |
-
how to enter the skit maker in gacha life for android for free apk<br />
|
62 |
-
how to enter the life mode in gacha life for android for free apk<br />
|
63 |
-
how to explore different areas with your own characters in gacha life for android for free apk<br />
|
64 |
-
how to discover new NPCs and learn more about them in gacha life for android for free apk <br />
|
65 |
-
how to restart the game if you experience lag in gacha life for android for free apk <br />
|
66 |
-
how to fix in-app-purchases issues in gacha life for android 6.0+ devices or rooted devices for free apk <br />
|
67 |
-
how to like and follow Gacha Life on Facebook and join their group for more updates and news about the game for android users who downloaded it from Google Play Store or other sources as a APK file</p>
|
68 |
-
<h3>The steps to install the game on your device</h3>
|
69 |
-
<p>Once you have downloaded the Gacha Life APK file from a trusted source, you need to follow these steps to install the game on your device:</p>
|
70 |
-
<ol>
|
71 |
-
<li>Go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.</li>
|
72 |
-
<li>Locate the Gacha Life APK file in your device's storage and tap on it to start the installation process.</li>
|
73 |
-
<li>Follow the instructions on the screen and wait for the installation to finish.</li>
|
74 |
-
<li>Launch the game and enjoy!</li>
|
75 |
-
</ol>
|
76 |
-
<p>Note: If you encounter any problems or errors during the installation, you may need to uninstall any previous versions of Gacha Life or clear your cache and data before installing the new version.</p>
|
77 |
-
<h3>The precautions to avoid malware and viruses</h3>
|
78 |
-
<p>While downloading Gacha Life APK for free is possible and easy, you also need to be careful and cautious about the potential risks and dangers of downloading files from unknown sources. Here are some of the precautions that you should take to avoid malware and viruses:</p>
|
79 |
-
<ul>
|
80 |
-
<li>Always check the reputation and reviews of the website or source that you are downloading from. Avoid any websites that look suspicious, have pop-up ads, or ask for personal information.</li>
|
81 |
-
<li>Always scan the downloaded file with a reliable antivirus or anti-malware software before opening or installing it. This will help you detect and remove any harmful files that may harm your device or steal your data.</li>
|
82 |
-
<li>Always backup your device's data before installing any new app or game. This will help you restore your device in case something goes wrong or you lose your data.</li>
|
83 |
-
</ul> Continuing the article: <h2>How to Play and Enjoy Gacha Life?</h2>
|
84 |
-
<p>Now that you have downloaded and installed Gacha Life APK for free, you might be wondering how to play and enjoy this anime game. Well, there are many things that you can do in Gacha Life, and you can explore them at your own pace and preference. Here are some of the main features of the game and some tips and tricks to help you get the most out of it:</p>
|
85 |
-
<h3>The main features of the game</h3>
|
86 |
-
<p>Gacha Life is a game that has many modes and options for you to choose from, depending on what you want to do. Here are some of the main features of the game:</p>
|
87 |
-
<table>
|
88 |
-
<tr>
|
89 |
-
<th>Feature</th>
|
90 |
-
<th>Description</th>
|
91 |
-
</tr>
|
92 |
-
<tr>
|
93 |
-
<td>Home</td>
|
94 |
-
<td>This is where you can access the other modes of the game, such as Dress Up, Studio, Gacha, Life, etc. You can also see your character's level, energy, and gems here.</td>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>Dress Up</td>
|
98 |
-
<td>This is where you can create and customize your characters. You can change their clothes, accessories, hair, eyes, etc. You can also save your characters in different slots or load them from the preset menu.</td>
|
99 |
-
</tr>
|
100 |
-
<tr>
|
101 |
-
<td>Studio</td>
|
102 |
-
<td>This is where you can make scenes and stories with your characters. You can enter custom text, choose poses and backgrounds, and use props. You can also save your scenes or load them from the gallery.</td>
|
103 |
-
</tr>
|
104 |
-
<tr>
|
105 |
-
<td>Gacha</td>
|
106 |
-
<td>This is where you can gacha for rare items or gifts for your characters. You can use gems or tickets to gacha for different types of items, such as clothing, accessories, pets, etc. You can also trade your items with other players or NPCs.</td>
|
107 |
-
</tr>
|
108 |
-
<tr>
|
109 |
-
<td>Life</td>
|
110 |
-
<td>This is where you can interact with other characters in different locations. You can talk to them, give them gifts, play mini-games with them, or ask them questions. You can also unlock new locations by increasing your friendship level with them.</td>
|
111 |
-
</tr>
|
112 |
-
<tr>
|
113 |
-
<td>Games</td>
|
114 |
-
<td>This is where you can play mini-games to earn gems or tickets. There are 8 mini-games available in the game, such as Duck & Dodge, Phantom's Remix, Memory Match, etc. You can also see your high scores and achievements here.</td>
|
115 |
-
</tr>
|
116 |
-
<tr>
|
117 |
-
<td>Chat</td>
|
118 |
-
<td>This is where you can chat with other players online. You can join different chat rooms or create your own. You can also use stickers or emojis to express yourself.</td>
|
119 |
-
</tr>
|
120 |
-
<tr>
|
121 |
-
<td>Options</td>
|
122 |
-
<td>This is where you can change the settings of the game, such as the volume, language, quality, etc. You can also see the credits or contact the support team here.</td>
|
123 |
-
</tr>
|
124 |
-
</table>
|
125 |
-
<h3>The tips and tricks to level up and earn gems</h3> Continuing the article: <p>Gacha Life is a game that requires you to level up your character and earn gems to unlock more features and items. Here are some of the tips and tricks that you can use to level up and earn gems faster:</p>
|
126 |
-
<ul>
|
127 |
-
<li>Play the mini-games regularly. The mini-games are a great way to earn gems and tickets, as well as to increase your character's energy. You can play each mini-game up to 5 times per day, and you will get more gems or tickets depending on your score. You can also get bonus gems or tickets by completing achievements or watching ads.</li>
|
128 |
-
<li>Interact with the NPCs in the Life mode. The NPCs are the characters that you can find in different locations in the Life mode. You can talk to them, give them gifts, play mini-games with them, or ask them questions. By doing so, you will increase your friendship level with them, which will unlock new locations, items, and stories. You will also get gems or gifts from them as rewards.</li>
|
129 |
-
<li>Gacha for rare items or gifts. The Gacha mode is where you can gacha for rare items or gifts for your characters. You can use gems or tickets to gacha for different types of items, such as clothing, accessories, pets, etc. The higher the rarity of the item, the more gems or tickets you will need to gacha for it. However, you will also get more experience points and level up faster by gachaing for rare items or gifts.</li>
|
130 |
-
<li>Use the chat mode to socialize with other players. The chat mode is where you can chat with other players online. You can join different chat rooms or create your own. You can also use stickers or emojis to express yourself. By chatting with other players, you will not only have fun and make friends, but also get tips and advice from them on how to play and enjoy Gacha Life.</li>
|
131 |
-
</ul>
|
132 |
-
<h3>The fun and creative ways to use the game</h3>
|
133 |
-
<p>Gacha Life is a game that offers you a lot of freedom and possibilities to use it in fun and creative ways. Here are some of the examples of how you can use the game:</p>
|
134 |
-
<ul>
|
135 |
-
<li>Make your own anime series or movies. You can use the studio mode or the skit maker mode to create your own scenes and stories with your characters. You can also record your voice or add music and sound effects to make it more realistic and immersive. You can then share your creations with other players online or on social media platforms.</li>
|
136 |
-
<li>Make your own comics or memes. You can use the studio mode or the skit maker mode to create your own comics or memes with your characters. You can also use text bubbles, stickers, emojis, or filters to make it more humorous and expressive. You can then share your creations with other players online or on social media platforms.</li>
|
137 |
-
<li>Make your own fan art or cosplay. You can use the dress up mode to create your own fan art or cosplay of your favorite anime characters or celebrities. You can also mix and match different parts from different characters to create unique combinations. You can then share your creations with other players online or on social media platforms.</li>
|
138 |
-
</ul>
|
139 |
-
<h2>Conclusion</h2>
|
140 |
-
<p>Gacha Life is a game that lets you create your own anime characters and stories. It is a game that is free to download and play on your Android device, as long as you follow the steps and precautions mentioned above. It is also a game that has many features and modes for you to explore and enjoy, as well as many ways for you to use it in fun and creative ways. If you are an anime fan or a casual gamer who likes to express yourself through games, then Gacha Life is a game that you should try out.</p>
|
141 |
-
<h2>FAQs</h2>
|
142 |
-
<p>Here are some of the frequently asked questions about Gacha Life:</p>
|
143 |
-
<ol>
|
144 |
-
<li>Is Gacha Life safe for kids?</li>
|
145 |
-
<p>Gacha Life is a game that is rated for ages 9 and up by the Google Play Store. However, some of the content in the game may not be suitable for younger kids, such as violence, profanity, suggestive themes, etc. Therefore, parental guidance and supervision are recommended when playing Gacha Life.</p>
|
146 |
-
<li>Is Gacha Life online or offline?</li>
|
147 |
-
<p>Gacha Life is a game that can be played both online and offline. You can play most of the features and modes of the game offline without Wi-Fi or internet connection, such as dress up, studio, gacha, life, etc. However, some of the features and modes require internet connection to access, such as chat, games, etc.</p>
|
148 |
-
<li>How do I update Gacha Life?</li>
|
149 |
-
<p>To update Gacha Life, you need to visit the official sources provided by Lunime, such Continuing the article: as the website or the Google Play Store and download the latest version of the game. You can also check for updates within the game by going to the options menu and tapping on the check for updates button. You may need to uninstall any previous versions of Gacha Life or clear your cache and data before installing the new version.</p>
|
150 |
-
<li>How do I delete Gacha Life?</li>
|
151 |
-
<p>To delete Gacha Life, you need to go to your device's settings and find the apps or applications menu. Then, you need to locate Gacha Life and tap on it to open its details. Then, you need to tap on the uninstall button and confirm your action. This will remove Gacha Life from your device, along with its data and files.</p>
|
152 |
-
<li>How do I contact Gacha Life support?</li>
|
153 |
-
<p>To contact Gacha Life support, you need to go to the options menu in the game and tap on the contact us button. This will open a form where you can enter your name, email, subject, and message. You can also attach a screenshot if needed. Then, you need to tap on the send button and wait for a reply from the support team.</p>
|
154 |
-
</ol></p> 401be4b1e0<br />
|
155 |
-
<br />
|
156 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/next.config.js
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
/** @type {import('next').NextConfig} */
|
2 |
-
const nextConfig = {
|
3 |
-
// output: 'export',
|
4 |
-
// assetPrefix: '.',
|
5 |
-
webpack: (config, { isServer }) => {
|
6 |
-
if (!isServer) {
|
7 |
-
config.resolve = {
|
8 |
-
...config.resolve,
|
9 |
-
fallback: {
|
10 |
-
'bufferutil': false,
|
11 |
-
'utf-8-validate': false,
|
12 |
-
http: false,
|
13 |
-
https: false,
|
14 |
-
stream: false,
|
15 |
-
// fixes proxy-agent dependencies
|
16 |
-
net: false,
|
17 |
-
dns: false,
|
18 |
-
tls: false,
|
19 |
-
assert: false,
|
20 |
-
// fixes next-i18next dependencies
|
21 |
-
path: false,
|
22 |
-
fs: false,
|
23 |
-
// fixes mapbox dependencies
|
24 |
-
events: false,
|
25 |
-
// fixes sentry dependencies
|
26 |
-
process: false
|
27 |
-
}
|
28 |
-
};
|
29 |
-
}
|
30 |
-
config.module.exprContextCritical = false;
|
31 |
-
|
32 |
-
return config;
|
33 |
-
},
|
34 |
-
}
|
35 |
-
|
36 |
-
module.exports = (...args) => {
|
37 |
-
return nextConfig
|
38 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/7hao/bingo/src/components/voice.tsx
DELETED
@@ -1,52 +0,0 @@
|
|
1 |
-
import React, { useEffect } from 'react'
|
2 |
-
import { useSetAtom } from 'jotai'
|
3 |
-
import { useBing } from '@/lib/hooks/use-bing'
|
4 |
-
import Image from 'next/image'
|
5 |
-
import VoiceIcon from '@/assets/images/voice.svg'
|
6 |
-
import VoiceButton from './ui/voice'
|
7 |
-
import { SR } from '@/lib/bots/bing/sr'
|
8 |
-
import { voiceListenAtom } from '@/state'
|
9 |
-
|
10 |
-
const sr = new SR(['发送', '清空', '退出'])
|
11 |
-
|
12 |
-
const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick<ReturnType<typeof useBing>, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => {
|
13 |
-
const setListen = useSetAtom(voiceListenAtom)
|
14 |
-
useEffect(() => {
|
15 |
-
if (sr.listening) return
|
16 |
-
sr.transcript = !isSpeaking
|
17 |
-
}, [isSpeaking])
|
18 |
-
|
19 |
-
useEffect(() => {
|
20 |
-
sr.onchange = (msg: string, command?: string) => {
|
21 |
-
switch (command) {
|
22 |
-
case '退出':
|
23 |
-
sr.stop()
|
24 |
-
break;
|
25 |
-
case '发送':
|
26 |
-
sendMessage(input)
|
27 |
-
case '清空':
|
28 |
-
setInput('')
|
29 |
-
break;
|
30 |
-
default:
|
31 |
-
setInput(input + msg)
|
32 |
-
}
|
33 |
-
}
|
34 |
-
}, [input])
|
35 |
-
|
36 |
-
const switchSR = (enable: boolean = false) => {
|
37 |
-
setListen(enable)
|
38 |
-
if (enable) {
|
39 |
-
sr.start()
|
40 |
-
} else {
|
41 |
-
sr.stop()
|
42 |
-
}
|
43 |
-
}
|
44 |
-
|
45 |
-
return sr.listening ? (
|
46 |
-
<VoiceButton onClick={() => switchSR(false)} />
|
47 |
-
) : (
|
48 |
-
<Image alt="start voice" src={VoiceIcon} width={24} className="-mt-0.5" onClick={() => switchSR(true)} />
|
49 |
-
)
|
50 |
-
};
|
51 |
-
|
52 |
-
export default Voice;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
from torch import nn
|
4 |
-
|
5 |
-
from . import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class SeperableConv2DBNActiv(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
31 |
-
super(SeperableConv2DBNActiv, self).__init__()
|
32 |
-
self.conv = nn.Sequential(
|
33 |
-
nn.Conv2d(
|
34 |
-
nin,
|
35 |
-
nin,
|
36 |
-
kernel_size=ksize,
|
37 |
-
stride=stride,
|
38 |
-
padding=pad,
|
39 |
-
dilation=dilation,
|
40 |
-
groups=nin,
|
41 |
-
bias=False,
|
42 |
-
),
|
43 |
-
nn.Conv2d(nin, nout, kernel_size=1, bias=False),
|
44 |
-
nn.BatchNorm2d(nout),
|
45 |
-
activ(),
|
46 |
-
)
|
47 |
-
|
48 |
-
def __call__(self, x):
|
49 |
-
return self.conv(x)
|
50 |
-
|
51 |
-
|
52 |
-
class Encoder(nn.Module):
|
53 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
54 |
-
super(Encoder, self).__init__()
|
55 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
56 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
|
57 |
-
|
58 |
-
def __call__(self, x):
|
59 |
-
skip = self.conv1(x)
|
60 |
-
h = self.conv2(skip)
|
61 |
-
|
62 |
-
return h, skip
|
63 |
-
|
64 |
-
|
65 |
-
class Decoder(nn.Module):
|
66 |
-
def __init__(
|
67 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
68 |
-
):
|
69 |
-
super(Decoder, self).__init__()
|
70 |
-
self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
71 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
72 |
-
|
73 |
-
def __call__(self, x, skip=None):
|
74 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
75 |
-
if skip is not None:
|
76 |
-
skip = spec_utils.crop_center(skip, x)
|
77 |
-
x = torch.cat([x, skip], dim=1)
|
78 |
-
h = self.conv(x)
|
79 |
-
|
80 |
-
if self.dropout is not None:
|
81 |
-
h = self.dropout(h)
|
82 |
-
|
83 |
-
return h
|
84 |
-
|
85 |
-
|
86 |
-
class ASPPModule(nn.Module):
|
87 |
-
def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
|
88 |
-
super(ASPPModule, self).__init__()
|
89 |
-
self.conv1 = nn.Sequential(
|
90 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
91 |
-
Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
|
92 |
-
)
|
93 |
-
self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
|
94 |
-
self.conv3 = SeperableConv2DBNActiv(
|
95 |
-
nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
|
96 |
-
)
|
97 |
-
self.conv4 = SeperableConv2DBNActiv(
|
98 |
-
nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
|
99 |
-
)
|
100 |
-
self.conv5 = SeperableConv2DBNActiv(
|
101 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
102 |
-
)
|
103 |
-
self.conv6 = SeperableConv2DBNActiv(
|
104 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
105 |
-
)
|
106 |
-
self.conv7 = SeperableConv2DBNActiv(
|
107 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
108 |
-
)
|
109 |
-
self.bottleneck = nn.Sequential(
|
110 |
-
Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
|
111 |
-
)
|
112 |
-
|
113 |
-
def forward(self, x):
|
114 |
-
_, _, h, w = x.size()
|
115 |
-
feat1 = F.interpolate(
|
116 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
117 |
-
)
|
118 |
-
feat2 = self.conv2(x)
|
119 |
-
feat3 = self.conv3(x)
|
120 |
-
feat4 = self.conv4(x)
|
121 |
-
feat5 = self.conv5(x)
|
122 |
-
feat6 = self.conv6(x)
|
123 |
-
feat7 = self.conv7(x)
|
124 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
|
125 |
-
bottle = self.bottleneck(out)
|
126 |
-
return bottle
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ADRXtractor/ADR_Xtractor/app.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from spacy import displacy
|
3 |
-
|
4 |
-
from transformers import AutoTokenizer, AutoModelForTokenClassification,pipeline
|
5 |
-
tokenizer = AutoTokenizer.from_pretrained("abhibisht89/spanbert-large-cased-finetuned-ade_corpus_v2")
|
6 |
-
model = AutoModelForTokenClassification.from_pretrained("abhibisht89/spanbert-large-cased-finetuned-ade_corpus_v2").to('cpu')
|
7 |
-
adr_ner_model = pipeline(task="ner", model=model, tokenizer=tokenizer,grouped_entities=True)
|
8 |
-
|
9 |
-
def get_adr_from_text(sentence):
|
10 |
-
tokens = adr_ner_model(sentence)
|
11 |
-
entities = []
|
12 |
-
|
13 |
-
for token in tokens:
|
14 |
-
label = token["entity_group"]
|
15 |
-
if label != "O":
|
16 |
-
token["label"] = label
|
17 |
-
entities.append(token)
|
18 |
-
|
19 |
-
params = [{"text": sentence,
|
20 |
-
"ents": entities,
|
21 |
-
"title": None}]
|
22 |
-
|
23 |
-
html = displacy.render(params, style="ent", manual=True, options={
|
24 |
-
"colors": {
|
25 |
-
"DRUG": "#f08080",
|
26 |
-
"ADR": "#9bddff",
|
27 |
-
},
|
28 |
-
})
|
29 |
-
return html
|
30 |
-
|
31 |
-
exp=["Abortion, miscarriage or uterine hemorrhage associated with misoprostol (Cytotec), a labor-inducing drug.",
|
32 |
-
"Addiction to many sedatives and analgesics, such as diazepam, morphine, etc.",
|
33 |
-
"Birth defects associated with thalidomide",
|
34 |
-
"Bleeding of the intestine associated with aspirin therapy",
|
35 |
-
"Cardiovascular disease associated with COX-2 inhibitors (i.e. Vioxx)",
|
36 |
-
"Deafness and kidney failure associated with gentamicin (an antibiotic)",
|
37 |
-
"Having fever after taking paracetamol"]
|
38 |
-
|
39 |
-
desc="An adverse drug reaction (ADR) can be defined as an appreciably harmful or unpleasant reaction resulting from an intervention related to the use of a medicinal product.\
|
40 |
-
The goal of this project is to extracts the adverse drug reaction from unstructured text with the Drug."
|
41 |
-
|
42 |
-
inp=gr.inputs.Textbox(lines=5, placeholder=None, default="", label="text to extract adverse drug reaction and drug mention")
|
43 |
-
out=gr.outputs.HTML(label=None)
|
44 |
-
|
45 |
-
iface = gr.Interface(fn=get_adr_from_text, inputs=inp, outputs=out,examples=exp,article=desc,title="Adverse Drug Reaction Xtractor",theme="huggingface",layout='horizontal')
|
46 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/build_vocab.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
from tqdm import tqdm
|
3 |
-
import logging
|
4 |
-
import pickle
|
5 |
-
from collections import Counter
|
6 |
-
import re
|
7 |
-
import fire
|
8 |
-
|
9 |
-
|
10 |
-
class Vocabulary(object):
|
11 |
-
"""Simple vocabulary wrapper."""
|
12 |
-
def __init__(self):
|
13 |
-
self.word2idx = {}
|
14 |
-
self.idx2word = {}
|
15 |
-
self.idx = 0
|
16 |
-
|
17 |
-
def add_word(self, word):
|
18 |
-
if not word in self.word2idx:
|
19 |
-
self.word2idx[word] = self.idx
|
20 |
-
self.idx2word[self.idx] = word
|
21 |
-
self.idx += 1
|
22 |
-
|
23 |
-
def __call__(self, word):
|
24 |
-
if not word in self.word2idx:
|
25 |
-
return self.word2idx["<unk>"]
|
26 |
-
return self.word2idx[word]
|
27 |
-
|
28 |
-
def __getitem__(self, word_id):
|
29 |
-
return self.idx2word[word_id]
|
30 |
-
|
31 |
-
def __len__(self):
|
32 |
-
return len(self.word2idx)
|
33 |
-
|
34 |
-
|
35 |
-
def build_vocab(input_json: str,
|
36 |
-
threshold: int,
|
37 |
-
keep_punctuation: bool,
|
38 |
-
host_address: str,
|
39 |
-
character_level: bool = False,
|
40 |
-
zh: bool = True ):
|
41 |
-
"""Build vocabulary from csv file with a given threshold to drop all counts < threshold
|
42 |
-
|
43 |
-
Args:
|
44 |
-
input_json(string): Preprossessed json file. Structure like this:
|
45 |
-
{
|
46 |
-
'audios': [
|
47 |
-
{
|
48 |
-
'audio_id': 'xxx',
|
49 |
-
'captions': [
|
50 |
-
{
|
51 |
-
'caption': 'xxx',
|
52 |
-
'cap_id': 'xxx'
|
53 |
-
}
|
54 |
-
]
|
55 |
-
},
|
56 |
-
...
|
57 |
-
]
|
58 |
-
}
|
59 |
-
threshold (int): Threshold to drop all words with counts < threshold
|
60 |
-
keep_punctuation (bool): Includes or excludes punctuation.
|
61 |
-
|
62 |
-
Returns:
|
63 |
-
vocab (Vocab): Object with the processed vocabulary
|
64 |
-
"""
|
65 |
-
data = json.load(open(input_json, "r"))["audios"]
|
66 |
-
counter = Counter()
|
67 |
-
pretokenized = "tokens" in data[0]["captions"][0]
|
68 |
-
|
69 |
-
if zh:
|
70 |
-
from nltk.parse.corenlp import CoreNLPParser
|
71 |
-
from zhon.hanzi import punctuation
|
72 |
-
if not pretokenized:
|
73 |
-
parser = CoreNLPParser(host_address)
|
74 |
-
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
|
75 |
-
for cap_idx in range(len(data[audio_idx]["captions"])):
|
76 |
-
if pretokenized:
|
77 |
-
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
|
78 |
-
else:
|
79 |
-
caption = data[audio_idx]["captions"][cap_idx]["caption"]
|
80 |
-
# Remove all punctuations
|
81 |
-
if not keep_punctuation:
|
82 |
-
caption = re.sub("[{}]".format(punctuation), "", caption)
|
83 |
-
if character_level:
|
84 |
-
tokens = list(caption)
|
85 |
-
else:
|
86 |
-
tokens = list(parser.tokenize(caption))
|
87 |
-
data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
|
88 |
-
counter.update(tokens)
|
89 |
-
else:
|
90 |
-
if pretokenized:
|
91 |
-
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
|
92 |
-
for cap_idx in range(len(data[audio_idx]["captions"])):
|
93 |
-
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
|
94 |
-
counter.update(tokens)
|
95 |
-
else:
|
96 |
-
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
|
97 |
-
captions = {}
|
98 |
-
for audio_idx in range(len(data)):
|
99 |
-
audio_id = data[audio_idx]["audio_id"]
|
100 |
-
captions[audio_id] = []
|
101 |
-
for cap_idx in range(len(data[audio_idx]["captions"])):
|
102 |
-
caption = data[audio_idx]["captions"][cap_idx]["caption"]
|
103 |
-
captions[audio_id].append({
|
104 |
-
"audio_id": audio_id,
|
105 |
-
"id": cap_idx,
|
106 |
-
"caption": caption
|
107 |
-
})
|
108 |
-
tokenizer = PTBTokenizer()
|
109 |
-
captions = tokenizer.tokenize(captions)
|
110 |
-
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
|
111 |
-
audio_id = data[audio_idx]["audio_id"]
|
112 |
-
for cap_idx in range(len(data[audio_idx]["captions"])):
|
113 |
-
tokens = captions[audio_id][cap_idx]
|
114 |
-
data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
|
115 |
-
counter.update(tokens.split(" "))
|
116 |
-
|
117 |
-
if not pretokenized:
|
118 |
-
json.dump({ "audios": data }, open(input_json, "w"), indent=4, ensure_ascii=not zh)
|
119 |
-
words = [word for word, cnt in counter.items() if cnt >= threshold]
|
120 |
-
|
121 |
-
# Create a vocab wrapper and add some special tokens.
|
122 |
-
vocab = Vocabulary()
|
123 |
-
vocab.add_word("<pad>")
|
124 |
-
vocab.add_word("<start>")
|
125 |
-
vocab.add_word("<end>")
|
126 |
-
vocab.add_word("<unk>")
|
127 |
-
|
128 |
-
# Add the words to the vocabulary.
|
129 |
-
for word in words:
|
130 |
-
vocab.add_word(word)
|
131 |
-
return vocab
|
132 |
-
|
133 |
-
|
134 |
-
def process(input_json: str,
|
135 |
-
output_file: str,
|
136 |
-
threshold: int = 1,
|
137 |
-
keep_punctuation: bool = False,
|
138 |
-
character_level: bool = False,
|
139 |
-
host_address: str = "http://localhost:9000",
|
140 |
-
zh: bool = False):
|
141 |
-
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
|
142 |
-
logging.basicConfig(level=logging.INFO, format=logfmt)
|
143 |
-
logging.info("Build Vocab")
|
144 |
-
vocabulary = build_vocab(
|
145 |
-
input_json=input_json, threshold=threshold, keep_punctuation=keep_punctuation,
|
146 |
-
host_address=host_address, character_level=character_level, zh=zh)
|
147 |
-
pickle.dump(vocabulary, open(output_file, "wb"))
|
148 |
-
logging.info("Total vocabulary size: {}".format(len(vocabulary)))
|
149 |
-
logging.info("Saved vocab to '{}'".format(output_file))
|
150 |
-
|
151 |
-
|
152 |
-
if __name__ == '__main__':
|
153 |
-
fire.Fire(process)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py
DELETED
@@ -1,256 +0,0 @@
|
|
1 |
-
|
2 |
-
import random
|
3 |
-
import torchaudio
|
4 |
-
from torch._six import string_classes
|
5 |
-
import collections
|
6 |
-
import re
|
7 |
-
import torch.nn.functional as F
|
8 |
-
import numpy as np
|
9 |
-
from transformers import AutoTokenizer
|
10 |
-
from wav_evaluation.models.utils import read_config_as_args
|
11 |
-
from wav_evaluation.models.clap import CLAP
|
12 |
-
import math
|
13 |
-
import torchaudio.transforms as T
|
14 |
-
import os
|
15 |
-
import torch
|
16 |
-
from importlib_resources import files
|
17 |
-
|
18 |
-
|
19 |
-
class CLAPWrapper():
|
20 |
-
"""
|
21 |
-
A class for interfacing CLAP model.
|
22 |
-
"""
|
23 |
-
|
24 |
-
def __init__(self, model_fp,config_path, use_cuda=False):
|
25 |
-
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
|
26 |
-
self.file_path = os.path.realpath(__file__)
|
27 |
-
self.default_collate_err_msg_format = (
|
28 |
-
"default_collate: batch must contain tensors, numpy arrays, numbers, "
|
29 |
-
"dicts or lists; found {}")
|
30 |
-
with open(config_path,'r') as f:
|
31 |
-
self.config_as_str = f.read()
|
32 |
-
self.model_fp = model_fp
|
33 |
-
self.use_cuda = use_cuda
|
34 |
-
self.clap, self.tokenizer, self.args = self.load_clap()
|
35 |
-
|
36 |
-
def load_clap(self):
|
37 |
-
r"""Load CLAP model with args from config file"""
|
38 |
-
|
39 |
-
args = read_config_as_args(self.config_as_str, is_config_str=True)
|
40 |
-
|
41 |
-
if 'bert' in args.text_model:
|
42 |
-
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
|
43 |
-
else:
|
44 |
-
self.token_keys = ['input_ids', 'attention_mask']
|
45 |
-
|
46 |
-
clap = CLAP(
|
47 |
-
audioenc_name=args.audioenc_name,
|
48 |
-
sample_rate=args.sampling_rate,
|
49 |
-
window_size=args.window_size,
|
50 |
-
hop_size=args.hop_size,
|
51 |
-
mel_bins=args.mel_bins,
|
52 |
-
fmin=args.fmin,
|
53 |
-
fmax=args.fmax,
|
54 |
-
classes_num=args.num_classes,
|
55 |
-
out_emb=args.out_emb,
|
56 |
-
text_model=args.text_model,
|
57 |
-
transformer_embed_dim=args.transformer_embed_dim,
|
58 |
-
d_proj=args.d_proj
|
59 |
-
)
|
60 |
-
|
61 |
-
|
62 |
-
# Load pretrained weights for model
|
63 |
-
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
|
64 |
-
clap.load_state_dict(model_state_dict)
|
65 |
-
clap.eval() # set clap in eval mode
|
66 |
-
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
|
67 |
-
|
68 |
-
if self.use_cuda and torch.cuda.is_available():
|
69 |
-
clap = clap.cuda()
|
70 |
-
|
71 |
-
return clap, tokenizer, args
|
72 |
-
|
73 |
-
def default_collate(self, batch):
|
74 |
-
r"""Puts each data field into a tensor with outer dimension batch size"""
|
75 |
-
elem = batch[0]
|
76 |
-
elem_type = type(elem)
|
77 |
-
if isinstance(elem, torch.Tensor):
|
78 |
-
out = None
|
79 |
-
if torch.utils.data.get_worker_info() is not None:
|
80 |
-
# If we're in a background process, concatenate directly into a
|
81 |
-
# shared memory tensor to avoid an extra copy
|
82 |
-
numel = sum([x.numel() for x in batch])
|
83 |
-
storage = elem.storage()._new_shared(numel)
|
84 |
-
out = elem.new(storage)
|
85 |
-
return torch.stack(batch, 0, out=out)
|
86 |
-
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
|
87 |
-
and elem_type.__name__ != 'string_':
|
88 |
-
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
|
89 |
-
# array of string classes and object
|
90 |
-
if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
|
91 |
-
raise TypeError(
|
92 |
-
self.default_collate_err_msg_format.format(elem.dtype))
|
93 |
-
|
94 |
-
return self.default_collate([torch.as_tensor(b) for b in batch])
|
95 |
-
elif elem.shape == (): # scalars
|
96 |
-
return torch.as_tensor(batch)
|
97 |
-
elif isinstance(elem, float):
|
98 |
-
return torch.tensor(batch, dtype=torch.float64)
|
99 |
-
elif isinstance(elem, int):
|
100 |
-
return torch.tensor(batch)
|
101 |
-
elif isinstance(elem, string_classes):
|
102 |
-
return batch
|
103 |
-
elif isinstance(elem, collections.abc.Mapping):
|
104 |
-
return {key: self.default_collate([d[key] for d in batch]) for key in elem}
|
105 |
-
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
|
106 |
-
return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
|
107 |
-
elif isinstance(elem, collections.abc.Sequence):
|
108 |
-
# check to make sure that the elements in batch have consistent size
|
109 |
-
it = iter(batch)
|
110 |
-
elem_size = len(next(it))
|
111 |
-
if not all(len(elem) == elem_size for elem in it):
|
112 |
-
raise RuntimeError(
|
113 |
-
'each element in list of batch should be of equal size')
|
114 |
-
transposed = zip(*batch)
|
115 |
-
return [self.default_collate(samples) for samples in transposed]
|
116 |
-
|
117 |
-
raise TypeError(self.default_collate_err_msg_format.format(elem_type))
|
118 |
-
|
119 |
-
def resample_and_duration(self,wav_sr,audio_duration,resample=False):
|
120 |
-
audio_time_series,sample_rate = wav_sr
|
121 |
-
resample_rate = self.args.sampling_rate
|
122 |
-
if resample:
|
123 |
-
resampler = T.Resample(sample_rate, resample_rate)
|
124 |
-
audio_time_series = resampler(audio_time_series)
|
125 |
-
audio_time_series = audio_time_series.reshape(-1)
|
126 |
-
|
127 |
-
# audio_time_series is shorter than predefined audio duration,
|
128 |
-
# so audio_time_series is extended
|
129 |
-
if audio_duration*sample_rate >= audio_time_series.shape[0]:
|
130 |
-
repeat_factor = int(np.ceil((audio_duration*sample_rate) /
|
131 |
-
audio_time_series.shape[0]))
|
132 |
-
# Repeat audio_time_series by repeat_factor to match audio_duration
|
133 |
-
audio_time_series = audio_time_series.repeat(repeat_factor)
|
134 |
-
# remove excess part of audio_time_series
|
135 |
-
audio_time_series = audio_time_series[0:audio_duration*sample_rate]
|
136 |
-
else:
|
137 |
-
# audio_time_series is longer than predefined audio duration,
|
138 |
-
# so audio_time_series is trimmed
|
139 |
-
start_index = random.randrange(
|
140 |
-
audio_time_series.shape[0] - audio_duration*sample_rate)
|
141 |
-
audio_time_series = audio_time_series[start_index:start_index +
|
142 |
-
audio_duration*sample_rate]
|
143 |
-
return torch.FloatTensor(audio_time_series)
|
144 |
-
|
145 |
-
def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
|
146 |
-
r"""Loads audio file and returns raw audio."""
|
147 |
-
# Randomly sample a segment of audio_duration from the clip or pad to match duration
|
148 |
-
audio_time_series, sample_rate = torchaudio.load(audio_path)
|
149 |
-
return self.resample_and_duration((audio_time_series, sample_rate),audio_duration,resample)
|
150 |
-
|
151 |
-
def preprocess_audio(self, audio_files, resample):
|
152 |
-
r"""Load list of audio files and return raw audio"""
|
153 |
-
audio_tensors = []
|
154 |
-
for audio_file in audio_files:
|
155 |
-
if isinstance(audio_file,str):
|
156 |
-
audio_tensor = self.load_audio_into_tensor(audio_file, self.args.duration, resample)
|
157 |
-
elif isinstance(audio_file,tuple):
|
158 |
-
audio_tensor = self.resample_and_duration(audio_file, self.args.duration, resample)
|
159 |
-
else:
|
160 |
-
raise TypeError(f"type of audiofile is {type(audio_file)},which is not supported")
|
161 |
-
audio_tensor = audio_tensor.reshape(
|
162 |
-
1, -1).cuda() if self.use_cuda and torch.cuda.is_available() else audio_tensor.reshape(1, -1)
|
163 |
-
audio_tensors.append(audio_tensor)
|
164 |
-
return self.default_collate(audio_tensors)
|
165 |
-
|
166 |
-
def preprocess_text(self, text_queries):
|
167 |
-
r"""Load list of class labels and return tokenized text"""
|
168 |
-
tokenized_texts = []
|
169 |
-
for ttext in text_queries:
|
170 |
-
tok = self.tokenizer.encode_plus(
|
171 |
-
text=ttext, add_special_tokens=True, max_length=self.args.text_len, padding="max_length", return_tensors="pt") # max_length=self.args.text_len, padding=True,
|
172 |
-
for key in self.token_keys:
|
173 |
-
tok[key] = tok[key].reshape(-1).cuda() if self.use_cuda and torch.cuda.is_available() else tok[key].reshape(-1)
|
174 |
-
tokenized_texts.append(tok)
|
175 |
-
return self.default_collate(tokenized_texts)
|
176 |
-
|
177 |
-
def get_text_embeddings(self, class_labels):
|
178 |
-
r"""Load list of class labels and return text embeddings"""
|
179 |
-
preprocessed_text = self.preprocess_text(class_labels)
|
180 |
-
text_embeddings = self._get_text_embeddings(preprocessed_text)
|
181 |
-
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
|
182 |
-
return text_embeddings
|
183 |
-
|
184 |
-
def get_audio_embeddings(self, audio_files, resample):
|
185 |
-
r"""Load list of audio files and return a audio embeddings"""
|
186 |
-
preprocessed_audio = self.preprocess_audio(audio_files, resample)
|
187 |
-
audio_embeddings = self._get_audio_embeddings(preprocessed_audio)
|
188 |
-
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
|
189 |
-
return audio_embeddings
|
190 |
-
|
191 |
-
def _get_text_embeddings(self, preprocessed_text):
|
192 |
-
r"""Load preprocessed text and return text embeddings"""
|
193 |
-
with torch.no_grad():
|
194 |
-
text_embeddings = self.clap.caption_encoder(preprocessed_text)
|
195 |
-
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
|
196 |
-
return text_embeddings
|
197 |
-
|
198 |
-
def _get_audio_embeddings(self, preprocessed_audio):
|
199 |
-
r"""Load preprocessed audio and return a audio embeddings"""
|
200 |
-
with torch.no_grad():
|
201 |
-
preprocessed_audio = preprocessed_audio.reshape(
|
202 |
-
preprocessed_audio.shape[0], preprocessed_audio.shape[2])
|
203 |
-
#Append [0] the audio emebdding, [1] has output class probabilities
|
204 |
-
audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0]
|
205 |
-
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
|
206 |
-
return audio_embeddings
|
207 |
-
|
208 |
-
def compute_similarity(self, audio_embeddings, text_embeddings,use_logit_scale = True):
|
209 |
-
r"""Compute similarity between text and audio embeddings"""
|
210 |
-
if use_logit_scale:
|
211 |
-
logit_scale = self.clap.logit_scale.exp()
|
212 |
-
similarity = logit_scale*text_embeddings @ audio_embeddings.T
|
213 |
-
else:
|
214 |
-
similarity = text_embeddings @ audio_embeddings.T
|
215 |
-
return similarity.T
|
216 |
-
|
217 |
-
def cal_clap_score(self,txt,audio_path):
|
218 |
-
text_embeddings = self.get_text_embeddings([txt])# 经过了norm的embedding
|
219 |
-
audio_embeddings = self.get_audio_embeddings([audio_path], resample=True)# 这一步比较耗时,读取音频并重采样到44100
|
220 |
-
score = self.compute_similarity(audio_embeddings, text_embeddings,use_logit_scale=False).squeeze().cpu().numpy()
|
221 |
-
return score
|
222 |
-
|
223 |
-
def _generic_batch_inference(self, func, *args):
|
224 |
-
r"""Process audio and/or text per batch"""
|
225 |
-
input_tmp = args[0]
|
226 |
-
batch_size = args[-1]
|
227 |
-
# args[0] has audio_files, args[1] has class_labels
|
228 |
-
inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
|
229 |
-
args0_len = len(args[0])
|
230 |
-
# compute text_embeddings once for all the audio_files batches
|
231 |
-
if len(inputs) == 2:
|
232 |
-
text_embeddings = self.get_text_embeddings(args[1])
|
233 |
-
inputs = [args[0], args[1], text_embeddings]
|
234 |
-
dataset_idx = 0
|
235 |
-
for _ in range(math.ceil(args0_len/batch_size)):
|
236 |
-
next_batch_idx = dataset_idx + batch_size
|
237 |
-
# batch size is bigger than available audio/text items
|
238 |
-
if next_batch_idx >= args0_len:
|
239 |
-
inputs[0] = input_tmp[dataset_idx:]
|
240 |
-
return func(*tuple(inputs))
|
241 |
-
else:
|
242 |
-
inputs[0] = input_tmp[dataset_idx:next_batch_idx]
|
243 |
-
yield func(*tuple(inputs))
|
244 |
-
dataset_idx = next_batch_idx
|
245 |
-
|
246 |
-
def get_audio_embeddings_per_batch(self, audio_files, batch_size):
|
247 |
-
r"""Load preprocessed audio and return a audio embeddings per batch"""
|
248 |
-
return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
|
249 |
-
|
250 |
-
def get_text_embeddings_per_batch(self, class_labels, batch_size):
|
251 |
-
r"""Load preprocessed text and return text embeddings per batch"""
|
252 |
-
return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
|
253 |
-
|
254 |
-
def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
|
255 |
-
r"""Compute classification probabilities for each audio recording in a batch and each class label"""
|
256 |
-
return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/g4f/Provider/Providers/Vercel.py
DELETED
@@ -1,162 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
-
import base64
|
4 |
-
import execjs
|
5 |
-
import queue
|
6 |
-
import threading
|
7 |
-
|
8 |
-
from curl_cffi import requests
|
9 |
-
from ...typing import sha256, Dict, get_type_hints
|
10 |
-
|
11 |
-
url = 'https://play.vercel.ai'
|
12 |
-
supports_stream = True
|
13 |
-
needs_auth = False
|
14 |
-
|
15 |
-
models = {
|
16 |
-
'claude-instant-v1': 'anthropic:claude-instant-v1',
|
17 |
-
'claude-v1': 'anthropic:claude-v1',
|
18 |
-
'alpaca-7b': 'replicate:replicate/alpaca-7b',
|
19 |
-
'stablelm-tuned-alpha-7b': 'replicate:stability-ai/stablelm-tuned-alpha-7b',
|
20 |
-
'bloom': 'huggingface:bigscience/bloom',
|
21 |
-
'bloomz': 'huggingface:bigscience/bloomz',
|
22 |
-
'flan-t5-xxl': 'huggingface:google/flan-t5-xxl',
|
23 |
-
'flan-ul2': 'huggingface:google/flan-ul2',
|
24 |
-
'gpt-neox-20b': 'huggingface:EleutherAI/gpt-neox-20b',
|
25 |
-
'oasst-sft-4-pythia-12b-epoch-3.5': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
|
26 |
-
'santacoder': 'huggingface:bigcode/santacoder',
|
27 |
-
'command-medium-nightly': 'cohere:command-medium-nightly',
|
28 |
-
'command-xlarge-nightly': 'cohere:command-xlarge-nightly',
|
29 |
-
'code-cushman-001': 'openai:code-cushman-001',
|
30 |
-
'code-davinci-002': 'openai:code-davinci-002',
|
31 |
-
'gpt-3.5-turbo': 'openai:gpt-3.5-turbo',
|
32 |
-
'text-ada-001': 'openai:text-ada-001',
|
33 |
-
'text-babbage-001': 'openai:text-babbage-001',
|
34 |
-
'text-curie-001': 'openai:text-curie-001',
|
35 |
-
'text-davinci-002': 'openai:text-davinci-002',
|
36 |
-
'text-davinci-003': 'openai:text-davinci-003'
|
37 |
-
}
|
38 |
-
model = models.keys()
|
39 |
-
|
40 |
-
vercel_models = {'anthropic:claude-instant-v1': {'id': 'anthropic:claude-instant-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-instant-v1'}, 'anthropic:claude-v1': {'id': 'anthropic:claude-v1', 'provider': 'anthropic', 'providerHumanName': 'Anthropic', 'makerHumanName': 'Anthropic', 'minBillingTier': 'hobby', 'parameters': {'temperature': {'value': 1, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': ['\n\nHuman:'], 'range': []}}, 'name': 'claude-v1'}, 'replicate:replicate/alpaca-7b': {'id': 'replicate:replicate/alpaca-7b', 'provider': 'replicate', 'providerHumanName': 'Replicate', 'makerHumanName': 'Stanford', 'parameters': {'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '2014ee1247354f2e81c0b3650d71ca715bc1e610189855f134c30ecb841fae21', 'name': 'alpaca-7b'}, 'replicate:stability-ai/stablelm-tuned-alpha-7b': {'id': 'replicate:stability-ai/stablelm-tuned-alpha-7b', 'provider': 'replicate', 'makerHumanName': 'StabilityAI', 'providerHumanName': 'Replicate', 'parameters': 
{'temperature': {'value': 0.75, 'range': [0.01, 5]}, 'maximumLength': {'value': 200, 'range': [50, 512]}, 'topP': {'value': 0.95, 'range': [0.01, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'repetitionPenalty': {'value': 1.1765, 'range': [0.01, 5]}, 'stopSequences': {'value': [], 'range': []}}, 'version': '4a9a32b4fd86c2d047f1d271fa93972683ec6ef1cf82f402bd021f267330b50b', 'name': 'stablelm-tuned-alpha-7b'}, 'huggingface:bigscience/bloom': {'id': 'huggingface:bigscience/bloom', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': "Do NOT talk to Bloom as an entity, it's not a chatbot but a webpage/blog/article completion model. For the best results: mimic a few words of a webpage similar to the content you want to generate. Start a sentence as if YOU were writing a blog, webpage, math post, coding article and Bloom will generate a coherent follow-up.", 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloom'}, 'huggingface:bigscience/bloomz': {'id': 'huggingface:bigscience/bloomz', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigScience', 'instructions': 'We recommend using the model to perform tasks expressed in natural language. 
For example, given the prompt "Translate to English: Je t\'aime.", the model will most likely answer "I love you.".', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'bloomz'}, 'huggingface:google/flan-t5-xxl': {'id': 'huggingface:google/flan-t5-xxl', 'provider': 'huggingface', 'makerHumanName': 'Google', 'providerHumanName': 'HuggingFace', 'name': 'flan-t5-xxl', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}}, 'huggingface:google/flan-ul2': {'id': 'huggingface:google/flan-ul2', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'Google', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'flan-ul2'}, 'huggingface:EleutherAI/gpt-neox-20b': {'id': 'huggingface:EleutherAI/gpt-neox-20b', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'EleutherAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-neox-20b'}, 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', 'provider': 'huggingface', 
'providerHumanName': 'HuggingFace', 'makerHumanName': 'OpenAssistant', 'parameters': {'maximumLength': {'value': 200, 'range': [50, 1024]}, 'typicalP': {'value': 0.2, 'range': [0.1, 0.99]}, 'repetitionPenalty': {'value': 1, 'range': [0.1, 2]}}, 'name': 'oasst-sft-4-pythia-12b-epoch-3.5'}, 'huggingface:bigcode/santacoder': {
|
41 |
-
'id': 'huggingface:bigcode/santacoder', 'provider': 'huggingface', 'providerHumanName': 'HuggingFace', 'makerHumanName': 'BigCode', 'instructions': 'The model was trained on GitHub code. As such it is not an instruction model and commands like "Write a function that computes the square root." do not work well. You should phrase commands like they occur in source code such as comments (e.g. # the following function computes the sqrt) or write a function signature and docstring and let the model complete the function body.', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 0.95, 'range': [0.01, 0.99]}, 'topK': {'value': 4, 'range': [1, 500]}, 'repetitionPenalty': {'value': 1.03, 'range': [0.1, 2]}}, 'name': 'santacoder'}, 'cohere:command-medium-nightly': {'id': 'cohere:command-medium-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-medium-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'cohere:command-xlarge-nightly': {'id': 'cohere:command-xlarge-nightly', 'provider': 'cohere', 'providerHumanName': 'Cohere', 'makerHumanName': 'Cohere', 'name': 'command-xlarge-nightly', 'parameters': {'temperature': {'value': 0.9, 'range': [0, 2]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0, 1]}, 'topK': {'value': 0, 'range': [0, 500]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:gpt-4': {'id': 'openai:gpt-4', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 
'OpenAI', 'name': 'gpt-4', 'minBillingTier': 'pro', 'parameters': {'temperature': {'value': 0.7, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:code-cushman-001': {'id': 'openai:code-cushman-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-cushman-001'}, 'openai:code-davinci-002': {'id': 'openai:code-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'code-davinci-002'}, 'openai:gpt-3.5-turbo': {'id': 'openai:gpt-3.5-turbo', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'parameters': {'temperature': {'value': 0.7, 'range': [0, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'topK': {'value': 1, 'range': [1, 500]}, 'presencePenalty': {'value': 1, 'range': [0, 1]}, 'frequencyPenalty': {'value': 1, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}, 'name': 'gpt-3.5-turbo'}, 'openai:text-ada-001': {'id': 'openai:text-ada-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-ada-001', 'parameters': 
{'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-babbage-001': {'id': 'openai:text-babbage-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-babbage-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-curie-001': {'id': 'openai:text-curie-001', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-curie-001', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-002': {'id': 'openai:text-davinci-002', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-002', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': [50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}, 'openai:text-davinci-003': {'id': 'openai:text-davinci-003', 'provider': 'openai', 'providerHumanName': 'OpenAI', 'makerHumanName': 'OpenAI', 'name': 'text-davinci-003', 'parameters': {'temperature': {'value': 0.5, 'range': [0.1, 1]}, 'maximumLength': {'value': 200, 'range': 
[50, 1024]}, 'topP': {'value': 1, 'range': [0.1, 1]}, 'presencePenalty': {'value': 0, 'range': [0, 1]}, 'frequencyPenalty': {'value': 0, 'range': [0, 1]}, 'stopSequences': {'value': [], 'range': []}}}}
|
42 |
-
|
43 |
-
|
44 |
-
# based on https://github.com/ading2210/vercel-llm-api // modified
|
45 |
-
class Client:
|
46 |
-
def __init__(self):
|
47 |
-
self.session = requests.Session()
|
48 |
-
self.headers = {
|
49 |
-
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110 Safari/537.36',
|
50 |
-
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
|
51 |
-
'Accept-Encoding': 'gzip, deflate, br',
|
52 |
-
'Accept-Language': 'en-US,en;q=0.5',
|
53 |
-
'Te': 'trailers',
|
54 |
-
'Upgrade-Insecure-Requests': '1'
|
55 |
-
}
|
56 |
-
self.session.headers.update(self.headers)
|
57 |
-
|
58 |
-
def get_token(self):
|
59 |
-
b64 = self.session.get('https://sdk.vercel.ai/openai.jpeg').text
|
60 |
-
data = json.loads(base64.b64decode(b64))
|
61 |
-
|
62 |
-
code = 'const globalThis = {data: `sentinel`}; function token() {return (%s)(%s)}' % (
|
63 |
-
data['c'], data['a'])
|
64 |
-
|
65 |
-
token_string = json.dumps(separators=(',', ':'),
|
66 |
-
obj={'r': execjs.compile(code).call('token'), 't': data['t']})
|
67 |
-
|
68 |
-
return base64.b64encode(token_string.encode()).decode()
|
69 |
-
|
70 |
-
def get_default_params(self, model_id):
|
71 |
-
return {key: param['value'] for key, param in vercel_models[model_id]['parameters'].items()}
|
72 |
-
|
73 |
-
def generate(self, model_id: str, prompt: str, params: dict = {}):
|
74 |
-
if not ':' in model_id:
|
75 |
-
model_id = models[model_id]
|
76 |
-
|
77 |
-
defaults = self.get_default_params(model_id)
|
78 |
-
|
79 |
-
payload = defaults | params | {
|
80 |
-
'prompt': prompt,
|
81 |
-
'model': model_id,
|
82 |
-
}
|
83 |
-
|
84 |
-
headers = self.headers | {
|
85 |
-
'Accept-Encoding': 'gzip, deflate, br',
|
86 |
-
'Custom-Encoding': self.get_token(),
|
87 |
-
'Host': 'sdk.vercel.ai',
|
88 |
-
'Origin': 'https://sdk.vercel.ai',
|
89 |
-
'Referrer': 'https://sdk.vercel.ai',
|
90 |
-
'Sec-Fetch-Dest': 'empty',
|
91 |
-
'Sec-Fetch-Mode': 'cors',
|
92 |
-
'Sec-Fetch-Site': 'same-origin',
|
93 |
-
}
|
94 |
-
|
95 |
-
chunks_queue = queue.Queue()
|
96 |
-
error = None
|
97 |
-
response = None
|
98 |
-
|
99 |
-
def callback(data):
|
100 |
-
chunks_queue.put(data.decode())
|
101 |
-
|
102 |
-
def request_thread():
|
103 |
-
nonlocal response, error
|
104 |
-
for _ in range(3):
|
105 |
-
try:
|
106 |
-
response = self.session.post('https://sdk.vercel.ai/api/generate',
|
107 |
-
json=payload, headers=headers, content_callback=callback)
|
108 |
-
response.raise_for_status()
|
109 |
-
|
110 |
-
except Exception as e:
|
111 |
-
if _ == 2:
|
112 |
-
error = e
|
113 |
-
|
114 |
-
else:
|
115 |
-
continue
|
116 |
-
|
117 |
-
thread = threading.Thread(target=request_thread, daemon=True)
|
118 |
-
thread.start()
|
119 |
-
|
120 |
-
text = ''
|
121 |
-
index = 0
|
122 |
-
while True:
|
123 |
-
try:
|
124 |
-
chunk = chunks_queue.get(block=True, timeout=0.1)
|
125 |
-
|
126 |
-
except queue.Empty:
|
127 |
-
if error:
|
128 |
-
raise error
|
129 |
-
|
130 |
-
elif response:
|
131 |
-
break
|
132 |
-
|
133 |
-
else:
|
134 |
-
continue
|
135 |
-
|
136 |
-
text += chunk
|
137 |
-
lines = text.split('\n')
|
138 |
-
|
139 |
-
if len(lines) - 1 > index:
|
140 |
-
new = lines[index:-1]
|
141 |
-
for word in new:
|
142 |
-
yield json.loads(word)
|
143 |
-
index = len(lines) - 1
|
144 |
-
|
145 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
146 |
-
yield 'Vercel is currently not working.'
|
147 |
-
return
|
148 |
-
|
149 |
-
conversation = 'This is a conversation between a human and a language model, respond to the last message accordingly, referring to the past history of messages if needed.\n'
|
150 |
-
|
151 |
-
for message in messages:
|
152 |
-
conversation += '%s: %s\n' % (message['role'], message['content'])
|
153 |
-
|
154 |
-
conversation += 'assistant: '
|
155 |
-
|
156 |
-
completion = Client().generate(model, conversation)
|
157 |
-
|
158 |
-
for token in completion:
|
159 |
-
yield token
|
160 |
-
|
161 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
162 |
-
'(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16_in1k.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = ['./resnet50_8xb32_in1k.py']
|
2 |
-
|
3 |
-
# schedule settings
|
4 |
-
optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.)
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/deprecated/Wewordle.py
DELETED
@@ -1,65 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import random, string, time
|
4 |
-
from aiohttp import ClientSession
|
5 |
-
|
6 |
-
from ..base_provider import AsyncProvider
|
7 |
-
|
8 |
-
|
9 |
-
class Wewordle(AsyncProvider):
|
10 |
-
url = "https://wewordle.org"
|
11 |
-
working = False
|
12 |
-
supports_gpt_35_turbo = True
|
13 |
-
|
14 |
-
@classmethod
|
15 |
-
async def create_async(
|
16 |
-
cls,
|
17 |
-
model: str,
|
18 |
-
messages: list[dict[str, str]],
|
19 |
-
proxy: str = None,
|
20 |
-
**kwargs
|
21 |
-
) -> str:
|
22 |
-
|
23 |
-
headers = {
|
24 |
-
"accept" : "*/*",
|
25 |
-
"pragma" : "no-cache",
|
26 |
-
"Content-Type" : "application/json",
|
27 |
-
"Connection" : "keep-alive"
|
28 |
-
}
|
29 |
-
|
30 |
-
_user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
|
31 |
-
_app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
|
32 |
-
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
|
33 |
-
data = {
|
34 |
-
"user" : _user_id,
|
35 |
-
"messages" : messages,
|
36 |
-
"subscriber": {
|
37 |
-
"originalPurchaseDate" : None,
|
38 |
-
"originalApplicationVersion" : None,
|
39 |
-
"allPurchaseDatesMillis" : {},
|
40 |
-
"entitlements" : {"active": {}, "all": {}},
|
41 |
-
"allPurchaseDates" : {},
|
42 |
-
"allExpirationDatesMillis" : {},
|
43 |
-
"allExpirationDates" : {},
|
44 |
-
"originalAppUserId" : f"$RCAnonymousID:{_app_id}",
|
45 |
-
"latestExpirationDate" : None,
|
46 |
-
"requestDate" : _request_date,
|
47 |
-
"latestExpirationDateMillis" : None,
|
48 |
-
"nonSubscriptionTransactions" : [],
|
49 |
-
"originalPurchaseDateMillis" : None,
|
50 |
-
"managementURL" : None,
|
51 |
-
"allPurchasedProductIdentifiers": [],
|
52 |
-
"firstSeen" : _request_date,
|
53 |
-
"activeSubscriptions" : [],
|
54 |
-
}
|
55 |
-
}
|
56 |
-
|
57 |
-
|
58 |
-
async with ClientSession(
|
59 |
-
headers=headers
|
60 |
-
) as session:
|
61 |
-
async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
|
62 |
-
response.raise_for_status()
|
63 |
-
content = (await response.json())["message"]["content"]
|
64 |
-
if content:
|
65 |
-
return content
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/badgelabel/Factory.d.ts
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
import BadgeLabel from './BadgeLabel';
|
2 |
-
|
3 |
-
export default function (
|
4 |
-
config?: BadgeLabel.IConfig
|
5 |
-
): BadgeLabel;
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/PreLayout.js
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
import PreLayoutBase from '../basesizer/PreLayout.js';
|
2 |
-
|
3 |
-
var PreLayout = function () {
|
4 |
-
this._maxChildWidth = undefined;
|
5 |
-
this._maxChildHeight = undefined;
|
6 |
-
PreLayoutBase.call(this);
|
7 |
-
return this;
|
8 |
-
}
|
9 |
-
export default PreLayout;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.d.ts
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import { Pan } from '../../../plugins/gestures';
|
2 |
-
export default Pan;
|
|
|
|
|
|
spaces/Amjadd/BookGPT/app.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
import urllib.request
|
2 |
-
import fitz
|
3 |
-
import re
|
4 |
-
import numpy as np
|
5 |
-
import tensorflow_hub as hub
|
6 |
-
import openai
|
7 |
-
import gradio as gr
|
8 |
-
import os
|
9 |
-
from sklearn.neighbors import NearestNeighbors
|
10 |
-
|
11 |
-
|
12 |
-
def download_pdf(url, output_path):
|
13 |
-
urllib.request.urlretrieve(url, output_path)
|
14 |
-
|
15 |
-
|
16 |
-
def preprocess(text):
|
17 |
-
text = text.replace('\n', ' ')
|
18 |
-
text = re.sub('\s+', ' ', text)
|
19 |
-
return text
|
20 |
-
|
21 |
-
|
22 |
-
def pdf_to_text(path, start_page=1, end_page=None):
|
23 |
-
doc = fitz.open(path)
|
24 |
-
total_pages = doc.page_count
|
25 |
-
|
26 |
-
if end_page is None:
|
27 |
-
end_page = total_pages
|
28 |
-
|
29 |
-
text_list = []
|
30 |
-
|
31 |
-
for i in range(start_page-1, end_page):
|
32 |
-
text = doc.load_page(i).get_text("text")
|
33 |
-
text = preprocess(text)
|
34 |
-
text_list.append(text)
|
35 |
-
|
36 |
-
doc.close()
|
37 |
-
return text_list
|
38 |
-
|
39 |
-
|
40 |
-
def text_to_chunks(texts, word_length=150, start_page=1):
|
41 |
-
text_toks = [t.split(' ') for t in texts]
|
42 |
-
page_nums = []
|
43 |
-
chunks = []
|
44 |
-
|
45 |
-
for idx, words in enumerate(text_toks):
|
46 |
-
for i in range(0, len(words), word_length):
|
47 |
-
chunk = words[i:i+word_length]
|
48 |
-
if (i+word_length) > len(words) and (len(chunk) < word_length) and (
|
49 |
-
len(text_toks) != (idx+1)):
|
50 |
-
text_toks[idx+1] = chunk + text_toks[idx+1]
|
51 |
-
continue
|
52 |
-
chunk = ' '.join(chunk).strip()
|
53 |
-
chunk = f'[{idx+start_page}]' + ' ' + '"' + chunk + '"'
|
54 |
-
chunks.append(chunk)
|
55 |
-
return chunks
|
56 |
-
|
57 |
-
|
58 |
-
class SemanticSearch:
|
59 |
-
|
60 |
-
def __init__(self):
|
61 |
-
self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
|
62 |
-
self.fitted = False
|
63 |
-
|
64 |
-
|
65 |
-
def fit(self, data, batch=1000, n_neighbors=5):
|
66 |
-
self.data = data
|
67 |
-
self.embeddings = self.get_text_embedding(data, batch=batch)
|
68 |
-
n_neighbors = min(n_neighbors, len(self.embeddings))
|
69 |
-
self.nn = NearestNeighbors(n_neighbors=n_neighbors)
|
70 |
-
self.nn.fit(self.embeddings)
|
71 |
-
self.fitted = True
|
72 |
-
|
73 |
-
|
74 |
-
def __call__(self, text, return_data=True):
|
75 |
-
inp_emb = self.use([text])
|
76 |
-
neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
|
77 |
-
|
78 |
-
if return_data:
|
79 |
-
return [self.data[i] for i in neighbors]
|
80 |
-
else:
|
81 |
-
return neighbors
|
82 |
-
|
83 |
-
|
84 |
-
def get_text_embedding(self, texts, batch=1000):
|
85 |
-
embeddings = []
|
86 |
-
for i in range(0, len(texts), batch):
|
87 |
-
text_batch = texts[i:(i+batch)]
|
88 |
-
emb_batch = self.use(text_batch)
|
89 |
-
embeddings.append(emb_batch)
|
90 |
-
embeddings = np.vstack(embeddings)
|
91 |
-
return embeddings
|
92 |
-
|
93 |
-
|
94 |
-
recommender = SemanticSearch()
|
95 |
-
|
96 |
-
def load_recommender(path, start_page=1):
|
97 |
-
global recommender
|
98 |
-
texts = pdf_to_text(path, start_page=start_page)
|
99 |
-
chunks = text_to_chunks(texts, start_page=start_page)
|
100 |
-
recommender.fit(chunks)
|
101 |
-
return 'Corpus Loaded.'
|
102 |
-
|
103 |
-
|
104 |
-
def generate_text(prompt, engine="text-davinci-003"):
|
105 |
-
completions = openai.Completion.create(
|
106 |
-
engine=engine,
|
107 |
-
prompt=prompt,
|
108 |
-
max_tokens=512,
|
109 |
-
n=1,
|
110 |
-
stop=None,
|
111 |
-
temperature=0.7,
|
112 |
-
)
|
113 |
-
message = completions.choices[0].text
|
114 |
-
return message
|
115 |
-
|
116 |
-
|
117 |
-
def generate_answer(question):
|
118 |
-
topn_chunks = recommender(question)
|
119 |
-
prompt = ""
|
120 |
-
prompt += 'search results:\n\n'
|
121 |
-
for c in topn_chunks:
|
122 |
-
prompt += c + '\n\n'
|
123 |
-
|
124 |
-
prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
|
125 |
-
"Cite each reference using [number] notation (every result has this number at the beginning). "\
|
126 |
-
"Citation should be done at the end of each sentence. If the search results mention multiple subjects "\
|
127 |
-
"with the same name, create separate answers for each. Only include information found in the results and "\
|
128 |
-
"don't add any additional information. Make sure the answer is correct and don't output false content. "\
|
129 |
-
"If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\
|
130 |
-
"search results which has nothing to do with the question. Only answer what is asked. The "\
|
131 |
-
"answer should be short and concise.\n\nQuery: {question}\nAnswer: "
|
132 |
-
|
133 |
-
prompt += f"Query: {question}\nAnswer:"
|
134 |
-
answer = generate_text(prompt)
|
135 |
-
return answer
|
136 |
-
|
137 |
-
|
138 |
-
def question_answer(url, file, question, api_key):
|
139 |
-
openai.api_key = api_key
|
140 |
-
|
141 |
-
if url.strip() == '' and file == None:
|
142 |
-
return '[ERROR]: Both URL and PDF is empty. Provide atleast one.'
|
143 |
-
|
144 |
-
if url.strip() != '' and file != None:
|
145 |
-
return '[ERROR]: Both URL and PDF is provided. Please provide only one (eiter URL or PDF).'
|
146 |
-
|
147 |
-
if url.strip() != '':
|
148 |
-
glob_url = url
|
149 |
-
download_pdf(glob_url, 'corpus.pdf')
|
150 |
-
load_recommender('corpus.pdf')
|
151 |
-
|
152 |
-
else:
|
153 |
-
old_file_name = file.name
|
154 |
-
file_name = file.name
|
155 |
-
file_name = file_name[:-12] + file_name[-4:]
|
156 |
-
os.rename(old_file_name, file_name)
|
157 |
-
load_recommender(file_name)
|
158 |
-
|
159 |
-
if question.strip() == '':
|
160 |
-
return '[ERROR]: Question field is empty'
|
161 |
-
|
162 |
-
return generate_answer(question)
|
163 |
-
|
164 |
-
|
165 |
-
title = 'BookGPT'
|
166 |
-
description = "BookGPT allows you to input an entire book and ask questions about its contents. This app uses GPT-3 to generate answers based on the book's information. BookGPT has ability to add reference to the specific page number from where the information was found. This adds credibility to the answers generated also helps you locate the relevant information in the book."
|
167 |
-
|
168 |
-
with gr.Blocks() as demo:
|
169 |
-
|
170 |
-
gr.Markdown(f'<center><h1>{title}</h1></center>')
|
171 |
-
gr.Markdown(description)
|
172 |
-
gr.Markdown("Thank you for all the support this space has received! Unfortunately, my OpenAI $18 grant has been exhausted, so you'll need to enter your own OpenAI API Key to use the app. Sorry for inconvenience :-(.")
|
173 |
-
|
174 |
-
with gr.Row():
|
175 |
-
|
176 |
-
with gr.Group():
|
177 |
-
url = gr.Textbox(label='URL')
|
178 |
-
gr.Markdown("<center><h6>or<h6></center>")
|
179 |
-
file = gr.File(label='PDF', file_types=['.pdf'])
|
180 |
-
question = gr.Textbox(label='question')
|
181 |
-
api_key = gr.Textbox(label='OpenAI API Key')
|
182 |
-
btn = gr.Button(value='Submit')
|
183 |
-
btn.style(full_width=True)
|
184 |
-
|
185 |
-
with gr.Group():
|
186 |
-
answer = gr.Textbox(label='answer')
|
187 |
-
|
188 |
-
btn.click(question_answer, inputs=[url, file, question, api_key], outputs=[answer])
|
189 |
-
|
190 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_dual_guided.py
DELETED
@@ -1,107 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import gc
|
17 |
-
import tempfile
|
18 |
-
import unittest
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import torch
|
22 |
-
|
23 |
-
from diffusers import VersatileDiffusionDualGuidedPipeline
|
24 |
-
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
|
25 |
-
|
26 |
-
|
27 |
-
torch.backends.cuda.matmul.allow_tf32 = False
|
28 |
-
|
29 |
-
|
30 |
-
@nightly
|
31 |
-
@require_torch_gpu
|
32 |
-
class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase):
|
33 |
-
def tearDown(self):
|
34 |
-
# clean up the VRAM after each test
|
35 |
-
super().tearDown()
|
36 |
-
gc.collect()
|
37 |
-
torch.cuda.empty_cache()
|
38 |
-
|
39 |
-
def test_remove_unused_weights_save_load(self):
|
40 |
-
pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion")
|
41 |
-
# remove text_unet
|
42 |
-
pipe.remove_unused_weights()
|
43 |
-
pipe.to(torch_device)
|
44 |
-
pipe.set_progress_bar_config(disable=None)
|
45 |
-
|
46 |
-
second_prompt = load_image(
|
47 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
|
48 |
-
)
|
49 |
-
|
50 |
-
generator = torch.manual_seed(0)
|
51 |
-
image = pipe(
|
52 |
-
prompt="first prompt",
|
53 |
-
image=second_prompt,
|
54 |
-
text_to_image_strength=0.75,
|
55 |
-
generator=generator,
|
56 |
-
guidance_scale=7.5,
|
57 |
-
num_inference_steps=2,
|
58 |
-
output_type="numpy",
|
59 |
-
).images
|
60 |
-
|
61 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
62 |
-
pipe.save_pretrained(tmpdirname)
|
63 |
-
pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained(tmpdirname)
|
64 |
-
|
65 |
-
pipe.to(torch_device)
|
66 |
-
pipe.set_progress_bar_config(disable=None)
|
67 |
-
|
68 |
-
generator = generator.manual_seed(0)
|
69 |
-
new_image = pipe(
|
70 |
-
prompt="first prompt",
|
71 |
-
image=second_prompt,
|
72 |
-
text_to_image_strength=0.75,
|
73 |
-
generator=generator,
|
74 |
-
guidance_scale=7.5,
|
75 |
-
num_inference_steps=2,
|
76 |
-
output_type="numpy",
|
77 |
-
).images
|
78 |
-
|
79 |
-
assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
|
80 |
-
|
81 |
-
def test_inference_dual_guided(self):
|
82 |
-
pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained("shi-labs/versatile-diffusion")
|
83 |
-
pipe.remove_unused_weights()
|
84 |
-
pipe.to(torch_device)
|
85 |
-
pipe.set_progress_bar_config(disable=None)
|
86 |
-
|
87 |
-
first_prompt = "cyberpunk 2077"
|
88 |
-
second_prompt = load_image(
|
89 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
|
90 |
-
)
|
91 |
-
generator = torch.manual_seed(0)
|
92 |
-
image = pipe(
|
93 |
-
prompt=first_prompt,
|
94 |
-
image=second_prompt,
|
95 |
-
text_to_image_strength=0.75,
|
96 |
-
generator=generator,
|
97 |
-
guidance_scale=7.5,
|
98 |
-
num_inference_steps=50,
|
99 |
-
output_type="numpy",
|
100 |
-
).images
|
101 |
-
|
102 |
-
image_slice = image[0, 253:256, 253:256, -1]
|
103 |
-
|
104 |
-
assert image.shape == (1, 512, 512, 3)
|
105 |
-
expected_slice = np.array([0.0787, 0.0849, 0.0826, 0.0812, 0.0807, 0.0795, 0.0818, 0.0798, 0.0779])
|
106 |
-
|
107 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/mask_rcnn_r50_fpn.py',
|
3 |
-
'../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
|
4 |
-
'../_base_/default_runtime.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
roi_head=dict(
|
8 |
-
bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
|
9 |
-
# runtime settings
|
10 |
-
runner = dict(type='EpochBasedRunner', max_epochs=15)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
|
2 |
-
|
3 |
-
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/evaluation/metrics.py
DELETED
@@ -1,326 +0,0 @@
|
|
1 |
-
from collections import OrderedDict
|
2 |
-
|
3 |
-
import annotator.uniformer.mmcv as mmcv
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
|
7 |
-
|
8 |
-
def f_score(precision, recall, beta=1):
|
9 |
-
"""calcuate the f-score value.
|
10 |
-
|
11 |
-
Args:
|
12 |
-
precision (float | torch.Tensor): The precision value.
|
13 |
-
recall (float | torch.Tensor): The recall value.
|
14 |
-
beta (int): Determines the weight of recall in the combined score.
|
15 |
-
Default: False.
|
16 |
-
|
17 |
-
Returns:
|
18 |
-
[torch.tensor]: The f-score value.
|
19 |
-
"""
|
20 |
-
score = (1 + beta**2) * (precision * recall) / (
|
21 |
-
(beta**2 * precision) + recall)
|
22 |
-
return score
|
23 |
-
|
24 |
-
|
25 |
-
def intersect_and_union(pred_label,
|
26 |
-
label,
|
27 |
-
num_classes,
|
28 |
-
ignore_index,
|
29 |
-
label_map=dict(),
|
30 |
-
reduce_zero_label=False):
|
31 |
-
"""Calculate intersection and Union.
|
32 |
-
|
33 |
-
Args:
|
34 |
-
pred_label (ndarray | str): Prediction segmentation map
|
35 |
-
or predict result filename.
|
36 |
-
label (ndarray | str): Ground truth segmentation map
|
37 |
-
or label filename.
|
38 |
-
num_classes (int): Number of categories.
|
39 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
40 |
-
label_map (dict): Mapping old labels to new labels. The parameter will
|
41 |
-
work only when label is str. Default: dict().
|
42 |
-
reduce_zero_label (bool): Wether ignore zero label. The parameter will
|
43 |
-
work only when label is str. Default: False.
|
44 |
-
|
45 |
-
Returns:
|
46 |
-
torch.Tensor: The intersection of prediction and ground truth
|
47 |
-
histogram on all classes.
|
48 |
-
torch.Tensor: The union of prediction and ground truth histogram on
|
49 |
-
all classes.
|
50 |
-
torch.Tensor: The prediction histogram on all classes.
|
51 |
-
torch.Tensor: The ground truth histogram on all classes.
|
52 |
-
"""
|
53 |
-
|
54 |
-
if isinstance(pred_label, str):
|
55 |
-
pred_label = torch.from_numpy(np.load(pred_label))
|
56 |
-
else:
|
57 |
-
pred_label = torch.from_numpy((pred_label))
|
58 |
-
|
59 |
-
if isinstance(label, str):
|
60 |
-
label = torch.from_numpy(
|
61 |
-
mmcv.imread(label, flag='unchanged', backend='pillow'))
|
62 |
-
else:
|
63 |
-
label = torch.from_numpy(label)
|
64 |
-
|
65 |
-
if label_map is not None:
|
66 |
-
for old_id, new_id in label_map.items():
|
67 |
-
label[label == old_id] = new_id
|
68 |
-
if reduce_zero_label:
|
69 |
-
label[label == 0] = 255
|
70 |
-
label = label - 1
|
71 |
-
label[label == 254] = 255
|
72 |
-
|
73 |
-
mask = (label != ignore_index)
|
74 |
-
pred_label = pred_label[mask]
|
75 |
-
label = label[mask]
|
76 |
-
|
77 |
-
intersect = pred_label[pred_label == label]
|
78 |
-
area_intersect = torch.histc(
|
79 |
-
intersect.float(), bins=(num_classes), min=0, max=num_classes - 1)
|
80 |
-
area_pred_label = torch.histc(
|
81 |
-
pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1)
|
82 |
-
area_label = torch.histc(
|
83 |
-
label.float(), bins=(num_classes), min=0, max=num_classes - 1)
|
84 |
-
area_union = area_pred_label + area_label - area_intersect
|
85 |
-
return area_intersect, area_union, area_pred_label, area_label
|
86 |
-
|
87 |
-
|
88 |
-
def total_intersect_and_union(results,
|
89 |
-
gt_seg_maps,
|
90 |
-
num_classes,
|
91 |
-
ignore_index,
|
92 |
-
label_map=dict(),
|
93 |
-
reduce_zero_label=False):
|
94 |
-
"""Calculate Total Intersection and Union.
|
95 |
-
|
96 |
-
Args:
|
97 |
-
results (list[ndarray] | list[str]): List of prediction segmentation
|
98 |
-
maps or list of prediction result filenames.
|
99 |
-
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
|
100 |
-
segmentation maps or list of label filenames.
|
101 |
-
num_classes (int): Number of categories.
|
102 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
103 |
-
label_map (dict): Mapping old labels to new labels. Default: dict().
|
104 |
-
reduce_zero_label (bool): Wether ignore zero label. Default: False.
|
105 |
-
|
106 |
-
Returns:
|
107 |
-
ndarray: The intersection of prediction and ground truth histogram
|
108 |
-
on all classes.
|
109 |
-
ndarray: The union of prediction and ground truth histogram on all
|
110 |
-
classes.
|
111 |
-
ndarray: The prediction histogram on all classes.
|
112 |
-
ndarray: The ground truth histogram on all classes.
|
113 |
-
"""
|
114 |
-
num_imgs = len(results)
|
115 |
-
assert len(gt_seg_maps) == num_imgs
|
116 |
-
total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)
|
117 |
-
total_area_union = torch.zeros((num_classes, ), dtype=torch.float64)
|
118 |
-
total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64)
|
119 |
-
total_area_label = torch.zeros((num_classes, ), dtype=torch.float64)
|
120 |
-
for i in range(num_imgs):
|
121 |
-
area_intersect, area_union, area_pred_label, area_label = \
|
122 |
-
intersect_and_union(
|
123 |
-
results[i], gt_seg_maps[i], num_classes, ignore_index,
|
124 |
-
label_map, reduce_zero_label)
|
125 |
-
total_area_intersect += area_intersect
|
126 |
-
total_area_union += area_union
|
127 |
-
total_area_pred_label += area_pred_label
|
128 |
-
total_area_label += area_label
|
129 |
-
return total_area_intersect, total_area_union, total_area_pred_label, \
|
130 |
-
total_area_label
|
131 |
-
|
132 |
-
|
133 |
-
def mean_iou(results,
|
134 |
-
gt_seg_maps,
|
135 |
-
num_classes,
|
136 |
-
ignore_index,
|
137 |
-
nan_to_num=None,
|
138 |
-
label_map=dict(),
|
139 |
-
reduce_zero_label=False):
|
140 |
-
"""Calculate Mean Intersection and Union (mIoU)
|
141 |
-
|
142 |
-
Args:
|
143 |
-
results (list[ndarray] | list[str]): List of prediction segmentation
|
144 |
-
maps or list of prediction result filenames.
|
145 |
-
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
|
146 |
-
segmentation maps or list of label filenames.
|
147 |
-
num_classes (int): Number of categories.
|
148 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
149 |
-
nan_to_num (int, optional): If specified, NaN values will be replaced
|
150 |
-
by the numbers defined by the user. Default: None.
|
151 |
-
label_map (dict): Mapping old labels to new labels. Default: dict().
|
152 |
-
reduce_zero_label (bool): Wether ignore zero label. Default: False.
|
153 |
-
|
154 |
-
Returns:
|
155 |
-
dict[str, float | ndarray]:
|
156 |
-
<aAcc> float: Overall accuracy on all images.
|
157 |
-
<Acc> ndarray: Per category accuracy, shape (num_classes, ).
|
158 |
-
<IoU> ndarray: Per category IoU, shape (num_classes, ).
|
159 |
-
"""
|
160 |
-
iou_result = eval_metrics(
|
161 |
-
results=results,
|
162 |
-
gt_seg_maps=gt_seg_maps,
|
163 |
-
num_classes=num_classes,
|
164 |
-
ignore_index=ignore_index,
|
165 |
-
metrics=['mIoU'],
|
166 |
-
nan_to_num=nan_to_num,
|
167 |
-
label_map=label_map,
|
168 |
-
reduce_zero_label=reduce_zero_label)
|
169 |
-
return iou_result
|
170 |
-
|
171 |
-
|
172 |
-
def mean_dice(results,
|
173 |
-
gt_seg_maps,
|
174 |
-
num_classes,
|
175 |
-
ignore_index,
|
176 |
-
nan_to_num=None,
|
177 |
-
label_map=dict(),
|
178 |
-
reduce_zero_label=False):
|
179 |
-
"""Calculate Mean Dice (mDice)
|
180 |
-
|
181 |
-
Args:
|
182 |
-
results (list[ndarray] | list[str]): List of prediction segmentation
|
183 |
-
maps or list of prediction result filenames.
|
184 |
-
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
|
185 |
-
segmentation maps or list of label filenames.
|
186 |
-
num_classes (int): Number of categories.
|
187 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
188 |
-
nan_to_num (int, optional): If specified, NaN values will be replaced
|
189 |
-
by the numbers defined by the user. Default: None.
|
190 |
-
label_map (dict): Mapping old labels to new labels. Default: dict().
|
191 |
-
reduce_zero_label (bool): Wether ignore zero label. Default: False.
|
192 |
-
|
193 |
-
Returns:
|
194 |
-
dict[str, float | ndarray]: Default metrics.
|
195 |
-
<aAcc> float: Overall accuracy on all images.
|
196 |
-
<Acc> ndarray: Per category accuracy, shape (num_classes, ).
|
197 |
-
<Dice> ndarray: Per category dice, shape (num_classes, ).
|
198 |
-
"""
|
199 |
-
|
200 |
-
dice_result = eval_metrics(
|
201 |
-
results=results,
|
202 |
-
gt_seg_maps=gt_seg_maps,
|
203 |
-
num_classes=num_classes,
|
204 |
-
ignore_index=ignore_index,
|
205 |
-
metrics=['mDice'],
|
206 |
-
nan_to_num=nan_to_num,
|
207 |
-
label_map=label_map,
|
208 |
-
reduce_zero_label=reduce_zero_label)
|
209 |
-
return dice_result
|
210 |
-
|
211 |
-
|
212 |
-
def mean_fscore(results,
|
213 |
-
gt_seg_maps,
|
214 |
-
num_classes,
|
215 |
-
ignore_index,
|
216 |
-
nan_to_num=None,
|
217 |
-
label_map=dict(),
|
218 |
-
reduce_zero_label=False,
|
219 |
-
beta=1):
|
220 |
-
"""Calculate Mean Intersection and Union (mIoU)
|
221 |
-
|
222 |
-
Args:
|
223 |
-
results (list[ndarray] | list[str]): List of prediction segmentation
|
224 |
-
maps or list of prediction result filenames.
|
225 |
-
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
|
226 |
-
segmentation maps or list of label filenames.
|
227 |
-
num_classes (int): Number of categories.
|
228 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
229 |
-
nan_to_num (int, optional): If specified, NaN values will be replaced
|
230 |
-
by the numbers defined by the user. Default: None.
|
231 |
-
label_map (dict): Mapping old labels to new labels. Default: dict().
|
232 |
-
reduce_zero_label (bool): Wether ignore zero label. Default: False.
|
233 |
-
beta (int): Determines the weight of recall in the combined score.
|
234 |
-
Default: False.
|
235 |
-
|
236 |
-
|
237 |
-
Returns:
|
238 |
-
dict[str, float | ndarray]: Default metrics.
|
239 |
-
<aAcc> float: Overall accuracy on all images.
|
240 |
-
<Fscore> ndarray: Per category recall, shape (num_classes, ).
|
241 |
-
<Precision> ndarray: Per category precision, shape (num_classes, ).
|
242 |
-
<Recall> ndarray: Per category f-score, shape (num_classes, ).
|
243 |
-
"""
|
244 |
-
fscore_result = eval_metrics(
|
245 |
-
results=results,
|
246 |
-
gt_seg_maps=gt_seg_maps,
|
247 |
-
num_classes=num_classes,
|
248 |
-
ignore_index=ignore_index,
|
249 |
-
metrics=['mFscore'],
|
250 |
-
nan_to_num=nan_to_num,
|
251 |
-
label_map=label_map,
|
252 |
-
reduce_zero_label=reduce_zero_label,
|
253 |
-
beta=beta)
|
254 |
-
return fscore_result
|
255 |
-
|
256 |
-
|
257 |
-
def eval_metrics(results,
|
258 |
-
gt_seg_maps,
|
259 |
-
num_classes,
|
260 |
-
ignore_index,
|
261 |
-
metrics=['mIoU'],
|
262 |
-
nan_to_num=None,
|
263 |
-
label_map=dict(),
|
264 |
-
reduce_zero_label=False,
|
265 |
-
beta=1):
|
266 |
-
"""Calculate evaluation metrics
|
267 |
-
Args:
|
268 |
-
results (list[ndarray] | list[str]): List of prediction segmentation
|
269 |
-
maps or list of prediction result filenames.
|
270 |
-
gt_seg_maps (list[ndarray] | list[str]): list of ground truth
|
271 |
-
segmentation maps or list of label filenames.
|
272 |
-
num_classes (int): Number of categories.
|
273 |
-
ignore_index (int): Index that will be ignored in evaluation.
|
274 |
-
metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.
|
275 |
-
nan_to_num (int, optional): If specified, NaN values will be replaced
|
276 |
-
by the numbers defined by the user. Default: None.
|
277 |
-
label_map (dict): Mapping old labels to new labels. Default: dict().
|
278 |
-
reduce_zero_label (bool): Wether ignore zero label. Default: False.
|
279 |
-
Returns:
|
280 |
-
float: Overall accuracy on all images.
|
281 |
-
ndarray: Per category accuracy, shape (num_classes, ).
|
282 |
-
ndarray: Per category evaluation metrics, shape (num_classes, ).
|
283 |
-
"""
|
284 |
-
if isinstance(metrics, str):
|
285 |
-
metrics = [metrics]
|
286 |
-
allowed_metrics = ['mIoU', 'mDice', 'mFscore']
|
287 |
-
if not set(metrics).issubset(set(allowed_metrics)):
|
288 |
-
raise KeyError('metrics {} is not supported'.format(metrics))
|
289 |
-
|
290 |
-
total_area_intersect, total_area_union, total_area_pred_label, \
|
291 |
-
total_area_label = total_intersect_and_union(
|
292 |
-
results, gt_seg_maps, num_classes, ignore_index, label_map,
|
293 |
-
reduce_zero_label)
|
294 |
-
all_acc = total_area_intersect.sum() / total_area_label.sum()
|
295 |
-
ret_metrics = OrderedDict({'aAcc': all_acc})
|
296 |
-
for metric in metrics:
|
297 |
-
if metric == 'mIoU':
|
298 |
-
iou = total_area_intersect / total_area_union
|
299 |
-
acc = total_area_intersect / total_area_label
|
300 |
-
ret_metrics['IoU'] = iou
|
301 |
-
ret_metrics['Acc'] = acc
|
302 |
-
elif metric == 'mDice':
|
303 |
-
dice = 2 * total_area_intersect / (
|
304 |
-
total_area_pred_label + total_area_label)
|
305 |
-
acc = total_area_intersect / total_area_label
|
306 |
-
ret_metrics['Dice'] = dice
|
307 |
-
ret_metrics['Acc'] = acc
|
308 |
-
elif metric == 'mFscore':
|
309 |
-
precision = total_area_intersect / total_area_pred_label
|
310 |
-
recall = total_area_intersect / total_area_label
|
311 |
-
f_value = torch.tensor(
|
312 |
-
[f_score(x[0], x[1], beta) for x in zip(precision, recall)])
|
313 |
-
ret_metrics['Fscore'] = f_value
|
314 |
-
ret_metrics['Precision'] = precision
|
315 |
-
ret_metrics['Recall'] = recall
|
316 |
-
|
317 |
-
ret_metrics = {
|
318 |
-
metric: value.numpy()
|
319 |
-
for metric, value in ret_metrics.items()
|
320 |
-
}
|
321 |
-
if nan_to_num is not None:
|
322 |
-
ret_metrics = OrderedDict({
|
323 |
-
metric: np.nan_to_num(metric_value, nan=nan_to_num)
|
324 |
-
for metric, metric_value in ret_metrics.items()
|
325 |
-
})
|
326 |
-
return ret_metrics
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Arnx/MusicGenXvAKN/audiocraft/models/lm.py
DELETED
@@ -1,527 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
from dataclasses import dataclass
|
8 |
-
from functools import partial
|
9 |
-
import logging
|
10 |
-
import math
|
11 |
-
import typing as tp
|
12 |
-
|
13 |
-
import torch
|
14 |
-
from torch import nn
|
15 |
-
|
16 |
-
from ..utils import utils
|
17 |
-
from ..modules.streaming import StreamingModule, State
|
18 |
-
from ..modules.transformer import StreamingTransformer, create_norm_fn
|
19 |
-
from ..modules.conditioners import (
|
20 |
-
ConditionFuser,
|
21 |
-
ClassifierFreeGuidanceDropout,
|
22 |
-
AttributeDropout,
|
23 |
-
ConditioningProvider,
|
24 |
-
ConditioningAttributes,
|
25 |
-
ConditionType,
|
26 |
-
)
|
27 |
-
from ..modules.codebooks_patterns import CodebooksPatternProvider
|
28 |
-
from ..modules.activations import get_activation_fn
|
29 |
-
|
30 |
-
|
31 |
-
logger = logging.getLogger(__name__)
|
32 |
-
ConditionTensors = tp.Dict[str, ConditionType]
|
33 |
-
CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
|
34 |
-
|
35 |
-
|
36 |
-
def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
|
37 |
-
"""LM layer initialization.
|
38 |
-
Inspired from xlformers: https://github.com/fairinternal/xlformers
|
39 |
-
|
40 |
-
Args:
|
41 |
-
method (str): Method name for init function. Valid options are:
|
42 |
-
'gaussian', 'uniform'.
|
43 |
-
input_dim (int): Input dimension of the initialized module.
|
44 |
-
init_depth (Optional[int]): Optional init depth value used to rescale
|
45 |
-
the standard deviation if defined.
|
46 |
-
"""
|
47 |
-
# Compute std
|
48 |
-
std = 1 / math.sqrt(input_dim)
|
49 |
-
# Rescale with depth
|
50 |
-
if init_depth is not None:
|
51 |
-
std = std / math.sqrt(2 * init_depth)
|
52 |
-
|
53 |
-
if method == 'gaussian':
|
54 |
-
return partial(
|
55 |
-
torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
|
56 |
-
)
|
57 |
-
elif method == 'uniform':
|
58 |
-
bound = math.sqrt(3) * std # ensure the standard deviation is `std`
|
59 |
-
return partial(torch.nn.init.uniform_, a=-bound, b=bound)
|
60 |
-
else:
|
61 |
-
raise ValueError("Unsupported layer initialization method")
|
62 |
-
|
63 |
-
|
64 |
-
def init_layer(m: nn.Module,
|
65 |
-
method: str,
|
66 |
-
init_depth: tp.Optional[int] = None,
|
67 |
-
zero_bias_init: bool = False):
|
68 |
-
"""Wrapper around ``get_init_fn`` for proper initialization of LM modules.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
m (nn.Module): Module to initialize.
|
72 |
-
method (str): Method name for the init function.
|
73 |
-
init_depth (Optional[int]): Optional init depth value used to rescale
|
74 |
-
the standard deviation if defined.
|
75 |
-
zero_bias_init (bool): Whether to initialize the bias to 0 or not.
|
76 |
-
"""
|
77 |
-
if isinstance(m, nn.Linear):
|
78 |
-
init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
|
79 |
-
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
|
80 |
-
weight = m.weight.float()
|
81 |
-
init_fn(weight)
|
82 |
-
m.weight.data[:] = weight.half()
|
83 |
-
else:
|
84 |
-
init_fn(m.weight)
|
85 |
-
if zero_bias_init and m.bias is not None:
|
86 |
-
nn.init.constant_(m.bias, 0)
|
87 |
-
elif isinstance(m, nn.Embedding):
|
88 |
-
init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
|
89 |
-
if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
|
90 |
-
weight = m.weight.float()
|
91 |
-
init_fn(weight)
|
92 |
-
m.weight.data[:] = weight.half()
|
93 |
-
else:
|
94 |
-
init_fn(m.weight)
|
95 |
-
|
96 |
-
|
97 |
-
class ScaledEmbedding(nn.Embedding):
|
98 |
-
"""Boost learning rate for embeddings (with `scale`).
|
99 |
-
"""
|
100 |
-
def __init__(self, *args, lr=None, **kwargs):
|
101 |
-
super().__init__(*args, **kwargs)
|
102 |
-
self.lr = lr
|
103 |
-
|
104 |
-
def make_optim_group(self):
|
105 |
-
group = {"params": list(self.parameters())}
|
106 |
-
if self.lr is not None:
|
107 |
-
group["lr"] = self.lr
|
108 |
-
return group
|
109 |
-
|
110 |
-
|
111 |
-
@dataclass
|
112 |
-
class LMOutput:
|
113 |
-
# The logits are already re-aligned with the input codes
|
114 |
-
# hence no extra shift is required, e.g. when computing CE
|
115 |
-
logits: torch.Tensor # [B, K, T, card]
|
116 |
-
mask: torch.Tensor # [B, K, T]
|
117 |
-
|
118 |
-
|
119 |
-
class LMModel(StreamingModule):
|
120 |
-
"""Transformer-based language model on multiple streams of codes.
|
121 |
-
|
122 |
-
Args:
|
123 |
-
pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
|
124 |
-
condition_provider (MusicConditioningProvider): Conditioning provider from metadata.
|
125 |
-
fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
|
126 |
-
n_q (int): Number of parallel streams to model.
|
127 |
-
card (int): Cardinality, vocabulary size.
|
128 |
-
dim (int): Dimension of the transformer encoder.
|
129 |
-
num_heads (int): Number of heads for the transformer encoder.
|
130 |
-
hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
|
131 |
-
norm (str): Normalization method.
|
132 |
-
norm_first (bool): Use pre-norm instead of post-norm.
|
133 |
-
emb_lr (Optional[float]): Embedding-specific learning rate.
|
134 |
-
bias_proj (bool): Use bias for output projections.
|
135 |
-
weight_init (Optional[str]): Method for weight initialization.
|
136 |
-
depthwise_init (Optional[str]): Method for depthwise weight initialization.
|
137 |
-
zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
|
138 |
-
cfg_dropout (float): Classifier-free guidance dropout.
|
139 |
-
cfg_coef (float): Classifier-free guidance coefficient.
|
140 |
-
attribute_dropout (dict): Attribute dropout probabilities.
|
141 |
-
two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
|
142 |
-
**kwargs: Additional parameters for the transformer encoder.
|
143 |
-
"""
|
144 |
-
def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
|
145 |
-
fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
|
146 |
-
hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
|
147 |
-
emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
|
148 |
-
weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
|
149 |
-
zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
|
150 |
-
attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
|
151 |
-
**kwargs):
|
152 |
-
super().__init__()
|
153 |
-
self.cfg_coef = cfg_coef
|
154 |
-
self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
|
155 |
-
self.att_dropout = AttributeDropout(p=attribute_dropout)
|
156 |
-
self.condition_provider = condition_provider
|
157 |
-
self.fuser = fuser
|
158 |
-
self.card = card
|
159 |
-
embed_dim = self.card + 1
|
160 |
-
self.n_q = n_q
|
161 |
-
self.dim = dim
|
162 |
-
self.pattern_provider = pattern_provider
|
163 |
-
self.two_step_cfg = two_step_cfg
|
164 |
-
self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
|
165 |
-
if 'activation' in kwargs:
|
166 |
-
kwargs['activation'] = get_activation_fn(kwargs['activation'])
|
167 |
-
self.transformer = StreamingTransformer(
|
168 |
-
d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
|
169 |
-
norm=norm, norm_first=norm_first, **kwargs)
|
170 |
-
self.out_norm: tp.Optional[nn.Module] = None
|
171 |
-
if norm_first:
|
172 |
-
self.out_norm = create_norm_fn(norm, dim)
|
173 |
-
self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
|
174 |
-
self._init_weights(weight_init, depthwise_init, zero_bias_init)
|
175 |
-
self._fsdp: tp.Optional[nn.Module]
|
176 |
-
self.__dict__['_fsdp'] = None
|
177 |
-
|
178 |
-
def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
|
179 |
-
"""Initialization of the transformer module weights.
|
180 |
-
|
181 |
-
Args:
|
182 |
-
weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options.
|
183 |
-
depthwise_init (Optional[str]): Depwthwise initialization strategy. The following options are valid:
|
184 |
-
'current' where the depth corresponds to the current layer index or 'global' where the total number
|
185 |
-
of layer is used as depth. If not set, no depthwise initialization strategy is used.
|
186 |
-
zero_bias_init (bool): Whether to initalize bias to zero or not.
|
187 |
-
"""
|
188 |
-
assert depthwise_init is None or depthwise_init in ['current', 'global']
|
189 |
-
assert depthwise_init is None or weight_init is not None, \
|
190 |
-
"If 'depthwise_init' is defined, a 'weight_init' method should be provided."
|
191 |
-
assert not zero_bias_init or weight_init is not None, \
|
192 |
-
"If 'zero_bias_init', a 'weight_init' method should be provided"
|
193 |
-
|
194 |
-
if weight_init is None:
|
195 |
-
return
|
196 |
-
|
197 |
-
for emb_layer in self.emb:
|
198 |
-
init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
|
199 |
-
|
200 |
-
for layer_idx, tr_layer in enumerate(self.transformer.layers):
|
201 |
-
depth = None
|
202 |
-
if depthwise_init == 'current':
|
203 |
-
depth = layer_idx + 1
|
204 |
-
elif depthwise_init == 'global':
|
205 |
-
depth = len(self.transformer.layers)
|
206 |
-
init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
|
207 |
-
tr_layer.apply(init_fn)
|
208 |
-
|
209 |
-
for linear in self.linears:
|
210 |
-
init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
|
211 |
-
|
212 |
-
@property
|
213 |
-
def special_token_id(self) -> int:
|
214 |
-
return self.card
|
215 |
-
|
216 |
-
@property
|
217 |
-
def num_codebooks(self) -> int:
|
218 |
-
return self.n_q
|
219 |
-
|
220 |
-
def forward(self, sequence: torch.Tensor,
|
221 |
-
conditions: tp.List[ConditioningAttributes],
|
222 |
-
condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
|
223 |
-
"""Apply language model on sequence and conditions.
|
224 |
-
Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and
|
225 |
-
S the sequence steps, return the logits with shape [B, card, K, S].
|
226 |
-
|
227 |
-
Args:
|
228 |
-
indices (torch.Tensor): indices of the codes to model.
|
229 |
-
conditions (list[ConditioningAttributes]): conditionings to use when modeling
|
230 |
-
the given codes. Note that when evaluating multiple time with the same conditioning
|
231 |
-
you should pre-compute those and pass them as `condition_tensors`.
|
232 |
-
condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
|
233 |
-
tensors, see `conditions`.
|
234 |
-
Returns:
|
235 |
-
torch.Tensor: Logits.
|
236 |
-
"""
|
237 |
-
B, K, S = sequence.shape
|
238 |
-
assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks'
|
239 |
-
input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
|
240 |
-
if condition_tensors is None:
|
241 |
-
assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
|
242 |
-
# apply dropout modules
|
243 |
-
conditions = self.cfg_dropout(conditions)
|
244 |
-
conditions = self.att_dropout(conditions)
|
245 |
-
tokenized = self.condition_provider.tokenize(conditions)
|
246 |
-
# encode conditions and fuse, both have a streaming cache to not recompute when generating.
|
247 |
-
condition_tensors = self.condition_provider(tokenized)
|
248 |
-
else:
|
249 |
-
assert not conditions, "Shouldn't pass both conditions and condition_tensors."
|
250 |
-
|
251 |
-
input_, cross_attention_input = self.fuser(input_, condition_tensors)
|
252 |
-
|
253 |
-
out = self.transformer(input_, cross_attention_src=cross_attention_input)
|
254 |
-
if self.out_norm:
|
255 |
-
out = self.out_norm(out)
|
256 |
-
logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
|
257 |
-
|
258 |
-
# remove the prefix from the model outputs
|
259 |
-
if len(self.fuser.fuse2cond['prepend']) > 0:
|
260 |
-
logits = logits[:, :, -S:]
|
261 |
-
|
262 |
-
return logits # [B, K, S, card]
|
263 |
-
|
264 |
-
def compute_predictions(
|
265 |
-
self, codes: torch.Tensor,
|
266 |
-
conditions: tp.List[ConditioningAttributes],
|
267 |
-
condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
|
268 |
-
"""Given an input tensor of codes [B, K, T] and list of conditions, runs the model
|
269 |
-
forward using the specified codes interleaving pattern.
|
270 |
-
|
271 |
-
Args:
|
272 |
-
codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
|
273 |
-
K the number of codebooks and T the number of timesteps.
|
274 |
-
conditions (list[ConditioningAttributes]): conditionings to use when modeling
|
275 |
-
the given codes. Note that when evaluating multiple time with the same conditioning
|
276 |
-
you should pre-compute those and pass them as `condition_tensors`.
|
277 |
-
condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
|
278 |
-
tensors, see `conditions`.
|
279 |
-
Returns:
|
280 |
-
LMOutput: Language model outputs
|
281 |
-
logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
|
282 |
-
i.e. the first item corresponds to logits to predict the first code, meaning that
|
283 |
-
no additional shifting of codes and logits is required.
|
284 |
-
mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
|
285 |
-
Given the specified interleaving strategies, parts of the logits and codes should
|
286 |
-
not be considered as valid predictions because of invalid context.
|
287 |
-
"""
|
288 |
-
B, K, T = codes.shape
|
289 |
-
codes = codes.contiguous()
|
290 |
-
# map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
|
291 |
-
pattern = self.pattern_provider.get_pattern(T)
|
292 |
-
sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
|
293 |
-
codes, self.special_token_id, keep_only_valid_steps=True
|
294 |
-
)
|
295 |
-
# apply model on pattern sequence
|
296 |
-
model = self if self._fsdp is None else self._fsdp
|
297 |
-
logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
|
298 |
-
# map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
|
299 |
-
# and provide the corresponding mask over invalid positions of tokens
|
300 |
-
logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
|
301 |
-
# note: we use nans as special token to make it obvious if we feed unexpected logits
|
302 |
-
logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
|
303 |
-
logits, float('nan'), keep_only_valid_steps=True
|
304 |
-
)
|
305 |
-
logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
|
306 |
-
logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
|
307 |
-
return LMOutput(logits, logits_mask)
|
308 |
-
|
309 |
-
def _sample_next_token(self,
|
310 |
-
sequence: torch.Tensor,
|
311 |
-
cfg_conditions: CFGConditions,
|
312 |
-
unconditional_state: State,
|
313 |
-
use_sampling: bool = False,
|
314 |
-
temp: float = 1.0,
|
315 |
-
top_k: int = 0,
|
316 |
-
top_p: float = 0.0,
|
317 |
-
cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
|
318 |
-
"""Sample next token from the model given a sequence and a set of conditions. The model supports
|
319 |
-
multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
|
320 |
-
|
321 |
-
Args:
|
322 |
-
sequence (torch.Tensor): Current sequence of shape [B, K, S]
|
323 |
-
with K corresponding to the number of codebooks and S the number of sequence steps.
|
324 |
-
S = 1 in streaming mode, except for the first step that contains a bigger prompt.
|
325 |
-
condition_tensors (Dict[str, ConditionType): Set of conditions. If CFG is used,
|
326 |
-
should be twice the batch size, being the concatenation of the conditions + null conditions.
|
327 |
-
use_sampling (bool): Whether to use a sampling strategy or not.
|
328 |
-
temp (float): Sampling temperature.
|
329 |
-
top_k (int): K for "top-k" sampling.
|
330 |
-
top_p (float): P for "top-p" sampling.
|
331 |
-
cfg_coef (float): classifier free guidance coefficient
|
332 |
-
Returns:
|
333 |
-
next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
|
334 |
-
"""
|
335 |
-
B = sequence.shape[0]
|
336 |
-
cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
|
337 |
-
model = self if self._fsdp is None else self._fsdp
|
338 |
-
if self.two_step_cfg and cfg_conditions != {}:
|
339 |
-
assert isinstance(cfg_conditions, tuple)
|
340 |
-
condition_tensors, null_condition_tensors = cfg_conditions
|
341 |
-
cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
|
342 |
-
state = self.get_streaming_state()
|
343 |
-
self.set_streaming_state(unconditional_state)
|
344 |
-
uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
|
345 |
-
unconditional_state.update(self.get_streaming_state())
|
346 |
-
self.set_streaming_state(state)
|
347 |
-
logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef
|
348 |
-
else:
|
349 |
-
assert isinstance(cfg_conditions, dict)
|
350 |
-
condition_tensors = cfg_conditions
|
351 |
-
if condition_tensors:
|
352 |
-
# Preparing for CFG, predicting both conditional and unconditional logits.
|
353 |
-
sequence = torch.cat([sequence, sequence], dim=0)
|
354 |
-
all_logits = model(
|
355 |
-
sequence,
|
356 |
-
conditions=[], condition_tensors=condition_tensors)
|
357 |
-
if condition_tensors:
|
358 |
-
cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
|
359 |
-
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
|
360 |
-
else:
|
361 |
-
logits = all_logits
|
362 |
-
|
363 |
-
logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
|
364 |
-
logits = logits[..., -1] # [B x K x card]
|
365 |
-
|
366 |
-
# Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
|
367 |
-
if use_sampling and temp > 0.0:
|
368 |
-
probs = torch.softmax(logits / temp, dim=-1)
|
369 |
-
if top_p > 0.0:
|
370 |
-
next_token = utils.sample_top_p(probs, p=top_p)
|
371 |
-
elif top_k > 0:
|
372 |
-
next_token = utils.sample_top_k(probs, k=top_k)
|
373 |
-
else:
|
374 |
-
next_token = utils.multinomial(probs, num_samples=1)
|
375 |
-
else:
|
376 |
-
next_token = torch.argmax(logits, dim=-1, keepdim=True)
|
377 |
-
|
378 |
-
return next_token
|
379 |
-
|
380 |
-
@torch.no_grad()
|
381 |
-
def generate(self,
|
382 |
-
prompt: tp.Optional[torch.Tensor] = None,
|
383 |
-
conditions: tp.List[ConditioningAttributes] = [],
|
384 |
-
num_samples: tp.Optional[int] = None,
|
385 |
-
max_gen_len: int = 256,
|
386 |
-
use_sampling: bool = True,
|
387 |
-
temp: float = 1.0,
|
388 |
-
top_k: int = 250,
|
389 |
-
top_p: float = 0.0,
|
390 |
-
cfg_coef: tp.Optional[float] = None,
|
391 |
-
two_step_cfg: bool = False,
|
392 |
-
remove_prompts: bool = False,
|
393 |
-
check: bool = False,
|
394 |
-
callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
|
395 |
-
"""Generate tokens sampling from the model given a prompt or unconditionally. Generation can
|
396 |
-
be perform in a greedy fashion or using sampling with top K and top P strategies.
|
397 |
-
|
398 |
-
Args:
|
399 |
-
prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T].
|
400 |
-
conditions_tensors (Dict[str, torch.Tensor]): Set of conditions or None.
|
401 |
-
num_samples (int or None): Number of samples to generate when no prompt and no conditions are given.
|
402 |
-
max_gen_len (int): Maximum generation length.
|
403 |
-
use_sampling (bool): Whether to use a sampling strategy or not.
|
404 |
-
temp (float): Sampling temperature.
|
405 |
-
top_k (int): K for "top-k" sampling.
|
406 |
-
top_p (float): P for "top-p" sampling.
|
407 |
-
remove_prompts (bool): Whether to remove prompts from generation or not.
|
408 |
-
Returns:
|
409 |
-
torch.Tensor: Generated tokens.
|
410 |
-
"""
|
411 |
-
assert not self.training, "generation shouldn't be used in training mode."
|
412 |
-
first_param = next(iter(self.parameters()))
|
413 |
-
device = first_param.device
|
414 |
-
|
415 |
-
# Checking all input shapes are consistents.
|
416 |
-
possible_num_samples = []
|
417 |
-
if num_samples is not None:
|
418 |
-
possible_num_samples.append(num_samples)
|
419 |
-
elif prompt is not None:
|
420 |
-
possible_num_samples.append(prompt.shape[0])
|
421 |
-
elif conditions:
|
422 |
-
possible_num_samples.append(len(conditions))
|
423 |
-
else:
|
424 |
-
possible_num_samples.append(1)
|
425 |
-
assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsitent inputs shapes"
|
426 |
-
num_samples = possible_num_samples[0]
|
427 |
-
|
428 |
-
# below we create set of conditions: one conditional and one unconditional
|
429 |
-
# to do that we merge the regular condition together with the null condition
|
430 |
-
# we then do 1 forward pass instead of 2.
|
431 |
-
# the reason for that is two-fold:
|
432 |
-
# 1. it is about x2 faster than doing 2 forward passes
|
433 |
-
# 2. avoid the streaming API treating the 2 passes as part of different time steps
|
434 |
-
# We also support doing two different passes, in particular to ensure that
|
435 |
-
# the padding structure is exactly the same between train anf test.
|
436 |
-
# With a batch size of 1, this can be slower though.
|
437 |
-
cfg_conditions: CFGConditions
|
438 |
-
two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
|
439 |
-
if conditions:
|
440 |
-
null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
|
441 |
-
if two_step_cfg:
|
442 |
-
cfg_conditions = (
|
443 |
-
self.condition_provider(self.condition_provider.tokenize(conditions)),
|
444 |
-
self.condition_provider(self.condition_provider.tokenize(null_conditions)),
|
445 |
-
)
|
446 |
-
else:
|
447 |
-
conditions = conditions + null_conditions
|
448 |
-
tokenized = self.condition_provider.tokenize(conditions)
|
449 |
-
cfg_conditions = self.condition_provider(tokenized)
|
450 |
-
else:
|
451 |
-
cfg_conditions = {}
|
452 |
-
|
453 |
-
if prompt is None:
|
454 |
-
assert num_samples > 0
|
455 |
-
prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
|
456 |
-
|
457 |
-
B, K, T = prompt.shape
|
458 |
-
start_offset = T
|
459 |
-
assert start_offset < max_gen_len
|
460 |
-
|
461 |
-
pattern = self.pattern_provider.get_pattern(max_gen_len)
|
462 |
-
# this token is used as default value for codes that are not generated yet
|
463 |
-
unknown_token = -1
|
464 |
-
|
465 |
-
# we generate codes up to the max_gen_len that will be mapped to the pattern sequence
|
466 |
-
gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
|
467 |
-
# filling the gen_codes with the prompt if needed
|
468 |
-
gen_codes[..., :start_offset] = prompt
|
469 |
-
# create the gen_sequence with proper interleaving from the pattern: [B, K, S]
|
470 |
-
gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
|
471 |
-
# retrieve the start_offset in the sequence:
|
472 |
-
# it is the first sequence step that contains the `start_offset` timestep
|
473 |
-
start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
|
474 |
-
assert start_offset_sequence is not None
|
475 |
-
|
476 |
-
with self.streaming():
|
477 |
-
unconditional_state = self.get_streaming_state()
|
478 |
-
prev_offset = 0
|
479 |
-
gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
|
480 |
-
for offset in range(start_offset_sequence, gen_sequence_len):
|
481 |
-
# get current sequence (note that the streaming API is providing the caching over previous offsets)
|
482 |
-
curr_sequence = gen_sequence[..., prev_offset:offset]
|
483 |
-
curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
|
484 |
-
if check:
|
485 |
-
# check coherence between mask and sequence
|
486 |
-
assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
|
487 |
-
# should never happen as gen_sequence is filled progressively
|
488 |
-
assert not (curr_sequence == unknown_token).any()
|
489 |
-
# sample next token from the model, next token shape is [B, K, 1]
|
490 |
-
next_token = self._sample_next_token(
|
491 |
-
curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
|
492 |
-
cfg_coef=cfg_coef)
|
493 |
-
# ensure the tokens that should be masked are properly set to special_token_id
|
494 |
-
# as the model never output special_token_id
|
495 |
-
valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
|
496 |
-
next_token[~valid_mask] = self.special_token_id
|
497 |
-
# ensure we don't overwrite prompt tokens, we only write over unknown tokens
|
498 |
-
# (then mask tokens should be left as is as well, which is correct)
|
499 |
-
gen_sequence[..., offset:offset+1] = torch.where(
|
500 |
-
gen_sequence[..., offset:offset+1] == unknown_token,
|
501 |
-
next_token, gen_sequence[..., offset:offset+1]
|
502 |
-
)
|
503 |
-
prev_offset = offset
|
504 |
-
if callback is not None:
|
505 |
-
callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
|
506 |
-
unconditional_state.clear()
|
507 |
-
|
508 |
-
# ensure sequence has been entirely filled
|
509 |
-
assert not (gen_sequence == unknown_token).any()
|
510 |
-
# ensure gen_sequence pattern and mask are matching
|
511 |
-
# which means the gen_sequence is valid according to the pattern
|
512 |
-
assert (
|
513 |
-
gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
|
514 |
-
).all()
|
515 |
-
# get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
|
516 |
-
out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
|
517 |
-
|
518 |
-
# sanity checks over the returned codes and corresponding masks
|
519 |
-
assert (out_codes[..., :max_gen_len] != unknown_token).all()
|
520 |
-
assert (out_mask[..., :max_gen_len] == 1).all()
|
521 |
-
|
522 |
-
out_start_offset = start_offset if remove_prompts else 0
|
523 |
-
out_codes = out_codes[..., out_start_offset:max_gen_len]
|
524 |
-
|
525 |
-
# ensure the returned codes are all valid
|
526 |
-
assert (out_codes >= 0).all() and (out_codes <= self.card).all()
|
527 |
-
return out_codes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AtomdffAI/wechatgpt4atom/config.py
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
# encoding:utf-8
|
2 |
-
|
3 |
-
import json
|
4 |
-
import os
|
5 |
-
from common.log import logger
|
6 |
-
|
7 |
-
config = {}
|
8 |
-
|
9 |
-
|
10 |
-
def load_config():
|
11 |
-
global config
|
12 |
-
config_path = "config.json"
|
13 |
-
if not os.path.exists(config_path):
|
14 |
-
raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件')
|
15 |
-
|
16 |
-
config_str = read_file(config_path)
|
17 |
-
# 将json字符串反序列化为dict类型
|
18 |
-
config = json.loads(config_str)
|
19 |
-
config['open_ai_api_key'] = os.getenv('API_KEY')
|
20 |
-
logger.info("[INIT] load config: {}".format(config))
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
def get_root():
|
25 |
-
return os.path.dirname(os.path.abspath( __file__ ))
|
26 |
-
|
27 |
-
|
28 |
-
def read_file(path):
|
29 |
-
with open(path, mode='r', encoding='utf-8') as f:
|
30 |
-
return f.read()
|
31 |
-
|
32 |
-
|
33 |
-
def conf():
|
34 |
-
return config
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_registry.py
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import unittest
|
3 |
-
import torch
|
4 |
-
|
5 |
-
from detectron2.modeling.meta_arch import GeneralizedRCNN
|
6 |
-
from detectron2.utils.registry import _convert_target_to_string, locate
|
7 |
-
|
8 |
-
|
9 |
-
class A:
|
10 |
-
class B:
|
11 |
-
pass
|
12 |
-
|
13 |
-
|
14 |
-
class TestLocate(unittest.TestCase):
|
15 |
-
def _test_obj(self, obj):
|
16 |
-
name = _convert_target_to_string(obj)
|
17 |
-
newobj = locate(name)
|
18 |
-
self.assertIs(obj, newobj)
|
19 |
-
|
20 |
-
def test_basic(self):
|
21 |
-
self._test_obj(GeneralizedRCNN)
|
22 |
-
|
23 |
-
def test_inside_class(self):
|
24 |
-
# requires using __qualname__ instead of __name__
|
25 |
-
self._test_obj(A.B)
|
26 |
-
|
27 |
-
def test_builtin(self):
|
28 |
-
self._test_obj(len)
|
29 |
-
self._test_obj(dict)
|
30 |
-
|
31 |
-
def test_pytorch_optim(self):
|
32 |
-
# pydoc.locate does not work for it
|
33 |
-
self._test_obj(torch.optim.SGD)
|
34 |
-
|
35 |
-
def test_failure(self):
|
36 |
-
with self.assertRaises(ImportError):
|
37 |
-
locate("asdf")
|
38 |
-
|
39 |
-
def test_compress_target(self):
|
40 |
-
from detectron2.data.transforms import RandomCrop
|
41 |
-
|
42 |
-
name = _convert_target_to_string(RandomCrop)
|
43 |
-
# name shouldn't contain 'augmentation_impl'
|
44 |
-
self.assertEqual(name, "detectron2.data.transforms.RandomCrop")
|
45 |
-
self.assertIs(RandomCrop, locate(name))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Barbie Dreamhouse Adventures Hack Apk.md
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Barbie Dreamhouse Adventures Hack APK: Cómo obtener monedas ilimitadas y acceso VIP</h1>
|
3 |
-
<p>¿Te encanta jugar Barbie Dreamhouse Adventures, el juego de simulación donde puedes crear tu propia experiencia Barbie? ¿Te gustaría tener más monedas para comprar nuevos artículos y trajes, o acceder a funciones VIP como habitaciones exclusivas, mascotas y peinados? Si es así, usted podría estar interesado en el uso de un hack apk para el juego. </p>
|
4 |
-
<p>Un apk hack es una versión modificada de la aplicación de juego original que le permite engañar y obtener recursos ilimitados, desbloquear características premium, y las restricciones de bypass. Con un hack apk, se puede disfrutar de Barbie Dreamhouse Aventuras sin gastar dinero o esperar anuncios. </p>
|
5 |
-
<h2>barbie dreamhouse adventures hack apk</h2><br /><p><b><b>Download File</b> ⚹⚹⚹ <a href="https://bltlly.com/2v6Js4">https://bltlly.com/2v6Js4</a></b></p><br /><br />
|
6 |
-
<p>En este artículo, le mostraremos cómo descargar e instalar Barbie Dreamhouse Adventures hack apk, cómo usarlo para obtener monedas ilimitadas y acceso VIP, y cómo jugar el juego con consejos y trucos. ¡Sigue leyendo para saber más! </p>
|
7 |
-
<h2>Cómo descargar e instalar Barbie Dreamhouse aventuras Hack APK</h2>
|
8 |
-
<p>Antes de que pueda utilizar Barbie Dreamhouse aventuras hack apk, es necesario descargarlo de una fuente confiable. Hay muchos sitios web que afirman ofrecer apks hack, pero algunos de ellos pueden ser falsos, anticuados, o infectados con malware. Para evitar cualquier riesgo, se recomienda utilizar [este sitio web]( 1 ), que tiene un enlace verificado para la última versión de Barbie Dreamhouse Adventures hack apk. </p>
|
9 |
-
<p>Una vez que haya descargado el archivo apk hack, es necesario habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de la tienda de aplicaciones oficial. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </p>
|
10 |
-
<p>Ahora puede instalar el apk hack tocando en el archivo y siguiendo las instrucciones. Es posible que necesite conceder algunos permisos para que la aplicación funcione correctamente. Una vez completada la instalación, puede abrir la aplicación y comenzar a jugar. </p>
|
11 |
-
<h2>Cómo utilizar Barbie Dreamhouse aventuras Hack APK</h2>
|
12 |
-
|
13 |
-
<p>Con monedas ilimitadas, puedes comprar todo lo que quieras en el juego, como muebles nuevos, ropa, accesorios, etc. También puedes desbloquear funciones VIP que normalmente solo están disponibles para suscriptores de pago. Estos incluyen habitaciones exclusivas como un spa, un estudio de baile, una sala de mascotas, etc., así como mascotas especiales, peinados, trajes, etc.</p>
|
14 |
-
<p>Con el hack apk, se puede disfrutar de Barbie Dreamhouse Adventures sin limitaciones o interrupciones. Puede diseñar su casa de ensueño, vestir a Barbie y sus amigos, unirse a actividades divertidas y mini juegos, y explorar Malibu con su convertible rosa. </p>
|
15 |
-
<p></p>
|
16 |
-
<h2>Consejos y trucos para Barbie Dreamhouse Adventures Game</h2>
|
17 |
-
<p>Incluso con el hack apk, todavía puede querer algunos consejos y trucos para hacer su juego más divertido y emocionante. Estos son algunos de ellos:</p>
|
18 |
-
<ul>
|
19 |
-
<li>Cómo diseñar tu casa de ensueño y decorar habitaciones: Puedes elegir entre diferentes temas y estilos para tus habitaciones, como moderno, clásico, glam, etc. También puedes mezclar y combinar diferentes elementos para crear tu propio look único. Puedes arrastrar y soltar elementos para colocarlos en cualquier lugar que desees, o usar la función de decoración automática para que el juego lo haga por ti. También puede cambiar el fondo de pantalla, el suelo y el techo de sus habitaciones. </li>
|
20 |
-
<li>Cómo vestir a Barbie y sus amigos en trajes de moda: Usted puede elegir entre una variedad de ropa, zapatos, accesorios y peinados para Barbie y sus amigos. También puede crear sus propios trajes mediante la combinación de diferentes elementos y colores. Puede guardar sus trajes favoritos en su armario y cambiar entre ellos en cualquier momento. También puedes compartir tus atuendos con otros jugadores y obtener comentarios. </li>
|
21 |
-
<li>Cómo participar en actividades divertidas y mini juegos: Puedes unirte a Barbie y sus amigos en varias actividades y mini juegos, como hornear, bailar, nadar, jardinería, etc. Puedes ganar monedas y recompensas completando tareas y desafíos. También puedes descubrir sorpresas y secretos ocultos en el juego. </li>
|
22 |
-
</ul>
|
23 |
-
<h2>Conclusión</h2>
|
24 |
-
|
25 |
-
<p>Si usted es un fan de Barbie y juegos de simulación, usted debe probar definitivamente Barbie Dreamhouse Adventures hack apk. Te dará un nuevo nivel de diversión y emoción. ¡Descárgalo ahora y comienza tu aventura! </p>
|
26 |
-
<h2>Preguntas frecuentes</h2>
|
27 |
-
<p>Aquí hay algunas preguntas frecuentes y respuestas sobre Barbie Dreamhouse aventuras hack apk y el juego:</p>
|
28 |
-
<h3>Es Barbie Dreamhouse aventuras hack apk seguro de usar? </h3>
|
29 |
-
<p>Sí, es seguro de usar siempre y cuando lo descargue de una fuente confiable como [este sitio web]. El apk hack es probado y verificado por muchos usuarios y no contiene ningún virus o malware. Sin embargo, siempre debes tener cuidado al instalar aplicaciones de fuentes desconocidas y conceder permisos solo cuando sea necesario. </p>
|
30 |
-
<h3>¿Voy a conseguir prohibido para el uso de Barbie Dreamhouse aventuras hack apk? </h3>
|
31 |
-
<p>No, no se le prohibió el uso de Barbie Dreamhouse Adventures hack apk. El hack apk está diseñado para ser indetectable por los servidores del juego y no interfiere con la experiencia de otros jugadores. Sin embargo, debes usarlo responsablemente y no abusar de él o presumir de ello a otros jugadores. </p>
|
32 |
-
<h3>¿Puedo actualizar Barbie Dreamhouse aventuras hack apk? </h3>
|
33 |
-
<p>Sí, se puede actualizar Barbie Dreamhouse aventuras hack apk cada vez que hay una nueva versión disponible. Puede comprobar si hay actualizaciones en [este sitio web] o activar la función de actualización automática en el menú de hackeo. Sin embargo, siempre debe hacer copias de seguridad de sus datos antes de actualizar para evitar cualquier pérdida o corrupción. </p>
|
34 |
-
<h3>¿Puedo jugar Barbie Dreamhouse aventuras hack apk offline? </h3>
|
35 |
-
<p>Sí, puedes jugar Barbie Dreamhouse Adventures hack apk offline sin conexión a Internet. Sin embargo, algunas funciones pueden no funcionar correctamente o requerir una verificación en línea. Por ejemplo, es posible que no puedas acceder a funciones VIP o compartir tus atuendos con otros jugadores sin conexión. </p>
|
36 |
-
<h3>¿Puedo jugar Barbie Dreamhouse aventuras hack apk en otros dispositivos? </h3> 64aa2da5cf<br />
|
37 |
-
<br />
|
38 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Buscando Capcut Editor De Vdeo Aplicacin.md
DELETED
@@ -1,72 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Buscando CapCut Editor de Video App Descargar? </h1>
|
3 |
-
<p>Si usted está buscando un editor de vídeo potente y fácil de usar y la aplicación fabricante de vídeo, es posible que desee comprobar CapCut. CapCut es una aplicación gratuita que ofrece todo lo que necesita para crear impresionantes videos de alta calidad para TikTok, YouTube, Instagram, WhatsApp, Facebook y más. En este artículo, le diremos qué es CapCut, por qué debe usarlo, cómo descargarlo e instalarlo en su dispositivo y cómo usarlo para editar y hacer videos. </p>
|
4 |
-
<h2>Qué es CapCut y por qué deberías usarlo</h2>
|
5 |
-
<p>CapCut es una aplicación gratuita todo-en-uno editor de vídeo y fabricante de vídeo que es desarrollado por Bytedance Pte. Ltd., la empresa detrás de TikTok. CapCut era anteriormente conocido como Viamaker, pero ha sido renombrado y mejorado con más características y funciones. Aquí hay algunas razones por las que deberías usar CapCut:</p>
|
6 |
-
<h2>buscando capcut editor de vídeo aplicación</h2><br /><p><b><b>Download</b> >>>>> <a href="https://bltlly.com/2v6JpY">https://bltlly.com/2v6JpY</a></b></p><br /><br />
|
7 |
-
<h3>CapCut es una aplicación gratuita todo-en-uno editor de vídeo y fabricante de vídeo</h3>
|
8 |
-
<p>A diferencia de algunas otras aplicaciones de edición de video que requieren que usted pague por características o suscripciones premium, CapCut es completamente gratuito. Puede acceder a todas las funciones básicas y avanzadas sin limitaciones ni marcas de agua. También puede disfrutar de fuentes y efectos gratuitos dentro de la aplicación que se actualizan semanalmente con las últimas tendencias. </p>
|
9 |
-
<h3>CapCut ofrece funciones de edición de vídeo avanzadas y fáciles de usar</h3>
|
10 |
-
<p>CapCut tiene una interfaz fácil de usar que le permite editar y hacer videos en cuestión de segundos. Puede recortar, dividir, combinar, ajustar la velocidad, acercar/alejar, invertir/rebobinar, congelar, transicionar, animar, estabilizar y mucho más con sus vídeos. También puede usar funciones avanzadas como animación de fotograma clave, cámara lenta suave, clave de croma, Picture-in-Picture (PIP), subtítulos automáticos, texto a voz, seguimiento de movimiento y eliminación de fondo. </p>
|
11 |
-
<h3>CapCut te ayuda a crear vídeos impresionantes para plataformas de redes sociales</h3>
|
12 |
-
|
13 |
-
<h2>Cómo descargar e instalar CapCut en tu dispositivo</h2>
|
14 |
-
<p>CapCut está disponible para dispositivos Android desde Google Play Store, dispositivos iOS desde App Store y otros dispositivos desde Uptodown. Estos son los pasos para descargar e instalar CapCut en su dispositivo:</p>
|
15 |
-
<h3>Descargar CapCut de Google Play Store para dispositivos Android</h3>
|
16 |
-
<ol>
|
17 |
-
<li>Abre Google Play Store en tu dispositivo Android. </li>
|
18 |
-
<li>Buscar "CapCut" o escanear el código QR a continuación. </li>
|
19 |
-
<li>Toque en "Instalar" para descargar e instalar la aplicación. </li>
|
20 |
-
<li>Abra la aplicación y conceda los permisos necesarios. </li>
|
21 |
-
<li>Disfruta editando y haciendo videos con CapCut.</li>
|
22 |
-
</ol>
|
23 |
-
<img src="https://play.google.com/store/apps/apps/details?id=com <h3>Descargar CapCut desde la App Store para dispositivos iOS</h3>
|
24 |
-
<ol>
|
25 |
-
<li>Abrir App Store en tu dispositivo iOS. </li>
|
26 |
-
<li>Buscar "CapCut" o escanear el código QR a continuación. </li>
|
27 |
-
<li>Toque en "Obtener" para descargar e instalar la aplicación. </li>
|
28 |
-
<li>Abra la aplicación y conceda los permisos necesarios. </li>
|
29 |
-
<li>Disfruta editando y haciendo videos con CapCut.</li>
|
30 |
-
</ol>
|
31 |
-
<img src="https:/apps.apple.com/us/app/capcut-video-editor/id1500855883" alt="CapCut App Store Código QR" width="200" height=">
|
32 |
-
<h3>Descargar CapCut de Uptodown para otros dispositivos</h3>
|
33 |
-
<ol>
|
34 |
-
<li>Abra Uptodown en su dispositivo o visite en su navegador. </li>
|
35 |
-
<li>Buscar "CapCut" o escanear el código QR a continuación. </li>
|
36 |
-
<li>Toque en "Descargar APK" para descargar el archivo de la aplicación. </li>
|
37 |
-
<li>Localice y abra el archivo descargado y siga las instrucciones para instalar la aplicación. </li>
|
38 |
-
<li>Abra la aplicación y conceda los permisos necesarios. </li>
|
39 |
-
<li>Disfruta editando y haciendo videos con CapCut.</li>
|
40 |
-
</ol>
|
41 |
-
<img src="https://viamaker.en.uptodown.com/android/download" alt="Código QR de CapCut Uptodown" width="200" height="200">
|
42 |
-
<h2>Cómo usar CapCut para editar y hacer videos</h2>
|
43 |
-
|
44 |
-
<h3>Importar o grabar vídeos con CapCut</h3>
|
45 |
-
<p>Para iniciar un nuevo proyecto, pulse el botón "+" en la pantalla principal de CapCut. Puede optar por importar vídeos de la galería de su dispositivo o grabar nuevos vídeos con la cámara incorporada. También puede utilizar plantillas de otros usuarios o de la biblioteca de la aplicación. Puede seleccionar varios vídeos a la vez y organizarlos en el orden que desee. También puedes previsualizar tus vídeos antes de importarlos. </p>
|
46 |
-
<h3>Recortar, dividir, combinar y ajustar la velocidad de los vídeos con CapCut</h3>
|
47 |
-
<p>Para editar sus vídeos, toque en el clip de vídeo que desea modificar en la línea de tiempo. Puede utilizar la herramienta de recorte para cortar partes no deseadas de su vídeo. También puede utilizar la herramienta de división para dividir el vídeo en dos o más segmentos. También puede utilizar la herramienta de combinación para combinar dos o más clips de vídeo en uno. También puede utilizar la herramienta de velocidad para cambiar la velocidad de reproducción de su vídeo. Puede aplicar curvas de velocidad para crear transiciones suaves entre diferentes velocidades. </p>
|
48 |
-
<h3>Añadir texto, pegatinas, filtros, efectos y música a los vídeos con CapCut</h3>
|
49 |
-
<p>Para mejorar tus videos, toca el botón "+" en la línea de tiempo. Puede agregar texto a sus videos con diferentes fuentes, estilos, colores y animaciones. También puedes añadir pegatinas a tus vídeos desde la biblioteca de la aplicación o desde la galería de tu dispositivo. También puede agregar filtros a sus videos para cambiar su estado de ánimo y tono. También puede agregar efectos a sus videos para crear imágenes increíbles. También puedes agregar música a tus videos desde la biblioteca de la aplicación o desde la biblioteca de música de tu dispositivo. También puede ajustar el volumen, fundido de entrada/salida y sincronización de su música. </p>
|
50 |
-
<h3>Utilice animación de fotograma clave, cámara lenta, clave de croma y estabilización con CapCut</h3>
|
51 |
-
|
52 |
-
<h3>Exportar y compartir vídeos con CapCut</h3>
|
53 |
-
<p>Para exportar y compartir tus videos, toca el botón "Exportar" en la esquina superior derecha de CapCut. Puede elegir la resolución, el formato y la calidad de su vídeo. También puede habilitar el HDR inteligente para una mejor representación del color. También puede obtener una vista previa de su vídeo antes de exportarlo. Una vez que tu vídeo se exporta, puedes compartirlo en TikTok u otras plataformas de redes sociales con un solo toque. </p>
|
54 |
-
<h2>Conclusión y preguntas frecuentes</h2>
|
55 |
-
<p>CapCut es una aplicación gratuita todo-en-uno editor de video y fabricante de video que ofrece todo lo que necesita para crear impresionantes videos de alta calidad para TikTok, YouTube, Instagram, WhatsApp, Facebook y más. Tiene una interfaz fácil de usar que le permite editar y hacer videos en cuestión de segundos. Tiene una rica biblioteca de clips de música y efectos de sonido que puede agregar a sus videos. Tiene una amplia gama de filtros, efectos, colores y pegatinas que puedes agregar a tus videos. Tiene funciones de edición de video avanzadas y fáciles de usar, como animación de fotograma clave, cámara lenta, clave de croma y estabilización. También le permite personalizar la resolución, el formato y la calidad de la exportación de vídeo. También puedes compartir fácilmente tus videos en TikTok y otras plataformas de redes sociales con un solo clic. </p>
|
56 |
-
<p></p>
|
57 |
-
<p>Si está buscando una aplicación de editor de video CapCut, puede seguir los pasos de este artículo para descargar e instalar CapCut en su dispositivo. También puede seguir los consejos de este artículo para usar CapCut para editar y hacer videos. Esperamos que este artículo le resulte útil e informativo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
|
58 |
-
<p>Aquí hay algunas preguntas frecuentes que puede tener sobre CapCut:</p>
|
59 |
-
<ol>
|
60 |
-
<li><b>¿Es seguro usar CapCut? </b></li>
|
61 |
-
<p>Sí, CapCut es seguro de usar. No contiene ningún virus, malware o spyware. Tampoco recopila ni comparte ninguna información personal o confidencial de su dispositivo. Puede usar CapCut sin preocupaciones. </p>
|
62 |
-
|
63 |
-
<p>CapCut es compatible con la mayoría de los dispositivos Android que ejecutan Android 5.0 o superior, y la mayoría de los dispositivos iOS que ejecutan iOS 11.0 o superior. También puedes usar CapCut en otros dispositivos como Windows PC, Mac o Chromebook descargando el archivo APK de Uptodown.</p>
|
64 |
-
<li><b>¿Cómo puedo actualizar CapCut? </b></li>
|
65 |
-
<p>Puede actualizar CapCut visitando Google Play Store o App Store en su dispositivo y buscando actualizaciones. También puede habilitar actualizaciones automáticas para CapCut en la configuración de su dispositivo. Alternativamente, puede visitar Uptodown y descargar la última versión del archivo CapCut APK. </p>
|
66 |
-
<li><b>¿Cómo puedo contactar al soporte de CapCut? </b></li>
|
67 |
-
<p>Puede ponerse en contacto con el soporte de CapCut visitando su sitio web oficial y llenando el formulario de comentarios. También puede enviarlos por correo electrónico a [email protected] o seguirlos en sus cuentas de redes sociales como Facebook, Instagram, Twitter y YouTube.</p>
|
68 |
-
<li><b>¿Cómo puedo aprender más sobre CapCut? </b></li>
|
69 |
-
<p>Usted puede aprender más sobre CapCut visitando su sitio web oficial y leyendo sus entradas de blog, tutoriales, consejos y trucos. También puedes ver sus vídeos en YouTube y aprender de las experiencias y creaciones de otros usuarios. </p>
|
70 |
-
</ol></p> 64aa2da5cf<br />
|
71 |
-
<br />
|
72 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Como Hacer Un Anillo De Plata.md
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Descargar Mod Hungry Shark Evolution Moneda ilimitada Serta Diamond v8.2.0</h1>
|
3 |
-
<p>¿Eres un fan de Hungry Shark Evolution, el juego de aventura acuática lleno de acción donde tomas el control de un tiburón muy hambriento y vas a un alboroto en el océano? ¿Quieres desbloquear más tiburones, accesorios y misiones sin gastar dinero real? Si es así, entonces usted podría estar interesado en la descarga de mod tiburón hambriento evolución ilimitada moneda serta diamante v8.2.0, una versión modificada del juego que afirma darle monedas ilimitadas y diamantes gratis. En este artículo, le diremos qué es Hungry Shark Evolution, qué características tiene, por qué debe descargar el mod apk, cómo hacerlo y qué precauciones y riesgos debe tener en cuenta. </p>
|
4 |
-
<h2>¿Qué es la evolución del tiburón hambriento? </h2>
|
5 |
-
<p>Hungry Shark Evolution es un popular juego para móviles desarrollado por Ubisoft Entertainment que fue lanzado en 2012. Es la quinta entrega de la serie Hungry Shark y se ha descargado más de 100 millones de veces en Google Play Store. El juego también está disponible en dispositivos iOS y Apple TV. </p>
|
6 |
-
<h2>como hacer un anillo de plata</h2><br /><p><b><b>Download</b> –––––>>> <a href="https://bltlly.com/2v6LGG">https://bltlly.com/2v6LGG</a></b></p><br /><br />
|
7 |
-
<p>En Hungry Shark Evolution, puedes elegir entre más de una docena de tiburones únicos y otras criaturas para evolucionar y explorar un mundo abierto tanto por encima como por debajo de las olas. Puede disfrutar de impresionantes gráficos en 3D y efectos de sonido a medida que descubre y devora misteriosas criaturas de las profundidades, reclutar tiburones bebé para aumentar sus poderes depredadores, equipar accesorios impresionantes como láseres, jetpacks y sombreros de copa, encontrar y recoger objetos de bonificación hundidos, completar misiones desafiantes, activar la fiebre del oro para sobrevivir más tiempo y puntuación más alta, participar en eventos regulares en el juego y ganar premios de edición limitada, y más. </p>
|
8 |
-
<h3>Características de la evolución del tiburón hambriento</h3>
|
9 |
-
<p>Algunas de las características principales de Hungry Shark Evolution son:</p>
|
10 |
-
<ul>
|
11 |
-
|
12 |
-
<li>Un mundo abierto tanto por encima como por debajo de las olas que incluye varios lugares como Islas del Pacífico, Mar Arábigo, Mar de China Meridional, Océano Ártico, Mares Jurásicos, Mares prehistóricos.</li>
|
13 |
-
<li>Impresionantes gráficos en 3D y efectos de sonido que te hacen sentir como si estuvieras en el océano. </li>
|
14 |
-
<li>Criaturas misteriosas de las profundidades que puedes descubrir y devorar, como tortugas, rayas, delfines, ballenas, pingüinos, focas, humanos, buzos, submarinos, helicópteros, barcos, minas, medusas, cangrejos, peces, tiburones y más. </li>
|
15 |
-
<li> Tiburones bebé que usted puede reclutar para aumentar sus poderes depredadores, tales como tiburón bebé arrecife, tiburón bebé mako, tiburón martillo bebé, tiburón tigre bebé, tiburón blanco gran bebé, megalodon bebé, mamá grande bebé, bebé sr. snappy, bebé alan, bebé moby dick, ballena asesino bebé, Bebé narval, bebé leo, bebé nessie, bebé dave, bebé kraken. </li>
|
16 |
-
<li>Accesorios impresionantes que puede equipar para mejorar su juego, como láseres, jetpacks, sombreros de copa, gafas de sol, auriculares, paraguas, fuegos artificiales, campos de fuerza, imanes, dispositivos de camuflaje. </li>
|
17 |
-
<li>Objetos de bonificación hundidos que puedes encontrar y recoger para ganar monedas y gemas adicionales. </li>
|
18 |
-
<li>Misiones desafiantes que puedes completar para ganar recompensas y logros. </li>
|
19 |
-
<li>Modo de fiebre del oro que se puede activar para sobrevivir más tiempo y puntuación más alta por comer todo en su camino sin tener daño. </li>
|
20 |
-
<li>Eventos regulares en el juego en los que puedes participar y ganar premios de edición limitada. </li>
|
21 |
-
<li>Tablas de clasificación en línea que puede competir con otros jugadores de todo el mundo. </li>
|
22 |
-
<li>Características sociales de Facebook que puedes usar para compartir tu progreso y desafiar a tus amigos. </li>
|
23 |
-
</ul>
|
24 |
-
<h3> ¿Por qué descargar mod hambriento tiburón evolución ilimitada moneda serta diamante v8.2.0? </h3>
|
25 |
-
|
26 |
-
<p>Si desea disfrutar de todas las características de Hungry Shark Evolution sin gastar dinero o perder el tiempo, es posible que desee descargar mod hambriento tiburón evolución ilimitada moneda serta diamante v8.2.0. Esta es una versión modificada del juego que pretende darle monedas ilimitadas y diamantes gratis. Con este mod apk, puede desbloquear todos los tiburones y accesorios que desee, actualizar sus tiburones al nivel máximo, completar todas las misiones con facilidad, activar el modo de fiebre del oro cuando quieras, y dominar las tablas de clasificación. Suena tentador, ¿verdad? </p>
|
27 |
-
<h4>Beneficios de monedas y diamantes ilimitados</h4>
|
28 |
-
<p>Algunos de los beneficios de tener monedas y diamantes ilimitados en Hungry Shark Evolution son:</p>
|
29 |
-
<ul>
|
30 |
-
<li> Puede desbloquear todos los tiburones y otras criaturas en el juego y evolucionar a su máximo potencial. </li>
|
31 |
-
<li>Puedes equipar todos los accesorios del juego y personalizar tus tiburones según tu preferencia. </li>
|
32 |
-
<li> Puedes comprar todos los artículos en la tienda y usarlos tanto como quieras. </li>
|
33 |
-
<li>Puedes completar todas las misiones en el juego y ganar todas las recompensas y logros. </li>
|
34 |
-
<li> Puede activar el modo de fiebre del oro en cualquier momento que desee y la puntuación más alta que nunca. </li>
|
35 |
-
<li>Puedes participar en todos los eventos del juego y ganar todos los premios de edición limitada. </li>
|
36 |
-
<li>Puedes competir con otros jugadores en las tablas de clasificación online y mostrar tus habilidades. </li>
|
37 |
-
<li>Puedes compartir tu progreso y desafiar a tus amigos en Facebook.</li>
|
38 |
-
</ul>
|
39 |
-
<h4>Cómo descargar e instalar el mod apk</h4>
|
40 |
-
<p>Si usted está interesado en la descarga de mod hambriento tiburón evolución ilimitada moneda serta diamante v8.2.0, aquí están los pasos que debe seguir:</p>
|
41 |
-
<ol>
|
42 |
-
<li>En primer lugar, es necesario desinstalar la versión original de Hungry Shark Evolution desde su dispositivo si lo tiene instalado. </li>
|
43 |
-
|
44 |
-
<li>Después de descargar el archivo, es necesario habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store.</li>
|
45 |
-
<li>Siguiente, es necesario localizar el archivo descargado en el almacenamiento del dispositivo y toque en él para iniciar el proceso de instalación. </li>
|
46 |
-
<li>Siga las instrucciones en la pantalla y espere a que termine la instalación. </li>
|
47 |
-
<li>Finalmente, puede iniciar el juego desde el cajón de la aplicación o la pantalla de inicio y disfrutar de monedas y diamantes ilimitados. </li>
|
48 |
-
</ol>
|
49 |
-
<h <h4>Precauciones y riesgos de usar el mod apk</h4>
|
50 |
-
<p>Si bien descargar mod hungry shark evolution unlimited coin serta diamond v8.2.0 puede parecer una gran idea, también debe ser consciente de algunas precauciones y riesgos que vienen con él. Estos son algunos de ellos:</p>
|
51 |
-
<ul>
|
52 |
-
<li>Usted puede violar los términos y condiciones del juego y obtener prohibido de jugar en línea o acceder a sus características. </li>
|
53 |
-
<li>Puede exponer su dispositivo a malware, virus u otro software dañino que puede dañar sus datos o comprometer su seguridad. </li>
|
54 |
-
<li>Usted puede perder su progreso o datos en el juego si el apk mod no es compatible con su dispositivo o la última versión del juego. </li>
|
55 |
-
<li>Puedes experimentar errores, fallos, fallos o errores en el juego que pueden afectar tu juego o rendimiento. </li>
|
56 |
-
<li>Puedes perder la diversión y el desafío del juego al tener todo desbloqueado e ilimitado. </li>
|
57 |
-
</ul>
|
58 |
-
<p>Por lo tanto, usted debe descargar y utilizar el apk mod a su propio riesgo y discreción. No nos hacemos responsables de las consecuencias que puedan derivarse de su uso. </p>
|
59 |
-
<p></p>
|
60 |
-
<h2>Conclusión</h2>
|
61 |
-
|
62 |
-
<p>Si quieres disfrutar de todas estas características sin gastar dinero o perder tiempo, puedes descargar mod hungry shark evolution unlimited coin serta diamond v8.2.0, una versión modificada del juego que pretende darte monedas ilimitadas y diamantes gratis. Con este mod apk, puede desbloquear todos los tiburones y accesorios que desee, actualizar sus tiburones al nivel máximo, completar todas las misiones con facilidad, activar el modo de fiebre del oro cuando quieras, y dominar las tablas de clasificación. Sin embargo, también debe ser consciente de algunas precauciones y riesgos que vienen con el uso de la apk mod, tales como violar los términos y condiciones del juego, exponer su dispositivo a malware o virus, perder su progreso o datos en el juego, experimentar errores o errores en el juego, o perder la diversión y el desafío del juego. </p>
|
63 |
-
<p>Por lo tanto, usted debe descargar y utilizar el apk mod a su propio riesgo y discreción. Esperamos que este artículo ha sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejarlos en la sección de comentarios a continuación. ¡Gracias por leer! </p>
|
64 |
-
<h3>Preguntas frecuentes</h3>
|
65 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Hungry Shark Evolution y su mod apk:</p>
|
66 |
-
<ol>
|
67 |
-
<li>Q: ¿Es Hungry Shark Evolution libre para jugar? </li>
|
68 |
-
<li>A: Sí, Hungry Shark Evolution es gratis para jugar en dispositivos Android, iOS y Apple TV. Sin embargo, algunas características requieren dinero real para desbloquear o acceder. </li>
|
69 |
-
<li>Q: ¿Cuál es la última versión de Hungry Shark Evolution? </li>
|
70 |
-
<li>A: La última versión de Hungry Shark Evolution a partir de junio de 2023 es v8.2.0. Fue lanzado en mayo de 2023 y agregó nuevas características como nuevos tiburones (Ancient Lava Shark, Ancient alíen Shark), nuevos accesorios (Lava Jetpack), nuevas misiones (Lava World), nuevos eventos (Lava Rush), nuevos logros (Lava Master), nuevas tablas de clasificación (Lava Legends) y más. </li>
|
71 |
-
<li>P: ¿Cómo puedo obtener más monedas y gemas en Hungry Shark Evolution? </li>
|
72 |
-
|
73 |
-
<li>Q: ¿Es el mod apk seguro de usar? </li>
|
74 |
-
<li>A: El mod apk no es un producto oficial de Ubisoft Entertainment y no está respaldado o apoyado por ellos. Por lo tanto, no se garantiza que sea seguro o confiable. Es posible que encuentre malware, virus u otro software dañino que puede dañar su dispositivo o comprometer su seguridad. También puede violar los términos y condiciones del juego y obtener prohibido jugar en línea o acceder a sus características. También puede perder su progreso o datos en el juego si el apk mod no es compatible con su dispositivo o la última versión del juego. También es posible que experimentes errores, fallos, fallos o errores en el juego que pueden afectar tu juego o rendimiento. Por lo tanto, debe utilizar el apk mod a su propio riesgo y discreción. </li>
|
75 |
-
<li>Q: ¿Cómo puedo actualizar el apk mod? </li>
|
76 |
-
<li>A: El apk mod no se actualiza automáticamente y es necesario descargar e instalar la última versión manualmente cada vez que hay una nueva actualización para el juego. Puede consultar las actualizaciones en Internet o utilizar este enlace: [Descargar Mod Hungry Shark Evolution Unlimited Coin Serta Diamond v8.2.0]. Sin embargo, debe tener cuidado con la descarga de fuentes desconocidas y asegurarse de que el archivo es seguro y compatible con su dispositivo y el juego. </li>
|
77 |
-
<li>Q: ¿Cómo puedo desinstalar el apk mod? </li>
|
78 |
-
<li>A: Si desea desinstalar el apk mod, simplemente puede ir a la configuración del dispositivo, encontrar la aplicación, y toque en desinstalar. Sin embargo, debes ser consciente de que perderás todo tu progreso y datos en el juego si lo haces. Si desea mantener su progreso y los datos, puede tratar de copia de seguridad de sus datos antes de desinstalar el apk mod y restaurarlo después de instalar la versión original del juego de Google Play Store o App Store.</li>
|
79 |
-
</ol></p> 64aa2da5cf<br />
|
80 |
-
<br />
|
81 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat/src/app.html
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
<!DOCTYPE html>
|
2 |
-
<html lang="en" class="h-full">
|
3 |
-
<head>
|
4 |
-
<meta charset="utf-8" />
|
5 |
-
<link rel="icon" href="%sveltekit.assets%/favicon.png" />
|
6 |
-
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
|
7 |
-
<title>BetterChat</title>
|
8 |
-
<script>
|
9 |
-
if (
|
10 |
-
localStorage.theme === "dark" ||
|
11 |
-
(!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches)
|
12 |
-
) {
|
13 |
-
document.documentElement.classList.add("dark");
|
14 |
-
}
|
15 |
-
|
16 |
-
// For some reason, Sveltekit doesn't let us load env variables from .env here, so we load it from hooks.server.ts
|
17 |
-
window.gaId = "%gaId%";
|
18 |
-
</script>
|
19 |
-
%sveltekit.head%
|
20 |
-
</head>
|
21 |
-
<body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900">
|
22 |
-
<div class="contents h-full">%sveltekit.body%</div>
|
23 |
-
|
24 |
-
<!-- Google Tag Manager -->
|
25 |
-
<script>
|
26 |
-
if (window.gaId) {
|
27 |
-
const script = document.createElement("script");
|
28 |
-
script.src = "https://www.googletagmanager.com/gtag/js?id=" + window.gaId;
|
29 |
-
script.async = true;
|
30 |
-
document.head.appendChild(script);
|
31 |
-
|
32 |
-
window.dataLayer = window.dataLayer || [];
|
33 |
-
function gtag() {
|
34 |
-
dataLayer.push(arguments);
|
35 |
-
}
|
36 |
-
gtag("js", new Date());
|
37 |
-
/// ^ See https://developers.google.com/tag-platform/gtagjs/install
|
38 |
-
gtag("config", window.gaId);
|
39 |
-
gtag("consent", "default", { ad_storage: "denied", analytics_storage: "denied" });
|
40 |
-
/// ^ See https://developers.google.com/tag-platform/gtagjs/reference#consent
|
41 |
-
/// TODO: ask the user for their consent and update this with gtag('consent', 'update')
|
42 |
-
}
|
43 |
-
</script>
|
44 |
-
</body>
|
45 |
-
</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BetterAPI/BetterChat_new/src/lib/updateSettings.ts
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
import { invalidate } from "$app/navigation";
|
2 |
-
import { base } from "$app/paths";
|
3 |
-
import { error } from "$lib/stores/errors";
|
4 |
-
import type { Settings } from "./types/Settings";
|
5 |
-
import { UrlDependency } from "./types/UrlDependency";
|
6 |
-
|
7 |
-
export async function updateSettings(
|
8 |
-
settings: Partial<Omit<Settings, "sessionId">>
|
9 |
-
): Promise<boolean> {
|
10 |
-
try {
|
11 |
-
const res = await fetch(`${base}/settings`, {
|
12 |
-
method: "PATCH",
|
13 |
-
headers: { "Content-Type": "application/json" },
|
14 |
-
body: JSON.stringify(settings),
|
15 |
-
});
|
16 |
-
if (!res.ok) {
|
17 |
-
error.set("Error while updating settings, try again.");
|
18 |
-
return false;
|
19 |
-
}
|
20 |
-
await invalidate(UrlDependency.Settings);
|
21 |
-
return true;
|
22 |
-
} catch (err) {
|
23 |
-
console.error(err);
|
24 |
-
error.set(String(err));
|
25 |
-
return false;
|
26 |
-
}
|
27 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/default_styles.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
from typing import Dict
|
2 |
-
|
3 |
-
from .style import Style
|
4 |
-
|
5 |
-
DEFAULT_STYLES: Dict[str, Style] = {
|
6 |
-
"none": Style.null(),
|
7 |
-
"reset": Style(
|
8 |
-
color="default",
|
9 |
-
bgcolor="default",
|
10 |
-
dim=False,
|
11 |
-
bold=False,
|
12 |
-
italic=False,
|
13 |
-
underline=False,
|
14 |
-
blink=False,
|
15 |
-
blink2=False,
|
16 |
-
reverse=False,
|
17 |
-
conceal=False,
|
18 |
-
strike=False,
|
19 |
-
),
|
20 |
-
"dim": Style(dim=True),
|
21 |
-
"bright": Style(dim=False),
|
22 |
-
"bold": Style(bold=True),
|
23 |
-
"strong": Style(bold=True),
|
24 |
-
"code": Style(reverse=True, bold=True),
|
25 |
-
"italic": Style(italic=True),
|
26 |
-
"emphasize": Style(italic=True),
|
27 |
-
"underline": Style(underline=True),
|
28 |
-
"blink": Style(blink=True),
|
29 |
-
"blink2": Style(blink2=True),
|
30 |
-
"reverse": Style(reverse=True),
|
31 |
-
"strike": Style(strike=True),
|
32 |
-
"black": Style(color="black"),
|
33 |
-
"red": Style(color="red"),
|
34 |
-
"green": Style(color="green"),
|
35 |
-
"yellow": Style(color="yellow"),
|
36 |
-
"magenta": Style(color="magenta"),
|
37 |
-
"cyan": Style(color="cyan"),
|
38 |
-
"white": Style(color="white"),
|
39 |
-
"inspect.attr": Style(color="yellow", italic=True),
|
40 |
-
"inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
|
41 |
-
"inspect.callable": Style(bold=True, color="red"),
|
42 |
-
"inspect.async_def": Style(italic=True, color="bright_cyan"),
|
43 |
-
"inspect.def": Style(italic=True, color="bright_cyan"),
|
44 |
-
"inspect.class": Style(italic=True, color="bright_cyan"),
|
45 |
-
"inspect.error": Style(bold=True, color="red"),
|
46 |
-
"inspect.equals": Style(),
|
47 |
-
"inspect.help": Style(color="cyan"),
|
48 |
-
"inspect.doc": Style(dim=True),
|
49 |
-
"inspect.value.border": Style(color="green"),
|
50 |
-
"live.ellipsis": Style(bold=True, color="red"),
|
51 |
-
"layout.tree.row": Style(dim=False, color="red"),
|
52 |
-
"layout.tree.column": Style(dim=False, color="blue"),
|
53 |
-
"logging.keyword": Style(bold=True, color="yellow"),
|
54 |
-
"logging.level.notset": Style(dim=True),
|
55 |
-
"logging.level.debug": Style(color="green"),
|
56 |
-
"logging.level.info": Style(color="blue"),
|
57 |
-
"logging.level.warning": Style(color="red"),
|
58 |
-
"logging.level.error": Style(color="red", bold=True),
|
59 |
-
"logging.level.critical": Style(color="red", bold=True, reverse=True),
|
60 |
-
"log.level": Style.null(),
|
61 |
-
"log.time": Style(color="cyan", dim=True),
|
62 |
-
"log.message": Style.null(),
|
63 |
-
"log.path": Style(dim=True),
|
64 |
-
"repr.ellipsis": Style(color="yellow"),
|
65 |
-
"repr.indent": Style(color="green", dim=True),
|
66 |
-
"repr.error": Style(color="red", bold=True),
|
67 |
-
"repr.str": Style(color="green", italic=False, bold=False),
|
68 |
-
"repr.brace": Style(bold=True),
|
69 |
-
"repr.comma": Style(bold=True),
|
70 |
-
"repr.ipv4": Style(bold=True, color="bright_green"),
|
71 |
-
"repr.ipv6": Style(bold=True, color="bright_green"),
|
72 |
-
"repr.eui48": Style(bold=True, color="bright_green"),
|
73 |
-
"repr.eui64": Style(bold=True, color="bright_green"),
|
74 |
-
"repr.tag_start": Style(bold=True),
|
75 |
-
"repr.tag_name": Style(color="bright_magenta", bold=True),
|
76 |
-
"repr.tag_contents": Style(color="default"),
|
77 |
-
"repr.tag_end": Style(bold=True),
|
78 |
-
"repr.attrib_name": Style(color="yellow", italic=False),
|
79 |
-
"repr.attrib_equal": Style(bold=True),
|
80 |
-
"repr.attrib_value": Style(color="magenta", italic=False),
|
81 |
-
"repr.number": Style(color="cyan", bold=True, italic=False),
|
82 |
-
"repr.number_complex": Style(color="cyan", bold=True, italic=False), # same
|
83 |
-
"repr.bool_true": Style(color="bright_green", italic=True),
|
84 |
-
"repr.bool_false": Style(color="bright_red", italic=True),
|
85 |
-
"repr.none": Style(color="magenta", italic=True),
|
86 |
-
"repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
|
87 |
-
"repr.uuid": Style(color="bright_yellow", bold=False),
|
88 |
-
"repr.call": Style(color="magenta", bold=True),
|
89 |
-
"repr.path": Style(color="magenta"),
|
90 |
-
"repr.filename": Style(color="bright_magenta"),
|
91 |
-
"rule.line": Style(color="bright_green"),
|
92 |
-
"rule.text": Style.null(),
|
93 |
-
"json.brace": Style(bold=True),
|
94 |
-
"json.bool_true": Style(color="bright_green", italic=True),
|
95 |
-
"json.bool_false": Style(color="bright_red", italic=True),
|
96 |
-
"json.null": Style(color="magenta", italic=True),
|
97 |
-
"json.number": Style(color="cyan", bold=True, italic=False),
|
98 |
-
"json.str": Style(color="green", italic=False, bold=False),
|
99 |
-
"json.key": Style(color="blue", bold=True),
|
100 |
-
"prompt": Style.null(),
|
101 |
-
"prompt.choices": Style(color="magenta", bold=True),
|
102 |
-
"prompt.default": Style(color="cyan", bold=True),
|
103 |
-
"prompt.invalid": Style(color="red"),
|
104 |
-
"prompt.invalid.choice": Style(color="red"),
|
105 |
-
"pretty": Style.null(),
|
106 |
-
"scope.border": Style(color="blue"),
|
107 |
-
"scope.key": Style(color="yellow", italic=True),
|
108 |
-
"scope.key.special": Style(color="yellow", italic=True, dim=True),
|
109 |
-
"scope.equals": Style(color="red"),
|
110 |
-
"table.header": Style(bold=True),
|
111 |
-
"table.footer": Style(bold=True),
|
112 |
-
"table.cell": Style.null(),
|
113 |
-
"table.title": Style(italic=True),
|
114 |
-
"table.caption": Style(italic=True, dim=True),
|
115 |
-
"traceback.error": Style(color="red", italic=True),
|
116 |
-
"traceback.border.syntax_error": Style(color="bright_red"),
|
117 |
-
"traceback.border": Style(color="red"),
|
118 |
-
"traceback.text": Style.null(),
|
119 |
-
"traceback.title": Style(color="red", bold=True),
|
120 |
-
"traceback.exc_type": Style(color="bright_red", bold=True),
|
121 |
-
"traceback.exc_value": Style.null(),
|
122 |
-
"traceback.offset": Style(color="bright_red", bold=True),
|
123 |
-
"bar.back": Style(color="grey23"),
|
124 |
-
"bar.complete": Style(color="rgb(249,38,114)"),
|
125 |
-
"bar.finished": Style(color="rgb(114,156,31)"),
|
126 |
-
"bar.pulse": Style(color="rgb(249,38,114)"),
|
127 |
-
"progress.description": Style.null(),
|
128 |
-
"progress.filesize": Style(color="green"),
|
129 |
-
"progress.filesize.total": Style(color="green"),
|
130 |
-
"progress.download": Style(color="green"),
|
131 |
-
"progress.elapsed": Style(color="yellow"),
|
132 |
-
"progress.percentage": Style(color="magenta"),
|
133 |
-
"progress.remaining": Style(color="cyan"),
|
134 |
-
"progress.data.speed": Style(color="red"),
|
135 |
-
"progress.spinner": Style(color="green"),
|
136 |
-
"status.spinner": Style(color="green"),
|
137 |
-
"tree": Style(),
|
138 |
-
"tree.line": Style(),
|
139 |
-
"markdown.paragraph": Style(),
|
140 |
-
"markdown.text": Style(),
|
141 |
-
"markdown.em": Style(italic=True),
|
142 |
-
"markdown.emph": Style(italic=True), # For commonmark backwards compatibility
|
143 |
-
"markdown.strong": Style(bold=True),
|
144 |
-
"markdown.code": Style(bold=True, color="cyan", bgcolor="black"),
|
145 |
-
"markdown.code_block": Style(color="cyan", bgcolor="black"),
|
146 |
-
"markdown.block_quote": Style(color="magenta"),
|
147 |
-
"markdown.list": Style(color="cyan"),
|
148 |
-
"markdown.item": Style(),
|
149 |
-
"markdown.item.bullet": Style(color="yellow", bold=True),
|
150 |
-
"markdown.item.number": Style(color="yellow", bold=True),
|
151 |
-
"markdown.hr": Style(color="yellow"),
|
152 |
-
"markdown.h1.border": Style(),
|
153 |
-
"markdown.h1": Style(bold=True),
|
154 |
-
"markdown.h2": Style(bold=True, underline=True),
|
155 |
-
"markdown.h3": Style(bold=True),
|
156 |
-
"markdown.h4": Style(bold=True, dim=True),
|
157 |
-
"markdown.h5": Style(underline=True),
|
158 |
-
"markdown.h6": Style(italic=True),
|
159 |
-
"markdown.h7": Style(italic=True, dim=True),
|
160 |
-
"markdown.link": Style(color="bright_blue"),
|
161 |
-
"markdown.link_url": Style(color="blue", underline=True),
|
162 |
-
"markdown.s": Style(strike=True),
|
163 |
-
"iso8601.date": Style(color="blue"),
|
164 |
-
"iso8601.time": Style(color="magenta"),
|
165 |
-
"iso8601.timezone": Style(color="yellow"),
|
166 |
-
}
|
167 |
-
|
168 |
-
|
169 |
-
if __name__ == "__main__": # pragma: no cover
|
170 |
-
import argparse
|
171 |
-
import io
|
172 |
-
|
173 |
-
from pip._vendor.rich.console import Console
|
174 |
-
from pip._vendor.rich.table import Table
|
175 |
-
from pip._vendor.rich.text import Text
|
176 |
-
|
177 |
-
parser = argparse.ArgumentParser()
|
178 |
-
parser.add_argument("--html", action="store_true", help="Export as HTML table")
|
179 |
-
args = parser.parse_args()
|
180 |
-
html: bool = args.html
|
181 |
-
console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
|
182 |
-
|
183 |
-
table = Table("Name", "Styling")
|
184 |
-
|
185 |
-
for style_name, style in DEFAULT_STYLES.items():
|
186 |
-
table.add_row(Text(style_name, style=style), str(style))
|
187 |
-
|
188 |
-
console.print(table)
|
189 |
-
if html:
|
190 |
-
print(console.export_html(inline_styles=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
DELETED
@@ -1,921 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
SecureTranport support for urllib3 via ctypes.
|
3 |
-
|
4 |
-
This makes platform-native TLS available to urllib3 users on macOS without the
|
5 |
-
use of a compiler. This is an important feature because the Python Package
|
6 |
-
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
|
7 |
-
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
|
8 |
-
this is to give macOS users an alternative solution to the problem, and that
|
9 |
-
solution is to use SecureTransport.
|
10 |
-
|
11 |
-
We use ctypes here because this solution must not require a compiler. That's
|
12 |
-
because pip is not allowed to require a compiler either.
|
13 |
-
|
14 |
-
This is not intended to be a seriously long-term solution to this problem.
|
15 |
-
The hope is that PEP 543 will eventually solve this issue for us, at which
|
16 |
-
point we can retire this contrib module. But in the short term, we need to
|
17 |
-
solve the impending tire fire that is Python on Mac without this kind of
|
18 |
-
contrib module. So...here we are.
|
19 |
-
|
20 |
-
To use this module, simply import and inject it::
|
21 |
-
|
22 |
-
import pip._vendor.urllib3.contrib.securetransport as securetransport
|
23 |
-
securetransport.inject_into_urllib3()
|
24 |
-
|
25 |
-
Happy TLSing!
|
26 |
-
|
27 |
-
This code is a bastardised version of the code found in Will Bond's oscrypto
|
28 |
-
library. An enormous debt is owed to him for blazing this trail for us. For
|
29 |
-
that reason, this code should be considered to be covered both by urllib3's
|
30 |
-
license and by oscrypto's:
|
31 |
-
|
32 |
-
.. code-block::
|
33 |
-
|
34 |
-
Copyright (c) 2015-2016 Will Bond <[email protected]>
|
35 |
-
|
36 |
-
Permission is hereby granted, free of charge, to any person obtaining a
|
37 |
-
copy of this software and associated documentation files (the "Software"),
|
38 |
-
to deal in the Software without restriction, including without limitation
|
39 |
-
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
40 |
-
and/or sell copies of the Software, and to permit persons to whom the
|
41 |
-
Software is furnished to do so, subject to the following conditions:
|
42 |
-
|
43 |
-
The above copyright notice and this permission notice shall be included in
|
44 |
-
all copies or substantial portions of the Software.
|
45 |
-
|
46 |
-
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
47 |
-
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
48 |
-
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
49 |
-
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
50 |
-
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
51 |
-
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
52 |
-
DEALINGS IN THE SOFTWARE.
|
53 |
-
"""
|
54 |
-
from __future__ import absolute_import
|
55 |
-
|
56 |
-
import contextlib
|
57 |
-
import ctypes
|
58 |
-
import errno
|
59 |
-
import os.path
|
60 |
-
import shutil
|
61 |
-
import socket
|
62 |
-
import ssl
|
63 |
-
import struct
|
64 |
-
import threading
|
65 |
-
import weakref
|
66 |
-
|
67 |
-
from pip._vendor import six
|
68 |
-
|
69 |
-
from .. import util
|
70 |
-
from ..util.ssl_ import PROTOCOL_TLS_CLIENT
|
71 |
-
from ._securetransport.bindings import CoreFoundation, Security, SecurityConst
|
72 |
-
from ._securetransport.low_level import (
|
73 |
-
_assert_no_error,
|
74 |
-
_build_tls_unknown_ca_alert,
|
75 |
-
_cert_array_from_pem,
|
76 |
-
_create_cfstring_array,
|
77 |
-
_load_client_cert_chain,
|
78 |
-
_temporary_keychain,
|
79 |
-
)
|
80 |
-
|
81 |
-
try: # Platform-specific: Python 2
|
82 |
-
from socket import _fileobject
|
83 |
-
except ImportError: # Platform-specific: Python 3
|
84 |
-
_fileobject = None
|
85 |
-
from ..packages.backports.makefile import backport_makefile
|
86 |
-
|
87 |
-
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
|
88 |
-
|
89 |
-
# SNI always works
|
90 |
-
HAS_SNI = True
|
91 |
-
|
92 |
-
orig_util_HAS_SNI = util.HAS_SNI
|
93 |
-
orig_util_SSLContext = util.ssl_.SSLContext
|
94 |
-
|
95 |
-
# This dictionary is used by the read callback to obtain a handle to the
|
96 |
-
# calling wrapped socket. This is a pretty silly approach, but for now it'll
|
97 |
-
# do. I feel like I should be able to smuggle a handle to the wrapped socket
|
98 |
-
# directly in the SSLConnectionRef, but for now this approach will work I
|
99 |
-
# guess.
|
100 |
-
#
|
101 |
-
# We need to lock around this structure for inserts, but we don't do it for
|
102 |
-
# reads/writes in the callbacks. The reasoning here goes as follows:
|
103 |
-
#
|
104 |
-
# 1. It is not possible to call into the callbacks before the dictionary is
|
105 |
-
# populated, so once in the callback the id must be in the dictionary.
|
106 |
-
# 2. The callbacks don't mutate the dictionary, they only read from it, and
|
107 |
-
# so cannot conflict with any of the insertions.
|
108 |
-
#
|
109 |
-
# This is good: if we had to lock in the callbacks we'd drastically slow down
|
110 |
-
# the performance of this code.
|
111 |
-
_connection_refs = weakref.WeakValueDictionary()
|
112 |
-
_connection_ref_lock = threading.Lock()
|
113 |
-
|
114 |
-
# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
|
115 |
-
# for no better reason than we need *a* limit, and this one is right there.
|
116 |
-
SSL_WRITE_BLOCKSIZE = 16384
|
117 |
-
|
118 |
-
# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
|
119 |
-
# individual cipher suites. We need to do this because this is how
|
120 |
-
# SecureTransport wants them.
|
121 |
-
CIPHER_SUITES = [
|
122 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
123 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
124 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
125 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
126 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
127 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
128 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
|
129 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
|
130 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
131 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
132 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
133 |
-
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
134 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
135 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
136 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
137 |
-
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
138 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
139 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
140 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
141 |
-
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
142 |
-
SecurityConst.TLS_AES_256_GCM_SHA384,
|
143 |
-
SecurityConst.TLS_AES_128_GCM_SHA256,
|
144 |
-
SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
145 |
-
SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
|
146 |
-
SecurityConst.TLS_AES_128_CCM_8_SHA256,
|
147 |
-
SecurityConst.TLS_AES_128_CCM_SHA256,
|
148 |
-
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
|
149 |
-
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
|
150 |
-
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
|
151 |
-
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
|
152 |
-
]
|
153 |
-
|
154 |
-
# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
|
155 |
-
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
|
156 |
-
# TLSv1 to 1.2 are supported on macOS 10.8+
|
157 |
-
_protocol_to_min_max = {
|
158 |
-
util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
|
159 |
-
PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
|
160 |
-
}
|
161 |
-
|
162 |
-
if hasattr(ssl, "PROTOCOL_SSLv2"):
|
163 |
-
_protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
|
164 |
-
SecurityConst.kSSLProtocol2,
|
165 |
-
SecurityConst.kSSLProtocol2,
|
166 |
-
)
|
167 |
-
if hasattr(ssl, "PROTOCOL_SSLv3"):
|
168 |
-
_protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
|
169 |
-
SecurityConst.kSSLProtocol3,
|
170 |
-
SecurityConst.kSSLProtocol3,
|
171 |
-
)
|
172 |
-
if hasattr(ssl, "PROTOCOL_TLSv1"):
|
173 |
-
_protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
|
174 |
-
SecurityConst.kTLSProtocol1,
|
175 |
-
SecurityConst.kTLSProtocol1,
|
176 |
-
)
|
177 |
-
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
|
178 |
-
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
|
179 |
-
SecurityConst.kTLSProtocol11,
|
180 |
-
SecurityConst.kTLSProtocol11,
|
181 |
-
)
|
182 |
-
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
|
183 |
-
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
|
184 |
-
SecurityConst.kTLSProtocol12,
|
185 |
-
SecurityConst.kTLSProtocol12,
|
186 |
-
)
|
187 |
-
|
188 |
-
|
189 |
-
def inject_into_urllib3():
|
190 |
-
"""
|
191 |
-
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
|
192 |
-
"""
|
193 |
-
util.SSLContext = SecureTransportContext
|
194 |
-
util.ssl_.SSLContext = SecureTransportContext
|
195 |
-
util.HAS_SNI = HAS_SNI
|
196 |
-
util.ssl_.HAS_SNI = HAS_SNI
|
197 |
-
util.IS_SECURETRANSPORT = True
|
198 |
-
util.ssl_.IS_SECURETRANSPORT = True
|
199 |
-
|
200 |
-
|
201 |
-
def extract_from_urllib3():
|
202 |
-
"""
|
203 |
-
Undo monkey-patching by :func:`inject_into_urllib3`.
|
204 |
-
"""
|
205 |
-
util.SSLContext = orig_util_SSLContext
|
206 |
-
util.ssl_.SSLContext = orig_util_SSLContext
|
207 |
-
util.HAS_SNI = orig_util_HAS_SNI
|
208 |
-
util.ssl_.HAS_SNI = orig_util_HAS_SNI
|
209 |
-
util.IS_SECURETRANSPORT = False
|
210 |
-
util.ssl_.IS_SECURETRANSPORT = False
|
211 |
-
|
212 |
-
|
213 |
-
def _read_callback(connection_id, data_buffer, data_length_pointer):
|
214 |
-
"""
|
215 |
-
SecureTransport read callback. This is called by ST to request that data
|
216 |
-
be returned from the socket.
|
217 |
-
"""
|
218 |
-
wrapped_socket = None
|
219 |
-
try:
|
220 |
-
wrapped_socket = _connection_refs.get(connection_id)
|
221 |
-
if wrapped_socket is None:
|
222 |
-
return SecurityConst.errSSLInternal
|
223 |
-
base_socket = wrapped_socket.socket
|
224 |
-
|
225 |
-
requested_length = data_length_pointer[0]
|
226 |
-
|
227 |
-
timeout = wrapped_socket.gettimeout()
|
228 |
-
error = None
|
229 |
-
read_count = 0
|
230 |
-
|
231 |
-
try:
|
232 |
-
while read_count < requested_length:
|
233 |
-
if timeout is None or timeout >= 0:
|
234 |
-
if not util.wait_for_read(base_socket, timeout):
|
235 |
-
raise socket.error(errno.EAGAIN, "timed out")
|
236 |
-
|
237 |
-
remaining = requested_length - read_count
|
238 |
-
buffer = (ctypes.c_char * remaining).from_address(
|
239 |
-
data_buffer + read_count
|
240 |
-
)
|
241 |
-
chunk_size = base_socket.recv_into(buffer, remaining)
|
242 |
-
read_count += chunk_size
|
243 |
-
if not chunk_size:
|
244 |
-
if not read_count:
|
245 |
-
return SecurityConst.errSSLClosedGraceful
|
246 |
-
break
|
247 |
-
except (socket.error) as e:
|
248 |
-
error = e.errno
|
249 |
-
|
250 |
-
if error is not None and error != errno.EAGAIN:
|
251 |
-
data_length_pointer[0] = read_count
|
252 |
-
if error == errno.ECONNRESET or error == errno.EPIPE:
|
253 |
-
return SecurityConst.errSSLClosedAbort
|
254 |
-
raise
|
255 |
-
|
256 |
-
data_length_pointer[0] = read_count
|
257 |
-
|
258 |
-
if read_count != requested_length:
|
259 |
-
return SecurityConst.errSSLWouldBlock
|
260 |
-
|
261 |
-
return 0
|
262 |
-
except Exception as e:
|
263 |
-
if wrapped_socket is not None:
|
264 |
-
wrapped_socket._exception = e
|
265 |
-
return SecurityConst.errSSLInternal
|
266 |
-
|
267 |
-
|
268 |
-
def _write_callback(connection_id, data_buffer, data_length_pointer):
|
269 |
-
"""
|
270 |
-
SecureTransport write callback. This is called by ST to request that data
|
271 |
-
actually be sent on the network.
|
272 |
-
"""
|
273 |
-
wrapped_socket = None
|
274 |
-
try:
|
275 |
-
wrapped_socket = _connection_refs.get(connection_id)
|
276 |
-
if wrapped_socket is None:
|
277 |
-
return SecurityConst.errSSLInternal
|
278 |
-
base_socket = wrapped_socket.socket
|
279 |
-
|
280 |
-
bytes_to_write = data_length_pointer[0]
|
281 |
-
data = ctypes.string_at(data_buffer, bytes_to_write)
|
282 |
-
|
283 |
-
timeout = wrapped_socket.gettimeout()
|
284 |
-
error = None
|
285 |
-
sent = 0
|
286 |
-
|
287 |
-
try:
|
288 |
-
while sent < bytes_to_write:
|
289 |
-
if timeout is None or timeout >= 0:
|
290 |
-
if not util.wait_for_write(base_socket, timeout):
|
291 |
-
raise socket.error(errno.EAGAIN, "timed out")
|
292 |
-
chunk_sent = base_socket.send(data)
|
293 |
-
sent += chunk_sent
|
294 |
-
|
295 |
-
# This has some needless copying here, but I'm not sure there's
|
296 |
-
# much value in optimising this data path.
|
297 |
-
data = data[chunk_sent:]
|
298 |
-
except (socket.error) as e:
|
299 |
-
error = e.errno
|
300 |
-
|
301 |
-
if error is not None and error != errno.EAGAIN:
|
302 |
-
data_length_pointer[0] = sent
|
303 |
-
if error == errno.ECONNRESET or error == errno.EPIPE:
|
304 |
-
return SecurityConst.errSSLClosedAbort
|
305 |
-
raise
|
306 |
-
|
307 |
-
data_length_pointer[0] = sent
|
308 |
-
|
309 |
-
if sent != bytes_to_write:
|
310 |
-
return SecurityConst.errSSLWouldBlock
|
311 |
-
|
312 |
-
return 0
|
313 |
-
except Exception as e:
|
314 |
-
if wrapped_socket is not None:
|
315 |
-
wrapped_socket._exception = e
|
316 |
-
return SecurityConst.errSSLInternal
|
317 |
-
|
318 |
-
|
319 |
-
# We need to keep these two objects references alive: if they get GC'd while
|
320 |
-
# in use then SecureTransport could attempt to call a function that is in freed
|
321 |
-
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
|
322 |
-
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
|
323 |
-
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
|
324 |
-
|
325 |
-
|
326 |
-
class WrappedSocket(object):
|
327 |
-
"""
|
328 |
-
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
|
329 |
-
|
330 |
-
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
|
331 |
-
collector of PyPy.
|
332 |
-
"""
|
333 |
-
|
334 |
-
def __init__(self, socket):
|
335 |
-
self.socket = socket
|
336 |
-
self.context = None
|
337 |
-
self._makefile_refs = 0
|
338 |
-
self._closed = False
|
339 |
-
self._exception = None
|
340 |
-
self._keychain = None
|
341 |
-
self._keychain_dir = None
|
342 |
-
self._client_cert_chain = None
|
343 |
-
|
344 |
-
# We save off the previously-configured timeout and then set it to
|
345 |
-
# zero. This is done because we use select and friends to handle the
|
346 |
-
# timeouts, but if we leave the timeout set on the lower socket then
|
347 |
-
# Python will "kindly" call select on that socket again for us. Avoid
|
348 |
-
# that by forcing the timeout to zero.
|
349 |
-
self._timeout = self.socket.gettimeout()
|
350 |
-
self.socket.settimeout(0)
|
351 |
-
|
352 |
-
@contextlib.contextmanager
|
353 |
-
def _raise_on_error(self):
|
354 |
-
"""
|
355 |
-
A context manager that can be used to wrap calls that do I/O from
|
356 |
-
SecureTransport. If any of the I/O callbacks hit an exception, this
|
357 |
-
context manager will correctly propagate the exception after the fact.
|
358 |
-
This avoids silently swallowing those exceptions.
|
359 |
-
|
360 |
-
It also correctly forces the socket closed.
|
361 |
-
"""
|
362 |
-
self._exception = None
|
363 |
-
|
364 |
-
# We explicitly don't catch around this yield because in the unlikely
|
365 |
-
# event that an exception was hit in the block we don't want to swallow
|
366 |
-
# it.
|
367 |
-
yield
|
368 |
-
if self._exception is not None:
|
369 |
-
exception, self._exception = self._exception, None
|
370 |
-
self.close()
|
371 |
-
raise exception
|
372 |
-
|
373 |
-
def _set_ciphers(self):
|
374 |
-
"""
|
375 |
-
Sets up the allowed ciphers. By default this matches the set in
|
376 |
-
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
|
377 |
-
custom and doesn't allow changing at this time, mostly because parsing
|
378 |
-
OpenSSL cipher strings is going to be a freaking nightmare.
|
379 |
-
"""
|
380 |
-
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
|
381 |
-
result = Security.SSLSetEnabledCiphers(
|
382 |
-
self.context, ciphers, len(CIPHER_SUITES)
|
383 |
-
)
|
384 |
-
_assert_no_error(result)
|
385 |
-
|
386 |
-
def _set_alpn_protocols(self, protocols):
|
387 |
-
"""
|
388 |
-
Sets up the ALPN protocols on the context.
|
389 |
-
"""
|
390 |
-
if not protocols:
|
391 |
-
return
|
392 |
-
protocols_arr = _create_cfstring_array(protocols)
|
393 |
-
try:
|
394 |
-
result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
|
395 |
-
_assert_no_error(result)
|
396 |
-
finally:
|
397 |
-
CoreFoundation.CFRelease(protocols_arr)
|
398 |
-
|
399 |
-
def _custom_validate(self, verify, trust_bundle):
|
400 |
-
"""
|
401 |
-
Called when we have set custom validation. We do this in two cases:
|
402 |
-
first, when cert validation is entirely disabled; and second, when
|
403 |
-
using a custom trust DB.
|
404 |
-
Raises an SSLError if the connection is not trusted.
|
405 |
-
"""
|
406 |
-
# If we disabled cert validation, just say: cool.
|
407 |
-
if not verify:
|
408 |
-
return
|
409 |
-
|
410 |
-
successes = (
|
411 |
-
SecurityConst.kSecTrustResultUnspecified,
|
412 |
-
SecurityConst.kSecTrustResultProceed,
|
413 |
-
)
|
414 |
-
try:
|
415 |
-
trust_result = self._evaluate_trust(trust_bundle)
|
416 |
-
if trust_result in successes:
|
417 |
-
return
|
418 |
-
reason = "error code: %d" % (trust_result,)
|
419 |
-
except Exception as e:
|
420 |
-
# Do not trust on error
|
421 |
-
reason = "exception: %r" % (e,)
|
422 |
-
|
423 |
-
# SecureTransport does not send an alert nor shuts down the connection.
|
424 |
-
rec = _build_tls_unknown_ca_alert(self.version())
|
425 |
-
self.socket.sendall(rec)
|
426 |
-
# close the connection immediately
|
427 |
-
# l_onoff = 1, activate linger
|
428 |
-
# l_linger = 0, linger for 0 seoncds
|
429 |
-
opts = struct.pack("ii", 1, 0)
|
430 |
-
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
|
431 |
-
self.close()
|
432 |
-
raise ssl.SSLError("certificate verify failed, %s" % reason)
|
433 |
-
|
434 |
-
def _evaluate_trust(self, trust_bundle):
|
435 |
-
# We want data in memory, so load it up.
|
436 |
-
if os.path.isfile(trust_bundle):
|
437 |
-
with open(trust_bundle, "rb") as f:
|
438 |
-
trust_bundle = f.read()
|
439 |
-
|
440 |
-
cert_array = None
|
441 |
-
trust = Security.SecTrustRef()
|
442 |
-
|
443 |
-
try:
|
444 |
-
# Get a CFArray that contains the certs we want.
|
445 |
-
cert_array = _cert_array_from_pem(trust_bundle)
|
446 |
-
|
447 |
-
# Ok, now the hard part. We want to get the SecTrustRef that ST has
|
448 |
-
# created for this connection, shove our CAs into it, tell ST to
|
449 |
-
# ignore everything else it knows, and then ask if it can build a
|
450 |
-
# chain. This is a buuuunch of code.
|
451 |
-
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
|
452 |
-
_assert_no_error(result)
|
453 |
-
if not trust:
|
454 |
-
raise ssl.SSLError("Failed to copy trust reference")
|
455 |
-
|
456 |
-
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
|
457 |
-
_assert_no_error(result)
|
458 |
-
|
459 |
-
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
|
460 |
-
_assert_no_error(result)
|
461 |
-
|
462 |
-
trust_result = Security.SecTrustResultType()
|
463 |
-
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
|
464 |
-
_assert_no_error(result)
|
465 |
-
finally:
|
466 |
-
if trust:
|
467 |
-
CoreFoundation.CFRelease(trust)
|
468 |
-
|
469 |
-
if cert_array is not None:
|
470 |
-
CoreFoundation.CFRelease(cert_array)
|
471 |
-
|
472 |
-
return trust_result.value
|
473 |
-
|
474 |
-
def handshake(
|
475 |
-
self,
|
476 |
-
server_hostname,
|
477 |
-
verify,
|
478 |
-
trust_bundle,
|
479 |
-
min_version,
|
480 |
-
max_version,
|
481 |
-
client_cert,
|
482 |
-
client_key,
|
483 |
-
client_key_passphrase,
|
484 |
-
alpn_protocols,
|
485 |
-
):
|
486 |
-
"""
|
487 |
-
Actually performs the TLS handshake. This is run automatically by
|
488 |
-
wrapped socket, and shouldn't be needed in user code.
|
489 |
-
"""
|
490 |
-
# First, we do the initial bits of connection setup. We need to create
|
491 |
-
# a context, set its I/O funcs, and set the connection reference.
|
492 |
-
self.context = Security.SSLCreateContext(
|
493 |
-
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
|
494 |
-
)
|
495 |
-
result = Security.SSLSetIOFuncs(
|
496 |
-
self.context, _read_callback_pointer, _write_callback_pointer
|
497 |
-
)
|
498 |
-
_assert_no_error(result)
|
499 |
-
|
500 |
-
# Here we need to compute the handle to use. We do this by taking the
|
501 |
-
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
|
502 |
-
# just keep incrementing by one until we find a free space.
|
503 |
-
with _connection_ref_lock:
|
504 |
-
handle = id(self) % 2147483647
|
505 |
-
while handle in _connection_refs:
|
506 |
-
handle = (handle + 1) % 2147483647
|
507 |
-
_connection_refs[handle] = self
|
508 |
-
|
509 |
-
result = Security.SSLSetConnection(self.context, handle)
|
510 |
-
_assert_no_error(result)
|
511 |
-
|
512 |
-
# If we have a server hostname, we should set that too.
|
513 |
-
if server_hostname:
|
514 |
-
if not isinstance(server_hostname, bytes):
|
515 |
-
server_hostname = server_hostname.encode("utf-8")
|
516 |
-
|
517 |
-
result = Security.SSLSetPeerDomainName(
|
518 |
-
self.context, server_hostname, len(server_hostname)
|
519 |
-
)
|
520 |
-
_assert_no_error(result)
|
521 |
-
|
522 |
-
# Setup the ciphers.
|
523 |
-
self._set_ciphers()
|
524 |
-
|
525 |
-
# Setup the ALPN protocols.
|
526 |
-
self._set_alpn_protocols(alpn_protocols)
|
527 |
-
|
528 |
-
# Set the minimum and maximum TLS versions.
|
529 |
-
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
|
530 |
-
_assert_no_error(result)
|
531 |
-
|
532 |
-
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
|
533 |
-
_assert_no_error(result)
|
534 |
-
|
535 |
-
# If there's a trust DB, we need to use it. We do that by telling
|
536 |
-
# SecureTransport to break on server auth. We also do that if we don't
|
537 |
-
# want to validate the certs at all: we just won't actually do any
|
538 |
-
# authing in that case.
|
539 |
-
if not verify or trust_bundle is not None:
|
540 |
-
result = Security.SSLSetSessionOption(
|
541 |
-
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
|
542 |
-
)
|
543 |
-
_assert_no_error(result)
|
544 |
-
|
545 |
-
# If there's a client cert, we need to use it.
|
546 |
-
if client_cert:
|
547 |
-
self._keychain, self._keychain_dir = _temporary_keychain()
|
548 |
-
self._client_cert_chain = _load_client_cert_chain(
|
549 |
-
self._keychain, client_cert, client_key
|
550 |
-
)
|
551 |
-
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
|
552 |
-
_assert_no_error(result)
|
553 |
-
|
554 |
-
while True:
|
555 |
-
with self._raise_on_error():
|
556 |
-
result = Security.SSLHandshake(self.context)
|
557 |
-
|
558 |
-
if result == SecurityConst.errSSLWouldBlock:
|
559 |
-
raise socket.timeout("handshake timed out")
|
560 |
-
elif result == SecurityConst.errSSLServerAuthCompleted:
|
561 |
-
self._custom_validate(verify, trust_bundle)
|
562 |
-
continue
|
563 |
-
else:
|
564 |
-
_assert_no_error(result)
|
565 |
-
break
|
566 |
-
|
567 |
-
def fileno(self):
|
568 |
-
return self.socket.fileno()
|
569 |
-
|
570 |
-
# Copy-pasted from Python 3.5 source code
|
571 |
-
def _decref_socketios(self):
|
572 |
-
if self._makefile_refs > 0:
|
573 |
-
self._makefile_refs -= 1
|
574 |
-
if self._closed:
|
575 |
-
self.close()
|
576 |
-
|
577 |
-
def recv(self, bufsiz):
|
578 |
-
buffer = ctypes.create_string_buffer(bufsiz)
|
579 |
-
bytes_read = self.recv_into(buffer, bufsiz)
|
580 |
-
data = buffer[:bytes_read]
|
581 |
-
return data
|
582 |
-
|
583 |
-
def recv_into(self, buffer, nbytes=None):
|
584 |
-
# Read short on EOF.
|
585 |
-
if self._closed:
|
586 |
-
return 0
|
587 |
-
|
588 |
-
if nbytes is None:
|
589 |
-
nbytes = len(buffer)
|
590 |
-
|
591 |
-
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
|
592 |
-
processed_bytes = ctypes.c_size_t(0)
|
593 |
-
|
594 |
-
with self._raise_on_error():
|
595 |
-
result = Security.SSLRead(
|
596 |
-
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
|
597 |
-
)
|
598 |
-
|
599 |
-
# There are some result codes that we want to treat as "not always
|
600 |
-
# errors". Specifically, those are errSSLWouldBlock,
|
601 |
-
# errSSLClosedGraceful, and errSSLClosedNoNotify.
|
602 |
-
if result == SecurityConst.errSSLWouldBlock:
|
603 |
-
# If we didn't process any bytes, then this was just a time out.
|
604 |
-
# However, we can get errSSLWouldBlock in situations when we *did*
|
605 |
-
# read some data, and in those cases we should just read "short"
|
606 |
-
# and return.
|
607 |
-
if processed_bytes.value == 0:
|
608 |
-
# Timed out, no data read.
|
609 |
-
raise socket.timeout("recv timed out")
|
610 |
-
elif result in (
|
611 |
-
SecurityConst.errSSLClosedGraceful,
|
612 |
-
SecurityConst.errSSLClosedNoNotify,
|
613 |
-
):
|
614 |
-
# The remote peer has closed this connection. We should do so as
|
615 |
-
# well. Note that we don't actually return here because in
|
616 |
-
# principle this could actually be fired along with return data.
|
617 |
-
# It's unlikely though.
|
618 |
-
self.close()
|
619 |
-
else:
|
620 |
-
_assert_no_error(result)
|
621 |
-
|
622 |
-
# Ok, we read and probably succeeded. We should return whatever data
|
623 |
-
# was actually read.
|
624 |
-
return processed_bytes.value
|
625 |
-
|
626 |
-
def settimeout(self, timeout):
|
627 |
-
self._timeout = timeout
|
628 |
-
|
629 |
-
def gettimeout(self):
|
630 |
-
return self._timeout
|
631 |
-
|
632 |
-
def send(self, data):
|
633 |
-
processed_bytes = ctypes.c_size_t(0)
|
634 |
-
|
635 |
-
with self._raise_on_error():
|
636 |
-
result = Security.SSLWrite(
|
637 |
-
self.context, data, len(data), ctypes.byref(processed_bytes)
|
638 |
-
)
|
639 |
-
|
640 |
-
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
|
641 |
-
# Timed out
|
642 |
-
raise socket.timeout("send timed out")
|
643 |
-
else:
|
644 |
-
_assert_no_error(result)
|
645 |
-
|
646 |
-
# We sent, and probably succeeded. Tell them how much we sent.
|
647 |
-
return processed_bytes.value
|
648 |
-
|
649 |
-
def sendall(self, data):
|
650 |
-
total_sent = 0
|
651 |
-
while total_sent < len(data):
|
652 |
-
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
|
653 |
-
total_sent += sent
|
654 |
-
|
655 |
-
def shutdown(self):
|
656 |
-
with self._raise_on_error():
|
657 |
-
Security.SSLClose(self.context)
|
658 |
-
|
659 |
-
def close(self):
|
660 |
-
# TODO: should I do clean shutdown here? Do I have to?
|
661 |
-
if self._makefile_refs < 1:
|
662 |
-
self._closed = True
|
663 |
-
if self.context:
|
664 |
-
CoreFoundation.CFRelease(self.context)
|
665 |
-
self.context = None
|
666 |
-
if self._client_cert_chain:
|
667 |
-
CoreFoundation.CFRelease(self._client_cert_chain)
|
668 |
-
self._client_cert_chain = None
|
669 |
-
if self._keychain:
|
670 |
-
Security.SecKeychainDelete(self._keychain)
|
671 |
-
CoreFoundation.CFRelease(self._keychain)
|
672 |
-
shutil.rmtree(self._keychain_dir)
|
673 |
-
self._keychain = self._keychain_dir = None
|
674 |
-
return self.socket.close()
|
675 |
-
else:
|
676 |
-
self._makefile_refs -= 1
|
677 |
-
|
678 |
-
def getpeercert(self, binary_form=False):
|
679 |
-
# Urgh, annoying.
|
680 |
-
#
|
681 |
-
# Here's how we do this:
|
682 |
-
#
|
683 |
-
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
|
684 |
-
# connection.
|
685 |
-
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
|
686 |
-
# 3. To get the CN, call SecCertificateCopyCommonName and process that
|
687 |
-
# string so that it's of the appropriate type.
|
688 |
-
# 4. To get the SAN, we need to do something a bit more complex:
|
689 |
-
# a. Call SecCertificateCopyValues to get the data, requesting
|
690 |
-
# kSecOIDSubjectAltName.
|
691 |
-
# b. Mess about with this dictionary to try to get the SANs out.
|
692 |
-
#
|
693 |
-
# This is gross. Really gross. It's going to be a few hundred LoC extra
|
694 |
-
# just to repeat something that SecureTransport can *already do*. So my
|
695 |
-
# operating assumption at this time is that what we want to do is
|
696 |
-
# instead to just flag to urllib3 that it shouldn't do its own hostname
|
697 |
-
# validation when using SecureTransport.
|
698 |
-
if not binary_form:
|
699 |
-
raise ValueError("SecureTransport only supports dumping binary certs")
|
700 |
-
trust = Security.SecTrustRef()
|
701 |
-
certdata = None
|
702 |
-
der_bytes = None
|
703 |
-
|
704 |
-
try:
|
705 |
-
# Grab the trust store.
|
706 |
-
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
|
707 |
-
_assert_no_error(result)
|
708 |
-
if not trust:
|
709 |
-
# Probably we haven't done the handshake yet. No biggie.
|
710 |
-
return None
|
711 |
-
|
712 |
-
cert_count = Security.SecTrustGetCertificateCount(trust)
|
713 |
-
if not cert_count:
|
714 |
-
# Also a case that might happen if we haven't handshaked.
|
715 |
-
# Handshook? Handshaken?
|
716 |
-
return None
|
717 |
-
|
718 |
-
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
|
719 |
-
assert leaf
|
720 |
-
|
721 |
-
# Ok, now we want the DER bytes.
|
722 |
-
certdata = Security.SecCertificateCopyData(leaf)
|
723 |
-
assert certdata
|
724 |
-
|
725 |
-
data_length = CoreFoundation.CFDataGetLength(certdata)
|
726 |
-
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
|
727 |
-
der_bytes = ctypes.string_at(data_buffer, data_length)
|
728 |
-
finally:
|
729 |
-
if certdata:
|
730 |
-
CoreFoundation.CFRelease(certdata)
|
731 |
-
if trust:
|
732 |
-
CoreFoundation.CFRelease(trust)
|
733 |
-
|
734 |
-
return der_bytes
|
735 |
-
|
736 |
-
def version(self):
|
737 |
-
protocol = Security.SSLProtocol()
|
738 |
-
result = Security.SSLGetNegotiatedProtocolVersion(
|
739 |
-
self.context, ctypes.byref(protocol)
|
740 |
-
)
|
741 |
-
_assert_no_error(result)
|
742 |
-
if protocol.value == SecurityConst.kTLSProtocol13:
|
743 |
-
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
|
744 |
-
elif protocol.value == SecurityConst.kTLSProtocol12:
|
745 |
-
return "TLSv1.2"
|
746 |
-
elif protocol.value == SecurityConst.kTLSProtocol11:
|
747 |
-
return "TLSv1.1"
|
748 |
-
elif protocol.value == SecurityConst.kTLSProtocol1:
|
749 |
-
return "TLSv1"
|
750 |
-
elif protocol.value == SecurityConst.kSSLProtocol3:
|
751 |
-
return "SSLv3"
|
752 |
-
elif protocol.value == SecurityConst.kSSLProtocol2:
|
753 |
-
return "SSLv2"
|
754 |
-
else:
|
755 |
-
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
|
756 |
-
|
757 |
-
def _reuse(self):
|
758 |
-
self._makefile_refs += 1
|
759 |
-
|
760 |
-
def _drop(self):
|
761 |
-
if self._makefile_refs < 1:
|
762 |
-
self.close()
|
763 |
-
else:
|
764 |
-
self._makefile_refs -= 1
|
765 |
-
|
766 |
-
|
767 |
-
if _fileobject: # Platform-specific: Python 2
|
768 |
-
|
769 |
-
def makefile(self, mode, bufsize=-1):
|
770 |
-
self._makefile_refs += 1
|
771 |
-
return _fileobject(self, mode, bufsize, close=True)
|
772 |
-
|
773 |
-
else: # Platform-specific: Python 3
|
774 |
-
|
775 |
-
def makefile(self, mode="r", buffering=None, *args, **kwargs):
|
776 |
-
# We disable buffering with SecureTransport because it conflicts with
|
777 |
-
# the buffering that ST does internally (see issue #1153 for more).
|
778 |
-
buffering = 0
|
779 |
-
return backport_makefile(self, mode, buffering, *args, **kwargs)
|
780 |
-
|
781 |
-
|
782 |
-
WrappedSocket.makefile = makefile
|
783 |
-
|
784 |
-
|
785 |
-
class SecureTransportContext(object):
|
786 |
-
"""
|
787 |
-
I am a wrapper class for the SecureTransport library, to translate the
|
788 |
-
interface of the standard library ``SSLContext`` object to calls into
|
789 |
-
SecureTransport.
|
790 |
-
"""
|
791 |
-
|
792 |
-
def __init__(self, protocol):
|
793 |
-
self._min_version, self._max_version = _protocol_to_min_max[protocol]
|
794 |
-
self._options = 0
|
795 |
-
self._verify = False
|
796 |
-
self._trust_bundle = None
|
797 |
-
self._client_cert = None
|
798 |
-
self._client_key = None
|
799 |
-
self._client_key_passphrase = None
|
800 |
-
self._alpn_protocols = None
|
801 |
-
|
802 |
-
@property
|
803 |
-
def check_hostname(self):
|
804 |
-
"""
|
805 |
-
SecureTransport cannot have its hostname checking disabled. For more,
|
806 |
-
see the comment on getpeercert() in this file.
|
807 |
-
"""
|
808 |
-
return True
|
809 |
-
|
810 |
-
@check_hostname.setter
|
811 |
-
def check_hostname(self, value):
|
812 |
-
"""
|
813 |
-
SecureTransport cannot have its hostname checking disabled. For more,
|
814 |
-
see the comment on getpeercert() in this file.
|
815 |
-
"""
|
816 |
-
pass
|
817 |
-
|
818 |
-
@property
|
819 |
-
def options(self):
|
820 |
-
# TODO: Well, crap.
|
821 |
-
#
|
822 |
-
# So this is the bit of the code that is the most likely to cause us
|
823 |
-
# trouble. Essentially we need to enumerate all of the SSL options that
|
824 |
-
# users might want to use and try to see if we can sensibly translate
|
825 |
-
# them, or whether we should just ignore them.
|
826 |
-
return self._options
|
827 |
-
|
828 |
-
@options.setter
|
829 |
-
def options(self, value):
|
830 |
-
# TODO: Update in line with above.
|
831 |
-
self._options = value
|
832 |
-
|
833 |
-
@property
|
834 |
-
def verify_mode(self):
|
835 |
-
return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
|
836 |
-
|
837 |
-
@verify_mode.setter
|
838 |
-
def verify_mode(self, value):
|
839 |
-
self._verify = True if value == ssl.CERT_REQUIRED else False
|
840 |
-
|
841 |
-
def set_default_verify_paths(self):
|
842 |
-
# So, this has to do something a bit weird. Specifically, what it does
|
843 |
-
# is nothing.
|
844 |
-
#
|
845 |
-
# This means that, if we had previously had load_verify_locations
|
846 |
-
# called, this does not undo that. We need to do that because it turns
|
847 |
-
# out that the rest of the urllib3 code will attempt to load the
|
848 |
-
# default verify paths if it hasn't been told about any paths, even if
|
849 |
-
# the context itself was sometime earlier. We resolve that by just
|
850 |
-
# ignoring it.
|
851 |
-
pass
|
852 |
-
|
853 |
-
def load_default_certs(self):
|
854 |
-
return self.set_default_verify_paths()
|
855 |
-
|
856 |
-
def set_ciphers(self, ciphers):
|
857 |
-
# For now, we just require the default cipher string.
|
858 |
-
if ciphers != util.ssl_.DEFAULT_CIPHERS:
|
859 |
-
raise ValueError("SecureTransport doesn't support custom cipher strings")
|
860 |
-
|
861 |
-
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
|
862 |
-
# OK, we only really support cadata and cafile.
|
863 |
-
if capath is not None:
|
864 |
-
raise ValueError("SecureTransport does not support cert directories")
|
865 |
-
|
866 |
-
# Raise if cafile does not exist.
|
867 |
-
if cafile is not None:
|
868 |
-
with open(cafile):
|
869 |
-
pass
|
870 |
-
|
871 |
-
self._trust_bundle = cafile or cadata
|
872 |
-
|
873 |
-
def load_cert_chain(self, certfile, keyfile=None, password=None):
|
874 |
-
self._client_cert = certfile
|
875 |
-
self._client_key = keyfile
|
876 |
-
self._client_cert_passphrase = password
|
877 |
-
|
878 |
-
def set_alpn_protocols(self, protocols):
|
879 |
-
"""
|
880 |
-
Sets the ALPN protocols that will later be set on the context.
|
881 |
-
|
882 |
-
Raises a NotImplementedError if ALPN is not supported.
|
883 |
-
"""
|
884 |
-
if not hasattr(Security, "SSLSetALPNProtocols"):
|
885 |
-
raise NotImplementedError(
|
886 |
-
"SecureTransport supports ALPN only in macOS 10.12+"
|
887 |
-
)
|
888 |
-
self._alpn_protocols = [six.ensure_binary(p) for p in protocols]
|
889 |
-
|
890 |
-
def wrap_socket(
|
891 |
-
self,
|
892 |
-
sock,
|
893 |
-
server_side=False,
|
894 |
-
do_handshake_on_connect=True,
|
895 |
-
suppress_ragged_eofs=True,
|
896 |
-
server_hostname=None,
|
897 |
-
):
|
898 |
-
# So, what do we do here? Firstly, we assert some properties. This is a
|
899 |
-
# stripped down shim, so there is some functionality we don't support.
|
900 |
-
# See PEP 543 for the real deal.
|
901 |
-
assert not server_side
|
902 |
-
assert do_handshake_on_connect
|
903 |
-
assert suppress_ragged_eofs
|
904 |
-
|
905 |
-
# Ok, we're good to go. Now we want to create the wrapped socket object
|
906 |
-
# and store it in the appropriate place.
|
907 |
-
wrapped_socket = WrappedSocket(sock)
|
908 |
-
|
909 |
-
# Now we can handshake
|
910 |
-
wrapped_socket.handshake(
|
911 |
-
server_hostname,
|
912 |
-
self._verify,
|
913 |
-
self._trust_bundle,
|
914 |
-
self._min_version,
|
915 |
-
self._max_version,
|
916 |
-
self._client_cert,
|
917 |
-
self._client_key,
|
918 |
-
self._client_key_passphrase,
|
919 |
-
self._alpn_protocols,
|
920 |
-
)
|
921 |
-
return wrapped_socket
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/dependencies/cub/examples/block/Makefile
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
#/******************************************************************************
|
2 |
-
# * Copyright (c) 2011, Duane Merrill. All rights reserved.
|
3 |
-
# * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
# *
|
5 |
-
# * Redistribution and use in source and binary forms, with or without
|
6 |
-
# * modification, are permitted provided that the following conditions are met:
|
7 |
-
# * * Redistributions of source code must retain the above copyright
|
8 |
-
# * notice, this list of conditions and the following disclaimer.
|
9 |
-
# * * Redistributions in binary form must reproduce the above copyright
|
10 |
-
# * notice, this list of conditions and the following disclaimer in the
|
11 |
-
# * documentation and/or other materials provided with the distribution.
|
12 |
-
# * * Neither the name of the NVIDIA CORPORATION nor the
|
13 |
-
# * names of its contributors may be used to endorse or promote products
|
14 |
-
# * derived from this software without specific prior written permission.
|
15 |
-
# *
|
16 |
-
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
17 |
-
# * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
18 |
-
# * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
19 |
-
# * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
20 |
-
# * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
-
# * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
-
# * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
-
# * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
-
# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
-
# * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
-
# *
|
27 |
-
#******************************************************************************/
|
28 |
-
|
29 |
-
#-------------------------------------------------------------------------------
|
30 |
-
#
|
31 |
-
# Makefile usage
|
32 |
-
#
|
33 |
-
# make <target> [sm=<XXX,...>] [cdp=<0|1>] [force32=<0|1>] [abi=<0|1>] [open64=<0|1>] [verbose=<0|1>] [keep=<0|1>]
|
34 |
-
#
|
35 |
-
#-------------------------------------------------------------------------------
|
36 |
-
|
37 |
-
include ../../common.mk
|
38 |
-
|
39 |
-
|
40 |
-
#-------------------------------------------------------------------------------
|
41 |
-
# Includes
|
42 |
-
#-------------------------------------------------------------------------------
|
43 |
-
|
44 |
-
INC += -I$(CUB_DIR) -I$(CUB_DIR)test
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
#-------------------------------------------------------------------------------
|
49 |
-
# Dependency Lists
|
50 |
-
#-------------------------------------------------------------------------------
|
51 |
-
|
52 |
-
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
|
53 |
-
|
54 |
-
DEPS = $(CUB_DEPS) \
|
55 |
-
$(CUB_DIR)test/Makefile \
|
56 |
-
$(CUB_DIR)test/test_util.h \
|
57 |
-
$(CUB_DIR)test/mersenne.h \
|
58 |
-
|
59 |
-
ALL = example_block_radix_sort \
|
60 |
-
example_block_reduce \
|
61 |
-
example_block_scan
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
#-------------------------------------------------------------------------------
|
66 |
-
# make default
|
67 |
-
#-------------------------------------------------------------------------------
|
68 |
-
|
69 |
-
default:
|
70 |
-
|
71 |
-
|
72 |
-
#-------------------------------------------------------------------------------
|
73 |
-
# make clean
|
74 |
-
#-------------------------------------------------------------------------------
|
75 |
-
|
76 |
-
clean :
|
77 |
-
rm -f bin/*$(CPU_ARCH_SUFFIX)*
|
78 |
-
rm -f *.i* *.cubin *.cu.c *.cudafe* *.fatbin.c *.ptx *.hash *.cu.cpp *.o
|
79 |
-
|
80 |
-
|
81 |
-
#-------------------------------------------------------------------------------
|
82 |
-
# make all
|
83 |
-
#-------------------------------------------------------------------------------
|
84 |
-
|
85 |
-
all : $(ALL)
|
86 |
-
|
87 |
-
#-------------------------------------------------------------------------------
|
88 |
-
# make run
|
89 |
-
#-------------------------------------------------------------------------------
|
90 |
-
|
91 |
-
run :
|
92 |
-
for i in $(ALL); do ./bin/$${i}_$(BIN_SUFFIX) --device=$(device) || exit 1; done
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
#-------------------------------------------------------------------------------
|
98 |
-
# make example_block_reduce
|
99 |
-
#-------------------------------------------------------------------------------
|
100 |
-
|
101 |
-
example_block_reduce: bin/example_block_reduce_$(BIN_SUFFIX)
|
102 |
-
|
103 |
-
bin/example_block_reduce_$(BIN_SUFFIX) : example_block_reduce.cu $(DEPS)
|
104 |
-
mkdir -p bin
|
105 |
-
$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_reduce_$(BIN_SUFFIX) example_block_reduce.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
|
106 |
-
|
107 |
-
|
108 |
-
#-------------------------------------------------------------------------------
|
109 |
-
# make example_block_scan
|
110 |
-
#-------------------------------------------------------------------------------
|
111 |
-
|
112 |
-
example_block_scan: bin/example_block_scan_$(BIN_SUFFIX)
|
113 |
-
|
114 |
-
bin/example_block_scan_$(BIN_SUFFIX) : example_block_scan.cu $(DEPS)
|
115 |
-
mkdir -p bin
|
116 |
-
$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_scan_$(BIN_SUFFIX) example_block_scan.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
|
117 |
-
|
118 |
-
|
119 |
-
#-------------------------------------------------------------------------------
|
120 |
-
# make example_block_radix_sort
|
121 |
-
#-------------------------------------------------------------------------------
|
122 |
-
|
123 |
-
example_block_radix_sort: bin/example_block_radix_sort_$(BIN_SUFFIX)
|
124 |
-
|
125 |
-
bin/example_block_radix_sort_$(BIN_SUFFIX) : example_block_radix_sort.cu $(DEPS)
|
126 |
-
mkdir -p bin
|
127 |
-
$(NVCC) $(DEFINES) $(SM_TARGETS) -o bin/example_block_radix_sort_$(BIN_SUFFIX) example_block_radix_sort.cu $(NVCCFLAGS) $(CPU_ARCH) $(INC) $(LIBS) -O3
|
128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/internal/benchmark/compare_benchmark_results.py
DELETED
@@ -1,1308 +0,0 @@
|
|
1 |
-
#! /usr/bin/env python
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
|
4 |
-
###############################################################################
|
5 |
-
# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
|
6 |
-
#
|
7 |
-
# Distributed under the Boost Software License, Version 1.0. (See accompanying
|
8 |
-
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
|
9 |
-
###############################################################################
|
10 |
-
|
11 |
-
###############################################################################
|
12 |
-
# Copyright (c) 2018 NVIDIA Corporation
|
13 |
-
#
|
14 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
15 |
-
# you may not use this file except in compliance with the License.
|
16 |
-
# You may obtain a copy of the License at
|
17 |
-
#
|
18 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
19 |
-
#
|
20 |
-
# Unless required by applicable law or agreed to in writing, software
|
21 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
22 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
23 |
-
# See the License for the specific language governing permissions and
|
24 |
-
# limitations under the License.
|
25 |
-
###############################################################################
|
26 |
-
|
27 |
-
# XXX Put code shared with `combine_benchmark_results.py` in a common place.
|
28 |
-
|
29 |
-
# XXX Relative uncertainty.
|
30 |
-
|
31 |
-
# XXX Create uncertain value class which is quantity + uncertainty.
|
32 |
-
|
33 |
-
from sys import exit, stdout
|
34 |
-
|
35 |
-
from os.path import splitext
|
36 |
-
|
37 |
-
from itertools import imap # Lazy map.
|
38 |
-
|
39 |
-
from math import sqrt, log10, floor
|
40 |
-
|
41 |
-
from collections import deque
|
42 |
-
|
43 |
-
from argparse import ArgumentParser as argument_parser
|
44 |
-
from argparse import Action as argument_action
|
45 |
-
|
46 |
-
from csv import DictReader as csv_dict_reader
|
47 |
-
from csv import DictWriter as csv_dict_writer
|
48 |
-
|
49 |
-
from re import compile as regex_compile
|
50 |
-
|
51 |
-
###############################################################################
|
52 |
-
|
53 |
-
def unpack_tuple(f):
|
54 |
-
"""Return a unary function that calls `f` with its argument unpacked."""
|
55 |
-
return lambda args: f(*iter(args))
|
56 |
-
|
57 |
-
def strip_dict(d):
|
58 |
-
"""Strip leading and trailing whitespace from all keys and values in `d`.
|
59 |
-
|
60 |
-
Returns:
|
61 |
-
The modified dict `d`.
|
62 |
-
"""
|
63 |
-
d.update({key: value.strip() for (key, value) in d.items()})
|
64 |
-
return d
|
65 |
-
|
66 |
-
def merge_dicts(d0, d1):
|
67 |
-
"""Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
|
68 |
-
d = d0.copy()
|
69 |
-
d.update(d1)
|
70 |
-
return d
|
71 |
-
|
72 |
-
def change_key_in_dict(d, old_key, new_key):
|
73 |
-
"""Change the key of the entry in `d` with key `old_key` to `new_key`. If
|
74 |
-
there is an existing entry
|
75 |
-
|
76 |
-
Returns:
|
77 |
-
The modified dict `d`.
|
78 |
-
|
79 |
-
Raises:
|
80 |
-
KeyError : If `old_key` is not in `d`.
|
81 |
-
"""
|
82 |
-
d[new_key] = d.pop(old_key)
|
83 |
-
return d
|
84 |
-
|
85 |
-
def key_from_dict(d):
|
86 |
-
"""Create a hashable key from a `dict` by converting the `dict` to a tuple."""
|
87 |
-
return tuple(sorted(d.items()))
|
88 |
-
|
89 |
-
def strip_list(l):
|
90 |
-
"""Strip leading and trailing whitespace from all values in `l`."""
|
91 |
-
for i, value in enumerate(l): l[i] = value.strip()
|
92 |
-
return l
|
93 |
-
|
94 |
-
def remove_from_list(l, item):
|
95 |
-
"""Remove the first occurence of `item` from list `l` and return a tuple of
|
96 |
-
the index that was removed and the element that was removed.
|
97 |
-
|
98 |
-
Raises:
|
99 |
-
ValueError : If `item` is not in `l`.
|
100 |
-
"""
|
101 |
-
idx = l.index(item)
|
102 |
-
item = l.pop(idx)
|
103 |
-
return (idx, item)
|
104 |
-
|
105 |
-
###############################################################################
|
106 |
-
|
107 |
-
def int_or_float(x):
|
108 |
-
"""Convert `x` to either `int` or `float`, preferring `int`.
|
109 |
-
|
110 |
-
Raises:
|
111 |
-
ValueError : If `x` is not convertible to either `int` or `float`
|
112 |
-
"""
|
113 |
-
try:
|
114 |
-
return int(x)
|
115 |
-
except ValueError:
|
116 |
-
return float(x)
|
117 |
-
|
118 |
-
def try_int_or_float(x):
|
119 |
-
"""Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
|
120 |
-
returned unmodified if conversion fails.
|
121 |
-
"""
|
122 |
-
try:
|
123 |
-
return int_or_float(x)
|
124 |
-
except ValueError:
|
125 |
-
return x
|
126 |
-
|
127 |
-
###############################################################################
|
128 |
-
|
129 |
-
def ranges_overlap(x1, x2, y1, y2):
|
130 |
-
"""Returns true if the ranges `[x1, x2]` and `[y1, y2]` overlap,
|
131 |
-
where `x1 <= x2` and `y1 <= y2`.
|
132 |
-
|
133 |
-
Raises:
|
134 |
-
AssertionError : If `x1 > x2` or `y1 > y2`.
|
135 |
-
"""
|
136 |
-
assert x1 <= x2
|
137 |
-
assert y1 <= y2
|
138 |
-
return x1 <= y2 and y1 <= x2
|
139 |
-
|
140 |
-
def ranges_overlap_uncertainty(x, x_unc, y, y_unc):
|
141 |
-
"""Returns true if the ranges `[x - x_unc, x + x_unc]` and
|
142 |
-
`[y - y_unc, y + y_unc]` overlap, where `x_unc >= 0` and `y_unc >= 0`.
|
143 |
-
|
144 |
-
Raises:
|
145 |
-
AssertionError : If `x_unc < 0` or `y_unc < 0`.
|
146 |
-
"""
|
147 |
-
assert x_unc >= 0
|
148 |
-
assert y_unc >= 0
|
149 |
-
return ranges_overlap(x - x_unc, x + x_unc, y - y_unc, y + y_unc)
|
150 |
-
|
151 |
-
###############################################################################
|
152 |
-
|
153 |
-
# Formulas for propagation of uncertainty from:
|
154 |
-
#
|
155 |
-
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
|
156 |
-
#
|
157 |
-
# Even though it's Wikipedia, I trust it as I helped write that table.
|
158 |
-
#
|
159 |
-
# XXX Replace with a proper reference.
|
160 |
-
|
161 |
-
def uncertainty_multiplicative(f, A, A_abs_unc, B, B_abs_unc):
|
162 |
-
"""Compute the propagated uncertainty from the multiplication of two
|
163 |
-
uncertain values, `A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = AB` or
|
164 |
-
`f = A/B`, where `A != 0` and `B != 0`, the uncertainty in `f` is
|
165 |
-
approximately:
|
166 |
-
|
167 |
-
.. math::
|
168 |
-
|
169 |
-
\sigma_f = |f| \sqrt{\frac{\sigma_A}{A} ^ 2 + \frac{\sigma_B}{B} ^ 2}
|
170 |
-
|
171 |
-
Raises:
|
172 |
-
ZeroDivisionError : If `A == 0` or `B == 0`.
|
173 |
-
"""
|
174 |
-
return abs(f) * sqrt((A_abs_unc / A) ** 2 + (B_abs_unc / B) ** 2);
|
175 |
-
|
176 |
-
def uncertainty_additive(c, A_abs_unc, d, B_abs_unc):
|
177 |
-
"""Compute the propagated uncertainty from addition of two uncertain values,
|
178 |
-
`A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = cA + dB`, where `c` and
|
179 |
-
`d` are certain constants, the uncertainty in `f` is approximately:
|
180 |
-
|
181 |
-
.. math::
|
182 |
-
|
183 |
-
f_{\sigma} = \sqrt{c ^ 2 * A_{\sigma} ^ 2 + d ^ 2 * B_{\sigma} ^ 2}
|
184 |
-
"""
|
185 |
-
return sqrt(((c ** 2) * (A_abs_unc ** 2)) + ((d ** 2) * (B_abs_unc ** 2)))
|
186 |
-
|
187 |
-
###############################################################################
|
188 |
-
|
189 |
-
# XXX Create change class.
|
190 |
-
|
191 |
-
def absolute_change(old, new):
|
192 |
-
"""Computes the absolute change from old to new:
|
193 |
-
|
194 |
-
.. math::
|
195 |
-
|
196 |
-
absolute_change = new - old
|
197 |
-
"""
|
198 |
-
return new - old
|
199 |
-
|
200 |
-
def absolute_change_uncertainty(old, old_unc, new, new_unc):
|
201 |
-
"""Computes the uncertainty in the absolute change from old to new and returns
|
202 |
-
a tuple of the absolute change and the absolute change uncertainty.
|
203 |
-
"""
|
204 |
-
absolute_change = new - old
|
205 |
-
absolute_change_unc = uncertainty_additive(1.0, new_unc, -1.0, old_unc)
|
206 |
-
|
207 |
-
return (absolute_change, absolute_change_unc)
|
208 |
-
|
209 |
-
def percent_change(old, new):
|
210 |
-
"""Computes the percent change from old to new:
|
211 |
-
|
212 |
-
.. math::
|
213 |
-
|
214 |
-
percent_change = 100 \frac{new - old}{abs(old)}
|
215 |
-
"""
|
216 |
-
return float(new - old) / abs(old)
|
217 |
-
|
218 |
-
def percent_change_uncertainty(old, old_unc, new, new_unc):
|
219 |
-
"""Computes the uncertainty in the percent change from old to new and returns
|
220 |
-
a tuple of the absolute change, the absolute change uncertainty, the percent
|
221 |
-
change and the percent change uncertainty.
|
222 |
-
"""
|
223 |
-
# Let's break this down into a few sub-operations:
|
224 |
-
#
|
225 |
-
# absolute_change = new - old <- Additive propagation.
|
226 |
-
# relative_change = change / abs(old) <- Multiplicative propagation.
|
227 |
-
# percent_change = 100 * y <- Multiplicative propagation.
|
228 |
-
|
229 |
-
if old == 0:
|
230 |
-
# We can't compute relative change because the old value is 0.
|
231 |
-
return (float("nan"), float("nan"), float("nan"), float("nan"))
|
232 |
-
|
233 |
-
(absolute_change, absolute_change_unc) = absolute_change_uncertainty(
|
234 |
-
old, old_unc, new, new_unc
|
235 |
-
)
|
236 |
-
|
237 |
-
if absolute_change == 0:
|
238 |
-
# We can't compute relative change uncertainty because the relative
|
239 |
-
# uncertainty of a value of 0 is undefined.
|
240 |
-
return (absolute_change, absolute_change_unc, float("nan"), float("nan"))
|
241 |
-
|
242 |
-
relative_change = float(absolute_change) / abs(old)
|
243 |
-
relative_change_unc = uncertainty_multiplicative(
|
244 |
-
relative_change, absolute_change, absolute_change_unc, old, old_unc
|
245 |
-
)
|
246 |
-
|
247 |
-
percent_change = 100.0 * relative_change
|
248 |
-
percent_change_unc = uncertainty_multiplicative(
|
249 |
-
percent_change, 100.0, 0.0, relative_change, relative_change_unc
|
250 |
-
)
|
251 |
-
|
252 |
-
return (
|
253 |
-
absolute_change, absolute_change_unc, percent_change, percent_change_unc
|
254 |
-
)
|
255 |
-
|
256 |
-
###############################################################################
|
257 |
-
|
258 |
-
def find_significant_digit(x):
|
259 |
-
"""Return the significant digit of the number x. The result is the number of
|
260 |
-
digits after the decimal place to round to (negative numbers indicate rounding
|
261 |
-
before the decimal place)."""
|
262 |
-
if x == 0: return 0
|
263 |
-
return -int(floor(log10(abs(x))))
|
264 |
-
|
265 |
-
def round_with_int_conversion(x, ndigits = None):
|
266 |
-
"""Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less
|
267 |
-
than 1, convert the result to `int`. If `ndigits` is `None`, the significant
|
268 |
-
digit of `x` is used."""
|
269 |
-
if ndigits is None: ndigits = find_significant_digit(x)
|
270 |
-
x_rounded = round(x, ndigits)
|
271 |
-
return int(x_rounded) if ndigits < 1 else x_rounded
|
272 |
-
|
273 |
-
###############################################################################
|
274 |
-
|
275 |
-
class measured_variable(object):
|
276 |
-
"""A meta-variable representing measured data. It is composed of three raw
|
277 |
-
variables plus units meta-data.
|
278 |
-
|
279 |
-
Attributes:
|
280 |
-
quantity (`str`) :
|
281 |
-
Name of the quantity variable of this object.
|
282 |
-
uncertainty (`str`) :
|
283 |
-
Name of the uncertainty variable of this object.
|
284 |
-
sample_size (`str`) :
|
285 |
-
Name of the sample size variable of this object.
|
286 |
-
units (units class or `None`) :
|
287 |
-
The units the value is measured in.
|
288 |
-
"""
|
289 |
-
|
290 |
-
def __init__(self, quantity, uncertainty, sample_size, units = None):
|
291 |
-
self.quantity = quantity
|
292 |
-
self.uncertainty = uncertainty
|
293 |
-
self.sample_size = sample_size
|
294 |
-
self.units = units
|
295 |
-
|
296 |
-
def as_tuple(self):
|
297 |
-
return (self.quantity, self.uncertainty, self.sample_size, self.units)
|
298 |
-
|
299 |
-
def __iter__(self):
|
300 |
-
return iter(self.as_tuple())
|
301 |
-
|
302 |
-
def __str__(self):
|
303 |
-
return str(self.as_tuple())
|
304 |
-
|
305 |
-
def __repr__(self):
|
306 |
-
return str(self)
|
307 |
-
|
308 |
-
class measured_value(object):
|
309 |
-
"""An object that represents a value determined by multiple measurements.
|
310 |
-
|
311 |
-
Attributes:
|
312 |
-
quantity (scalar) :
|
313 |
-
The quantity of the value, e.g. the arithmetic mean.
|
314 |
-
uncertainty (scalar) :
|
315 |
-
The measurement uncertainty, e.g. the sample standard deviation.
|
316 |
-
sample_size (`int`) :
|
317 |
-
The number of observations contributing to the value.
|
318 |
-
units (units class or `None`) :
|
319 |
-
The units the value is measured in.
|
320 |
-
"""
|
321 |
-
|
322 |
-
def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
|
323 |
-
self.quantity = quantity
|
324 |
-
self.uncertainty = uncertainty
|
325 |
-
self.sample_size = sample_size
|
326 |
-
self.units = units
|
327 |
-
|
328 |
-
def as_tuple(self):
|
329 |
-
return (self.quantity, self.uncertainty, self.sample_size, self.units)
|
330 |
-
|
331 |
-
def __iter__(self):
|
332 |
-
return iter(self.as_tuple())
|
333 |
-
|
334 |
-
def __str__(self):
|
335 |
-
return str(self.as_tuple())
|
336 |
-
|
337 |
-
def __repr__(self):
|
338 |
-
return str(self)
|
339 |
-
|
340 |
-
###############################################################################
|
341 |
-
|
342 |
-
def arithmetic_mean(X):
|
343 |
-
"""Computes the arithmetic mean of the sequence `X`.
|
344 |
-
|
345 |
-
Let:
|
346 |
-
|
347 |
-
* `n = len(X)`.
|
348 |
-
* `u` denote the arithmetic mean of `X`.
|
349 |
-
|
350 |
-
.. math::
|
351 |
-
|
352 |
-
u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
|
353 |
-
"""
|
354 |
-
return sum(X) / len(X)
|
355 |
-
|
356 |
-
def sample_variance(X, u = None):
|
357 |
-
"""Computes the sample variance of the sequence `X`.
|
358 |
-
|
359 |
-
Let:
|
360 |
-
|
361 |
-
* `n = len(X)`.
|
362 |
-
* `u` denote the arithmetic mean of `X`.
|
363 |
-
* `s` denote the sample standard deviation of `X`.
|
364 |
-
|
365 |
-
.. math::
|
366 |
-
|
367 |
-
v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
|
368 |
-
|
369 |
-
Args:
|
370 |
-
X (`Iterable`) : The sequence of values.
|
371 |
-
u (number) : The arithmetic mean of `X`.
|
372 |
-
"""
|
373 |
-
if u is None: u = arithmetic_mean(X)
|
374 |
-
return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
|
375 |
-
|
376 |
-
def sample_standard_deviation(X, u = None, v = None):
|
377 |
-
"""Computes the sample standard deviation of the sequence `X`.
|
378 |
-
|
379 |
-
Let:
|
380 |
-
|
381 |
-
* `n = len(X)`.
|
382 |
-
* `u` denote the arithmetic mean of `X`.
|
383 |
-
* `v` denote the sample variance of `X`.
|
384 |
-
* `s` denote the sample standard deviation of `X`.
|
385 |
-
|
386 |
-
.. math::
|
387 |
-
|
388 |
-
s &= \sqrt{v}
|
389 |
-
&= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
|
390 |
-
|
391 |
-
Args:
|
392 |
-
X (`Iterable`) : The sequence of values.
|
393 |
-
u (number) : The arithmetic mean of `X`.
|
394 |
-
v (number) : The sample variance of `X`.
|
395 |
-
"""
|
396 |
-
if u is None: u = arithmetic_mean(X)
|
397 |
-
if v is None: v = sample_variance(X, u)
|
398 |
-
return sqrt(v)
|
399 |
-
|
400 |
-
def combine_sample_size(As):
|
401 |
-
"""Computes the combined sample variance of a group of `measured_value`s.
|
402 |
-
|
403 |
-
Let:
|
404 |
-
|
405 |
-
* `g = len(As)`.
|
406 |
-
* `n_i = As[i].samples`.
|
407 |
-
* `n` denote the combined sample size of `As`.
|
408 |
-
|
409 |
-
.. math::
|
410 |
-
|
411 |
-
n = \sum{i = 0}^{g - 1} n_i
|
412 |
-
"""
|
413 |
-
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
|
414 |
-
|
415 |
-
def combine_arithmetic_mean(As, n = None):
|
416 |
-
"""Computes the combined arithmetic mean of a group of `measured_value`s.
|
417 |
-
|
418 |
-
Let:
|
419 |
-
|
420 |
-
* `g = len(As)`.
|
421 |
-
* `u_i = As[i].quantity`.
|
422 |
-
* `n_i = As[i].samples`.
|
423 |
-
* `n` denote the combined sample size of `As`.
|
424 |
-
* `u` denote the arithmetic mean of the quantities of `As`.
|
425 |
-
|
426 |
-
.. math::
|
427 |
-
|
428 |
-
u = \frac{\sum{i = 0}^{g - 1} n_i u_i}{n}
|
429 |
-
"""
|
430 |
-
if n is None: n = combine_sample_size(As)
|
431 |
-
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
|
432 |
-
|
433 |
-
def combine_sample_variance(As, n = None, u = None):
|
434 |
-
"""Computes the combined sample variance of a group of `measured_value`s.
|
435 |
-
|
436 |
-
Let:
|
437 |
-
|
438 |
-
* `g = len(As)`.
|
439 |
-
* `u_i = As[i].quantity`.
|
440 |
-
* `s_i = As[i].uncertainty`.
|
441 |
-
* `n_i = As[i].samples`.
|
442 |
-
* `n` denote the combined sample size of `As`.
|
443 |
-
* `u` denote the arithmetic mean of the quantities of `As`.
|
444 |
-
* `v` denote the sample variance of `X`.
|
445 |
-
|
446 |
-
.. math::
|
447 |
-
|
448 |
-
v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
|
449 |
-
|
450 |
-
Args:
|
451 |
-
As (`Iterable` of `measured_value`s) : The sequence of values.
|
452 |
-
n (number) : The combined sample sizes of `As`.
|
453 |
-
u (number) : The combined arithmetic mean of `As`.
|
454 |
-
"""
|
455 |
-
if n <= 1: return 0
|
456 |
-
if n is None: n = combine_sample_size(As)
|
457 |
-
if u is None: u = combine_arithmetic_mean(As, n)
|
458 |
-
return sum(imap(unpack_tuple(
|
459 |
-
lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
|
460 |
-
), As)) / (n - 1)
|
461 |
-
|
462 |
-
def combine_sample_standard_deviation(As, n = None, u = None, v = None):
|
463 |
-
"""Computes the combined sample standard deviation of a group of
|
464 |
-
`measured_value`s.
|
465 |
-
|
466 |
-
Let:
|
467 |
-
|
468 |
-
* `g = len(As)`.
|
469 |
-
* `u_i = As[i].quantity`.
|
470 |
-
* `s_i = As[i].uncertainty`.
|
471 |
-
* `n_i = As[i].samples`.
|
472 |
-
* `n` denote the combined sample size of `As`.
|
473 |
-
* `u` denote the arithmetic mean of the quantities of `As`.
|
474 |
-
* `v` denote the sample variance of `X`.
|
475 |
-
* `s` denote the sample standard deviation of `X`.
|
476 |
-
|
477 |
-
.. math::
|
478 |
-
v &= \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
|
479 |
-
|
480 |
-
s &= \sqrt{v}
|
481 |
-
|
482 |
-
Args:
|
483 |
-
As (`Iterable` of `measured_value`s) : The sequence of values.
|
484 |
-
n (number) : The combined sample sizes of `As`.
|
485 |
-
u (number) : The combined arithmetic mean of `As`.
|
486 |
-
v (number) : The combined sample variance of `As`.
|
487 |
-
"""
|
488 |
-
if n <= 1: return 0
|
489 |
-
if n is None: n = combine_sample_size(As)
|
490 |
-
if u is None: u = combine_arithmetic_mean(As, n)
|
491 |
-
if v is None: v = combine_sample_variance(As, n, u)
|
492 |
-
return sqrt(v)
|
493 |
-
|
494 |
-
###############################################################################
|
495 |
-
|
496 |
-
def store_const_multiple(const, *destinations):
|
497 |
-
"""Returns an `argument_action` class that sets multiple argument
|
498 |
-
destinations (`destinations`) to `const`."""
|
499 |
-
class store_const_multiple_action(argument_action):
|
500 |
-
def __init__(self, *args, **kwargs):
|
501 |
-
super(store_const_multiple_action, self).__init__(
|
502 |
-
metavar = None, nargs = 0, const = const, *args, **kwargs
|
503 |
-
)
|
504 |
-
|
505 |
-
def __call__(self, parser, namespace, values, option_string = None):
|
506 |
-
for destination in destinations:
|
507 |
-
setattr(namespace, destination, const)
|
508 |
-
|
509 |
-
return store_const_multiple_action
|
510 |
-
|
511 |
-
def store_true_multiple(*destinations):
  """Build an `argument_action` class that assigns `True` to every argument
  destination named in `destinations`."""
  return store_const_multiple(True, *destinations)
|
515 |
-
|
516 |
-
def store_false_multiple(*destinations):
  """Build an `argument_action` class that assigns `False` to every argument
  destination named in `destinations`."""
  return store_const_multiple(False, *destinations)
|
520 |
-
|
521 |
-
###############################################################################
|
522 |
-
|
523 |
-
def process_program_arguments():
  """Parse the command line and return the populated argument namespace.

  Raises:
    SystemExit : If the command line is malformed (raised by `argparse`).
  """
  ap = argument_parser(
    description = (
      "Compares two sets of combined performance results and identifies "
      "statistically significant changes."
    )
  )

  ap.add_argument(
    "baseline_input_file",
    help = ("CSV file containing the baseline performance results. The first "
            "two rows should be a header. The 1st header row specifies the "
            "name of each variable, and the 2nd header row specifies the units "
            "for that variable. The baseline results may be a superset of the "
            "observed performance results, but the reverse is not true. The "
            "baseline results must contain data for every datapoint in the "
            "observed performance results."),
    type = str
  )

  # BUG FIX: the original help text was garbled ("The 1st header row specifies
  # the name of header row specifies the units for that variable."); restored
  # the sentence describing both header rows, mirroring the baseline help.
  ap.add_argument(
    "observed_input_file",
    help = ("CSV file containing the observed performance results. The first "
            "two rows should be a header. The 1st header row specifies the "
            "name of each variable, and the 2nd header row specifies the "
            "units for that variable."),
    type = str
  )

  ap.add_argument(
    "-o", "--output-file",
    help = ("The file that results are written to. If `-`, results are "
            "written to stdout."),
    action = "store", type = str, default = "-",
    metavar = "OUTPUT"
  )

  ap.add_argument(
    "-c", "--control-variable",
    help = ("Treat the specified variable as a control variable. This means "
            "it will be filtered out when forming dataset keys. For example, "
            "this could be used to ignore a timestamp variable that is "
            "different in the baseline and observed results. May be specified "
            "multiple times."),
    action = "append", type = str, dest = "control_variables", default = [],
    metavar = "QUANTITY"
  )

  ap.add_argument(
    "-d", "--dependent-variable",
    help = ("Treat the specified three variables as a dependent variable. The "
            "1st variable is the measured quantity, the 2nd is the uncertainty "
            "of the measurement and the 3rd is the sample size. The defaults "
            "are the dependent variables of Thrust's benchmark suite. May be "
            "specified multiple times."),
    action = "append", type = str, dest = "dependent_variables", default = [],
    metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
  )

  # NOTE: `%%` is required because `argparse` applies %-formatting to help
  # strings.
  ap.add_argument(
    "-t", "--change-threshold",
    help = ("Treat relative changes less than this amount (a percentage) as "
            "statistically insignificant. The default is 5%%."),
    action = "store", type = float, default = 5,
    metavar = "PERCENTAGE"
  )

  ap.add_argument(
    "-p", "--preserve-whitespace",
    help = ("Don't trim leading and trailing whitespace from each CSV cell."),
    action = "store_true", default = False
  )

  ap.add_argument(
    "--output-all-variables",
    help = ("Don't omit original absolute values in output."),
    action = "store_true", default = False
  )

  ap.add_argument(
    "--output-all-datapoints",
    help = ("Don't omit datapoints that are statistically indistinguishable "
            "in output."),
    action = "store_true", default = False
  )

  ap.add_argument(
    "-a", "--output-all",
    help = ("Equivalent to `--output-all-variables --output-all-datapoints`."),
    action = store_true_multiple("output_all_variables", "output_all_datapoints")
  )

  return ap.parse_args()
|
615 |
-
|
616 |
-
###############################################################################
|
617 |
-
|
618 |
-
def filter_comments(f, s = "#"):
  """Return an iterator to the file `f` with every line that begins with the
  comment prefix `s` removed."""
  def is_not_comment(line):
    return not line.startswith(s)
  return filter(is_not_comment, f)
|
622 |
-
|
623 |
-
###############################################################################
|
624 |
-
|
625 |
-
class io_manager(object):
  """Manages I/O operations and represents the input data as an `Iterable`
  sequence of `dict`s.

  It is `Iterable` and an `Iterator`. It can be used with `with`.

  Attributes:
    preserve_whitespace (`bool`) :
      If `False`, leading and trailing whitespace is stripped from each CSV cell.
    writer (`csv_dict_writer`) :
      CSV writer object that the output is written to.
    output_file (`file` or `stdout`) :
      The output `file` object.
    baseline_reader (`csv_dict_reader`) :
      CSV reader object for the baseline results.
    observed_reader (`csv_dict_reader`) :
      CSV reader object for the observed results.
    baseline_input_file (`file`) :
      `file` object for the baseline results.
    observed_input_file (`file`) :
      `file` object for the observed results.
    variable_names (`list` of `str`s) :
      Names of the variables, in order.
    variable_units (`dict`) :
      Maps each variable name to its units (the 2nd header row).
  """

  def __init__(self,
               baseline_input_file, observed_input_file,
               output_file,
               preserve_whitespace = False):
    """Read input files and open the output file and construct a new `io_manager`
    object.

    If `preserve_whitespace` is `False`, leading and trailing whitespace is
    stripped from each CSV cell.

    Raises:
      AssertionError :
        If `type(preserve_whitespace) != bool`.
    """
    assert type(preserve_whitespace) == bool

    self.preserve_whitespace = preserve_whitespace

    # Open baseline results. Comment lines (`#`-prefixed) are filtered out
    # before the CSV reader sees them.
    self.baseline_input_file = open(baseline_input_file)
    self.baseline_reader = csv_dict_reader(
      filter_comments(self.baseline_input_file)
    )

    if not self.preserve_whitespace:
      strip_list(self.baseline_reader.fieldnames)

    self.variable_names = list(self.baseline_reader.fieldnames) # Copy.
    # Consume the 2nd header row (units). NOTE(review): `.next()` is the
    # Python 2 iterator protocol; this file appears to target Python 2.
    self.variable_units = self.baseline_reader.next()

    if not self.preserve_whitespace:
      strip_dict(self.variable_units)

    # Open observed results.
    self.observed_input_file = open(observed_input_file)
    self.observed_reader = csv_dict_reader(
      filter_comments(self.observed_input_file)
    )

    if not self.preserve_whitespace:
      strip_list(self.observed_reader.fieldnames)

    # Make sure all inputs have the same variables schema.
    assert self.variable_names == self.observed_reader.fieldnames, \
      "Observed results input file (`" + observed_input_file + "`) " + \
      "variable schema `" + str(self.observed_reader.fieldnames) + "` does " + \
      "not match the baseline results input file (`" + baseline_input_file + \
      "`) variable schema `" + str(self.variable_names) + "`."

    # Consume the next row, which should be the second line of the header.
    observed_variable_units = self.observed_reader.next()

    if not self.preserve_whitespace:
      strip_dict(observed_variable_units)

    # Make sure all inputs have the same units schema.
    assert self.variable_units == observed_variable_units, \
      "Observed results input file (`" + observed_input_file + "`) " + \
      "units schema `" + str(observed_variable_units) + "` does not " + \
      "match the baseline results input file (`" + baseline_input_file + \
      "`) units schema `" + str(self.variable_units) + "`."

    if output_file == "-": # Output to stdout.
      self.output_file = stdout
    else: # Output to user-specified file.
      self.output_file = open(output_file, "w")

    self.writer = csv_dict_writer(
      self.output_file, fieldnames = self.variable_names
    )

  def __enter__(self):
    """Called upon entering a `with` statement."""
    return self

  def __exit__(self, *args):
    """Called upon exiting a `with` statement."""
    # Never close stdout; just drop the reference so a later `__exit__` is a
    # no-op.
    if self.output_file is stdout:
      self.output_file = None
    elif self.output_file is not None:
      self.output_file.__exit__(*args)

    self.baseline_input_file.__exit__(*args)
    self.observed_input_file.__exit__(*args)

  def append_variable(self, name, units):
    """Add a new variable to the output schema."""
    self.variable_names.append(name)
    self.variable_units.update({name : units})

    # Update CSV writer field names.
    self.writer.fieldnames = self.variable_names

  def insert_variable(self, idx, name, units):
    """Insert a new variable into the output schema at index `idx`."""
    self.variable_names.insert(idx, name)
    self.variable_units.update({name : units})

    # Update CSV writer field names.
    self.writer.fieldnames = self.variable_names

  def remove_variable(self, name):
    """Remove variable from the output schema and return a tuple of the variable
    index and the variable units.

    Raises:
      ValueError : If `name` is not in the output schema.
    """
    # Remove the variable and get its index, which we'll need to remove the
    # corresponding units entry.
    (idx, item) = remove_from_list(self.variable_names, name)

    # Remove the units entry.
    units = self.variable_units.pop(item)

    # Update CSV writer field names.
    self.writer.fieldnames = self.variable_names

    return (idx, units)

  #############################################################################
  # Input Stream.

  def baseline(self):
    """Return an iterator to the baseline results input sequence."""
    return imap(lambda row: strip_dict(row), self.baseline_reader)

  def observed(self):
    """Return an iterator to the observed results input sequence."""
    return imap(lambda row: strip_dict(row), self.observed_reader)

  #############################################################################
  # Output.

  def write_header(self):
    """Write the header for the output CSV file."""
    # Write the first line of the header (variable names).
    self.writer.writeheader()

    # Write the second line of the header (variable units).
    self.writer.writerow(self.variable_units)

  def write(self, d):
    """Write a record (a `dict`) to the output CSV file."""
    self.writer.writerow(d)
|
797 |
-
|
798 |
-
###############################################################################
|
799 |
-
|
800 |
-
class dependent_variable_parser(object):
  """Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""

  #############################################################################
  # Grammar

  # A variable name is any non-empty run of characters other than `,`.
  variable_name_rule = r'[^,]+'

  # Three comma-separated variable names: measured quantity, uncertainty and
  # sample size, each in its own capture group.
  dependent_variable_rule = r'({0}),({0}),({0})'.format(variable_name_rule)

  engine = regex_compile(dependent_variable_rule)

  #############################################################################

  def __call__(self, s):
    """Parses the string `s` with the form "AVG,STDEV,TRIALS".

    Returns:
      A `measured_variable`.

    Raises:
      AssertionError : If parsing fails.
    """
    match = self.engine.match(s)

    assert match is not None, \
      "Dependent variable (-d) `" + s + "` is invalid, the format is " + \
      "`AVG,STDEV,TRIALS`."

    # The three capture groups map directly onto the constructor arguments.
    return measured_variable(*match.groups())
|
837 |
-
|
838 |
-
###############################################################################
|
839 |
-
|
840 |
-
class record_aggregator(object):
  """Consumes and combines records and represents the result as an `Iterable`
  sequence of `dict`s.

  It is `Iterable` and an `Iterator`.

  Attributes:
    dependent_variables (`list` of `measured_variable`s) :
      A list of dependent variables provided on the command line.
    control_variables (`list` of `str`s) :
      A list of control variables provided on the command line.
    dataset (`dict`) :
      A mapping of distinguishing (e.g. control + independent) values (`tuple`s
      of variable-quantity pairs) to `list`s of dependent values (`dict`s from
      variables to lists of cells).
    in_order_dataset_keys :
      A list of unique dataset keys (e.g. distinguishing variables) in order of
      appearance.
  """

  def __init__(self, dependent_variables, control_variables):
    """Construct a new `record_aggregator` object.

    Args:
      dependent_variables (`list` of `measured_variable`s) : Parsed `-d` options.
      control_variables (`list` of `str`s) : Parsed `-c` options.
    """
    self.dependent_variables = dependent_variables
    self.control_variables = control_variables

    self.dataset = {}

    # `deque` gives O(1) `popleft` when the dataset is drained by `next`.
    self.in_order_dataset_keys = deque()

  #############################################################################
  # Insertion.

  def key_from_dict(self, d):
    """Create a hashable key from a `dict` by filtering out control variables
    and then converting the `dict` to a tuple.
    """
    distinguishing_values = d.copy()

    # Filter out control variables; missing control variables are ignored.
    for var in self.control_variables:
      distinguishing_values.pop(var, None)

    return key_from_dict(distinguishing_values)

  def append(self, record):
    """Add `record` (a `dict` of variable name to cell text) to the dataset.

    Raises:
      ValueError : If any `str`-to-numeric conversions fail.
    """
    # The distinguishing variables are the control and independent variables.
    # They form the key for each record in the dataset. Records with the same
    # distinguishing variables are treated as observations of the same
    # datapoint.
    dependent_values = {}

    # To allow the same sample size variable to be used for multiple dependent
    # variables, we don't pop sample size variables until we're done processing
    # all variables.
    sample_size_variables = []

    # Separate the dependent values from the distinguishing variables and
    # perform `str`-to-numeric conversions.
    for var in self.dependent_variables:
      quantity, uncertainty, sample_size, units = var.as_tuple()

      dependent_values[quantity] = [int_or_float(record.pop(quantity))]
      dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
      dependent_values[sample_size] = [int(record[sample_size])]

      sample_size_variables.append(sample_size)

    # Pop sample size variables.
    for var in sample_size_variables:
      # Allowed to fail, as we may have duplicates.
      record.pop(var, None)

    distinguishing_values = self.key_from_dict(record)

    if distinguishing_values in self.dataset:
      # These distinguishing values already exist, so get the `dict` they're
      # mapped to, look up each key in `dependent_values` in the `dict`, and
      # add the corresponding quantity in `dependent_values` to the list in
      # the `dict`.
      for var, columns in dependent_values.iteritems():
        self.dataset[distinguishing_values][var] += columns
    else:
      # These distinguishing values aren't in the dataset, so add them and
      # record them in `in_order_dataset_keys`.
      self.dataset[distinguishing_values] = dependent_values
      self.in_order_dataset_keys.append(distinguishing_values)

  #############################################################################
  # Postprocessing.

  def combine_dependent_values(self, dependent_values):
    """Takes a mapping of dependent variables to lists of cells and returns
    a new mapping with the cells combined.

    Raises:
      AssertionError : If class invariants were violated.
    """
    combined_dependent_values = dependent_values.copy()

    for var in self.dependent_variables:
      quantity, uncertainty, sample_size, units = var.as_tuple()

      quantities = dependent_values[quantity]
      uncertainties = dependent_values[uncertainty]
      sample_sizes = dependent_values[sample_size]

      # BUG FIX: the original tested `type(sample_size) is list`, but
      # `sample_size` is the *name* (a `str`) of the sample size variable, so
      # the test was always `False` and the three-way length invariant below
      # never ran. The cell lists live in `sample_sizes`.
      if type(sample_sizes) is list:
        # Sample size hasn't been combined yet.
        assert len(quantities) == len(uncertainties) \
           and len(uncertainties) == len(sample_sizes), \
          "Length of quantities list `(" + str(len(quantities)) + ")`, " + \
          "length of uncertainties list `(" + str(len(uncertainties)) + \
          "),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
          ")` are not the same."
      else:
        # Another dependent variable that uses our sample size has combined it
        # already.
        assert len(quantities) == len(uncertainties), \
          "Length of quantities list `(" + str(len(quantities)) + ")` and " + \
          "length of uncertainties list `(" + str(len(uncertainties)) + \
          ")` are not the same."

      # Convert the three separate `list`s into one list of `measured_value`s.
      measured_values = []

      for i in range(len(quantities)):
        mv = measured_value(
          quantities[i], uncertainties[i], sample_sizes[i], units
        )

        measured_values.append(mv)

      # Combine the `measured_value`s.
      combined_sample_size = combine_sample_size(
        measured_values
      )

      combined_arithmetic_mean = combine_arithmetic_mean(
        measured_values, combined_sample_size
      )

      combined_sample_standard_deviation = combine_sample_standard_deviation(
        measured_values, combined_sample_size, combined_arithmetic_mean
      )

      # Find the significant digit of the uncertainty. Rounding to it is
      # currently disabled (see the commented-out calls below), but the digit
      # is still computed so re-enabling is a one-line change.
      sigdig = find_significant_digit(combined_sample_standard_deviation)

      # combined_arithmetic_mean = round_with_int_conversion(
      #   combined_arithmetic_mean, sigdig
      # )

      # combined_sample_standard_deviation = round_with_int_conversion(
      #   combined_sample_standard_deviation, sigdig
      # )

      combined_dependent_values[quantity] = combined_arithmetic_mean
      combined_dependent_values[uncertainty] = combined_sample_standard_deviation
      combined_dependent_values[sample_size] = combined_sample_size

    return combined_dependent_values

  #############################################################################
  # Output Stream.

  def __iter__(self):
    """Return an iterator to the output sequence of separated distinguishing
    variables and dependent variables (a tuple of two `dict`s).

    This is a requirement for the `Iterable` protocol.
    """
    return self

  def records(self):
    """Return an iterator to the output sequence of CSV rows (`dict`s of
    variables to values).
    """
    return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)

  def next(self):
    """Produce the components of the next output record - a tuple of two
    `dict`s. The first `dict` is a mapping of distinguishing variables to
    distinguishing values, the second `dict` is a mapping of dependent
    variables to combined dependent values. Combining the two dicts forms a
    CSV row suitable for output.

    This is a requirement for the `Iterator` protocol.

    Raises:
      StopIteration : If there is no more output.
      AssertionError : If class invariants were violated.
    """
    assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
      "Number of dataset keys (`" + str(len(self.dataset.keys())) + \
      "`) is not equal to the number of keys in the ordering list (`" + \
      str(len(self.in_order_dataset_keys)) + "`)."

    if len(self.in_order_dataset_keys) == 0:
      raise StopIteration()

    # Get the next set of distinguishing values and convert them to a `dict`.
    raw_distinguishing_values = self.in_order_dataset_keys.popleft()
    distinguishing_values = dict(raw_distinguishing_values)

    # `pop` keeps `dataset` and `in_order_dataset_keys` in sync.
    dependent_values = self.dataset.pop(raw_distinguishing_values)

    combined_dependent_values = self.combine_dependent_values(dependent_values)

    return (distinguishing_values, combined_dependent_values)

  def __getitem__(self, distinguishing_values):
    """Produce the dependent component, a `dict` mapping dependent variables to
    combined dependent values, associated with `distinguishing_values`.

    Args:
      distinguishing_values (`dict`) :
        A `dict` mapping distinguishing variables to distinguishing values.

    Raises:
      KeyError : If `distinguishing_values` is not in the dataset.
    """
    raw_distinguishing_values = self.key_from_dict(distinguishing_values)

    dependent_values = self.dataset[raw_distinguishing_values]

    combined_dependent_values = self.combine_dependent_values(dependent_values)

    return combined_dependent_values
|
1081 |
-
|
1082 |
-
###############################################################################
|
1083 |
-
|
1084 |
-
args = process_program_arguments()

# Fall back to the dependent variables of Thrust's benchmark suite when none
# were specified on the command line (the `-d` default is an empty list).
if len(args.dependent_variables) == 0:
  args.dependent_variables = [
    "STL Average Walltime,STL Walltime Uncertainty,STL Trials",
    "STL Average Throughput,STL Throughput Uncertainty,STL Trials",
    "Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
    "Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
  ]

# Parse dependent variable options into `measured_variable`s.
dependent_variables = []

parse_dependent_variable = dependent_variable_parser()

for var in args.dependent_variables:
  dependent_variables.append(parse_dependent_variable(var))
|
1102 |
-
|
1103 |
-
# Read input files and open the output file.
|
1104 |
-
with io_manager(args.baseline_input_file,
|
1105 |
-
args.observed_input_file,
|
1106 |
-
args.output_file,
|
1107 |
-
args.preserve_whitespace) as iom:
|
1108 |
-
|
1109 |
-
# Create record aggregators.
|
1110 |
-
baseline_ra = record_aggregator(dependent_variables, args.control_variables)
|
1111 |
-
observed_ra = record_aggregator(dependent_variables, args.control_variables)
|
1112 |
-
|
1113 |
-
# Duplicate dependent variables: one for baseline results, one for observed
|
1114 |
-
# results.
|
1115 |
-
baseline_suffix = " - `{0}`".format(
|
1116 |
-
args.baseline_input_file
|
1117 |
-
)
|
1118 |
-
observed_suffix = " - `{0}`".format(
|
1119 |
-
args.observed_input_file
|
1120 |
-
)
|
1121 |
-
|
1122 |
-
for var in dependent_variables:
|
1123 |
-
# Remove the existing quantity variable:
|
1124 |
-
#
|
1125 |
-
# [ ..., a, b, c, ... ]
|
1126 |
-
# ^- remove b at index i
|
1127 |
-
#
|
1128 |
-
(quantity_idx, quantity_units) = iom.remove_variable(var.quantity)
|
1129 |
-
|
1130 |
-
# If the `--output-all-variables` option was specified, add the new baseline
|
1131 |
-
# and observed quantity variables. Note that we insert in the reverse of
|
1132 |
-
# the order we desire (which is baseline then observed):
|
1133 |
-
#
|
1134 |
-
# [ ..., a, b_1, c, ... ]
|
1135 |
-
# ^- insert b_1 at index i
|
1136 |
-
#
|
1137 |
-
# [ ..., a, b_0, b_1, c, ... ]
|
1138 |
-
# ^- insert b_0 at index i
|
1139 |
-
#
|
1140 |
-
if args.output_all_variables:
|
1141 |
-
iom.insert_variable(
|
1142 |
-
quantity_idx, var.quantity + observed_suffix, quantity_units
|
1143 |
-
)
|
1144 |
-
iom.insert_variable(
|
1145 |
-
quantity_idx, var.quantity + baseline_suffix, quantity_units
|
1146 |
-
)
|
1147 |
-
|
1148 |
-
# Remove the existing uncertainty variable.
|
1149 |
-
(uncertainty_idx, uncertainty_units) = iom.remove_variable(var.uncertainty)
|
1150 |
-
|
1151 |
-
# If the `--output-all-variables` option was specified, add the new baseline
|
1152 |
-
# and observed uncertainty variables.
|
1153 |
-
if args.output_all_variables:
|
1154 |
-
iom.insert_variable(
|
1155 |
-
uncertainty_idx, var.uncertainty + observed_suffix, uncertainty_units
|
1156 |
-
)
|
1157 |
-
iom.insert_variable(
|
1158 |
-
uncertainty_idx, var.uncertainty + baseline_suffix, uncertainty_units
|
1159 |
-
)
|
1160 |
-
|
1161 |
-
try:
|
1162 |
-
# Remove the existing sample size variable.
|
1163 |
-
(sample_size_idx, sample_size_units) = iom.remove_variable(var.sample_size)
|
1164 |
-
|
1165 |
-
# If the `--output-all-variables` option was specified, add the new
|
1166 |
-
# baseline and observed sample size variables.
|
1167 |
-
if args.output_all_variables:
|
1168 |
-
iom.insert_variable(
|
1169 |
-
sample_size_idx, var.sample_size + observed_suffix, sample_size_units
|
1170 |
-
)
|
1171 |
-
iom.insert_variable(
|
1172 |
-
sample_size_idx, var.sample_size + baseline_suffix, sample_size_units
|
1173 |
-
)
|
1174 |
-
except ValueError:
|
1175 |
-
# This is alright, because dependent variables may share the same sample
|
1176 |
-
# size variable.
|
1177 |
-
pass
|
1178 |
-
|
1179 |
-
for var in args.control_variables:
|
1180 |
-
iom.remove_variable(var)
|
1181 |
-
|
1182 |
-
# Add change variables.
|
1183 |
-
absolute_change_suffix = " - Change (`{0}` - `{1}`)".format(
|
1184 |
-
args.observed_input_file, args.baseline_input_file
|
1185 |
-
)
|
1186 |
-
|
1187 |
-
percent_change_suffix = " - % Change (`{0}` to `{1}`)".format(
|
1188 |
-
args.observed_input_file, args.baseline_input_file
|
1189 |
-
)
|
1190 |
-
|
1191 |
-
for var in dependent_variables:
|
1192 |
-
iom.append_variable(var.quantity + absolute_change_suffix, var.units)
|
1193 |
-
iom.append_variable(var.uncertainty + absolute_change_suffix, var.units)
|
1194 |
-
iom.append_variable(var.quantity + percent_change_suffix, "")
|
1195 |
-
iom.append_variable(var.uncertainty + percent_change_suffix, "")
|
1196 |
-
|
1197 |
-
# Add all baseline input data to the `record_aggregator`.
|
1198 |
-
for record in iom.baseline():
|
1199 |
-
baseline_ra.append(record)
|
1200 |
-
|
1201 |
-
for record in iom.observed():
|
1202 |
-
observed_ra.append(record)
|
1203 |
-
|
1204 |
-
iom.write_header()
|
1205 |
-
|
1206 |
-
# Compare and output results.
|
1207 |
-
for distinguishing_values, observed_dependent_values in observed_ra:
|
1208 |
-
try:
|
1209 |
-
baseline_dependent_values = baseline_ra[distinguishing_values]
|
1210 |
-
except KeyError:
|
1211 |
-
assert False, \
|
1212 |
-
"Distinguishing value `" + \
|
1213 |
-
str(baseline_ra.key_from_dict(distinguishing_values)) + \
|
1214 |
-
"` was not found in the baseline results."
|
1215 |
-
|
1216 |
-
statistically_significant_change = False
|
1217 |
-
|
1218 |
-
record = distinguishing_values.copy()
|
1219 |
-
|
1220 |
-
# Compute changes, add the values and changes to the record, and identify
|
1221 |
-
# changes that are statistically significant.
|
1222 |
-
for var in dependent_variables:
|
1223 |
-
# Compute changes.
|
1224 |
-
baseline_quantity = baseline_dependent_values[var.quantity]
|
1225 |
-
baseline_uncertainty = baseline_dependent_values[var.uncertainty]
|
1226 |
-
baseline_sample_size = baseline_dependent_values[var.sample_size]
|
1227 |
-
|
1228 |
-
observed_quantity = observed_dependent_values[var.quantity]
|
1229 |
-
observed_uncertainty = observed_dependent_values[var.uncertainty]
|
1230 |
-
observed_sample_size = observed_dependent_values[var.sample_size]
|
1231 |
-
|
1232 |
-
(abs_change, abs_change_unc, per_change, per_change_unc) = \
|
1233 |
-
percent_change_uncertainty(
|
1234 |
-
baseline_quantity, baseline_uncertainty,
|
1235 |
-
observed_quantity, observed_uncertainty
|
1236 |
-
)
|
1237 |
-
|
1238 |
-
# Round the change quantities and uncertainties to the significant digit
|
1239 |
-
# of uncertainty.
|
1240 |
-
try:
|
1241 |
-
abs_change_sigdig = max(
|
1242 |
-
find_significant_digit(abs_change),
|
1243 |
-
find_significant_digit(abs_change_unc),
|
1244 |
-
)
|
1245 |
-
|
1246 |
-
# abs_change = round_with_int_conversion(
|
1247 |
-
# abs_change, abs_change_sigdig
|
1248 |
-
# )
|
1249 |
-
# abs_change_unc = round_with_int_conversion(
|
1250 |
-
# abs_change_unc, abs_change_sigdig
|
1251 |
-
# )
|
1252 |
-
except:
|
1253 |
-
# Any value errors should be due to NaNs returned by
|
1254 |
-
# `percent_change_uncertainty` because quantities or change in
|
1255 |
-
# quantities was 0. We can ignore these.
|
1256 |
-
pass
|
1257 |
-
|
1258 |
-
try:
|
1259 |
-
per_change_sigdig = max(
|
1260 |
-
find_significant_digit(per_change),
|
1261 |
-
find_significant_digit(per_change_unc)
|
1262 |
-
)
|
1263 |
-
|
1264 |
-
# per_change = round_with_int_conversion(
|
1265 |
-
# per_change, per_change_sigdig
|
1266 |
-
# )
|
1267 |
-
# per_change_unc = round_with_int_conversion(
|
1268 |
-
# per_change_unc, per_change_sigdig
|
1269 |
-
# )
|
1270 |
-
except:
|
1271 |
-
# Any value errors should be due to NaNs returned by
|
1272 |
-
# `percent_change_uncertainty` because quantities or change in
|
1273 |
-
# quantities was 0. We can ignore these.
|
1274 |
-
pass
|
1275 |
-
|
1276 |
-
# Add the values (if the `--output-all-variables` option was specified)
|
1277 |
-
# and the changes to the record. Note that the record's schema is
|
1278 |
-
# different from the original schema. If multiple dependent variables
|
1279 |
-
# share the same sample size variable, it's fine - they will overwrite
|
1280 |
-
# each other, but with the same value.
|
1281 |
-
if args.output_all_variables:
|
1282 |
-
record[var.quantity + baseline_suffix] = baseline_quantity
|
1283 |
-
record[var.uncertainty + baseline_suffix] = baseline_uncertainty
|
1284 |
-
record[var.sample_size + baseline_suffix] = baseline_sample_size
|
1285 |
-
record[var.quantity + observed_suffix] = observed_quantity
|
1286 |
-
record[var.uncertainty + observed_suffix] = observed_uncertainty
|
1287 |
-
record[var.sample_size + observed_suffix] = observed_sample_size
|
1288 |
-
|
1289 |
-
record[var.quantity + absolute_change_suffix] = abs_change
|
1290 |
-
record[var.uncertainty + absolute_change_suffix] = abs_change_unc
|
1291 |
-
record[var.quantity + percent_change_suffix] = per_change
|
1292 |
-
record[var.uncertainty + percent_change_suffix] = per_change_unc
|
1293 |
-
|
1294 |
-
# If the range of uncertainties overlap don't overlap and the percentage
|
1295 |
-
# change is greater than the change threshold, then change is
|
1296 |
-
# statistically significant.
|
1297 |
-
overlap = ranges_overlap_uncertainty(
|
1298 |
-
baseline_quantity, baseline_uncertainty,
|
1299 |
-
observed_quantity, observed_uncertainty
|
1300 |
-
)
|
1301 |
-
if not overlap and per_change >= args.change_threshold:
|
1302 |
-
statistically_significant_change = True
|
1303 |
-
|
1304 |
-
# Print the record if a statistically significant change was found or if the
|
1305 |
-
# `--output-all-datapoints` option was specified.
|
1306 |
-
if args.output_all_datapoints or statistically_significant_change:
|
1307 |
-
iom.write(record)
|
1308 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/minmax.h
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
namespace thrust
|
22 |
-
{
|
23 |
-
|
24 |
-
|
25 |
-
template<typename T, typename BinaryPredicate>
|
26 |
-
__host__ __device__
|
27 |
-
T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp)
|
28 |
-
{
|
29 |
-
return comp(rhs, lhs) ? rhs : lhs;
|
30 |
-
} // end min()
|
31 |
-
|
32 |
-
template<typename T>
|
33 |
-
__host__ __device__
|
34 |
-
T min THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs)
|
35 |
-
{
|
36 |
-
return rhs < lhs ? rhs : lhs;
|
37 |
-
} // end min()
|
38 |
-
|
39 |
-
template<typename T, typename BinaryPredicate>
|
40 |
-
__host__ __device__
|
41 |
-
T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs, BinaryPredicate comp)
|
42 |
-
{
|
43 |
-
return comp(lhs,rhs) ? rhs : lhs;
|
44 |
-
} // end max()
|
45 |
-
|
46 |
-
template<typename T>
|
47 |
-
__host__ __device__
|
48 |
-
T max THRUST_PREVENT_MACRO_SUBSTITUTION (const T &lhs, const T &rhs)
|
49 |
-
{
|
50 |
-
return lhs < rhs ? rhs : lhs;
|
51 |
-
} // end max()
|
52 |
-
|
53 |
-
|
54 |
-
} // end thrust
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/for_each.h
DELETED
@@ -1,159 +0,0 @@
|
|
1 |
-
|
2 |
-
/******************************************************************************
|
3 |
-
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
*
|
5 |
-
* Redistribution and use in source and binary forms, with or without
|
6 |
-
* modification, are permitted provided that the following conditions are met:
|
7 |
-
* * Redistributions of source code must retain the above copyright
|
8 |
-
* notice, this list of conditions and the following disclaimer.
|
9 |
-
* * Redistributions in binary form must reproduce the above copyright
|
10 |
-
* notice, this list of conditions and the following disclaimer in the
|
11 |
-
* documentation and/or other materials provided with the distribution.
|
12 |
-
* * Neither the name of the NVIDIA CORPORATION nor the
|
13 |
-
* names of its contributors may be used to endorse or promote products
|
14 |
-
* derived from this software without specific prior written permission.
|
15 |
-
*
|
16 |
-
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
17 |
-
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
18 |
-
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
19 |
-
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
|
20 |
-
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
21 |
-
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
22 |
-
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
23 |
-
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
24 |
-
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
25 |
-
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
26 |
-
*
|
27 |
-
******************************************************************************/
|
28 |
-
|
29 |
-
// TODO: Move into system::cuda
|
30 |
-
|
31 |
-
#pragma once
|
32 |
-
|
33 |
-
#include <thrust/detail/config.h>
|
34 |
-
#include <thrust/detail/cpp14_required.h>
|
35 |
-
|
36 |
-
#if THRUST_CPP_DIALECT >= 2014
|
37 |
-
|
38 |
-
#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
39 |
-
|
40 |
-
#include <thrust/system/cuda/config.h>
|
41 |
-
|
42 |
-
#include <thrust/system/cuda/detail/async/customization.h>
|
43 |
-
#include <thrust/system/cuda/detail/parallel_for.h>
|
44 |
-
#include <thrust/system/cuda/future.h>
|
45 |
-
#include <thrust/iterator/iterator_traits.h>
|
46 |
-
#include <thrust/distance.h>
|
47 |
-
|
48 |
-
#include <type_traits>
|
49 |
-
|
50 |
-
namespace thrust
|
51 |
-
{
|
52 |
-
|
53 |
-
namespace system { namespace cuda { namespace detail
|
54 |
-
{
|
55 |
-
|
56 |
-
template <typename ForwardIt, typename UnaryFunction>
|
57 |
-
struct async_for_each_fn
|
58 |
-
{
|
59 |
-
ForwardIt first;
|
60 |
-
UnaryFunction f;
|
61 |
-
|
62 |
-
__host__ __device__
|
63 |
-
async_for_each_fn(ForwardIt&& first_, UnaryFunction&& f_)
|
64 |
-
: first(std::move(first_)), f(std::move(f_))
|
65 |
-
{}
|
66 |
-
|
67 |
-
template <typename Index>
|
68 |
-
__host__ __device__
|
69 |
-
void operator()(Index idx)
|
70 |
-
{
|
71 |
-
f(thrust::raw_reference_cast(first[idx]));
|
72 |
-
}
|
73 |
-
};
|
74 |
-
|
75 |
-
template <
|
76 |
-
typename DerivedPolicy
|
77 |
-
, typename ForwardIt, typename Size, typename UnaryFunction
|
78 |
-
>
|
79 |
-
auto async_for_each_n(
|
80 |
-
execution_policy<DerivedPolicy>& policy,
|
81 |
-
ForwardIt first,
|
82 |
-
Size n,
|
83 |
-
UnaryFunction func
|
84 |
-
) -> unique_eager_event
|
85 |
-
{
|
86 |
-
unique_eager_event e;
|
87 |
-
|
88 |
-
// Set up stream with dependencies.
|
89 |
-
|
90 |
-
cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);
|
91 |
-
|
92 |
-
if (thrust::cuda_cub::default_stream() != user_raw_stream)
|
93 |
-
{
|
94 |
-
e = make_dependent_event(
|
95 |
-
std::tuple_cat(
|
96 |
-
std::make_tuple(
|
97 |
-
unique_stream(nonowning, user_raw_stream)
|
98 |
-
)
|
99 |
-
, extract_dependencies(
|
100 |
-
std::move(thrust::detail::derived_cast(policy))
|
101 |
-
)
|
102 |
-
)
|
103 |
-
);
|
104 |
-
}
|
105 |
-
else
|
106 |
-
{
|
107 |
-
e = make_dependent_event(
|
108 |
-
extract_dependencies(
|
109 |
-
std::move(thrust::detail::derived_cast(policy))
|
110 |
-
)
|
111 |
-
);
|
112 |
-
}
|
113 |
-
|
114 |
-
// Run for_each.
|
115 |
-
|
116 |
-
async_for_each_fn<ForwardIt, UnaryFunction> wrapped(
|
117 |
-
std::move(first), std::move(func)
|
118 |
-
);
|
119 |
-
|
120 |
-
thrust::cuda_cub::throw_on_error(
|
121 |
-
thrust::cuda_cub::__parallel_for::parallel_for(
|
122 |
-
n, std::move(wrapped), e.stream().native_handle()
|
123 |
-
)
|
124 |
-
, "after for_each launch"
|
125 |
-
);
|
126 |
-
|
127 |
-
return e;
|
128 |
-
}
|
129 |
-
|
130 |
-
}}} // namespace system::cuda::detail
|
131 |
-
|
132 |
-
namespace cuda_cub
|
133 |
-
{
|
134 |
-
|
135 |
-
// ADL entry point.
|
136 |
-
template <
|
137 |
-
typename DerivedPolicy
|
138 |
-
, typename ForwardIt, typename Sentinel, typename UnaryFunction
|
139 |
-
>
|
140 |
-
auto async_for_each(
|
141 |
-
execution_policy<DerivedPolicy>& policy,
|
142 |
-
ForwardIt first,
|
143 |
-
Sentinel last,
|
144 |
-
UnaryFunction&& func
|
145 |
-
)
|
146 |
-
THRUST_RETURNS(
|
147 |
-
thrust::system::cuda::detail::async_for_each_n(
|
148 |
-
policy, first, distance(first, last), THRUST_FWD(func)
|
149 |
-
)
|
150 |
-
);
|
151 |
-
|
152 |
-
} // cuda_cub
|
153 |
-
|
154 |
-
} // end namespace thrust
|
155 |
-
|
156 |
-
#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
|
157 |
-
|
158 |
-
#endif
|
159 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sort.h
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// the purpose of this header is to #include the sort.h header
|
22 |
-
// of the sequential, host, and device systems. It should be #included in any
|
23 |
-
// code which uses adl to dispatch sort
|
24 |
-
|
25 |
-
#include <thrust/system/detail/sequential/sort.h>
|
26 |
-
|
27 |
-
// SCons can't see through the #defines below to figure out what this header
|
28 |
-
// includes, so we fake it out by specifying all possible files we might end up
|
29 |
-
// including inside an #if 0.
|
30 |
-
#if 0
|
31 |
-
#include <thrust/system/cpp/detail/sort.h>
|
32 |
-
#include <thrust/system/cuda/detail/sort.h>
|
33 |
-
#include <thrust/system/omp/detail/sort.h>
|
34 |
-
#include <thrust/system/tbb/detail/sort.h>
|
35 |
-
#endif
|
36 |
-
|
37 |
-
#define __THRUST_HOST_SYSTEM_SORT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/sort.h>
|
38 |
-
#include __THRUST_HOST_SYSTEM_SORT_HEADER
|
39 |
-
#undef __THRUST_HOST_SYSTEM_SORT_HEADER
|
40 |
-
|
41 |
-
#define __THRUST_DEVICE_SYSTEM_SORT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/sort.h>
|
42 |
-
#include __THRUST_DEVICE_SYSTEM_SORT_HEADER
|
43 |
-
#undef __THRUST_DEVICE_SYSTEM_SORT_HEADER
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/type_traits/is_execution_policy.h
DELETED
@@ -1,50 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2018 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
#include <thrust/detail/type_traits.h>
|
22 |
-
#include <thrust/detail/execution_policy.h>
|
23 |
-
|
24 |
-
namespace thrust
|
25 |
-
{
|
26 |
-
|
27 |
-
/// Unary metafunction that is \c true if \c T is an \a ExecutionPolicy and
|
28 |
-
/// \c false otherwise.
|
29 |
-
template <typename T>
|
30 |
-
#if THRUST_CPP_DIALECT >= 2011
|
31 |
-
using is_execution_policy =
|
32 |
-
#else
|
33 |
-
struct is_execution_policy :
|
34 |
-
#endif
|
35 |
-
detail::is_base_of<detail::execution_policy_marker, T>
|
36 |
-
#if THRUST_CPP_DIALECT < 2011
|
37 |
-
{}
|
38 |
-
#endif
|
39 |
-
;
|
40 |
-
|
41 |
-
/// <CODE>constexpr bool</CODE> that is \c true if \c T is an \a ExecutionPolicy
|
42 |
-
/// and \c false otherwise.
|
43 |
-
#if THRUST_CPP_DIALECT >= 2014
|
44 |
-
template <typename T>
|
45 |
-
constexpr bool is_execution_policy_v = is_execution_policy<T>::value;
|
46 |
-
#endif
|
47 |
-
|
48 |
-
} // end namespace thrust
|
49 |
-
|
50 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Text2Human/Text2Human/train_vqvae.py
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import logging
|
3 |
-
import os
|
4 |
-
import os.path as osp
|
5 |
-
import random
|
6 |
-
import time
|
7 |
-
|
8 |
-
import torch
|
9 |
-
|
10 |
-
from data.segm_attr_dataset import DeepFashionAttrSegmDataset
|
11 |
-
from models import create_model
|
12 |
-
from utils.logger import MessageLogger, get_root_logger, init_tb_logger
|
13 |
-
from utils.options import dict2str, dict_to_nonedict, parse
|
14 |
-
from utils.util import make_exp_dirs
|
15 |
-
|
16 |
-
|
17 |
-
def main():
|
18 |
-
# options
|
19 |
-
parser = argparse.ArgumentParser()
|
20 |
-
parser.add_argument('-opt', type=str, help='Path to option YAML file.')
|
21 |
-
args = parser.parse_args()
|
22 |
-
opt = parse(args.opt, is_train=True)
|
23 |
-
|
24 |
-
# mkdir and loggers
|
25 |
-
make_exp_dirs(opt)
|
26 |
-
log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log")
|
27 |
-
logger = get_root_logger(
|
28 |
-
logger_name='base', log_level=logging.INFO, log_file=log_file)
|
29 |
-
logger.info(dict2str(opt))
|
30 |
-
# initialize tensorboard logger
|
31 |
-
tb_logger = None
|
32 |
-
if opt['use_tb_logger'] and 'debug' not in opt['name']:
|
33 |
-
tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name'])
|
34 |
-
|
35 |
-
# convert to NoneDict, which returns None for missing keys
|
36 |
-
opt = dict_to_nonedict(opt)
|
37 |
-
|
38 |
-
# set up data loader
|
39 |
-
train_dataset = DeepFashionAttrSegmDataset(
|
40 |
-
img_dir=opt['train_img_dir'],
|
41 |
-
segm_dir=opt['segm_dir'],
|
42 |
-
pose_dir=opt['pose_dir'],
|
43 |
-
ann_dir=opt['train_ann_file'],
|
44 |
-
xflip=True)
|
45 |
-
train_loader = torch.utils.data.DataLoader(
|
46 |
-
dataset=train_dataset,
|
47 |
-
batch_size=opt['batch_size'],
|
48 |
-
shuffle=True,
|
49 |
-
num_workers=opt['num_workers'],
|
50 |
-
persistent_workers=True,
|
51 |
-
drop_last=True)
|
52 |
-
logger.info(f'Number of train set: {len(train_dataset)}.')
|
53 |
-
opt['max_iters'] = opt['num_epochs'] * len(
|
54 |
-
train_dataset) // opt['batch_size']
|
55 |
-
|
56 |
-
val_dataset = DeepFashionAttrSegmDataset(
|
57 |
-
img_dir=opt['train_img_dir'],
|
58 |
-
segm_dir=opt['segm_dir'],
|
59 |
-
pose_dir=opt['pose_dir'],
|
60 |
-
ann_dir=opt['val_ann_file'])
|
61 |
-
val_loader = torch.utils.data.DataLoader(
|
62 |
-
dataset=val_dataset, batch_size=1, shuffle=False)
|
63 |
-
logger.info(f'Number of val set: {len(val_dataset)}.')
|
64 |
-
|
65 |
-
test_dataset = DeepFashionAttrSegmDataset(
|
66 |
-
img_dir=opt['test_img_dir'],
|
67 |
-
segm_dir=opt['segm_dir'],
|
68 |
-
pose_dir=opt['pose_dir'],
|
69 |
-
ann_dir=opt['test_ann_file'])
|
70 |
-
test_loader = torch.utils.data.DataLoader(
|
71 |
-
dataset=test_dataset, batch_size=1, shuffle=False)
|
72 |
-
logger.info(f'Number of test set: {len(test_dataset)}.')
|
73 |
-
|
74 |
-
current_iter = 0
|
75 |
-
best_epoch = None
|
76 |
-
best_loss = 100000
|
77 |
-
|
78 |
-
model = create_model(opt)
|
79 |
-
|
80 |
-
data_time, iter_time = 0, 0
|
81 |
-
current_iter = 0
|
82 |
-
|
83 |
-
# create message logger (formatted outputs)
|
84 |
-
msg_logger = MessageLogger(opt, current_iter, tb_logger)
|
85 |
-
|
86 |
-
for epoch in range(opt['num_epochs']):
|
87 |
-
lr = model.update_learning_rate(epoch)
|
88 |
-
|
89 |
-
for _, batch_data in enumerate(train_loader):
|
90 |
-
data_time = time.time() - data_time
|
91 |
-
|
92 |
-
current_iter += 1
|
93 |
-
|
94 |
-
model.optimize_parameters(batch_data, current_iter)
|
95 |
-
|
96 |
-
iter_time = time.time() - iter_time
|
97 |
-
if current_iter % opt['print_freq'] == 0:
|
98 |
-
log_vars = {'epoch': epoch, 'iter': current_iter}
|
99 |
-
log_vars.update({'lrs': [lr]})
|
100 |
-
log_vars.update({'time': iter_time, 'data_time': data_time})
|
101 |
-
log_vars.update(model.get_current_log())
|
102 |
-
msg_logger(log_vars)
|
103 |
-
|
104 |
-
data_time = time.time()
|
105 |
-
iter_time = time.time()
|
106 |
-
|
107 |
-
if epoch % opt['val_freq'] == 0:
|
108 |
-
save_dir = f'{opt["path"]["visualization"]}/valset/epoch_{epoch:03d}' # noqa
|
109 |
-
os.makedirs(save_dir, exist_ok=opt['debug'])
|
110 |
-
val_loss_total = model.inference(val_loader, save_dir)
|
111 |
-
|
112 |
-
save_dir = f'{opt["path"]["visualization"]}/testset/epoch_{epoch:03d}' # noqa
|
113 |
-
os.makedirs(save_dir, exist_ok=opt['debug'])
|
114 |
-
test_loss_total = model.inference(test_loader, save_dir)
|
115 |
-
|
116 |
-
logger.info(f'Epoch: {epoch}, '
|
117 |
-
f'val_loss_total: {val_loss_total}, '
|
118 |
-
f'test_loss_total: {test_loss_total}.')
|
119 |
-
|
120 |
-
if test_loss_total < best_loss:
|
121 |
-
best_epoch = epoch
|
122 |
-
best_loss = test_loss_total
|
123 |
-
|
124 |
-
logger.info(f'Best epoch: {best_epoch}, '
|
125 |
-
f'Best test loss: {best_loss: .4f}.')
|
126 |
-
|
127 |
-
# save model
|
128 |
-
model.save_network(f'{opt["path"]["models"]}/epoch{epoch}.pth')
|
129 |
-
|
130 |
-
|
131 |
-
if __name__ == '__main__':
|
132 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
DELETED
@@ -1,157 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
|
4 |
-
from ..builder import BBOX_SAMPLERS
|
5 |
-
from .random_sampler import RandomSampler
|
6 |
-
|
7 |
-
|
8 |
-
@BBOX_SAMPLERS.register_module()
|
9 |
-
class IoUBalancedNegSampler(RandomSampler):
|
10 |
-
"""IoU Balanced Sampling.
|
11 |
-
|
12 |
-
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
|
13 |
-
|
14 |
-
Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
|
15 |
-
are sampled from proposals whose IoU are lower than `floor_thr` randomly.
|
16 |
-
The others are sampled from proposals whose IoU are higher than
|
17 |
-
`floor_thr`. These proposals are sampled from some bins evenly, which are
|
18 |
-
split by `num_bins` via IoU evenly.
|
19 |
-
|
20 |
-
Args:
|
21 |
-
num (int): number of proposals.
|
22 |
-
pos_fraction (float): fraction of positive proposals.
|
23 |
-
floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
|
24 |
-
set to -1 if all using IoU balanced sampling.
|
25 |
-
floor_fraction (float): sampling fraction of proposals under floor_thr.
|
26 |
-
num_bins (int): number of bins in IoU balanced sampling.
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self,
|
30 |
-
num,
|
31 |
-
pos_fraction,
|
32 |
-
floor_thr=-1,
|
33 |
-
floor_fraction=0,
|
34 |
-
num_bins=3,
|
35 |
-
**kwargs):
|
36 |
-
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
|
37 |
-
**kwargs)
|
38 |
-
assert floor_thr >= 0 or floor_thr == -1
|
39 |
-
assert 0 <= floor_fraction <= 1
|
40 |
-
assert num_bins >= 1
|
41 |
-
|
42 |
-
self.floor_thr = floor_thr
|
43 |
-
self.floor_fraction = floor_fraction
|
44 |
-
self.num_bins = num_bins
|
45 |
-
|
46 |
-
def sample_via_interval(self, max_overlaps, full_set, num_expected):
|
47 |
-
"""Sample according to the iou interval.
|
48 |
-
|
49 |
-
Args:
|
50 |
-
max_overlaps (torch.Tensor): IoU between bounding boxes and ground
|
51 |
-
truth boxes.
|
52 |
-
full_set (set(int)): A full set of indices of boxes。
|
53 |
-
num_expected (int): Number of expected samples。
|
54 |
-
|
55 |
-
Returns:
|
56 |
-
np.ndarray: Indices of samples
|
57 |
-
"""
|
58 |
-
max_iou = max_overlaps.max()
|
59 |
-
iou_interval = (max_iou - self.floor_thr) / self.num_bins
|
60 |
-
per_num_expected = int(num_expected / self.num_bins)
|
61 |
-
|
62 |
-
sampled_inds = []
|
63 |
-
for i in range(self.num_bins):
|
64 |
-
start_iou = self.floor_thr + i * iou_interval
|
65 |
-
end_iou = self.floor_thr + (i + 1) * iou_interval
|
66 |
-
tmp_set = set(
|
67 |
-
np.where(
|
68 |
-
np.logical_and(max_overlaps >= start_iou,
|
69 |
-
max_overlaps < end_iou))[0])
|
70 |
-
tmp_inds = list(tmp_set & full_set)
|
71 |
-
if len(tmp_inds) > per_num_expected:
|
72 |
-
tmp_sampled_set = self.random_choice(tmp_inds,
|
73 |
-
per_num_expected)
|
74 |
-
else:
|
75 |
-
tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
|
76 |
-
sampled_inds.append(tmp_sampled_set)
|
77 |
-
|
78 |
-
sampled_inds = np.concatenate(sampled_inds)
|
79 |
-
if len(sampled_inds) < num_expected:
|
80 |
-
num_extra = num_expected - len(sampled_inds)
|
81 |
-
extra_inds = np.array(list(full_set - set(sampled_inds)))
|
82 |
-
if len(extra_inds) > num_extra:
|
83 |
-
extra_inds = self.random_choice(extra_inds, num_extra)
|
84 |
-
sampled_inds = np.concatenate([sampled_inds, extra_inds])
|
85 |
-
|
86 |
-
return sampled_inds
|
87 |
-
|
88 |
-
def _sample_neg(self, assign_result, num_expected, **kwargs):
|
89 |
-
"""Sample negative boxes.
|
90 |
-
|
91 |
-
Args:
|
92 |
-
assign_result (:obj:`AssignResult`): The assigned results of boxes.
|
93 |
-
num_expected (int): The number of expected negative samples
|
94 |
-
|
95 |
-
Returns:
|
96 |
-
Tensor or ndarray: sampled indices.
|
97 |
-
"""
|
98 |
-
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
|
99 |
-
if neg_inds.numel() != 0:
|
100 |
-
neg_inds = neg_inds.squeeze(1)
|
101 |
-
if len(neg_inds) <= num_expected:
|
102 |
-
return neg_inds
|
103 |
-
else:
|
104 |
-
max_overlaps = assign_result.max_overlaps.cpu().numpy()
|
105 |
-
# balance sampling for negative samples
|
106 |
-
neg_set = set(neg_inds.cpu().numpy())
|
107 |
-
|
108 |
-
if self.floor_thr > 0:
|
109 |
-
floor_set = set(
|
110 |
-
np.where(
|
111 |
-
np.logical_and(max_overlaps >= 0,
|
112 |
-
max_overlaps < self.floor_thr))[0])
|
113 |
-
iou_sampling_set = set(
|
114 |
-
np.where(max_overlaps >= self.floor_thr)[0])
|
115 |
-
elif self.floor_thr == 0:
|
116 |
-
floor_set = set(np.where(max_overlaps == 0)[0])
|
117 |
-
iou_sampling_set = set(
|
118 |
-
np.where(max_overlaps > self.floor_thr)[0])
|
119 |
-
else:
|
120 |
-
floor_set = set()
|
121 |
-
iou_sampling_set = set(
|
122 |
-
np.where(max_overlaps > self.floor_thr)[0])
|
123 |
-
# for sampling interval calculation
|
124 |
-
self.floor_thr = 0
|
125 |
-
|
126 |
-
floor_neg_inds = list(floor_set & neg_set)
|
127 |
-
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
|
128 |
-
num_expected_iou_sampling = int(num_expected *
|
129 |
-
(1 - self.floor_fraction))
|
130 |
-
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
|
131 |
-
if self.num_bins >= 2:
|
132 |
-
iou_sampled_inds = self.sample_via_interval(
|
133 |
-
max_overlaps, set(iou_sampling_neg_inds),
|
134 |
-
num_expected_iou_sampling)
|
135 |
-
else:
|
136 |
-
iou_sampled_inds = self.random_choice(
|
137 |
-
iou_sampling_neg_inds, num_expected_iou_sampling)
|
138 |
-
else:
|
139 |
-
iou_sampled_inds = np.array(
|
140 |
-
iou_sampling_neg_inds, dtype=np.int)
|
141 |
-
num_expected_floor = num_expected - len(iou_sampled_inds)
|
142 |
-
if len(floor_neg_inds) > num_expected_floor:
|
143 |
-
sampled_floor_inds = self.random_choice(
|
144 |
-
floor_neg_inds, num_expected_floor)
|
145 |
-
else:
|
146 |
-
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
|
147 |
-
sampled_inds = np.concatenate(
|
148 |
-
(sampled_floor_inds, iou_sampled_inds))
|
149 |
-
if len(sampled_inds) < num_expected:
|
150 |
-
num_extra = num_expected - len(sampled_inds)
|
151 |
-
extra_inds = np.array(list(neg_set - set(sampled_inds)))
|
152 |
-
if len(extra_inds) > num_extra:
|
153 |
-
extra_inds = self.random_choice(extra_inds, num_extra)
|
154 |
-
sampled_inds = np.concatenate((sampled_inds, extra_inds))
|
155 |
-
sampled_inds = torch.from_numpy(sampled_inds).long().to(
|
156 |
-
assign_result.gt_inds.device)
|
157 |
-
return sampled_inds
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/evaluation/panoptic_evaluation.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import contextlib
|
3 |
-
import io
|
4 |
-
import itertools
|
5 |
-
import json
|
6 |
-
import logging
|
7 |
-
import numpy as np
|
8 |
-
import os
|
9 |
-
import tempfile
|
10 |
-
from collections import OrderedDict
|
11 |
-
from typing import Optional
|
12 |
-
from PIL import Image
|
13 |
-
from tabulate import tabulate
|
14 |
-
|
15 |
-
from detectron2.data import MetadataCatalog
|
16 |
-
from detectron2.utils import comm
|
17 |
-
from detectron2.utils.file_io import PathManager
|
18 |
-
|
19 |
-
from .evaluator import DatasetEvaluator
|
20 |
-
|
21 |
-
logger = logging.getLogger(__name__)
|
22 |
-
|
23 |
-
|
24 |
-
class COCOPanopticEvaluator(DatasetEvaluator):
|
25 |
-
"""
|
26 |
-
Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
|
27 |
-
It saves panoptic segmentation prediction in `output_dir`
|
28 |
-
|
29 |
-
It contains a synchronize call and has to be called from all workers.
|
30 |
-
"""
|
31 |
-
|
32 |
-
def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
|
33 |
-
"""
|
34 |
-
Args:
|
35 |
-
dataset_name: name of the dataset
|
36 |
-
output_dir: output directory to save results for evaluation.
|
37 |
-
"""
|
38 |
-
self._metadata = MetadataCatalog.get(dataset_name)
|
39 |
-
self._thing_contiguous_id_to_dataset_id = {
|
40 |
-
v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
|
41 |
-
}
|
42 |
-
self._stuff_contiguous_id_to_dataset_id = {
|
43 |
-
v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
|
44 |
-
}
|
45 |
-
|
46 |
-
self._output_dir = output_dir
|
47 |
-
if self._output_dir is not None:
|
48 |
-
PathManager.mkdirs(self._output_dir)
|
49 |
-
|
50 |
-
def reset(self):
|
51 |
-
self._predictions = []
|
52 |
-
|
53 |
-
def _convert_category_id(self, segment_info):
|
54 |
-
isthing = segment_info.pop("isthing", None)
|
55 |
-
if isthing is None:
|
56 |
-
# the model produces panoptic category id directly. No more conversion needed
|
57 |
-
return segment_info
|
58 |
-
if isthing is True:
|
59 |
-
segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
|
60 |
-
segment_info["category_id"]
|
61 |
-
]
|
62 |
-
else:
|
63 |
-
segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
|
64 |
-
segment_info["category_id"]
|
65 |
-
]
|
66 |
-
return segment_info
|
67 |
-
|
68 |
-
def process(self, inputs, outputs):
|
69 |
-
from panopticapi.utils import id2rgb
|
70 |
-
|
71 |
-
for input, output in zip(inputs, outputs):
|
72 |
-
panoptic_img, segments_info = output["panoptic_seg"]
|
73 |
-
panoptic_img = panoptic_img.cpu().numpy()
|
74 |
-
if segments_info is None:
|
75 |
-
# If "segments_info" is None, we assume "panoptic_img" is a
|
76 |
-
# H*W int32 image storing the panoptic_id in the format of
|
77 |
-
# category_id * label_divisor + instance_id. We reserve -1 for
|
78 |
-
# VOID label, and add 1 to panoptic_img since the official
|
79 |
-
# evaluation script uses 0 for VOID label.
|
80 |
-
label_divisor = self._metadata.label_divisor
|
81 |
-
segments_info = []
|
82 |
-
for panoptic_label in np.unique(panoptic_img):
|
83 |
-
if panoptic_label == -1:
|
84 |
-
# VOID region.
|
85 |
-
continue
|
86 |
-
pred_class = panoptic_label // label_divisor
|
87 |
-
isthing = (
|
88 |
-
pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
|
89 |
-
)
|
90 |
-
segments_info.append(
|
91 |
-
{
|
92 |
-
"id": int(panoptic_label) + 1,
|
93 |
-
"category_id": int(pred_class),
|
94 |
-
"isthing": bool(isthing),
|
95 |
-
}
|
96 |
-
)
|
97 |
-
# Official evaluation script uses 0 for VOID label.
|
98 |
-
panoptic_img += 1
|
99 |
-
|
100 |
-
file_name = os.path.basename(input["file_name"])
|
101 |
-
file_name_png = os.path.splitext(file_name)[0] + ".png"
|
102 |
-
with io.BytesIO() as out:
|
103 |
-
Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
|
104 |
-
segments_info = [self._convert_category_id(x) for x in segments_info]
|
105 |
-
self._predictions.append(
|
106 |
-
{
|
107 |
-
"image_id": input["image_id"],
|
108 |
-
"file_name": file_name_png,
|
109 |
-
"png_string": out.getvalue(),
|
110 |
-
"segments_info": segments_info,
|
111 |
-
}
|
112 |
-
)
|
113 |
-
|
114 |
-
def evaluate(self):
|
115 |
-
comm.synchronize()
|
116 |
-
|
117 |
-
self._predictions = comm.gather(self._predictions)
|
118 |
-
self._predictions = list(itertools.chain(*self._predictions))
|
119 |
-
if not comm.is_main_process():
|
120 |
-
return
|
121 |
-
|
122 |
-
# PanopticApi requires local files
|
123 |
-
gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
|
124 |
-
gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
|
125 |
-
|
126 |
-
with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
|
127 |
-
logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
|
128 |
-
for p in self._predictions:
|
129 |
-
with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
|
130 |
-
f.write(p.pop("png_string"))
|
131 |
-
|
132 |
-
with open(gt_json, "r") as f:
|
133 |
-
json_data = json.load(f)
|
134 |
-
json_data["annotations"] = self._predictions
|
135 |
-
|
136 |
-
output_dir = self._output_dir or pred_dir
|
137 |
-
predictions_json = os.path.join(output_dir, "predictions.json")
|
138 |
-
with PathManager.open(predictions_json, "w") as f:
|
139 |
-
f.write(json.dumps(json_data))
|
140 |
-
|
141 |
-
from panopticapi.evaluation import pq_compute
|
142 |
-
|
143 |
-
with contextlib.redirect_stdout(io.StringIO()):
|
144 |
-
pq_res = pq_compute(
|
145 |
-
gt_json,
|
146 |
-
PathManager.get_local_path(predictions_json),
|
147 |
-
gt_folder=gt_folder,
|
148 |
-
pred_folder=pred_dir,
|
149 |
-
)
|
150 |
-
|
151 |
-
res = {}
|
152 |
-
res["PQ"] = 100 * pq_res["All"]["pq"]
|
153 |
-
res["SQ"] = 100 * pq_res["All"]["sq"]
|
154 |
-
res["RQ"] = 100 * pq_res["All"]["rq"]
|
155 |
-
res["PQ_th"] = 100 * pq_res["Things"]["pq"]
|
156 |
-
res["SQ_th"] = 100 * pq_res["Things"]["sq"]
|
157 |
-
res["RQ_th"] = 100 * pq_res["Things"]["rq"]
|
158 |
-
res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
|
159 |
-
res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
|
160 |
-
res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
|
161 |
-
|
162 |
-
results = OrderedDict({"panoptic_seg": res})
|
163 |
-
_print_panoptic_results(pq_res)
|
164 |
-
|
165 |
-
return results
|
166 |
-
|
167 |
-
|
168 |
-
def _print_panoptic_results(pq_res):
|
169 |
-
headers = ["", "PQ", "SQ", "RQ", "#categories"]
|
170 |
-
data = []
|
171 |
-
for name in ["All", "Things", "Stuff"]:
|
172 |
-
row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
|
173 |
-
data.append(row)
|
174 |
-
table = tabulate(
|
175 |
-
data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
|
176 |
-
)
|
177 |
-
logger.info("Panoptic Evaluation Results:\n" + table)
|
178 |
-
|
179 |
-
|
180 |
-
if __name__ == "__main__":
|
181 |
-
from detectron2.utils.logger import setup_logger
|
182 |
-
|
183 |
-
logger = setup_logger()
|
184 |
-
import argparse
|
185 |
-
|
186 |
-
parser = argparse.ArgumentParser()
|
187 |
-
parser.add_argument("--gt-json")
|
188 |
-
parser.add_argument("--gt-dir")
|
189 |
-
parser.add_argument("--pred-json")
|
190 |
-
parser.add_argument("--pred-dir")
|
191 |
-
args = parser.parse_args()
|
192 |
-
|
193 |
-
from panopticapi.evaluation import pq_compute
|
194 |
-
|
195 |
-
with contextlib.redirect_stdout(io.StringIO()):
|
196 |
-
pq_res = pq_compute(
|
197 |
-
args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
|
198 |
-
)
|
199 |
-
_print_panoptic_results(pq_res)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/regnet.py
DELETED
@@ -1,452 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
"""
|
3 |
-
Implementation of RegNet models from :paper:`dds` and :paper:`scaling`.
|
4 |
-
|
5 |
-
This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications.
|
6 |
-
Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify
|
7 |
-
model loading.
|
8 |
-
"""
|
9 |
-
|
10 |
-
import numpy as np
|
11 |
-
from torch import nn
|
12 |
-
|
13 |
-
from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm
|
14 |
-
|
15 |
-
from .backbone import Backbone
|
16 |
-
|
17 |
-
__all__ = [
|
18 |
-
"AnyNet",
|
19 |
-
"RegNet",
|
20 |
-
"ResStem",
|
21 |
-
"SimpleStem",
|
22 |
-
"VanillaBlock",
|
23 |
-
"ResBasicBlock",
|
24 |
-
"ResBottleneckBlock",
|
25 |
-
]
|
26 |
-
|
27 |
-
|
28 |
-
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
|
29 |
-
"""Helper for building a conv2d layer."""
|
30 |
-
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
|
31 |
-
s, p, g, b = stride, (k - 1) // 2, groups, bias
|
32 |
-
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
|
33 |
-
|
34 |
-
|
35 |
-
def gap2d():
|
36 |
-
"""Helper for building a global average pooling layer."""
|
37 |
-
return nn.AdaptiveAvgPool2d((1, 1))
|
38 |
-
|
39 |
-
|
40 |
-
def pool2d(k, *, stride=1):
|
41 |
-
"""Helper for building a pool2d layer."""
|
42 |
-
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
|
43 |
-
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
|
44 |
-
|
45 |
-
|
46 |
-
def init_weights(m):
|
47 |
-
"""Performs ResNet-style weight initialization."""
|
48 |
-
if isinstance(m, nn.Conv2d):
|
49 |
-
# Note that there is no bias due to BN
|
50 |
-
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
|
51 |
-
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
|
52 |
-
elif isinstance(m, nn.BatchNorm2d):
|
53 |
-
m.weight.data.fill_(1.0)
|
54 |
-
m.bias.data.zero_()
|
55 |
-
elif isinstance(m, nn.Linear):
|
56 |
-
m.weight.data.normal_(mean=0.0, std=0.01)
|
57 |
-
m.bias.data.zero_()
|
58 |
-
|
59 |
-
|
60 |
-
class ResStem(CNNBlockBase):
|
61 |
-
"""ResNet stem for ImageNet: 7x7, BN, AF, MaxPool."""
|
62 |
-
|
63 |
-
def __init__(self, w_in, w_out, norm, activation_class):
|
64 |
-
super().__init__(w_in, w_out, 4)
|
65 |
-
self.conv = conv2d(w_in, w_out, 7, stride=2)
|
66 |
-
self.bn = get_norm(norm, w_out)
|
67 |
-
self.af = activation_class()
|
68 |
-
self.pool = pool2d(3, stride=2)
|
69 |
-
|
70 |
-
def forward(self, x):
|
71 |
-
for layer in self.children():
|
72 |
-
x = layer(x)
|
73 |
-
return x
|
74 |
-
|
75 |
-
|
76 |
-
class SimpleStem(CNNBlockBase):
|
77 |
-
"""Simple stem for ImageNet: 3x3, BN, AF."""
|
78 |
-
|
79 |
-
def __init__(self, w_in, w_out, norm, activation_class):
|
80 |
-
super().__init__(w_in, w_out, 2)
|
81 |
-
self.conv = conv2d(w_in, w_out, 3, stride=2)
|
82 |
-
self.bn = get_norm(norm, w_out)
|
83 |
-
self.af = activation_class()
|
84 |
-
|
85 |
-
def forward(self, x):
|
86 |
-
for layer in self.children():
|
87 |
-
x = layer(x)
|
88 |
-
return x
|
89 |
-
|
90 |
-
|
91 |
-
class SE(nn.Module):
|
92 |
-
"""Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid."""
|
93 |
-
|
94 |
-
def __init__(self, w_in, w_se, activation_class):
|
95 |
-
super().__init__()
|
96 |
-
self.avg_pool = gap2d()
|
97 |
-
self.f_ex = nn.Sequential(
|
98 |
-
conv2d(w_in, w_se, 1, bias=True),
|
99 |
-
activation_class(),
|
100 |
-
conv2d(w_se, w_in, 1, bias=True),
|
101 |
-
nn.Sigmoid(),
|
102 |
-
)
|
103 |
-
|
104 |
-
def forward(self, x):
|
105 |
-
return x * self.f_ex(self.avg_pool(x))
|
106 |
-
|
107 |
-
|
108 |
-
class VanillaBlock(CNNBlockBase):
|
109 |
-
"""Vanilla block: [3x3 conv, BN, Relu] x2."""
|
110 |
-
|
111 |
-
def __init__(self, w_in, w_out, stride, norm, activation_class, _params):
|
112 |
-
super().__init__(w_in, w_out, stride)
|
113 |
-
self.a = conv2d(w_in, w_out, 3, stride=stride)
|
114 |
-
self.a_bn = get_norm(norm, w_out)
|
115 |
-
self.a_af = activation_class()
|
116 |
-
self.b = conv2d(w_out, w_out, 3)
|
117 |
-
self.b_bn = get_norm(norm, w_out)
|
118 |
-
self.b_af = activation_class()
|
119 |
-
|
120 |
-
def forward(self, x):
|
121 |
-
for layer in self.children():
|
122 |
-
x = layer(x)
|
123 |
-
return x
|
124 |
-
|
125 |
-
|
126 |
-
class BasicTransform(nn.Module):
|
127 |
-
"""Basic transformation: [3x3 conv, BN, Relu] x2."""
|
128 |
-
|
129 |
-
def __init__(self, w_in, w_out, stride, norm, activation_class, _params):
|
130 |
-
super().__init__()
|
131 |
-
self.a = conv2d(w_in, w_out, 3, stride=stride)
|
132 |
-
self.a_bn = get_norm(norm, w_out)
|
133 |
-
self.a_af = activation_class()
|
134 |
-
self.b = conv2d(w_out, w_out, 3)
|
135 |
-
self.b_bn = get_norm(norm, w_out)
|
136 |
-
self.b_bn.final_bn = True
|
137 |
-
|
138 |
-
def forward(self, x):
|
139 |
-
for layer in self.children():
|
140 |
-
x = layer(x)
|
141 |
-
return x
|
142 |
-
|
143 |
-
|
144 |
-
class ResBasicBlock(CNNBlockBase):
|
145 |
-
"""Residual basic block: x + f(x), f = basic transform."""
|
146 |
-
|
147 |
-
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
|
148 |
-
super().__init__(w_in, w_out, stride)
|
149 |
-
self.proj, self.bn = None, None
|
150 |
-
if (w_in != w_out) or (stride != 1):
|
151 |
-
self.proj = conv2d(w_in, w_out, 1, stride=stride)
|
152 |
-
self.bn = get_norm(norm, w_out)
|
153 |
-
self.f = BasicTransform(w_in, w_out, stride, norm, activation_class, params)
|
154 |
-
self.af = activation_class()
|
155 |
-
|
156 |
-
def forward(self, x):
|
157 |
-
x_p = self.bn(self.proj(x)) if self.proj else x
|
158 |
-
return self.af(x_p + self.f(x))
|
159 |
-
|
160 |
-
|
161 |
-
class BottleneckTransform(nn.Module):
|
162 |
-
"""Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""
|
163 |
-
|
164 |
-
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
|
165 |
-
super().__init__()
|
166 |
-
w_b = int(round(w_out * params["bot_mul"]))
|
167 |
-
w_se = int(round(w_in * params["se_r"]))
|
168 |
-
groups = w_b // params["group_w"]
|
169 |
-
self.a = conv2d(w_in, w_b, 1)
|
170 |
-
self.a_bn = get_norm(norm, w_b)
|
171 |
-
self.a_af = activation_class()
|
172 |
-
self.b = conv2d(w_b, w_b, 3, stride=stride, groups=groups)
|
173 |
-
self.b_bn = get_norm(norm, w_b)
|
174 |
-
self.b_af = activation_class()
|
175 |
-
self.se = SE(w_b, w_se, activation_class) if w_se else None
|
176 |
-
self.c = conv2d(w_b, w_out, 1)
|
177 |
-
self.c_bn = get_norm(norm, w_out)
|
178 |
-
self.c_bn.final_bn = True
|
179 |
-
|
180 |
-
def forward(self, x):
|
181 |
-
for layer in self.children():
|
182 |
-
x = layer(x)
|
183 |
-
return x
|
184 |
-
|
185 |
-
|
186 |
-
class ResBottleneckBlock(CNNBlockBase):
|
187 |
-
"""Residual bottleneck block: x + f(x), f = bottleneck transform."""
|
188 |
-
|
189 |
-
def __init__(self, w_in, w_out, stride, norm, activation_class, params):
|
190 |
-
super().__init__(w_in, w_out, stride)
|
191 |
-
self.proj, self.bn = None, None
|
192 |
-
if (w_in != w_out) or (stride != 1):
|
193 |
-
self.proj = conv2d(w_in, w_out, 1, stride=stride)
|
194 |
-
self.bn = get_norm(norm, w_out)
|
195 |
-
self.f = BottleneckTransform(w_in, w_out, stride, norm, activation_class, params)
|
196 |
-
self.af = activation_class()
|
197 |
-
|
198 |
-
def forward(self, x):
|
199 |
-
x_p = self.bn(self.proj(x)) if self.proj else x
|
200 |
-
return self.af(x_p + self.f(x))
|
201 |
-
|
202 |
-
|
203 |
-
class AnyStage(nn.Module):
|
204 |
-
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
|
205 |
-
|
206 |
-
def __init__(self, w_in, w_out, stride, d, block_class, norm, activation_class, params):
|
207 |
-
super().__init__()
|
208 |
-
for i in range(d):
|
209 |
-
block = block_class(w_in, w_out, stride, norm, activation_class, params)
|
210 |
-
self.add_module("b{}".format(i + 1), block)
|
211 |
-
stride, w_in = 1, w_out
|
212 |
-
|
213 |
-
def forward(self, x):
|
214 |
-
for block in self.children():
|
215 |
-
x = block(x)
|
216 |
-
return x
|
217 |
-
|
218 |
-
|
219 |
-
class AnyNet(Backbone):
|
220 |
-
"""AnyNet model. See :paper:`dds`."""
|
221 |
-
|
222 |
-
def __init__(
|
223 |
-
self,
|
224 |
-
*,
|
225 |
-
stem_class,
|
226 |
-
stem_width,
|
227 |
-
block_class,
|
228 |
-
depths,
|
229 |
-
widths,
|
230 |
-
group_widths,
|
231 |
-
strides,
|
232 |
-
bottleneck_ratios,
|
233 |
-
se_ratio,
|
234 |
-
activation_class,
|
235 |
-
freeze_at=0,
|
236 |
-
norm="BN",
|
237 |
-
out_features=None,
|
238 |
-
):
|
239 |
-
"""
|
240 |
-
Args:
|
241 |
-
stem_class (callable): A callable taking 4 arguments (channels in, channels out,
|
242 |
-
normalization, callable returning an activation function) that returns another
|
243 |
-
callable implementing the stem module.
|
244 |
-
stem_width (int): The number of output channels that the stem produces.
|
245 |
-
block_class (callable): A callable taking 6 arguments (channels in, channels out,
|
246 |
-
stride, normalization, callable returning an activation function, a dict of
|
247 |
-
block-specific parameters) that returns another callable implementing the repeated
|
248 |
-
block module.
|
249 |
-
depths (list[int]): Number of blocks in each stage.
|
250 |
-
widths (list[int]): For each stage, the number of output channels of each block.
|
251 |
-
group_widths (list[int]): For each stage, the number of channels per group in group
|
252 |
-
convolution, if the block uses group convolution.
|
253 |
-
strides (list[int]): The stride that each network stage applies to its input.
|
254 |
-
bottleneck_ratios (list[float]): For each stage, the ratio of the number of bottleneck
|
255 |
-
channels to the number of block input channels (or, equivalently, output channels),
|
256 |
-
if the block uses a bottleneck.
|
257 |
-
se_ratio (float): The ratio of the number of channels used inside the squeeze-excitation
|
258 |
-
(SE) module to it number of input channels, if SE the block uses SE.
|
259 |
-
activation_class (callable): A callable taking no arguments that returns another
|
260 |
-
callable implementing an activation function.
|
261 |
-
freeze_at (int): The number of stages at the beginning to freeze.
|
262 |
-
see :meth:`freeze` for detailed explanation.
|
263 |
-
norm (str or callable): normalization for all conv layers.
|
264 |
-
See :func:`layers.get_norm` for supported format.
|
265 |
-
out_features (list[str]): name of the layers whose outputs should
|
266 |
-
be returned in forward. RegNet's use "stem" and "s1", "s2", etc for the stages after
|
267 |
-
the stem. If None, will return the output of the last layer.
|
268 |
-
"""
|
269 |
-
super().__init__()
|
270 |
-
self.stem = stem_class(3, stem_width, norm, activation_class)
|
271 |
-
|
272 |
-
current_stride = self.stem.stride
|
273 |
-
self._out_feature_strides = {"stem": current_stride}
|
274 |
-
self._out_feature_channels = {"stem": self.stem.out_channels}
|
275 |
-
self.stages_and_names = []
|
276 |
-
prev_w = stem_width
|
277 |
-
|
278 |
-
for i, (d, w, s, b, g) in enumerate(
|
279 |
-
zip(depths, widths, strides, bottleneck_ratios, group_widths)
|
280 |
-
):
|
281 |
-
params = {"bot_mul": b, "group_w": g, "se_r": se_ratio}
|
282 |
-
stage = AnyStage(prev_w, w, s, d, block_class, norm, activation_class, params)
|
283 |
-
name = "s{}".format(i + 1)
|
284 |
-
self.add_module(name, stage)
|
285 |
-
self.stages_and_names.append((stage, name))
|
286 |
-
self._out_feature_strides[name] = current_stride = int(
|
287 |
-
current_stride * np.prod([k.stride for k in stage.children()])
|
288 |
-
)
|
289 |
-
self._out_feature_channels[name] = list(stage.children())[-1].out_channels
|
290 |
-
prev_w = w
|
291 |
-
|
292 |
-
self.apply(init_weights)
|
293 |
-
|
294 |
-
if out_features is None:
|
295 |
-
out_features = [name]
|
296 |
-
self._out_features = out_features
|
297 |
-
assert len(self._out_features)
|
298 |
-
children = [x[0] for x in self.named_children()]
|
299 |
-
for out_feature in self._out_features:
|
300 |
-
assert out_feature in children, "Available children: {} does not include {}".format(
|
301 |
-
", ".join(children), out_feature
|
302 |
-
)
|
303 |
-
self.freeze(freeze_at)
|
304 |
-
|
305 |
-
def forward(self, x):
|
306 |
-
"""
|
307 |
-
Args:
|
308 |
-
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
|
309 |
-
|
310 |
-
Returns:
|
311 |
-
dict[str->Tensor]: names and the corresponding features
|
312 |
-
"""
|
313 |
-
assert x.dim() == 4, f"Model takes an input of shape (N, C, H, W). Got {x.shape} instead!"
|
314 |
-
outputs = {}
|
315 |
-
x = self.stem(x)
|
316 |
-
if "stem" in self._out_features:
|
317 |
-
outputs["stem"] = x
|
318 |
-
for stage, name in self.stages_and_names:
|
319 |
-
x = stage(x)
|
320 |
-
if name in self._out_features:
|
321 |
-
outputs[name] = x
|
322 |
-
return outputs
|
323 |
-
|
324 |
-
def output_shape(self):
|
325 |
-
return {
|
326 |
-
name: ShapeSpec(
|
327 |
-
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
|
328 |
-
)
|
329 |
-
for name in self._out_features
|
330 |
-
}
|
331 |
-
|
332 |
-
def freeze(self, freeze_at=0):
|
333 |
-
"""
|
334 |
-
Freeze the first several stages of the model. Commonly used in fine-tuning.
|
335 |
-
|
336 |
-
Layers that produce the same feature map spatial size are defined as one
|
337 |
-
"stage" by :paper:`FPN`.
|
338 |
-
|
339 |
-
Args:
|
340 |
-
freeze_at (int): number of stages to freeze.
|
341 |
-
`1` means freezing the stem. `2` means freezing the stem and
|
342 |
-
one residual stage, etc.
|
343 |
-
|
344 |
-
Returns:
|
345 |
-
nn.Module: this model itself
|
346 |
-
"""
|
347 |
-
if freeze_at >= 1:
|
348 |
-
self.stem.freeze()
|
349 |
-
for idx, (stage, _) in enumerate(self.stages_and_names, start=2):
|
350 |
-
if freeze_at >= idx:
|
351 |
-
for block in stage.children():
|
352 |
-
block.freeze()
|
353 |
-
return self
|
354 |
-
|
355 |
-
|
356 |
-
def adjust_block_compatibility(ws, bs, gs):
|
357 |
-
"""Adjusts the compatibility of widths, bottlenecks, and groups."""
|
358 |
-
assert len(ws) == len(bs) == len(gs)
|
359 |
-
assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs))
|
360 |
-
vs = [int(max(1, w * b)) for w, b in zip(ws, bs)]
|
361 |
-
gs = [int(min(g, v)) for g, v in zip(gs, vs)]
|
362 |
-
ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)]
|
363 |
-
vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)]
|
364 |
-
ws = [int(v / b) for v, b in zip(vs, bs)]
|
365 |
-
assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs))
|
366 |
-
return ws, bs, gs
|
367 |
-
|
368 |
-
|
369 |
-
def generate_regnet_parameters(w_a, w_0, w_m, d, q=8):
|
370 |
-
"""Generates per stage widths and depths from RegNet parameters."""
|
371 |
-
assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
|
372 |
-
# Generate continuous per-block ws
|
373 |
-
ws_cont = np.arange(d) * w_a + w_0
|
374 |
-
# Generate quantized per-block ws
|
375 |
-
ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))
|
376 |
-
ws_all = w_0 * np.power(w_m, ks)
|
377 |
-
ws_all = np.round(np.divide(ws_all, q)).astype(int) * q
|
378 |
-
# Generate per stage ws and ds (assumes ws_all are sorted)
|
379 |
-
ws, ds = np.unique(ws_all, return_counts=True)
|
380 |
-
# Compute number of actual stages and total possible stages
|
381 |
-
num_stages, total_stages = len(ws), ks.max() + 1
|
382 |
-
# Convert numpy arrays to lists and return
|
383 |
-
ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont))
|
384 |
-
return ws, ds, num_stages, total_stages, ws_all, ws_cont
|
385 |
-
|
386 |
-
|
387 |
-
class RegNet(AnyNet):
|
388 |
-
"""RegNet model. See :paper:`dds`."""
|
389 |
-
|
390 |
-
def __init__(
|
391 |
-
self,
|
392 |
-
*,
|
393 |
-
stem_class,
|
394 |
-
stem_width,
|
395 |
-
block_class,
|
396 |
-
depth,
|
397 |
-
w_a,
|
398 |
-
w_0,
|
399 |
-
w_m,
|
400 |
-
group_width,
|
401 |
-
stride=2,
|
402 |
-
bottleneck_ratio=1.0,
|
403 |
-
se_ratio=0.0,
|
404 |
-
activation_class=None,
|
405 |
-
freeze_at=0,
|
406 |
-
norm="BN",
|
407 |
-
out_features=None,
|
408 |
-
):
|
409 |
-
"""
|
410 |
-
Build a RegNet from the parameterization described in :paper:`dds` Section 3.3.
|
411 |
-
|
412 |
-
Args:
|
413 |
-
See :class:`AnyNet` for arguments that are not listed here.
|
414 |
-
depth (int): Total number of blocks in the RegNet.
|
415 |
-
w_a (float): Factor by which block width would increase prior to quantizing block widths
|
416 |
-
by stage. See :paper:`dds` Section 3.3.
|
417 |
-
w_0 (int): Initial block width. See :paper:`dds` Section 3.3.
|
418 |
-
w_m (float): Parameter controlling block width quantization.
|
419 |
-
See :paper:`dds` Section 3.3.
|
420 |
-
group_width (int): Number of channels per group in group convolution, if the block uses
|
421 |
-
group convolution.
|
422 |
-
bottleneck_ratio (float): The ratio of the number of bottleneck channels to the number
|
423 |
-
of block input channels (or, equivalently, output channels), if the block uses a
|
424 |
-
bottleneck.
|
425 |
-
stride (int): The stride that each network stage applies to its input.
|
426 |
-
"""
|
427 |
-
ws, ds = generate_regnet_parameters(w_a, w_0, w_m, depth)[0:2]
|
428 |
-
ss = [stride for _ in ws]
|
429 |
-
bs = [bottleneck_ratio for _ in ws]
|
430 |
-
gs = [group_width for _ in ws]
|
431 |
-
ws, bs, gs = adjust_block_compatibility(ws, bs, gs)
|
432 |
-
|
433 |
-
def default_activation_class():
|
434 |
-
return nn.ReLU(inplace=True)
|
435 |
-
|
436 |
-
super().__init__(
|
437 |
-
stem_class=stem_class,
|
438 |
-
stem_width=stem_width,
|
439 |
-
block_class=block_class,
|
440 |
-
depths=ds,
|
441 |
-
widths=ws,
|
442 |
-
strides=ss,
|
443 |
-
group_widths=gs,
|
444 |
-
bottleneck_ratios=bs,
|
445 |
-
se_ratio=se_ratio,
|
446 |
-
activation_class=default_activation_class
|
447 |
-
if activation_class is None
|
448 |
-
else activation_class,
|
449 |
-
freeze_at=freeze_at,
|
450 |
-
norm=norm,
|
451 |
-
out_features=out_features,
|
452 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CofAI/chat.v2/Dockerfile
DELETED
@@ -1,133 +0,0 @@
|
|
1 |
-
# Use the official Python 3.9 image as the base image
|
2 |
-
FROM python:3.9
|
3 |
-
|
4 |
-
# Expose the port
|
5 |
-
EXPOSE 7860
|
6 |
-
|
7 |
-
# Keeps Python from generating .pyc files in the container
|
8 |
-
ENV PYTHONDONTWRITEBYTECODE=1
|
9 |
-
|
10 |
-
# Turns off buffering for easier container logging
|
11 |
-
ENV PYTHONUNBUFFERED=1
|
12 |
-
|
13 |
-
# Set the PYNGROK_CONFIG environment variable
|
14 |
-
ENV PYNGROK_CONFIG /tmp/pyngrok.yml
|
15 |
-
|
16 |
-
# Set the NGROK_PATH environment variable to a writable location
|
17 |
-
ENV NGROK_PATH /tmp/ngrok
|
18 |
-
|
19 |
-
# Copy requirements.txt into the container
|
20 |
-
COPY requirements.txt .
|
21 |
-
|
22 |
-
# RUN apt-get update
|
23 |
-
# RUN apt-get install -y wget
|
24 |
-
# RUN wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
|
25 |
-
# RUN apt-get install ./google-chrome-stable_current_amd64.deb -y
|
26 |
-
|
27 |
-
|
28 |
-
# RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
|
29 |
-
# RUN sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list'
|
30 |
-
# RUN apt-get -y update
|
31 |
-
# RUN apt-get install -y google-chrome-stable
|
32 |
-
|
33 |
-
# # install chromedriver
|
34 |
-
# RUN apt-get install -yqq unzip
|
35 |
-
# RUN wget -O /tmp/chromedriver.zip http://chromedriver.storage.googleapis.com/`curl -sS chromedriver.storage.googleapis.com/LATEST_RELEASE`/chromedriver_linux64.zip
|
36 |
-
# RUN unzip /tmp/chromedriver.zip chromedriver -d /usr/local/bin/
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
# RUN apt install wget -y
|
43 |
-
# RUN wget https://github.com/mozilla/geckodriver/releases/download/v0.32.0/geckodriver-v0.32.0-linux64.tar.gz
|
44 |
-
# RUN tar -xzvf geckodriver-v0.32.0-linux64.tar.gz -C /usr/local/bin
|
45 |
-
# RUN chmod +x /usr/local/bin/geckodriver
|
46 |
-
# RUN geckodriver -V
|
47 |
-
|
48 |
-
|
49 |
-
# RUN apt install firefox-esr -y
|
50 |
-
# RUN apt-get install firefox-geckodriver
|
51 |
-
|
52 |
-
# Upgrade pip and install the required packages
|
53 |
-
RUN pip install --upgrade pip && \
|
54 |
-
pip install -r requirements.txt
|
55 |
-
|
56 |
-
# Install sudo and create the necessary directories before copying the files
|
57 |
-
RUN apt-get update && \
|
58 |
-
apt-get install -y sudo && \
|
59 |
-
mkdir -p /code/image
|
60 |
-
|
61 |
-
# Creates a non-root user with an explicit UID and adds permission to access the /code folder
|
62 |
-
RUN adduser -u 5678 --disabled-password --gecos "" appuser && \
|
63 |
-
usermod -aG sudo appuser && \
|
64 |
-
usermod -aG root appuser && \
|
65 |
-
chown -R appuser:appuser /code
|
66 |
-
|
67 |
-
# Create the pyngrok bin directory and set the ownership and permissions for appuser
|
68 |
-
RUN mkdir -p /usr/local/lib/python3.9/site-packages/pyngrok/bin && \
|
69 |
-
chown -R appuser:appuser /usr/local/lib/python3.9/site-packages/pyngrok/bin && \
|
70 |
-
chmod -R 777 /usr/local/lib/python3.9/site-packages/pyngrok/bin
|
71 |
-
|
72 |
-
RUN mkdir -p /.ngrok2 && \
|
73 |
-
chown -R appuser:appuser /.ngrok2 && \
|
74 |
-
chmod -R 777 /.ngrok2
|
75 |
-
|
76 |
-
RUN apt-get update && \
|
77 |
-
apt-get install -y curl
|
78 |
-
|
79 |
-
RUN echo "deb http://deb.debian.org/debian/ unstable main contrib non-free" >> /etc/apt/sources.list.d/debian.list
|
80 |
-
|
81 |
-
|
82 |
-
# RUN apt install firefox-esr && \
|
83 |
-
# apt install geckodriver
|
84 |
-
|
85 |
-
# Set the working directory and copy the files
|
86 |
-
WORKDIR /code
|
87 |
-
|
88 |
-
# Set the ownership and permissions for the /code directory and its contents
|
89 |
-
RUN chown -R appuser:appuser /code && \
|
90 |
-
chmod -R 777 /code
|
91 |
-
|
92 |
-
COPY . /code
|
93 |
-
|
94 |
-
# RUN chown -R appuser:appuser /code/data.csv && \
|
95 |
-
# chmod -R 777 /code/data.csv
|
96 |
-
|
97 |
-
# Copy the pyngrok.yml configuration file
|
98 |
-
COPY pyngrok.yml /tmp/pyngrok.yml
|
99 |
-
|
100 |
-
# Set the TRANSFORMERS_CACHE environment variable to a cache directory inside /tmp
|
101 |
-
ENV TRANSFORMERS_CACHE /tmp/transformers_cache
|
102 |
-
ENV TORCH_HOME /tmp/torch_cache
|
103 |
-
|
104 |
-
USER appuser
|
105 |
-
|
106 |
-
|
107 |
-
RUN git clone https://github.com/rphrp1985/gpt4f
|
108 |
-
# WORKDIR /gpt4f
|
109 |
-
# COPY . /gpt4f
|
110 |
-
# RUN cd gpt4f
|
111 |
-
# RUN ls
|
112 |
-
|
113 |
-
# cp -R / /root/dest-folder
|
114 |
-
RUN cp -R gpt4f/* /code
|
115 |
-
RUN ls
|
116 |
-
CMD python run.py
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
# Start the application using pyngrok
|
125 |
-
# CMD python main.py
|
126 |
-
# Get the public IP address and display it
|
127 |
-
# RUN curl -s https://api.ipify.org | xargs echo "Public IP:"
|
128 |
-
RUN pip install gunicorn
|
129 |
-
|
130 |
-
# Start the Uvicorn server
|
131 |
-
# ENTRYPOINT ["python", "main.py"]
|
132 |
-
# CMD ["sh", "-c", "python main.py & sleep infinity"]
|
133 |
-
CMD ["gunicorn", "--bind", "0.0.0.0:7860","run:app"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cong723/gpt-academic-public/Dockerfile
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
|
2 |
-
# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic .
|
3 |
-
# 如何运行: docker run --rm -it --net=host gpt-academic
|
4 |
-
FROM python:3.11
|
5 |
-
|
6 |
-
RUN echo '[global]' > /etc/pip.conf && \
|
7 |
-
echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
|
8 |
-
echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
|
9 |
-
|
10 |
-
|
11 |
-
WORKDIR /gpt
|
12 |
-
COPY requirements.txt .
|
13 |
-
RUN pip3 install -r requirements.txt
|
14 |
-
|
15 |
-
COPY . .
|
16 |
-
|
17 |
-
# 可选步骤,用于预热模块
|
18 |
-
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
|
19 |
-
|
20 |
-
CMD ["python3", "-u", "main.py"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CristianGonzalez281098/Cheto/app.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
examples = [["The Moon's orbit around Earth has"], ["There once was a pineapple"]]
|
4 |
-
|
5 |
-
gr.Interface.load("huggingface/gpt2", title = "Mi Demo Cristian", examples=examples).launch();
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/merge/__init__.py
DELETED
@@ -1,211 +0,0 @@
|
|
1 |
-
# Copyright 2013 Google, Inc. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
4 |
-
|
5 |
-
from fontTools import ttLib
|
6 |
-
import fontTools.merge.base
|
7 |
-
from fontTools.merge.cmap import (
|
8 |
-
computeMegaGlyphOrder,
|
9 |
-
computeMegaCmap,
|
10 |
-
renameCFFCharStrings,
|
11 |
-
)
|
12 |
-
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
|
13 |
-
from fontTools.merge.options import Options
|
14 |
-
import fontTools.merge.tables
|
15 |
-
from fontTools.misc.loggingTools import Timer
|
16 |
-
from functools import reduce
|
17 |
-
import sys
|
18 |
-
import logging
|
19 |
-
|
20 |
-
|
21 |
-
log = logging.getLogger("fontTools.merge")
|
22 |
-
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
|
23 |
-
|
24 |
-
|
25 |
-
class Merger(object):
|
26 |
-
"""Font merger.
|
27 |
-
|
28 |
-
This class merges multiple files into a single OpenType font, taking into
|
29 |
-
account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
|
30 |
-
cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
|
31 |
-
all the fonts).
|
32 |
-
|
33 |
-
If multiple glyphs map to the same Unicode value, and the glyphs are considered
|
34 |
-
sufficiently different (that is, they differ in any of paths, widths, or
|
35 |
-
height), then subsequent glyphs are renamed and a lookup in the ``locl``
|
36 |
-
feature will be created to disambiguate them. For example, if the arguments
|
37 |
-
are an Arabic font and a Latin font and both contain a set of parentheses,
|
38 |
-
the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
|
39 |
-
and a lookup will be inserted into the to ``locl`` feature (creating it if
|
40 |
-
necessary) under the ``latn`` script to substitute ``parenleft`` with
|
41 |
-
``parenleft#1`` etc.
|
42 |
-
|
43 |
-
Restrictions:
|
44 |
-
|
45 |
-
- All fonts must have the same units per em.
|
46 |
-
- If duplicate glyph disambiguation takes place as described above then the
|
47 |
-
fonts must have a ``GSUB`` table.
|
48 |
-
|
49 |
-
Attributes:
|
50 |
-
options: Currently unused.
|
51 |
-
"""
|
52 |
-
|
53 |
-
def __init__(self, options=None):
|
54 |
-
|
55 |
-
if not options:
|
56 |
-
options = Options()
|
57 |
-
|
58 |
-
self.options = options
|
59 |
-
|
60 |
-
def _openFonts(self, fontfiles):
|
61 |
-
fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
|
62 |
-
for font, fontfile in zip(fonts, fontfiles):
|
63 |
-
font._merger__fontfile = fontfile
|
64 |
-
font._merger__name = font["name"].getDebugName(4)
|
65 |
-
return fonts
|
66 |
-
|
67 |
-
def merge(self, fontfiles):
|
68 |
-
"""Merges fonts together.
|
69 |
-
|
70 |
-
Args:
|
71 |
-
fontfiles: A list of file names to be merged
|
72 |
-
|
73 |
-
Returns:
|
74 |
-
A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
|
75 |
-
this to write it out to an OTF file.
|
76 |
-
"""
|
77 |
-
#
|
78 |
-
# Settle on a mega glyph order.
|
79 |
-
#
|
80 |
-
fonts = self._openFonts(fontfiles)
|
81 |
-
glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
|
82 |
-
computeMegaGlyphOrder(self, glyphOrders)
|
83 |
-
|
84 |
-
# Take first input file sfntVersion
|
85 |
-
sfntVersion = fonts[0].sfntVersion
|
86 |
-
|
87 |
-
# Reload fonts and set new glyph names on them.
|
88 |
-
fonts = self._openFonts(fontfiles)
|
89 |
-
for font, glyphOrder in zip(fonts, glyphOrders):
|
90 |
-
font.setGlyphOrder(glyphOrder)
|
91 |
-
if "CFF " in font:
|
92 |
-
renameCFFCharStrings(self, glyphOrder, font["CFF "])
|
93 |
-
|
94 |
-
cmaps = [font["cmap"] for font in fonts]
|
95 |
-
self.duplicateGlyphsPerFont = [{} for _ in fonts]
|
96 |
-
computeMegaCmap(self, cmaps)
|
97 |
-
|
98 |
-
mega = ttLib.TTFont(sfntVersion=sfntVersion)
|
99 |
-
mega.setGlyphOrder(self.glyphOrder)
|
100 |
-
|
101 |
-
for font in fonts:
|
102 |
-
self._preMerge(font)
|
103 |
-
|
104 |
-
self.fonts = fonts
|
105 |
-
|
106 |
-
allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
|
107 |
-
allTags.remove("GlyphOrder")
|
108 |
-
|
109 |
-
for tag in sorted(allTags):
|
110 |
-
if tag in self.options.drop_tables:
|
111 |
-
continue
|
112 |
-
|
113 |
-
with timer("merge '%s'" % tag):
|
114 |
-
tables = [font.get(tag, NotImplemented) for font in fonts]
|
115 |
-
|
116 |
-
log.info("Merging '%s'.", tag)
|
117 |
-
clazz = ttLib.getTableClass(tag)
|
118 |
-
table = clazz(tag).merge(self, tables)
|
119 |
-
# XXX Clean this up and use: table = mergeObjects(tables)
|
120 |
-
|
121 |
-
if table is not NotImplemented and table is not False:
|
122 |
-
mega[tag] = table
|
123 |
-
log.info("Merged '%s'.", tag)
|
124 |
-
else:
|
125 |
-
log.info("Dropped '%s'.", tag)
|
126 |
-
|
127 |
-
del self.duplicateGlyphsPerFont
|
128 |
-
del self.fonts
|
129 |
-
|
130 |
-
self._postMerge(mega)
|
131 |
-
|
132 |
-
return mega
|
133 |
-
|
134 |
-
def mergeObjects(self, returnTable, logic, tables):
|
135 |
-
# Right now we don't use self at all. Will use in the future
|
136 |
-
# for options and logging.
|
137 |
-
|
138 |
-
allKeys = set.union(
|
139 |
-
set(),
|
140 |
-
*(vars(table).keys() for table in tables if table is not NotImplemented),
|
141 |
-
)
|
142 |
-
for key in allKeys:
|
143 |
-
try:
|
144 |
-
mergeLogic = logic[key]
|
145 |
-
except KeyError:
|
146 |
-
try:
|
147 |
-
mergeLogic = logic["*"]
|
148 |
-
except KeyError:
|
149 |
-
raise Exception(
|
150 |
-
"Don't know how to merge key %s of class %s"
|
151 |
-
% (key, returnTable.__class__.__name__)
|
152 |
-
)
|
153 |
-
if mergeLogic is NotImplemented:
|
154 |
-
continue
|
155 |
-
value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
|
156 |
-
if value is not NotImplemented:
|
157 |
-
setattr(returnTable, key, value)
|
158 |
-
|
159 |
-
return returnTable
|
160 |
-
|
161 |
-
def _preMerge(self, font):
|
162 |
-
layoutPreMerge(font)
|
163 |
-
|
164 |
-
def _postMerge(self, font):
|
165 |
-
layoutPostMerge(font)
|
166 |
-
|
167 |
-
if "OS/2" in font:
|
168 |
-
# https://github.com/fonttools/fonttools/issues/2538
|
169 |
-
# TODO: Add an option to disable this?
|
170 |
-
font["OS/2"].recalcAvgCharWidth(font)
|
171 |
-
|
172 |
-
|
173 |
-
__all__ = ["Options", "Merger", "main"]
|
174 |
-
|
175 |
-
|
176 |
-
@timer("make one with everything (TOTAL TIME)")
|
177 |
-
def main(args=None):
|
178 |
-
"""Merge multiple fonts into one"""
|
179 |
-
from fontTools import configLogger
|
180 |
-
|
181 |
-
if args is None:
|
182 |
-
args = sys.argv[1:]
|
183 |
-
|
184 |
-
options = Options()
|
185 |
-
args = options.parse_opts(args, ignore_unknown=["output-file"])
|
186 |
-
outfile = "merged.ttf"
|
187 |
-
fontfiles = []
|
188 |
-
for g in args:
|
189 |
-
if g.startswith("--output-file="):
|
190 |
-
outfile = g[14:]
|
191 |
-
continue
|
192 |
-
fontfiles.append(g)
|
193 |
-
|
194 |
-
if len(args) < 1:
|
195 |
-
print("usage: pyftmerge font...", file=sys.stderr)
|
196 |
-
return 1
|
197 |
-
|
198 |
-
configLogger(level=logging.INFO if options.verbose else logging.WARNING)
|
199 |
-
if options.timing:
|
200 |
-
timer.logger.setLevel(logging.DEBUG)
|
201 |
-
else:
|
202 |
-
timer.logger.disabled = True
|
203 |
-
|
204 |
-
merger = Merger(options=options)
|
205 |
-
font = merger.merge(fontfiles)
|
206 |
-
with timer("compile and save font"):
|
207 |
-
font.save(outfile)
|
208 |
-
|
209 |
-
|
210 |
-
if __name__ == "__main__":
|
211 |
-
sys.exit(main())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/otlLib/maxContextCalc.py
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
__all__ = ["maxCtxFont"]
|
2 |
-
|
3 |
-
|
4 |
-
def maxCtxFont(font):
|
5 |
-
"""Calculate the usMaxContext value for an entire font."""
|
6 |
-
|
7 |
-
maxCtx = 0
|
8 |
-
for tag in ("GSUB", "GPOS"):
|
9 |
-
if tag not in font:
|
10 |
-
continue
|
11 |
-
table = font[tag].table
|
12 |
-
if not table.LookupList:
|
13 |
-
continue
|
14 |
-
for lookup in table.LookupList.Lookup:
|
15 |
-
for st in lookup.SubTable:
|
16 |
-
maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)
|
17 |
-
return maxCtx
|
18 |
-
|
19 |
-
|
20 |
-
def maxCtxSubtable(maxCtx, tag, lookupType, st):
|
21 |
-
"""Calculate usMaxContext based on a single lookup table (and an existing
|
22 |
-
max value).
|
23 |
-
"""
|
24 |
-
|
25 |
-
# single positioning, single / multiple substitution
|
26 |
-
if (tag == "GPOS" and lookupType == 1) or (
|
27 |
-
tag == "GSUB" and lookupType in (1, 2, 3)
|
28 |
-
):
|
29 |
-
maxCtx = max(maxCtx, 1)
|
30 |
-
|
31 |
-
# pair positioning
|
32 |
-
elif tag == "GPOS" and lookupType == 2:
|
33 |
-
maxCtx = max(maxCtx, 2)
|
34 |
-
|
35 |
-
# ligatures
|
36 |
-
elif tag == "GSUB" and lookupType == 4:
|
37 |
-
for ligatures in st.ligatures.values():
|
38 |
-
for ligature in ligatures:
|
39 |
-
maxCtx = max(maxCtx, ligature.CompCount)
|
40 |
-
|
41 |
-
# context
|
42 |
-
elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5):
|
43 |
-
maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub")
|
44 |
-
|
45 |
-
# chained context
|
46 |
-
elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6):
|
47 |
-
maxCtx = maxCtxContextualSubtable(
|
48 |
-
maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain"
|
49 |
-
)
|
50 |
-
|
51 |
-
# extensions
|
52 |
-
elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7):
|
53 |
-
maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
|
54 |
-
|
55 |
-
# reverse-chained context
|
56 |
-
elif tag == "GSUB" and lookupType == 8:
|
57 |
-
maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse")
|
58 |
-
|
59 |
-
return maxCtx
|
60 |
-
|
61 |
-
|
62 |
-
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""):
|
63 |
-
"""Calculate usMaxContext based on a contextual feature subtable."""
|
64 |
-
|
65 |
-
if st.Format == 1:
|
66 |
-
for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)):
|
67 |
-
if ruleset is None:
|
68 |
-
continue
|
69 |
-
for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)):
|
70 |
-
if rule is None:
|
71 |
-
continue
|
72 |
-
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
|
73 |
-
|
74 |
-
elif st.Format == 2:
|
75 |
-
for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)):
|
76 |
-
if ruleset is None:
|
77 |
-
continue
|
78 |
-
for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)):
|
79 |
-
if rule is None:
|
80 |
-
continue
|
81 |
-
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
|
82 |
-
|
83 |
-
elif st.Format == 3:
|
84 |
-
maxCtx = maxCtxContextualRule(maxCtx, st, chain)
|
85 |
-
|
86 |
-
return maxCtx
|
87 |
-
|
88 |
-
|
89 |
-
def maxCtxContextualRule(maxCtx, st, chain):
|
90 |
-
"""Calculate usMaxContext based on a contextual feature rule."""
|
91 |
-
|
92 |
-
if not chain:
|
93 |
-
return max(maxCtx, st.GlyphCount)
|
94 |
-
elif chain == "Reverse":
|
95 |
-
return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount)
|
96 |
-
return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|