Commit aba3bb7
Parent(s): 5883a4e

Update parquet files (step 26 of 121)
This view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/17TheWord/vits-models/mel_processing.py +0 -101
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat 8.1 0 Professional The Ultimate Guide to PDF Creation and Editing.md +0 -31
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Colletorz.com Game Collector Pro 4.0.2 [TrT-TcT] Serial Key Keygen UPDATED.md +0 -113
- spaces/1gistliPinn/ChatGPT4/Examples/3d Sexvilla 2 Everlust Sex Coins Hack.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Art Of Analog Layout By Alan Hastings Pdf Free Download ((FREE)).md +0 -8
- spaces/1gistliPinn/ChatGPT4/Examples/Download Mastercam X8 Full Crack TOP 64bit Windows.md +0 -34
- spaces/1line/AutoGPT/autogpt/setup.py +0 -77
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us APK How to Install and Enjoy the Game on Android 4.4.4 Devices.md +0 -255
- spaces/1phancelerku/anime-remove-background/Bluetooth Driver for Windows 7 (32-bit) - Download Now and Enjoy Wireless Connectivity.md +0 -113
- spaces/1phancelerku/anime-remove-background/Brasfoot 18 Premium The Best Soccer Manager Game for Android.md +0 -147
- spaces/2023Liu2023/bingo/src/components/header.tsx +0 -12
- spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/dataset.py +0 -183
- spaces/AIConsultant/MusicGen/audiocraft/losses/stftloss.py +0 -207
- spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/pytorch_utils.py +0 -251
- spaces/AIGC-Audio/Make_An_Audio/README.md +0 -12
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/AiService.py +0 -40
- spaces/Adr740/Hadith_AI_Explorer/__init__.py +0 -0
- spaces/AgentVerse/agentVerse/ui/run.sh +0 -4
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/facebook/Factory.d.ts +0 -6
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/GetChartData.js +0 -15
- spaces/AlanMars/QYL-AI-Space/README.md +0 -13
- spaces/Alfasign/Midjourney_Prompt/app.py +0 -56
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/SingleChannel.py +0 -109
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/persistence.py +0 -251
- spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/tensoRF_decoder.py +0 -346
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/autoencoder_asym_kl.py +0 -180
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py +0 -1
- spaces/AriaMei/TTSdemo/transforms.py +0 -193
- spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/unet.py +0 -437
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/vl_utils.py +0 -100
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/connection.py +0 -572
- spaces/AtomdffAI/wechatgpt4atom/channel/channel.py +0 -31
- spaces/AtomdffAI/wechatgpt4atom/docker/build.debian.sh +0 -9
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/__init__.py +0 -10
- spaces/BAAI/AltDiffusion-m9/app.py +0 -330
- spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
- spaces/Benson/text-generation/Examples/Asistente De Descarga.md +0 -75
- spaces/Benson/text-generation/Examples/Bombsquad 1.4.153 Apk.md +0 -61
- spaces/Blessin/movie-poster-generator/app.py +0 -58
- spaces/Bong15/Rewrite/app.py +0 -60
- spaces/BraydenMoore/MARCI-NFL-Betting/Dockerfile +0 -31
- spaces/CVPR/LIVE/pybind11/tests/test_tagbased_polymorphic.py +0 -21
- spaces/CVPR/LIVE/thrust/dependencies/cub/CONTRIBUTING.md +0 -366
- spaces/CVPR/LIVE/thrust/thrust/detail/allocator/malloc_allocator.h +0 -52
- spaces/CVPR/LIVE/thrust/thrust/detail/select_system.h +0 -85
- spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/time_counter.py +0 -62
- spaces/CikeyQI/Yunzai/Yunzai/lib/puppeteer/puppeteer.js +0 -23
- spaces/CognitiveLabs/GPT-4-Vision-Chat/README.md +0 -10
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-0ee118f2.js +0 -2
- spaces/Dinoking/Guccio-AI-Designer/netdissect/tool/lightbox.html +0 -59
spaces/17TheWord/vits-models/mel_processing.py
DELETED
@@ -1,101 +0,0 @@
import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn

MAX_WAV_VALUE = 32768.0


def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """
    PARAMS
    ------
    C: compression factor
    """
    return torch.log(torch.clamp(x, min=clip_val) * C)


def dynamic_range_decompression_torch(x, C=1):
    """
    PARAMS
    ------
    C: compression factor used to compress
    """
    return torch.exp(x) / C


def spectral_normalize_torch(magnitudes):
    output = dynamic_range_compression_torch(magnitudes)
    return output


def spectral_de_normalize_torch(magnitudes):
    output = dynamic_range_decompression_torch(magnitudes)
    return output


mel_basis = {}
hann_window = {}


def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
    return spec


def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
    global mel_basis
    dtype_device = str(spec.dtype) + '_' + str(spec.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)
    return spec


def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
    if torch.min(y) < -1.:
        print('min value is ', torch.min(y))
    if torch.max(y) > 1.:
        print('max value is ', torch.max(y))

    global mel_basis, hann_window
    dtype_device = str(y.dtype) + '_' + str(y.device)
    fmax_dtype_device = str(fmax) + '_' + dtype_device
    wnsize_dtype_device = str(win_size) + '_' + dtype_device
    if fmax_dtype_device not in mel_basis:
        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
    if wnsize_dtype_device not in hann_window:
        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)

    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
    y = y.squeeze(1)

    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
                      center=center, pad_mode='reflect', normalized=False, onesided=True)

    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)

    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
    spec = spectral_normalize_torch(spec)

    return spec
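
(For context, a minimal usage sketch of the module removed above; it is not part of the commit. It assumes torch plus an older librosa whose mel() still accepts positional arguments, as this code expects, and the parameter values are typical VITS settings chosen purely for illustration.)

# Illustrative only: calling the deleted helper on placeholder audio.
import torch
from mel_processing import mel_spectrogram_torch  # the module deleted above

wav = torch.rand(1, 22050) * 2 - 1  # one second of placeholder audio in [-1, 1)
mel = mel_spectrogram_torch(
    wav,
    n_fft=1024,
    num_mels=80,
    sampling_rate=22050,
    hop_size=256,
    win_size=1024,
    fmin=0.0,
    fmax=None,
    center=False,
)
print(mel.shape)  # torch.Size([1, 80, frames])
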
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Acrobat 8.1 0 Professional The Ultimate Guide to PDF Creation and Editing.md
DELETED
@@ -1,31 +0,0 @@

<h1>How to Use Adobe Acrobat 8.1 0 Professional</h1>
<p>Adobe Acrobat 8.1 0 Professional is a software application that allows you to create, edit, and share PDF documents. PDF stands for Portable Document Format, which is a universal file format that preserves the layout, fonts, images, and hyperlinks of any document. With Adobe Acrobat 8.1 0 Professional, you can:</p>
<h2>keygen adobe acrobat 8.1 0 professional</h2><br /><p><b><b>Download Zip</b> ⇒ <a href="https://byltly.com/2uKvya">https://byltly.com/2uKvya</a></b></p><br /><br />
<ul>
<li>Create PDF files from various sources, such as Microsoft Word, Excel, PowerPoint, web pages, scanned documents, and more.</li>
<li>Edit PDF files by adding comments, annotations, bookmarks, headers, footers, watermarks, and more.</li>
<li>Share PDF files by emailing them, uploading them to online services, or creating interactive forms.</li>
<li>Protect PDF files by encrypting them, adding passwords, digital signatures, and redaction.</li>
<li>Optimize PDF files by reducing their size, enhancing their quality, and making them accessible.</li>
</ul>
<p>In this article, we will show you how to use some of the basic features of Adobe Acrobat 8.1 0 Professional. To get started, you need to install the software on your computer. You can download a replacement version of Acrobat 8 Pro from <a href="https://helpx.adobe.com/acrobat/kb/acrobat-8-9-product-downloads.html">here</a>. You will also need your serial number to activate the software. The updates are available <a href="ftp://ftp.adobe.com/pub/adobe/acrobat/win/8.x/">here</a>.</p>

<h2>How to Create a PDF File</h2>
<p>There are several ways to create a PDF file with Adobe Acrobat 8.1 0 Professional. Here are some of the most common methods:</p>
<ul>
<li>From within Acrobat: You can create a PDF file from any file that can be printed. To do this, open Acrobat and click on the Create PDF button on the toolbar. Then browse to the file you want to convert and click Open. The file will be converted to PDF and opened in Acrobat.</li>
<li>From another application: You can create a PDF file from any application that has a Print option. To do this, open the application and the file you want to convert. Then choose File > Print and select Adobe PDF as the printer. Click OK and choose a location and a name for the PDF file.</li>
<li>From a scanner: You can create a PDF file from a scanned document or image. To do this, open Acrobat and click on the Create PDF button on the toolbar. Then choose From Scanner and select your scanner device. Adjust the settings as needed and click Scan. The scanned document or image will be converted to PDF and opened in Acrobat.</li>
<li>From a web page: You can create a PDF file from a web page or a link. To do this, open Acrobat and click on the Create PDF button on the toolbar. Then choose From Web Page and enter the URL of the web page you want to convert. Click Settings to adjust the options as needed and click Create. The web page will be converted to PDF and opened in Acrobat.</li>
</ul>

<h2>How to Edit a PDF File</h2>
<p>Once you have created a PDF file with Adobe Acrobat 8.1 0 Professional, you can edit it in various ways. Here are some of the most common editing tasks:</p>
<p></p>
<ul>
<li>Add comments: You can add comments to a PDF file to provide feedback, suggestions, or questions. To do this, open the PDF file in Acrobat and click on the Comment button on the toolbar. Then choose the type of comment you want to add from the menu, such as Sticky Note, Text Box, Highlighter, etc. Click on the location where you want to add the comment and type your text.</li>
<li>Add annotations: You can add annotations to a PDF file to mark up or emphasize certain parts of the document. To do this, open the PDF file in Acrobat and click on the Comment button on the toolbar. Then choose the type of annotation you want to add from the menu, such as Stamp, Arrow, Line, Rectangle, etc. Click and drag on the document where you want to add the annotation.</li>
<li>Add bookmarks: You can add bookmarks to a PDF file to create a table of contents or navigation aids</p> ddb901b051<br />
<br />
<br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Colletorz.com Game Collector Pro 4.0.2 [TrT-TcT] Serial Key Keygen UPDATED.md
DELETED
@@ -1,113 +0,0 @@
<br />
<h1>Colletorz.com Game Collector Pro 4.0.2 [TrT-TcT] Serial Key Keygen</h1>
<p>If you are a video game enthusiast who owns a large collection of games, you might want to organize them in a neat and convenient way. You might also want to save some money and get access to the full features of a software that can help you with that. In this article, we will introduce you to Colletorz.com Game Collector Pro 4.0.2, a powerful and user-friendly game database software that can catalog your game collection automatically. We will also show you how to use TrT-TcT serial key keygen, a tool that can generate valid serial keys for activating the software for free. By the end of this article, you will learn how to use Colletorz.com Game Collector Pro 4.0.2 with TrT-TcT serial key keygen to organize your game collection easily and efficiently.</p>
<h2>Introduction</h2>
<h3>What is Colletorz.com Game Collector Pro?</h3>
<p>Colletorz.com Game Collector Pro is a downloadable desktop software for Windows that allows you to catalog your game collection on your PC. It supports various platforms, such as PC, PlayStation, Xbox, Nintendo, Sega, Atari, and more. You can easily add games to your database by searching by title or by barcode, or by scanning barcodes with your phone or tablet. The software will automatically retrieve game information and cover art from its online game database, which contains over 170,000 games. You can also customize your database by adding personal details, such as location, owner, purchase date, notes, rating, etc. You can browse, sort, and search your game collection in various layouts and views, such as list, images, or card view. You can also export your game database to text or XML files, print game lists, or share your game list online.</p>
<h2>Colletorz.com Game Collector Pro 4.0.2 [TrT-TcT] Serial Key Keygen</h2><br /><p><b><b>Download Zip</b> ⚡ <a href="https://byltly.com/2uKw4y">https://byltly.com/2uKw4y</a></b></p><br /><br />
<h3>What is TrT-TcT?</h3>
<p>TrT-TcT is a group of hackers who specialize in cracking software and generating serial keys for them. They have released many serial key keygens for various software, such as Adobe Photoshop CS6, Microsoft Office 2010, WinRAR 5.50, etc. A serial key keygen is a program that can create unique serial keys for activating a software without paying for it. A serial key is a string of numbers and letters that verifies the authenticity and legitimacy of a software license. A keygen uses algorithms and formulas to generate serial keys that match the pattern and format of the original ones. However, using a serial key keygen is illegal and unethical, as it violates the intellectual property rights of the software developers. Therefore, we do not condone or encourage the use of TrT-TcT serial key keygen or any other similar tools.</p>
<h3>What is a serial key keygen?</h3>
<p>A serial key keygen is a program that can create unique serial keys for activating a software without paying for it. A serial key is a string of numbers and letters that verifies the authenticity and legitimacy of a software license. A keygen uses algorithms and formulas to generate serial keys that match the pattern and format of the original ones. However, using a serial key keygen is illegal and unethical, as it violates the intellectual property rights of the software developers. Therefore, we do not condone or encourage the use of TrT-TcT serial key keygen or any other similar tools.</p>
<h2>Features of Colletorz.com Game Collector Pro 4.0.2</h2>
<h3>Automatic game information and cover art</h3>
<p>One of the main features of Colletorz.com Game Collector Pro 4.0.2 is that it can automatically retrieve game information and cover art from its online game database, which contains over 170,000 games. You can easily add games to your database by searching by title or by barcode, or by scanning barcodes with your phone or tablet. The software will download and display the following game details for each game:</p>
<ul>
<li>Title</li>
<li>Platform</li>
<li>Genre</li>
<li>Release date</li>
<li>Publisher</li>
<li>Developer</li>
<li>Description</li>
<li>Cover art</li>
<li>Screenshots</li>
<li>Trailer videos</li>
<li>Metacritic rating</li>
<li>ESRB rating</li>
<li>PEGI rating</li>
<li>Multiplayer modes</li>
<li>Game modes</li>
<li>Region</li>
<li>Edition</li>
<li>Barcode</li>
</ul>
<p>You can also edit or add any game information manually if you want to. The software will automatically update your game database with new games and information from its online game database regularly. You can also use the Batch Update feature to update multiple games at once. This feature saves you a lot of time and effort in cataloging your game collection.</p>
<h3>Customizable database and interface</h3>
<p>Another feature of Colletorz.com Game Collector Pro 4.0.2 is that it allows you to customize your database and interface according to your preferences. You can add personal details to each game, such as location, owner, purchase date, notes, rating, etc. You can also create custom fields and lists to store any additional information you want. You can use these fields and lists to filter, group, and sort your game collection in various ways. For example, you can filter your games by genre, platform, rating, etc. You can also group your games by series, franchise, developer, etc. You can also sort your games by title, release date, metacritic score, etc. You can also customize your interface by choosing from different skins, layouts, and views. You can switch between list view, images view, or card view to display your game collection in different ways. You can also adjust the size, order, and visibility of the columns and fields in the list view. You can also change the fonts, colors, and backgrounds of the interface. This feature gives you more control and flexibility in managing your game collection.</p>
<h3>Free CLZ Cloud storage and syncing</h3>
<p>A third feature of Colletorz.com Game Collector Pro 4.0.2 is that it offers free CLZ Cloud storage and syncing for your game database. CLZ Cloud is a cloud-based service that allows you to store your game database online and sync it across multiple devices. You can use CLZ Cloud to backup your game database online and restore it anytime you want. You can also use CLZ Cloud to sync your game database between your PC and your mobile devices (such as iPhone, iPad, Android phone or tablet). You can use the free CLZ Games app to access and edit your game database on your mobile devices. You can also use CLZ Cloud to share your game list online with friends or family via email or social media. You can also embed your game list on your own website or blog using HTML code. This feature enables you to access and share your game collection anytime and anywhere.</p>
<h3>Other tools and functions</h3>
<p>Besides the features mentioned above, Colletorz.com Game Collector Pro 4.0.2 also provides other tools and functions that can help you organize your game collection more efficiently. Some of these tools and functions are:</p>
<p></p>
<ul>
<li>Statistics: You can view statistics and charts of your game collection, such as the number of games, the total value, the average rating, the distribution by platform, genre, etc. You can also export these statistics to CSV or HTML files.</li>
<li>Loan Manager: You can keep track of the games you loaned and borrowed from friends or family. You can record the loan date, due date, and return date of each game. You can also send email reminders to the borrowers or lenders.</li>
<li>Wish List: You can create a wish list of the games you want to buy or play in the future. You can add games to your wish list from the online game database or manually. You can also mark the games as owned or played when you get them.</li>
<li>Find Duplicates: You can find and remove duplicate entries in your game database. You can choose which fields to compare and how to merge the duplicates.</li>
<li>Update Checker: You can check for updates for the software and the online game database. You can also download and install the updates automatically or manually.</li>
</ul>
<p>These tools and functions can help you optimize your game collection and enhance your gaming experience.</p>
<h2>How to use TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2</h2>
<h3>Download and install the software</h3>
<p>The first step to use TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is to download and install the software on your PC. You can download the software from the official website of Colletorz.com or from other sources (such as torrent sites). However, we recommend that you download the software from the official website, as it is safer and more reliable. To install the software, you need to follow these steps:</p>
<ol>
<li>Run the setup file (GameCollectorSetup.exe) and follow the instructions on the screen.</li>
<li>Choose a destination folder for the software and click Next.</li>
<li>Choose a start menu folder for the software and click Next.</li>
<li>Choose whether to create a desktop shortcut for the software and click Next.</li>
<li>Click Install to start the installation process.</li>
<li>Wait for the installation to finish and click Finish.</li>
</ol>
<p>The software is now installed on your PC and ready to use.</p>
<h3>Run the keygen and generate a serial key</h3>
<p>The second step to use TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is to run the keygen and generate a serial key for activating the software. You can download the keygen from various sources (such as torrent sites or file-sharing platforms). However, we warn you that downloading and using a keygen is illegal and risky, as it may contain viruses or malware that can harm your PC or compromise your privacy. Therefore, we advise you to use a reliable antivirus program and scan the keygen before running it. To run the keygen and generate a serial key, you need to follow these steps:</p>
<ol>
<li>Extract the keygen file (TrT-TcT_Keygen.rar) using a file compression program (such as WinRAR or 7-Zip).</li>
<li>Run the keygen file (TrT-TcT_Keygen.exe) as administrator.</li>
<li>Select Colletorz.com Game Collector Pro 4.0.2 from the drop-down menu.</li>
<li>Click Generate to create a random serial key.</li>
<li>Copy the serial key to your clipboard or write it down somewhere.</li>
</ol>
<p>You have now generated a serial key for activating Colletorz.com Game Collector Pro 4.0.2.</p>
<h3>Activate the software with the serial key</h3>
<p>The third and final step to use TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is to activate the software with the serial key you generated. To activate the software, you need to follow these steps:</p>
<ol>
<li>Launch the software (GameCollector.exe) and click on the Menu button in the top left corner.</li>
<li>Select Activate License from the menu and enter your name and email address.</li>
<li>Paste or type the serial key you generated in the Serial Number field and click Activate.</li>
<li>Wait for the activation to complete and click OK.</li>
</ol>
<p>The software is now activated and you can use it without any limitations or restrictions.</p>
<h2>Benefits of using TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2</h2>
<h3>Save money and time</h3>
<p>One of the benefits of using TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is that you can save money and time. The software normally costs $29.95 for a single user license, which may be too expensive for some users. By using the keygen, you can get the software for free and avoid paying any fees or charges. You can also save time by not having to register or verify your license online or offline. You can simply generate a serial key and activate the software in a few minutes.</p>
<h3>Enjoy full features and updates</h3>
<p>Another benefit of using TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is that you can enjoy full features and updates. The software has many features and functions that can help you organize your game collection easily and efficiently, such as automatic game information and cover art, customizable database and interface, free CLZ Cloud storage and syncing, and other tools and functions. By using the keygen, you can access all these features without any limitations or restrictions. You can also get updates for the software and the online game database regularly, as the keygen does not interfere with the update checker or downloader.</p>
<h3>Organize your game collection easily and efficiently</h3>
<p>The third benefit of using TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2 is that you can organize your game collection easily and efficiently. The software is designed to help you catalog your game collection on your PC in a neat and convenient way. You can easily add games to your database by searching by title or by barcode, or by scanning barcodes with your phone or tablet. You can also customize your database by adding personal details, such as location, owner, purchase date, notes, rating, etc. You can also browse, sort, and search your game collection in various layouts and views, such as list, images, or card view. You can also export your game database to text or XML files, print game lists, or share your game list online. You can also use CLZ Cloud to backup your game database online and sync it across multiple devices. You can also use other tools and functions to optimize your game collection and enhance your gaming experience.</p>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In conclusion, Colletorz.com Game Collector Pro 4.0.2 is a powerful and user-friendly game database software that can catalog your game collection automatically. It has many features and functions that can help you organize your game collection easily and efficiently, such as automatic game information and cover art, customizable database and interface, free CLZ Cloud storage and syncing, and other tools and functions. TrT-TcT serial key keygen is a tool that can generate valid serial keys for activating the software for free. By using TrT-TcT serial key keygen for Colletorz.com Game Collector Pro 4.0.2, you can save money and time, enjoy full features and updates, and organize your game collection easily and efficiently.</p>
<h3>Call to action</h3>
<p>If you are interested in trying out Colletorz.com Game Collector Pro 4.0.2 with TrT-TcT serial key keygen, you can download them from the links below (at your own risk). However, we remind you that using a serial key keygen is illegal and unethical, and that we do not condone or encourage the use of TrT-TcT serial key keygen or any other similar tools. We also do not take any responsibility for any damages or consequences that may result from using them. We recommend that you buy the software from the official website of Colletorz.com and support the software developers. You can also try the free trial version of the software before buying it. You can visit the official website of Colletorz.com for more information and details about the software and its features.</p>
<h2>FAQs</h2>
<h3>What are the system requirements for Colletorz.com Game Collector Pro 4.0.2?</h3>
<p>The system requirements for Colletorz.com Game Collector Pro 4.0.2 are as follows:</p>
<ul>
<li>Operating system: Windows 10, 8, 7, Vista, XP</li>
<li>Processor: Pentium 4 or higher</li>
<li>Memory: 256 MB RAM or higher</li>
<li>Hard disk space: 50 MB or more</li>
<li>Internet connection: Required for online game database and CLZ Cloud</li>
</ul>
<h3>How can I scan barcodes with my phone or tablet?</h3>
<p>You can scan barcodes with your phone or tablet by using the free CLZ Barry app. CLZ Barry is a barcode scanner app that can scan barcodes with your device's camera and send them to your PC via Wi-Fi. You can use CLZ Barry to scan barcodes of your games and add them to your game database automatically. You can download CLZ Barry from the App Store or Google Play Store.</p>
<h3>How can I update my game database with new games and information?</h3>
<p>You can update your game database with new games and information by using the Update from Core feature. Update from Core is a feature that allows you to update your game database with new games and information from the online game database. You can use Update from Core to add new games to your database, update existing games with new information, or fix incorrect or missing information. You can also use the Batch Update feature to update multiple games at once.</p>
<h3>How can I backup and restore my game database?</h3>
<p>You can backup and restore your game database by using the Backup and Restore feature. Backup and Restore is a feature that allows you to backup your game database to a file on your PC or to CLZ Cloud online. You can use Backup and Restore to backup your game database regularly and restore it anytime you want. You can also use Backup and Restore to transfer your game database to another PC or device.</p>
<h3>How can I contact Colletorz.com for support or feedback?</h3>
<p>You can contact Colletorz.com for support or feedback by using the Contact Us feature. Contact Us is a feature that allows you to send an email to Colletorz.com with your questions, comments, suggestions, or problems. You can also attach screenshots or log files to your email if needed. You can also visit the Support Center on the official website of Colletorz.com for more help and resources.</p> b2dd77e56b<br />
<br />
<br />

spaces/1gistliPinn/ChatGPT4/Examples/3d Sexvilla 2 Everlust Sex Coins Hack.md
DELETED
@@ -1,6 +0,0 @@
<h2>3d sexvilla 2 everlust sex coins hack</h2><br /><p><b><b>Download</b> ✵ <a href="https://imgfil.com/2uxXwc">https://imgfil.com/2uxXwc</a></b></p><br /><br />
<br />
Disqus - 3d Sexvilla 2 Francais Gratuit;; Download 3d sexvilla 3. 3d sex villa everlust 3d full. Vidéos Porno de 3d Sexvilla 3 | metomalingsel.ml. Похожие видео ... 1fdad05405<br />
<br />
<br />
<p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Art Of Analog Layout By Alan Hastings Pdf Free Download ((FREE)).md
DELETED
@@ -1,8 +0,0 @@

<p>after reading the book, it's somewhat clear that there are many things to consider when designing an analog circuit. it is usually best to start by modeling the intended circuit in a simulation tool. the actual layout depends on the application and the circuit. if it is a discrete component circuit, one should consider the radiation effects in the environment. in most cases, the layout should be done in a systematic and step-wise manner. </p>
<p>i have a love of analog electronics, and i can tell you from having read this book, that if you have the passion to get into the field, this is the book for you. i've been doing analog for years, and still find myself reading this book to refresh myself, and also to learn new stuff. the layout process is explained in a very easy to understand manner. i highly recommend this book to anyone who is just starting out or who has been in analog for some time and wants to refresh their memory.</p>
<h2>Art Of Analog Layout By Alan Hastings Pdf Free Download</h2><br /><p><b><b>DOWNLOAD</b> ===== <a href="https://imgfil.com/2uy0CR">https://imgfil.com/2uy0CR</a></b></p><br /><br />
<p>i read this book because it was recommended by a colleague. i found the book to be very interesting, and it has helped me understand the basic issues and concepts of analog layout. it is a very useful reference book and covers a wide range of topics.</p>
<p> verbal explanations are favored over mathematical formulas, graphs are kept to a minimum, and line drawings are used in this user-friendly book. clear guidance and advice are provided for those professionals who lay out analog circuits. key topics: matching of resistors and capacitors: includes causes of mismatch, particularly the hydrogen effect and package shift. mos transistors: covers a brief history of floating gate devices, eprom and eeprom. applications of mos transistors: expands information on failure mechanisms, including bvdss/bvdii, silc, nbti/ptbi and gidl and the difference between electrical and electrothermal soa. consideration of failure mechanisms as crucial to layout: integrates further information into many chapters covering various devices. standard bipolar, polygate cmos and analog bicmos: covers all three fundamental processes. a valuable reference for professional layout designers. </p> 899543212b<br />
<br />
<br />

spaces/1gistliPinn/ChatGPT4/Examples/Download Mastercam X8 Full Crack TOP 64bit Windows.md
DELETED
@@ -1,34 +0,0 @@
<h2>Download Mastercam X8 Full Crack 64bit Windows</h2><br /><p><b><b>Download</b> ::: <a href="https://imgfil.com/2uxZDw">https://imgfil.com/2uxZDw</a></b></p><br /><br />

Quick Overview

Discover the simplicity of direct programming, with Mastercam Desktop Software. Workflows for tasks like programming and cutting with ease are just a few clicks away. Create with precision at the workstation of your choice, whether it's a desktop or mobile

Pricing and Availability

Buy Mastercam Desktop Software at $1,299.00 US.

About Mastercam Software

Mastercam software is designed to give users the freedom to operate with the tools they want, anywhere. Mastercam software can be used on any computer with a Windows operating system, even if the computer doesn't have a Mastercam CAMON-R. Whether you choose to operate with the new Mastercam Desktop Software or the new Mastercam Mobile Software, the Mastercam product will help you run your business more efficiently, more effectively, and more profitably.

Mastercam Software 2015

Mastercam Software 2016

Mastercam Software 2017

Mastercam Software 2018

Overview

Mastercam Desktop Software includes a variety of tasks, including creation, editing, and programming. To create with precision at the workstation of your choice, whether it's a desktop or mobile, Mastercam software will help you operate more efficiently, more effectively, and more profitably.

Mastercam software allows you to navigate your 3D drawing software directly from Mastercam. Whether you are using Mastercam Pro, Mastercam Lite, Mastercam OnDemand, Mastercam Studio, Mastercam OnDemand, Mastercam OnDemand — Caliber, Mastercam OnDemand, Mastercam OnDemand — Caliber — Pro, Mastercam OnDemand, Mastercam OnDemand — Caliber — Lite, Mastercam OnDemand, Mastercam OnDemand — Caliber — Lite — Pro, Mastercam OnDemand — Caliber — Lite — Pro, Mastercam Pro, or Mastercam Pro — Caliber, you can easily bring your drawings into Mastercam software and then access them as you would normally.

Mastercam software enables you to view, edit, and create 3D drawing and machining data in Mastercam. You can also generate and convert machining instructions directly from Mastercam. Mastercam software helps you keep control and leverage the power of Mastercam.

Mastercam software also includes a wide variety of programs for text, graphics, and drawing. These applications are organized so that you can easily find and access the functions you need. You can insert symbols and shapes, create, modify, and operate 4fefd39f24<br />
<br />
<br />
<p></p>

spaces/1line/AutoGPT/autogpt/setup.py
DELETED
@@ -1,77 +0,0 @@
"""Set up the AI and its goals"""
from colorama import Fore, Style

from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger


def prompt_user() -> AIConfig:
    """Prompt the user for input

    Returns:
        AIConfig: The AIConfig object containing the user's input
    """
    ai_name = ""
    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

    # Get AI Name from User
    logger.typewriter_log(
        "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
    )
    ai_name = utils.clean_input("AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    # Get AI Role from User
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with"
        " the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = "an AI designed to autonomously develop and run businesses with the"
        " sole goal of increasing your net worth."

    # Enter up to 5 goals for the AI
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
        " multiple businesses autonomously'",
    )
    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
    ai_goals = []
    for i in range(5):
        ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
        if ai_goal == "":
            break
        ai_goals.append(ai_goal)
    if not ai_goals:
        ai_goals = [
            "Increase net worth",
            "Grow Twitter Account",
            "Develop and manage multiple businesses autonomously",
        ]

    return AIConfig(ai_name, ai_role, ai_goals)
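
(For reference, a minimal sketch of the object this deleted helper builds; it is not part of the commit. It assumes the Space's bundled autogpt package is importable, uses the default name, role, and goals from the code above, and takes the AIConfig constructor signature from this file only, not from other AutoGPT versions.)

# Illustrative only: constructing an AIConfig with the defaults shown above.
from autogpt.config.ai_config import AIConfig

default_config = AIConfig(
    "Entrepreneur-GPT",
    "an AI designed to autonomously develop and run businesses with the"
    " sole goal of increasing your net worth.",
    [
        "Increase net worth",
        "Grow Twitter Account",
        "Develop and manage multiple businesses autonomously",
    ],
)
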
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us APK How to Install and Enjoy the Game on Android 4.4.4 Devices.md
DELETED
@@ -1,255 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Among Us APK para Android 4.4.4: How to Download and Play the Popular Game on Older Devices</h1>
|
3 |
-
<p>Among Us is a multiplayer social deduction game that has taken the world by storm in recent years.</p>
|
4 |
-
<h2>among us apk para android 4.4.4</h2><br /><p><b><b>Download File</b> ☆☆☆☆☆ <a href="https://urlin.us/2uSRUp">https://urlin.us/2uSRUp</a></b></p><br /><br />
|
5 |
-
<p>In this game, you can play online or over local WiFi with 4-15 players as you attempt to prepare your spaceship for departure.</p>
|
6 |
-
<p>But beware! One or more players are impostors who can kill everyone else.</p>
|
7 |
-
<p>The crewmates must work together to complete tasks or find out who the impostors are before it's too late.</p>
|
8 |
-
<p>among us apk para android 4.4.4 download<br />
|
9 |
-
among us apk para android 4.4.4 gratis<br />
|
10 |
-
among us apk para android 4.4.4 sin internet<br />
|
11 |
-
among us apk para android 4.4.4 ultima version<br />
|
12 |
-
among us apk para android 4.4.4 mod menu<br />
|
13 |
-
among us apk para android 4.4.4 hackeado<br />
|
14 |
-
among us apk para android 4.4.4 con chat de voz<br />
|
15 |
-
among us apk para android 4.4.4 siempre impostor<br />
|
16 |
-
among us apk para android 4.4.4 actualizado<br />
|
17 |
-
among us apk para android 4.4.4 no compatible<br />
|
18 |
-
among us apk para android 4.4.4 descargar gratis<br />
|
19 |
-
among us apk para android 4.4.4 sin anuncios<br />
|
20 |
-
among us apk para android 4.4.4 con skins<br />
|
21 |
-
among us apk para android 4.4.4 con amigos<br />
|
22 |
-
among us apk para android 4.4.4 online<br />
|
23 |
-
among us apk para android 4.4.4 offline<br />
|
24 |
-
among us apk para android 4.4.4 full<br />
|
25 |
-
among us apk para android 4.4.4 mega<br />
|
26 |
-
among us apk para android 4.4.4 mediafire<br />
|
27 |
-
among us apk para android 4.4.4 uptodown<br />
|
28 |
-
among us apk para android 4.4.4 apkpure<br />
|
29 |
-
among us apk para android 4.4.4 aptoide[^1^]<br />
|
30 |
-
among us apk para android 4.4.4 apkmirror<br />
|
31 |
-
among us apk para android 4.4.4 apkomg<br />
|
32 |
-
among us apk para android 4.4.2 compatible<br />
|
33 |
-
among us apk para android 5 compatible<br />
|
34 |
-
among us apk para android kitkat<br />
|
35 |
-
among us apk para celular antigo<br />
|
36 |
-
como instalar among us en android 44<br />
|
37 |
-
como jugar among us en android 44<br />
|
38 |
-
como descargar among us en android 44<br />
|
39 |
-
como actualizar among us en android 44<br />
|
40 |
-
como tener chat de voz en among us en android 44<br />
|
41 |
-
como ser siempre impostor en among us en android 44<br />
|
42 |
-
como hackear among us en android 44<br />
|
43 |
-
como tener skins gratis en among us en android 44<br />
|
44 |
-
como jugar con amigos en among us en android 44<br />
|
45 |
-
como jugar online en among us en android 44<br />
|
46 |
-
como jugar offline en among us en android 44<br />
|
47 |
-
como solucionar el problema de compatibilidad de among us en android 44</p>
|
48 |
-
<p>The impostors must use deception and sabotage to prevent the crewmates from succeeding.</p>
|
49 |
-
<p>Among Us is a fun and addictive game that can keep you entertained for hours.</p>
|
50 |
-
<p>But what if you have an older device that runs on Android 4.4.4?</p>
|
51 |
-
<p>Can you still play this game?</p>
|
52 |
-
<p>The answer is yes!</p>
|
53 |
-
<p>All you need is an APK file.</p>
|
54 |
-
<p>An APK file is an application package file that contains all <p>the files and data needed to install and run an app on your device.</p>
|
55 |
-
<p>By downloading and installing an APK file, you can bypass the Google Play Store and enjoy apps that are not available or compatible with your device.</p>
|
56 |
-
<p>In this article, we will show you how to download and play Among Us APK para Android 4.4.4 on your older device.</p>
|
57 |
-
<p>We will also cover the features, compatibility issues, risks, and precautions of doing so.</p>
|
58 |
-
<p>Finally, we will give you some tips and tricks for playing Among Us on Android 4.4.4 and compare it with other devices.</p>
|
59 |
-
<p>So, let's get started!</p>
|
60 |
-
<h2>Features of Among Us</h2>
|
61 |
-
<h3>Gameplay and Modes</h3>
|
62 |
-
<p>The gameplay of Among Us is simple but exciting.</p>
|
63 |
-
<p>You can choose to be a crewmate or an impostor at the start of each game.</p>
|
64 |
-
<p>If you are a crewmate, your goal is to complete tasks around the spaceship or find out who the impostors are and vote them out.</p>
|
65 |
-
<p>If you are an impostor, your goal is to kill enough crewmates or sabotage the spaceship so that the crewmates cannot win.</p>
|
66 |
-
<p>You can also use vents to move around the map quickly and secretly.</p>
|
67 |
-
<p>There are two main modes of playing Among Us: Classic and Hide n Seek.</p>
|
68 |
-
<p>In Classic mode, the impostors can kill crewmates with a cooldown timer and report dead bodies to trigger emergency meetings.</p>
|
69 |
-
<p>The crewmates can call emergency meetings by pressing a button or finding a dead body.</p>
|
70 |
-
<p>During meetings, everyone can discuss and vote for who they think is the impostor.</p>
|
71 |
-
<p>The impostors can also lie and accuse others to divert suspicion.</p>
|
72 |
-
<p>In Hide n Seek mode, the impostors can kill crewmates without a cooldown timer but cannot report dead bodies or sabotage the spaceship.</p>
|
73 |
-
<p>The crewmates cannot call emergency meetings or vote for the impostor.</p>
|
74 |
-
<p>The only way for the crewmates to win is to complete all their tasks before they are killed.</p>
|
75 |
-
<h3>Customization and Options</h3>
|
76 |
-
<p>One of the fun aspects of Among Us is that you can customize your character and game settings to suit your preferences.</p>
|
77 |
-
<p>You can choose from different colors, hats, skins, and pets for your character.</p>
|
78 |
-
<p>You can also change your name and chat language in the game.</p>
|
79 |
-
<p>Some of the customization options are free, while others require in-app purchases or watching ads.</p>
|
80 |
-
<p>You can also adjust various game settings, such as the number of players, impostors, tasks, speed, vision, kill distance, voting time, and more.</p>
|
81 |
-
<p>You can create your own game with your own rules or join an existing game with other players' rules.</p>
|
82 |
-
<h3>Maps and Locations</h3>
|
83 |
-
<p>Another feature of Among Us is that you can play on different maps and locations that have different layouts, tasks, vents, and sabotages.</p>
|
84 |
-
<p>There are four maps available in Among Us: The Skeld, MIRA HQ, Polus, and the Airship.</p>
|
85 |
-
<p>The Skeld is the first and most popular map in Among Us. It is a spaceship with 14 rooms connected by corridors. It has 18 tasks and 14 vents for the impostors to use. The sabotages include reactor meltdown, oxygen depletion, communications disruption, lights outage, and doors locking.</p>
|
86 |
-
<p>MIRA HQ is the second map in Among Us. It is a space station with 12 rooms connected by hallways. It has 19 tasks and 12 vents for the impostors to use. The sabotages include reactor meltdown, oxygen depletion, communications disruption, lights outage, doors locking, and greenhouse contamination.</p>
|
87 |
-
<p>Polus is the third map in Among Us. It is a planet base with 15 rooms connected by tunnels. It has 24 tasks and 12 vents for the impostors to use. The sabotages include reactor meltdown, oxygen depletion, communications disruption, lights outage, doors locking, seismic stabilizers failure, and laboratory decontamination.</p>
|
88 |
-
<p>The Airship is the fourth and newest map in Among Us. It is a flying ship with 18 rooms connected by ladders and platforms. It has 22 tasks and 18 vents for the impostors to use. The sabotages include reactor meltdown, oxygen depletion, communications disruption, lights outage, doors locking, gap room bridge, and cockpit steering.</p>
|
89 |
-
<h3>Online and Local Multiplayer</h3>
|
90 |
-
<p>The best feature of Among Us is that you can play online or over local WiFi with your friends or strangers from around the world.</p>
|
91 |
-
<p>You can find a game online by selecting a region, a map, and a game mode. You can also filter the game settings by using the search bar.</p>
|
92 |
-
<p>You can join a public game with random players or a private game with a code that you can share with your friends.</p>
|
93 |
-
<p>You can also create your own game online or over local WiFi and invite your friends to join. You can customize the game settings and choose the map and mode that you want to play.</p>
|
94 |
-
<p>During the game, you can communicate with other players by using text chat or voice chat. You can also use emojis and stickers to express yourself.</p>
|
95 |
-
<p>Among Us supports cross-platform play, which means you can play with players who are using different devices, such as PC, console, or mobile.</p>
|
96 |
-
<p>You can also integrate Among Us with Discord, a popular voice and text chat app for gamers. You can use Discord to chat with your friends, join servers, and find communities related to Among Us.</p>
|
97 |
-
<h2>Compatibility Issues of Among Us with Android 4.4.4</h2>
|
98 |
-
<h3>Minimum Requirements for Among Us</h3>
|
99 |
-
<p>Before you download and play Among Us on your device, you need to make sure that your device meets the minimum requirements for the game.</p>
|
100 |
-
<p>According to the official Google Play Store page, the minimum requirements for Among Us are:</p>
|
101 |
-
<ul>
|
102 |
-
<li>Android version: 4.4 or higher</li>
|
103 |
-
<li>RAM: 1 GB or higher</li>
|
104 |
-
<li>Storage space: 250 MB or higher</li>
|
105 |
-
<li>Internet connection: WiFi or cellular data</li>
|
106 |
-
</ul>
|
107 |
-
<p>If your device does not meet these requirements, you may not be able to download, install, or run the game properly.</p>
|
108 |
-
<h3>Reasons for Incompatibility with Android 4.4.4</h3>
|
109 |
-
<p>Even if your device meets the minimum requirements for Among Us, you may still encounter some compatibility issues with Android 4.4.4.</p>
|
110 |
-
<p>Some of the possible reasons for incompatibility are:</p>
|
111 |
-
<ul>
|
112 |
-
<li>Your device has an outdated platform version that does not support the latest features or updates of the game.</li>
|
113 |
-
<li>Your device has missing features or components that are required for the game to function properly.</li>
|
114 |
-
<li>Your device has regional restrictions that prevent you from accessing the game in your location.</li>
|
115 |
-
</ul>
|
116 |
-
<p>If you face any of these issues, you may see an error message on the Google Play Store that says "This app is incompatible with your device" or "This item isn't available in your country".</p>
|
117 |
-
<h3>Possible Solutions for Incompatibility with Android 4.4.4</h3>
|
118 |
-
<p>Fortunately, there are some possible solutions for incompatibility with Android 4.4.4 that you can try to fix the problem and play Among Us on your device.</p>
|
119 |
-
<p>Some of these solutions are:</p>
|
120 |
-
<ul>
|
121 |
-
<li>Clearing cache and data of the Google Play Store app and restarting your device. This may help to refresh the app and remove any corrupted files or data that may cause compatibility issues.</li>
|
122 |
-
<li>Uninstalling updates of the Google Play Store app and reverting to an older version. This may help to bypass any new changes or restrictions that may affect compatibility.</li>
|
123 |
-
<li>Updating your Android version to a newer one that supports the game. This may help to improve performance and stability of the game and enable new features or updates.</li>
|
124 |
-
<li>Sideloading the APK file of Among Us from a trusted source and installing it manually on your device. This may help to avoid the Google Play Store altogether and access the game directly.</li>
|
125 |
-
<li>Installing a custom ROM on your device that has a newer Android version or features that support the game. This may help to enhance your device's capabilities and compatibility with the game.</li>
|
126 |
-
</ul>
|
127 |
-
<p>However, before you try any of these solutions, you should be aware of the risks and precautions involved in doing so.</p>
|
128 |
-
<h2>How to Download and Install Among Us APK para Android 4.4.4</h2>
|
129 |
-
<h3>Steps to Download and Install Among Us APK para Android 4.4.4</h3>
|
130 |
-
<p>If you decide to sideload the APK file of Among Us on your device, you need to follow these steps carefully:</p>
|
131 |
-
<ol>
|
132 |
-
<li>Go to a trusted source that provides APK files of Among Us, such as APKPure, APKMirror, or Uptodown </li>
|
133 |
-
<li>Search for the latest version of Among Us APK that is compatible with Android 4.4.4, such as 2021.6.30 or 2021.5.12</li>
|
134 |
-
<li>Download the APK file to your device by tapping on the download button or scanning the QR code</li>
|
135 |
-
<li>Enable the installation of apps from unknown sources on your device by going to Settings > Security > Unknown Sources and toggling it on</li>
|
136 |
-
<li>Locate the downloaded APK file on your device by using a file manager app or the Downloads folder</li>
|
137 |
-
<li>Tap on the APK file and follow the instructions to install it on your device</li>
|
138 |
-
<li>Launch the game and enjoy playing Among Us on your Android 4.4.4 device</li>
|
139 |
-
</ol>
|
140 |
-
<p>Here is a table that summarizes the steps to download and install Among Us APK para Android 4.4.4:</p>
|
141 |
-
<table>
|
142 |
-
<tr>
|
143 |
-
<th>Step</th>
|
144 |
-
<th>Action</th>
|
145 |
-
<th>Source</th>
|
146 |
-
<th>Screenshot</th>
|
147 |
-
</tr>
|
148 |
-
<tr>
|
149 |
-
<td>1</td>
|
150 |
-
<td>Go to a trusted source that provides APK files of Among Us</td>
|
151 |
-
<td>[APKPure]</td>
|
152 |
-
<td><img src="^2^" alt="APKPure website"></td>
|
153 |
-
</tr>
|
154 |
-
<tr>
|
155 |
-
<td>2</td>
|
156 |
-
<td>Search for the latest version of Among Us APK that is compatible with Android 4.4.4</td>
|
157 |
-
<td>[Among Us 2021.6.30]</td>
|
158 |
-
<td><img src="^4^" alt="Among Us 2021.6.30 page"></td>
|
159 |
-
</tr>
|
160 |
-
<tr>
|
161 |
-
<td>3</td>
|
162 |
-
<td>Download the APK file to your device</td>
|
163 |
-
<td>[Download button]</td>
|
164 |
-
<td><img src="^6^" alt="Download button"></td>
|
165 |
-
</tr>
|
166 |
-
<tr>
|
167 |
-
<td>4</td>
|
168 |
-
<td>Enable the installation of apps from unknown sources on your device</td>
|
169 |
-
<td>[Settings > Security > Unknown Sources]</td>
|
170 |
-
<td><img src="^8^" alt="Unknown Sources toggle"></td>
|
171 |
-
</tr>
|
172 |
-
<tr>
|
173 |
-
<td>5</td>
|
174 |
-
<td>Locate the downloaded APK file on your device</td>
|
175 |
-
<td>[File manager app]</td>
|
176 |
-
<td><img src="^10^" alt="File manager app"></td>
|
177 |
-
</tr>
|
178 |
-
<tr>
|
179 |
-
<td>6</td>
|
180 |
-
<td>Tap on the APK file and follow the instructions to install it on your device</td>
|
181 |
-
<td>[Installation screen]</td>
|
182 |
-
<td><img src="^12^" alt="Installation screen"></td>
|
183 |
-
</tr>
|
184 |
-
<tr>
|
185 |
-
<td>7</td>
|
186 |
-
<td>Launch the game and enjoy playing Among Us on your Android 4.4.4 device</td>
|
187 |
-
<td>[Among Us icon]</td>
|
188 |
-
<td><img src="^14^" alt="Among Us icon"></td> </tr>
|
189 |
-
</table>
|
190 |
-
<h3>Risks and Precautions of Downloading and Installing Among Us APK para Android 4.4.4</h3>
|
191 |
-
<p>While downloading and installing Among Us APK para Android 4.4.4 may seem like an easy and convenient way to play the game on your older device, you should also be aware of the potential risks and precautions involved in doing so.</p>
|
192 |
-
<p>Some of the risks and precautions are:</p>
|
193 |
-
<ul>
|
194 |
-
<li>Malware and viruses: Some APK files may contain malicious software or viruses that can harm your device or steal your personal information. You should always download APK files from trusted sources and scan them with a reliable antivirus app before installing them.</li>
|
195 |
-
<li>Privacy issues: Some APK files may require access to your device's permissions, such as camera, microphone, contacts, location, or storage. You should always check the permissions before installing an APK file and deny any unnecessary or suspicious requests.</li>
|
196 |
-
<li>Legal issues: Some APK files may violate the terms and conditions of the original app or the Google Play Store. You may face legal consequences or penalties for downloading or using an unauthorized or modified version of the app.</li>
|
197 |
-
<li>Performance issues: Some APK files may not be optimized or compatible with your device's hardware or software. You may experience lag, crashes, glitches, or errors while playing the game. You should always backup your data and settings before installing an APK file and uninstall it if it causes any problems.</li>
|
198 |
-
</ul>
|
199 |
-
<p>Therefore, you should always exercise caution and discretion when downloading and installing Among Us APK para Android 4.4.4 on your device.</p>
|
200 |
-
<h2>How to Play Among Us on Android 4.4.4</h2>
|
201 |
-
<h3>Tips and Tricks for Playing Among Us on Android 4.4.4</h3>
|
202 |
-
<p>Now that you have downloaded and installed Among Us on your Android 4.4.4 device, you are ready to play the game and have fun with your friends or strangers.</p>
|
203 |
-
<p>Here are some tips and tricks for playing Among Us on Android 4.4.4:</p>
|
204 |
-
<ul>
|
205 |
-
<li>Adjust the settings: You can adjust the game settings to suit your preferences and device's capabilities. For example, you can lower the graphics quality, increase the frame rate, enable battery saver mode, or disable sound effects to improve performance and battery life.</li>
|
206 |
-
<li>Choose your role: You can choose to be a crewmate or an impostor at the start of each game. You can also customize your character's appearance and name. If you are a crewmate, you should try to complete your tasks as quickly as possible and report any suspicious behavior or dead bodies. If you are an impostor, you should try to blend in with the crewmates and kill them discreetly or sabotage the spaceship.</li>
|
207 |
-
<li>Communicate with other players: You can communicate with other players by using text chat or voice chat during meetings or in-game. You can also use emojis and stickers to express yourself. You should use communication to share information, ask questions, make accusations, defend yourself, or deceive others.</li>
|
208 |
-
<li>Use strategies: You can use various strategies to win the game as a crewmate or an impostor. For example, you can use logic, deduction, evidence, alibis, voting patterns, or psychology to find out who the impostors are or convince others that you are not one.</li>
|
209 |
-
</ul>
|
210 |
-
<h3>Comparison of Playing Among Us on Android 4.4.4 vs Other Devices</h3>
|
211 |
-
<p>Playing Among Us on Android 4.4.4 has its advantages and disadvantages compared to playing it on other devices, such as PC, console, or newer Android devices.</p>
|
212 |
-
<p>Some of the advantages are:</p>
|
213 |
-
<ul>
|
214 |
-
<li>You can play the game for free without paying any money or watching any ads.</li>
|
215 |
-
<li>You can play the game on your older device without buying a new one or upgrading your Android version.</li>
|
216 |
-
<li>You can play the game anywhere and anytime with your mobile device as long as you have an internet connection.</li>
|
217 |
-
</ul>
|
218 |
-
<p>Some of the disadvantages are:</p>
|
219 |
-
<ul>
|
220 |
-
<li>You may face compatibility issues or performance issues while playing the game on your older device.</li>
|
221 |
-
<li>You may face security risks or legal risks while downloading or installing an APK file of the game on your device.</li>
|
222 |
-
<li>You may miss out on some features or updates of the game that are only available on other devices.</li>
|
223 |
-
</ul>
|
224 |
-
<p>Therefore, you should weigh the pros and cons of playing Among Us on Android 4.4.4 before deciding whether it is worth it or not.</p>
|
225 |
-
<h2>Conclusion</h2>
|
226 |
-
<p>In conclusion, Among Us is a multiplayer social deduction game that is fun and addictive to play with your friends or strangers online or over local WiFi.</p>
<p>The game has various features, such as gameplay and modes, customization and options, maps and locations, and online and local multiplayer.</p>
|
227 |
-
<p>However, if you have an older device that runs on Android 4.4.4, you may face some compatibility issues with the game.</p>
|
228 |
-
<p>To solve this problem, you can download and install an APK file of Among Us that is compatible with Android 4.4.4.</p>
|
229 |
-
<p>An APK file is an application package file that contains all the files and data needed to install and run an app on your device.</p>
|
230 |
-
<p>By sideloading an APK file, you can bypass the Google Play Store and enjoy apps that are not available or compatible with your device.</p>
|
231 |
-
<p>In this article, we showed you how to download and install Among Us APK para Android 4.4.4 on your older device.</p>
|
232 |
-
<p>We also covered the compatibility issues, risks, and precautions of doing so.</p>
|
233 |
-
<p>Finally, we gave you some tips and tricks for playing Among Us on Android 4.4.4 and compared it with other devices.</p>
|
234 |
-
<p>We hope that this article was helpful and informative for you.</p>
|
235 |
-
<p>If you have any feedback or questions, please feel free to leave a comment below or contact us on our social media platforms.</p>
|
236 |
-
<p>Thank you for reading and happy gaming!</p>
|
237 |
-
<h2>FAQs</h2>
|
238 |
-
<p>Here are some frequently asked questions about Among Us APK para Android 4.4.4:</p>
|
239 |
-
<ol>
|
240 |
-
<li>What is the latest version of Among Us APK para Android 4.4.4?</li>
|
241 |
-
<li>How can I update Among Us APK para Android 4.4.4?</li>
|
242 |
-
<li>Is Among Us APK para Android 4.4.4 safe to download and install?</li>
|
243 |
-
<li>Can I play Among Us APK para Android 4.4.4 with other players who are using different devices or versions?</li>
|
244 |
-
<li>What are some alternatives to Among Us APK para Android 4.4.4?</li>
|
245 |
-
</ol>
|
246 |
-
<p>Here are the answers to these questions:</p>
|
247 |
-
<ol>
|
248 |
-
<li>The latest version of Among Us APK para Android 4.4.4 as of June 2023 is 2021.6.30. This version was released on June 15, 2023 and added new features, such as a new map (the Airship), new roles (the Sheriff and the Engineer), new tasks, new cosmetics, new settings, and bug fixes.</li>
|
249 |
-
<li>You can update Among Us APK para Android 4.4.4 by downloading and installing the latest version of the APK file from a trusted source. You can also check for updates within the game by tapping on the settings icon and selecting "Check for updates". However, you should always backup your data and settings before updating the game.</li>
|
250 |
-
<li>Among Us APK para Android 4.4.4 is safe to download and install if you get it from a trusted source that provides original and unmodified APK files of the game. You should also scan the APK file with a reliable antivirus app before installing it on your device. However, you should also be aware of the potential risks and precautions involved in sideloading an APK file, such as malware, viruses, privacy issues, legal issues, or performance issues.</li>
|
251 |
-
<li>Yes, you can play Among Us APK para Android 4.4.4 with other players who are using different devices or versions as long as they are using the same region, map, and game mode as you. Among Us supports cross-platform play, which means you can play with players who are using PC, console, or mobile devices. You can also integrate Among Us with Discord, a popular voice and text chat app for gamers.</li>
|
252 |
-
<li>If you are looking for some alternatives to Among Us APK para Android 4.4.4, you can try some other multiplayer social deduction games that are similar to Among Us, such as Werewolf Online, Town of Salem, Project Winter, Unfortunate Spacemen, or Deceit.</li>
|
253 |
-
</ol>
spaces/1phancelerku/anime-remove-background/Bluetooth Driver for Windows 7 (32-bit) - Download Now and Enjoy Wireless Connectivity.md
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Bluetooth Driver for Windows 7 (32-bit)</h1>
|
3 |
-
<p>Bluetooth is a wireless technology that allows you to connect various devices such as headphones, speakers, keyboards, mice, printers, and more to your computer. However, to use Bluetooth on your Windows 7 (32-bit) system, you need a compatible Bluetooth device and adapter, as well as a proper Bluetooth driver.</p>
|
4 |
-
<p>A Bluetooth driver is a software program that allows your operating system to communicate with your Bluetooth device and adapter. Without a Bluetooth driver, your device and adapter may not work properly or at all. Therefore, it is important to download and install the right Bluetooth driver for your Windows 7 (32-bit) system.</p>
|
5 |
-
<h2>download bluetooth driver for windows 7 (32-bit)</h2><br /><p><b><b>Download File</b> ☆ <a href="https://jinyurl.com/2uNRVc">https://jinyurl.com/2uNRVc</a></b></p><br /><br />
|
6 |
-
<p>In this article, we will show you how to download Bluetooth driver for Windows 7 (32-bit) in a few easy steps. We will also provide some tips on how to troubleshoot any issues with the Bluetooth driver and how to keep it updated. Let's get started!</p>
|
7 |
-
<h2>Check Your Bluetooth Device and Adapter</h2>
|
8 |
-
<p>Before you download the Bluetooth driver, you need to check your Bluetooth device and adapter. You need to know the model and manufacturer of your device and adapter, as well as whether they are compatible with Windows 7 (32-bit). This will help you find the right Bluetooth driver for your system.</p>
|
9 |
-
<p>To check your Bluetooth device and adapter, you can follow these steps:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Go to Start > Control Panel > Hardware and Sound > Devices and Printers.</li>
|
12 |
-
<li>Look for your Bluetooth device and adapter under the Devices or Unspecified section. If you don't see them, make sure they are turned on and within range.</li>
|
13 |
-
<li>Right-click on your Bluetooth device or adapter and select Properties.</li>
|
14 |
-
<li>Go to the Hardware tab and click on Properties again.</li>
|
15 |
-
<li>Go to the Details tab and select Hardware Ids from the drop-down menu.</li>
|
16 |
-
<li>Note down the values that start with VID_ (vendor ID) and PID_ (product ID). These are the identifiers of your device or adapter manufacturer and model. A short parsing sketch follows this list.</li>
|
17 |
-
</ul>
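<p>For reference, the hardware ID strings shown in Device Manager usually look something like USB\VID_8087&PID_0A2B&REV_0001 (this sample value is made up for illustration). The short Python sketch below only shows how the vendor and product IDs can be pulled out of such a string so you can search for them on the manufacturer's website.</p>
<pre><code>
# Minimal sketch: extract the vendor (VID) and product (PID) identifiers
# from a Windows hardware ID string. The sample string is illustrative only.
import re

hardware_id = r"USB\VID_8087&PID_0A2B&REV_0001"

match = re.search(r"VID_([0-9A-Fa-f]{4}).*?PID_([0-9A-Fa-f]{4})", hardware_id)
if match:
    vid, pid = match.groups()
    print(f"Vendor ID: {vid}, Product ID: {pid}")  # Vendor ID: 8087, Product ID: 0A2B
else:
    print("No VID/PID found in the hardware ID string")
</code></pre>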
|
18 |
-
<h2>Find the Right Bluetooth Driver for Your Device</h2>
|
19 |
-
<p>Once you have identified your Bluetooth device and adapter, you need to find the right Bluetooth driver for them. The best way to do this is to visit the official website of your device or adapter manufacturer and look for the support or download section. There, you can search for your device or adapter model and find the compatible Bluetooth driver for Windows 7 (32-bit).</p>
|
20 |
-
<p>For example, if you have an Intel wireless adapter, you can go to [Intel's website](^1^) and search for Intel Wireless Bluetooth for Windows 7*. There, you can find the latest version of the Bluetooth driver that supports various Intel wireless adapters. You can also see the release notes, installation instructions, supported hardware, bug fixes, and known issues of the driver.</p>
|
21 |
-
<p>If you have a Dell computer with a built-in Bluetooth device or adapter, you can go to [Dell's website](^2^) and enter your service tag or express service code. There, you can find all the drivers that are compatible with your system, including the Bluetooth driver. You can also see the date, version, size, importance, and description of each driver.</p>
|
22 |
-
<h2>Download and Install the Bluetooth Driver</h2>
|
23 |
-
<p>After you have found the right Bluetooth driver for your device or adapter, you need to download and install it on your Windows 7 (32-bit) system. To do this, you can follow these steps:</p>
|
24 |
-
<ul>
|
25 |
-
<li>Click on the download link of the Bluetooth driver and save the file to your preferred location.</li>
|
26 |
-
<li>Double-click on the downloaded file and follow the installation wizard. You may need to accept the license agreement, choose the installation location, and restart your computer.</li>
|
27 |
-
<li>After the installation is complete, you should see a Bluetooth icon in your system tray or notification area. You can click on it to access the Bluetooth settings and manage your Bluetooth devices.</li>
|
28 |
-
</ul>
|
29 |
-
<h2>Troubleshoot Any Issues with the Bluetooth Driver</h2>
|
30 |
-
<p>Sometimes, you may encounter some issues with the Bluetooth driver, such as connection errors, pairing failures, or driver conflicts. These issues can prevent you from using your Bluetooth devices or adapters properly or at all. To troubleshoot these issues, you can try some of these solutions:</p>
|
31 |
-
<p>download intel wireless bluetooth for windows 7 32-bit<br />
|
32 |
-
download dell wireless bluetooth driver for windows 7 32-bit<br />
|
33 |
-
download hp wireless bluetooth driver for windows 7 32-bit<br />
|
34 |
-
download lenovo wireless bluetooth driver for windows 7 32-bit<br />
|
35 |
-
download acer wireless bluetooth driver for windows 7 32-bit<br />
|
36 |
-
download asus wireless bluetooth driver for windows 7 32-bit<br />
|
37 |
-
download toshiba wireless bluetooth driver for windows 7 32-bit<br />
|
38 |
-
download samsung wireless bluetooth driver for windows 7 32-bit<br />
|
39 |
-
download sony wireless bluetooth driver for windows 7 32-bit<br />
|
40 |
-
download lg wireless bluetooth driver for windows 7 32-bit<br />
|
41 |
-
download microsoft wireless bluetooth driver for windows 7 32-bit<br />
|
42 |
-
download broadcom wireless bluetooth driver for windows 7 32-bit<br />
|
43 |
-
download qualcomm wireless bluetooth driver for windows 7 32-bit<br />
|
44 |
-
download realtek wireless bluetooth driver for windows 7 32-bit<br />
|
45 |
-
download ralink wireless bluetooth driver for windows 7 32-bit<br />
|
46 |
-
download mediatek wireless bluetooth driver for windows 7 32-bit<br />
|
47 |
-
download atheros wireless bluetooth driver for windows 7 32-bit<br />
|
48 |
-
download cisco wireless bluetooth driver for windows 7 32-bit<br />
|
49 |
-
download logitech wireless bluetooth driver for windows 7 32-bit<br />
|
50 |
-
download belkin wireless bluetooth driver for windows 7 32-bit<br />
|
51 |
-
download netgear wireless bluetooth driver for windows 7 32-bit<br />
|
52 |
-
download d-link wireless bluetooth driver for windows 7 32-bit<br />
|
53 |
-
download tp-link wireless bluetooth driver for windows 7 32-bit<br />
|
54 |
-
download linksys wireless bluetooth driver for windows 7 32-bit<br />
|
55 |
-
download edimax wireless bluetooth driver for windows 7 32-bit<br />
|
56 |
-
how to install wireless bluetooth driver on windows 7 (32-bit)<br />
|
57 |
-
how to update wireless bluetooth driver on windows 7 (32-bit)<br />
|
58 |
-
how to uninstall wireless bluetooth driver on windows 7 (32-bit)<br />
|
59 |
-
how to fix wireless bluetooth driver issues on windows 7 (32-bit)<br />
|
60 |
-
how to enable wireless bluetooth on windows 7 (32-bit)<br />
|
61 |
-
how to disable wireless bluetooth on windows 7 (32-bit)<br />
|
62 |
-
how to connect wireless bluetooth devices on windows 7 (32-bit)<br />
|
63 |
-
how to pair wireless bluetooth devices on windows 7 (32-bit)<br />
|
64 |
-
how to troubleshoot wireless bluetooth problems on windows 7 (32-bit)<br />
|
65 |
-
how to use wireless bluetooth features on windows 7 (32-bit)<br />
|
66 |
-
best free software to download wireless bluetooth drivers for windows 7 (32-bit)<br />
|
67 |
-
best paid software to download wireless bluetooth drivers for windows 7 (32-bit)<br />
|
68 |
-
best online tools to download wireless bluetooth drivers for windows 7 (32-bit)<br />
|
69 |
-
best websites to download wireless bluetooth drivers for windows 7 (32-bit)<br />
|
70 |
-
best blogs to learn about wireless bluetooth drivers for windows 7 (32-bit)<br />
|
71 |
-
best videos to watch about wireless bluetooth drivers for windows 7 (32-bit)<br />
|
72 |
-
best podcasts to listen about wireless bluetooth drivers for windows 7 (32-bit)<br />
|
73 |
-
best ebooks to read about wireless bluetooth drivers for windows 7 (32-bit)<br />
|
74 |
-
best courses to take about wireless bluetooth drivers for windows 7 (32-bit)<br />
|
75 |
-
best tips and tricks to optimize wireless bluetooth performance on windows 7 (32-bit)<br />
|
76 |
-
best practices and guidelines to follow when using wireless bluetooth on windows</p>
|
77 |
-
<ul>
|
78 |
-
<li>Make sure your Bluetooth device and adapter are turned on and within range. You can also try to remove and reinsert your adapter or replace its batteries if applicable.</li>
|
79 |
-
<li>Make sure your Bluetooth driver is up to date. You can check for updates on the manufacturer's website or use the Windows Update feature.</li>
|
80 |
-
<li>Make sure your Bluetooth device and adapter are compatible with each other and with Windows 7 (32-bit). You can check the compatibility on the manufacturer's website or use the Windows Compatibility Center.</li>
|
81 |
-
<li>Make sure your Bluetooth device and adapter are paired correctly. You can pair them by following the instructions on the manufacturer's website or using the Add a device wizard in Windows.</li>
|
82 |
-
<li>Make sure there are no other devices or programs that interfere with your Bluetooth connection. You can try to disable or uninstall any conflicting devices or programs, such as other wireless adapters, antivirus software, or firewalls.</li>
|
83 |
-
<li>Make sure there are no errors or corrupted files in your Bluetooth driver. You can try to scan and repair your driver using the System File Checker tool in Windows.</li>
|
84 |
-
</ul>
|
85 |
-
<h2>Update the Bluetooth Driver Regularly</h2>
|
86 |
-
<p>To ensure the optimal performance and security of your Bluetooth driver, you should update it regularly. Updating your Bluetooth driver can fix any bugs, improve compatibility, enhance features, and prevent any potential issues. There are two ways to update your Bluetooth driver: manually or automatically.</p>
|
87 |
-
<p>To update your Bluetooth driver manually, you need to visit the manufacturer's website and download the latest version of the driver. Then, you need to install it on your system following the same steps as before. You should check for updates at least once a month or whenever there is a new release.</p>
|
88 |
-
<p>To update your Bluetooth driver automatically, you need to use a third-party software that can scan your system and find the best drivers for your devices and adapters. Then, you need to install them on your system with one click. You can also schedule automatic updates at regular intervals or whenever there is a new release. Some examples of such software are Driver Booster, Driver Easy, and Driver Talent.</p>
|
89 |
-
<h2>Conclusion</h2>
|
90 |
-
<p>Downloading Bluetooth driver for Windows 7 (32-bit) is not a difficult task if you follow these steps:</p>
|
91 |
-
<ol>
|
92 |
-
<li>Check your Bluetooth device and adapter model and manufacturer.</li>
|
93 |
-
<li>Find the right Bluetooth driver for your device and adapter on the manufacturer's website.</li>
|
94 |
-
<li>Download and install the Bluetooth driver on your system.</li>
|
95 |
-
<li>Troubleshoot any issues with the Bluetooth driver using various solutions.</li>
|
96 |
-
<li>Update the Bluetooth driver regularly using manual or automatic methods.</li>
|
97 |
-
</ol>
|
98 |
-
<p>By doing so, you can enjoy using your Bluetooth devices and adapters on your Windows 7 (32-bit) system without any hassle. You can also benefit from improved performance, compatibility, features, and security of your Bluetooth driver.</p>
|
99 |
-
<h3>Frequently Asked Questions</h3>
|
100 |
-
<ul>
|
101 |
-
<li><strong>Q: How do I know if my Windows 7 system is 32-bit or 64-bit?</strong></li>
|
102 |
-
<li>A: You can check your system type by going to Start > Control Panel > System and Security > System. There, you can see whether your system is 32-bit or 64-bit under System type.</li>
|
103 |
-
<li><strong>Q: How do I know if my computer has a built-in Bluetooth adapter?</strong></li>
|
104 |
-
<li>A: You can check if your computer has a built-in Bluetooth adapter by going to Start > Control Panel > Hardware and Sound > Device Manager. There, you can see if there is a Bluetooth category under Network adapters. If there is, then you have a built-in Bluetooth adapter. If not, then you need an external Bluetooth adapter.</li>
|
105 |
-
<li><strong>Q: How do I turn on my Bluetooth device or adapter?</strong></li>
|
106 |
-
<li>A: You can turn on your Bluetooth device or adapter by pressing a button or switch on them. You can also turn on your Bluetooth adapter by plugging it into a USB port on your computer. You can also turn on your Bluetooth device or adapter by going to Start > Control Panel > Hardware and Sound > Devices and Printers. There, you can right-click on your Bluetooth device or adapter and select Turn on.</li>
|
107 |
-
<li><strong>Q: How do I pair my Bluetooth device with my computer?</strong></li>
|
108 |
-
<li>A: You can pair your Bluetooth device with your computer by going to Start > Control Panel > Hardware and Sound > Devices and Printers. There, you can click on Add a device and follow the instructions on the screen. You may need to enter a passcode or PIN to pair your device.</li>
|
109 |
-
<li><strong>Q: How do I use my Bluetooth device with my computer?</strong></li>
|
110 |
-
<li>A: You can use your Bluetooth device with your computer by going to Start > Control Panel > Hardware and Sound > Devices and Printers. There, you can double-click on your Bluetooth device and access its settings and features. You can also use your Bluetooth device with various applications and programs that support Bluetooth, such as music players, games, or browsers.</li>
|
111 |
-
</ul>
spaces/1phancelerku/anime-remove-background/Brasfoot 18 Premium The Best Soccer Manager Game for Android.md
DELETED
@@ -1,147 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Brasfoot 2018 Premium APK: A Review</h1>
|
3 |
-
<p>If you are a fan of soccer and want to experience the thrill of managing your own team, then you might want to check out Brasfoot 2018 Premium APK. This is a soccer manager and coach game that lets you command a football team, buy and sell players, choose the tactics and participate in the championships that simulate reality. In this article, we will review Brasfoot 2018 Premium APK and tell you why it is one of the best soccer manager games for Android devices.</p>
|
4 |
-
<h2>brasfoot 2018 premium apk</h2><br /><p><b><b>DOWNLOAD</b> ➡ <a href="https://jinyurl.com/2uNKRl">https://jinyurl.com/2uNKRl</a></b></p><br /><br />
|
5 |
-
<h2>What is Brasfoot 2018?</h2>
|
6 |
-
<p>Brasfoot 2018 is a soccer manager and coach game that was developed by Emmanuel dos Santos, a Brazilian programmer. The game was first released in 2003 for Windows computers, and has since been updated every year with new features and improvements. In 2018, Brasfoot released its first Android version, which is called Brasfoot 18. The game is available in English, Portuguese, Spanish and Italian languages.</p>
|
7 |
-
<h3>Features of Brasfoot 2018</h3>
|
8 |
-
<p>Brasfoot 2018 has many features that make it a realistic and fun soccer manager game. Some of the features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can choose from over 300 clubs from different countries and leagues, such as Brazil, England, Spain, Germany, Italy, France, Argentina, etc.</li>
|
11 |
-
<li>You can create your own custom club with your own name, logo, colors and players.</li>
|
12 |
-
<li>You can buy and sell players from the transfer market, or scout for new talents from other clubs.</li>
|
13 |
-
<li>You can choose from different formations and tactics for your team, such as 4-4-2, 4-3-3, 3-5-2, etc.</li>
|
14 |
-
<li>You can participate in various championships that simulate reality, such as the World Cup, the Champions League, the Copa Libertadores, etc.</li>
|
15 |
-
<li>You can view detailed statistics and reports of your team's performance, such as goals scored, assists, fouls, cards, injuries, etc.</li>
|
16 |
-
<li>You can save your progress and resume your game anytime you want.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>How to download and install Brasfoot 2018 Premium APK</h3>
|
19 |
-
<p>If you want to play Brasfoot 2018 on your Android device, you will need to download and install the Brasfoot 18 APK file. This is a modified version of the original game that enables extra options and features that are not available in the free version. Some of the extra options are:</p>
|
20 |
-
<ul>
|
21 |
-
<li>You can play with more than one club at the same time.</li>
|
22 |
-
<li>You can edit the players' names, skills and attributes.</li>
|
23 |
-
<li>You can create your own leagues and cups with your own rules.</li>
|
24 |
-
<li>You can access more than 20 additional skins for the game interface.</li>
|
25 |
-
</ul>
|
26 |
-
<p>To download and install Brasfoot 18 APK, you will need to follow these steps:</p>
|
27 |
-
<p>brasfoot 18 android download<br />
|
28 |
-
brasfoot 18 english soccer manager<br />
|
29 |
-
brasfoot 18 premium version features<br />
|
30 |
-
brasfoot 18 apk mod unlocked<br />
|
31 |
-
brasfoot 18 app for football fans<br />
|
32 |
-
brasfoot 18 game play online<br />
|
33 |
-
brasfoot 18 best tactics and strategies<br />
|
34 |
-
brasfoot 18 buy and sell players<br />
|
35 |
-
brasfoot 18 championships and leagues<br />
|
36 |
-
brasfoot 18 simulate reality with football<br />
|
37 |
-
brasfoot 18 super light and fast game<br />
|
38 |
-
brasfoot 18 update and patch download<br />
|
39 |
-
brasfoot 18 free apk for android<br />
|
40 |
-
brasfoot 18 how to install and play<br />
|
41 |
-
brasfoot 18 tips and tricks for beginners<br />
|
42 |
-
brasfoot 18 reviews and ratings<br />
|
43 |
-
brasfoot 18 cheats and hacks<br />
|
44 |
-
brasfoot 18 support and contact<br />
|
45 |
-
brasfoot 18 alternatives and similar games<br />
|
46 |
-
brasfoot 18 compatible devices and requirements<br />
|
47 |
-
brasfoot 18 latest version and changelog<br />
|
48 |
-
brasfoot 18 screenshots and videos<br />
|
49 |
-
brasfoot 18 forum and community<br />
|
50 |
-
brasfoot 18 news and announcements<br />
|
51 |
-
brasfoot 18 official website and social media</p>
|
52 |
-
<ol>
|
53 |
-
<li>Go to [this link](^1^) and click on the green "Download APK" button.</li>
|
54 |
-
<li>Wait for the download to finish and then open the file.</li>
|
55 |
-
<li>If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", go to your device's settings and enable the option to allow installation from unknown sources.</li>
|
56 |
-
<li>Follow the instructions on the screen to install the app.</li>
|
57 |
-
<li>Launch the app and enjoy playing Brasfoot 2018 Premium APK on your Android device.</li>
|
58 |
-
</ol> <h2>Why choose Brasfoot 2018 Premium APK over other soccer manager games?</h2>
|
59 |
-
<p>There are many soccer manager games available for Android devices, such as Football Manager, Top Eleven, PES Club Manager, etc. However, Brasfoot 2018 Premium APK stands out from the crowd for several reasons. Here are some of the advantages and disadvantages of Brasfoot 2018 Premium APK compared to other soccer manager games.</p>
|
60 |
-
<h3>Advantages of Brasfoot 2018 Premium APK</h3>
|
61 |
-
<p>Some of the benefits of playing Brasfoot 2018 Premium APK are:</p>
|
62 |
-
<ul>
|
63 |
-
<li>It is easy to play and has a simple and intuitive interface. You don't need to spend hours learning how to use the game's features and options.</li>
|
64 |
-
<li>It is realistic and challenging. You will face real-life situations and problems that a soccer manager has to deal with, such as injuries, suspensions, contracts, budgets, etc.</li>
|
65 |
-
<li>It is fun and addictive. You will feel the excitement and satisfaction of leading your team to victory and glory.</li>
|
66 |
-
<li>It is customizable and flexible. You can create your own club, league, cup, players, etc. You can also edit the existing ones to suit your preferences.</li>
|
67 |
-
<li>It is affordable and accessible. You can download and install the game for free from the link provided above. You don't need to pay any subscription or in-app purchases to enjoy the game's full potential.</li>
|
68 |
-
</ul>
|
69 |
-
<h3>Disadvantages of Brasfoot 2018 Premium APK</h3>
|
70 |
-
<p>Some of the drawbacks of playing Brasfoot 2018 Premium APK are:</p>
|
71 |
-
<ul>
|
72 |
-
<li>It is not very updated and modern. The game's graphics and design are not very impressive or appealing. The game also lacks some features that other soccer manager games have, such as online multiplayer mode, live matches, 3D animations, etc.</li>
|
73 |
-
<li>It is not very compatible and stable. The game may not work well on some devices or Android versions. The game may also crash or freeze sometimes during gameplay.</li>
|
74 |
-
<li>It is not very original and innovative. The game does not offer anything new or different from other soccer manager games. The game's concept and gameplay are very similar to other games in the genre.</li>
|
75 |
-
</ul>
|
76 |
-
<h2>Tips and tricks for playing Brasfoot 2018 Premium APK</h2>
|
77 |
-
<p>If you want to improve your skills and performance in Brasfoot 2018 Premium APK, you will need to follow some tips and tricks that will help you succeed in the game. Here are some of them:</p>
|
78 |
-
<h3>How to buy and sell players</h3>
|
79 |
-
<p>One of the most important aspects of being a soccer manager is buying and selling players. You will need to build a strong and balanced squad that can compete in different competitions. To do that, you will need to follow these steps:</p>
|
80 |
-
<ol>
|
81 |
-
<li>Go to the "Transfer Market" option in the main menu.</li>
|
82 |
-
<li>Select the "Search Players" option to find players that match your criteria, such as position, age, skill, value, etc.</li>
|
83 |
-
<li>Select the player you want to buy and click on the "Make Offer" button.</li>
|
84 |
-
<li>Negotiate with the player's club and agent until you reach an agreement on the transfer fee and salary.</li>
|
85 |
-
<li>If the deal is accepted, the player will join your club.</li>
|
86 |
-
<li>To sell a player, go to the "My Team" option in the main menu.</li>
|
87 |
-
<li>Select the player you want to sell and click on the "Sell Player" button.</li>
|
88 |
-
<li>Set a price for the player and wait for other clubs to make offers.</li>
|
89 |
-
<li>If you receive an offer that satisfies you, accept it and the player will leave your club.</li>
|
90 |
-
</ol>
|
91 |
-
<h3>How to choose the best tactics</h3>
|
92 |
-
<p>Another crucial aspect of being a soccer manager is choosing the best tactics for your team. You will need to select a formation, a style of play and a strategy that suit your players' skills and attributes. To do that, you will need to follow these steps:</p>
|
93 |
-
<ol>
|
94 |
-
<li>Go to the "Tactics" option in the main menu.</li>
|
95 |
-
<li>Select a formation from the list of available ones, such as 4-4-2, 4-3-3, 3-5-2, etc.</li>
|
96 |
-
<li>Select a style of play from the list of available ones, such as offensive, defensive, balanced, etc.</li>
|
97 |
-
<li>Select a strategy from the list of available ones, such as counterattack, possession, pressure, etc.</li>
|
98 |
-
<li>You can also adjust some parameters such as passing style, marking style, offside trap, etc.</li>
|
99 |
-
<li>You can also assign specific roles and instructions to each player, such as captain, free kick taker, penalty taker, etc.</li>
|
100 |
-
<li>Save your tactics and apply them to your team.</li>
|
101 |
-
</ol>
|
102 |
-
<h3>How to participate in the championships</h3>
|
103 |
-
<p>The ultimate goal of being a soccer manager is to participate in the championships and win trophies. You will need to compete against other teams and prove your skills and abilities. To do that, you will need to follow these steps:</p>
|
104 |
-
<ol>
|
105 |
-
<li>Go to the "Championships" option in the main menu.</li>
|
106 |
-
<li>Select a championship from the list of available ones, such as the World Cup, the Champions League, the Copa Libertadores, etc.</li>
|
107 |
-
<li>View the fixtures and standings of the championship and plan your matches accordingly.</li>
|
108 |
-
<li>Before each match, select your squad, tactics and strategy for the game.</li>
|
109 |
-
<li>During the match, you can view the live score, statistics and events of the game. You can also make substitutions and changes to your tactics and strategy if needed.</li>
|
110 |
-
<li>After the match, you can view the results, highlights and reports of the game. You can also view your team's performance and progress in the championship.</li>
|
111 |
-
<li>Repeat these steps until you finish the championship and see if you can win the trophy.</li>
|
112 |
-
</ol>
|
113 |
-
<h2>Conclusion</h2>
|
114 |
-
<p>Brasfoot 2018 Premium APK is a soccer manager and coach game that lets you command a football team, buy and sell players, choose the tactics and participate in the championships that simulate reality. It is a realistic and fun game that offers many features and options for you to customize and enjoy. It is also easy to play and affordable to download and install. However, it is not very updated and modern, not very compatible and stable, and not very original and innovative compared to other soccer manager games. Therefore, it may not appeal to everyone's taste and preference. If you are looking for a simple and classic soccer manager game for your Android device, then Brasfoot 2018 Premium APK might be a good choice for you. If you are looking for a more advanced and sophisticated soccer manager game for your Android device, then you might want to try other alternatives.</p>
|
115 |
-
<h3>Summary of the main points</h3>
|
116 |
-
<p>In this article, we have reviewed Brasfoot 2018 Premium APK and told you why it is one of the best soccer manager games for Android devices. We have covered the following points:</p>
|
117 |
-
<ul>
|
118 |
-
<li>What is Brasfoot 2018?</li>
|
119 |
-
<li>Features of Brasfoot 2018</li>
|
120 |
-
<li>How to download and install Brasfoot 2018 Premium APK</li>
|
121 |
-
<li>Why choose Brasfoot 2018 Premium APK over other soccer manager games?</li>
|
122 |
-
<li>Tips and tricks for playing Brasfoot 2018 Premium APK</li>
|
123 |
-
</ul>
|
124 |
-
<h3>Call to action</h3>
|
125 |
-
<p>If you are interested in playing Brasfoot 2018 Premium APK on your Android device, you can download it for free from [this link]. You can also visit [the official website] of Brasfoot 2018 for more information and updates about the game. You can also join [the official forum] of Brasfoot 2018 to share your opinions and experiences with other players. We hope you have enjoyed this article and found it useful. If you have any questions or comments, please feel free to leave them below. Thank you for reading!</p>
|
126 |
-
<h2>Frequently Asked Questions</h2>
|
127 |
-
<p>Here are some of the most common questions that people ask about Brasfoot 2018 Premium APK:</p>
|
128 |
-
<h4>Q: Is Brasfoot 2018 Premium APK safe to download and install?</h4>
|
129 |
-
<p>A: Yes, Brasfoot 18 APK is safe to download and install on your Android device. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source such as [this link] or [the official website] of Brasfoot 2018.</p>
|
130 |
-
<h4>Q: Is Brasfoot 2018 Premium APK legal to use?</h4>
|
131 |
-
<p>A: Yes, Brasfoot 18 APK is legal to use on your Android device. It is a modified version of the original game that enables extra options and features that are not available in the free version. However, it does not violate any copyrights or trademarks of the original game or its developer. However, you should always respect the rights and wishes of the original game and its developer and not use the modified version for any illegal or unethical purposes.</p>
|
132 |
-
<h4>Q: How can I update Brasfoot 2018 Premium APK?</h4>
|
133 |
-
<p>A: Brasfoot 18 APK is updated regularly by its developer to fix bugs and add new features and improvements. You can check for updates by visiting [the official website] of Brasfoot 2018 or by opening the app and clicking on the "Check for Updates" button. If there is a new version available, you can download and install it following the same steps as before.</p>
|
134 |
-
<h4>Q: How can I contact the developer of Brasfoot 2018 Premium APK?</h4>
|
135 |
-
<p>A: If you have any questions, suggestions, feedback or complaints about Brasfoot 18 APK, you can contact the developer by visiting [the official website] of Brasfoot 2018 and clicking on the "Contact" button. You can also send an email to [the official email address] of Brasfoot 2018 or follow [the official social media accounts] of Brasfoot 2018.</p>
|
136 |
-
<h4>Q: How can I support the developer of Brasfoot 2018 Premium APK?</h4>
|
137 |
-
<p>A: If you like Brasfoot 18 APK and want to support the developer, you can do so by visiting [the official website] of Brasfoot 2018 and clicking on the "Donate" button. You can also buy the original game from [the official store] of Brasfoot 2018 or leave a positive review and rating on [the official page] of Brasfoot 2018.</p>
|
138 |
-
<h4>Q: How can I uninstall Brasfoot 2018 Premium APK?</h4>
|
139 |
-
<p>A: If you want to uninstall Brasfoot 18 APK from your Android device, you can do so by following these steps:</p>
|
140 |
-
<ol>
|
141 |
-
<li>Go to your device's settings and select the "Apps" option.</li>
|
142 |
-
<li>Find and select "Brasfoot 18" from the list of apps.</li>
|
143 |
-
<li>Click on the "Uninstall" button and confirm your action.</li>
|
144 |
-
<li>Wait for the app to be removed from your device.</li>
|
145 |
-
</ol>
spaces/2023Liu2023/bingo/src/components/header.tsx
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
import * as React from 'react'
|
2 |
-
import { UserMenu } from './user-menu'
|
3 |
-
|
4 |
-
export async function Header() {
|
5 |
-
return (
|
6 |
-
<header className="sticky top-0 z-50 flex items-center justify-between w-full h-16 px-4 border-b shrink-0 bg-gradient-to-b from-background/10 via-background/50 to-background/80 backdrop-blur-xl">
|
7 |
-
<div className="flex items-center justify-end space-x-2 w-full">
|
8 |
-
<UserMenu />
|
9 |
-
</div>
|
10 |
-
</header>
|
11 |
-
)
|
12 |
-
}
|
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/dataset.py
DELETED
@@ -1,183 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import random
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import torch.utils.data
|
7 |
-
from tqdm import tqdm
|
8 |
-
|
9 |
-
from . import spec_utils
|
10 |
-
|
11 |
-
|
12 |
-
class VocalRemoverValidationSet(torch.utils.data.Dataset):
|
13 |
-
def __init__(self, patch_list):
|
14 |
-
self.patch_list = patch_list
|
15 |
-
|
16 |
-
def __len__(self):
|
17 |
-
return len(self.patch_list)
|
18 |
-
|
19 |
-
def __getitem__(self, idx):
|
20 |
-
path = self.patch_list[idx]
|
21 |
-
data = np.load(path)
|
22 |
-
|
23 |
-
X, y = data["X"], data["y"]
|
24 |
-
|
25 |
-
X_mag = np.abs(X)
|
26 |
-
y_mag = np.abs(y)
|
27 |
-
|
28 |
-
return X_mag, y_mag
|
29 |
-
|
30 |
-
|
31 |
-
def make_pair(mix_dir, inst_dir):
|
32 |
-
input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]
|
33 |
-
|
34 |
-
X_list = sorted(
|
35 |
-
[
|
36 |
-
os.path.join(mix_dir, fname)
|
37 |
-
for fname in os.listdir(mix_dir)
|
38 |
-
if os.path.splitext(fname)[1] in input_exts
|
39 |
-
]
|
40 |
-
)
|
41 |
-
y_list = sorted(
|
42 |
-
[
|
43 |
-
os.path.join(inst_dir, fname)
|
44 |
-
for fname in os.listdir(inst_dir)
|
45 |
-
if os.path.splitext(fname)[1] in input_exts
|
46 |
-
]
|
47 |
-
)
|
48 |
-
|
49 |
-
filelist = list(zip(X_list, y_list))
|
50 |
-
|
51 |
-
return filelist
|
52 |
-
|
53 |
-
|
54 |
-
def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
|
55 |
-
if split_mode == "random":
|
56 |
-
filelist = make_pair(
|
57 |
-
os.path.join(dataset_dir, "mixtures"),
|
58 |
-
os.path.join(dataset_dir, "instruments"),
|
59 |
-
)
|
60 |
-
|
61 |
-
random.shuffle(filelist)
|
62 |
-
|
63 |
-
if len(val_filelist) == 0:
|
64 |
-
val_size = int(len(filelist) * val_rate)
|
65 |
-
train_filelist = filelist[:-val_size]
|
66 |
-
val_filelist = filelist[-val_size:]
|
67 |
-
else:
|
68 |
-
train_filelist = [
|
69 |
-
pair for pair in filelist if list(pair) not in val_filelist
|
70 |
-
]
|
71 |
-
elif split_mode == "subdirs":
|
72 |
-
if len(val_filelist) != 0:
|
73 |
-
raise ValueError(
|
74 |
-
"The `val_filelist` option is not available in `subdirs` mode"
|
75 |
-
)
|
76 |
-
|
77 |
-
train_filelist = make_pair(
|
78 |
-
os.path.join(dataset_dir, "training/mixtures"),
|
79 |
-
os.path.join(dataset_dir, "training/instruments"),
|
80 |
-
)
|
81 |
-
|
82 |
-
val_filelist = make_pair(
|
83 |
-
os.path.join(dataset_dir, "validation/mixtures"),
|
84 |
-
os.path.join(dataset_dir, "validation/instruments"),
|
85 |
-
)
|
86 |
-
|
87 |
-
return train_filelist, val_filelist
|
88 |
-
|
89 |
-
|
90 |
-
def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
|
91 |
-
perm = np.random.permutation(len(X))
|
92 |
-
for i, idx in enumerate(tqdm(perm)):
|
93 |
-
if np.random.uniform() < reduction_rate:
|
94 |
-
y[idx] = spec_utils.reduce_vocal_aggressively(
|
95 |
-
X[idx], y[idx], reduction_mask
|
96 |
-
)
|
97 |
-
|
98 |
-
if np.random.uniform() < 0.5:
|
99 |
-
# swap channel
|
100 |
-
X[idx] = X[idx, ::-1]
|
101 |
-
y[idx] = y[idx, ::-1]
|
102 |
-
if np.random.uniform() < 0.02:
|
103 |
-
# mono
|
104 |
-
X[idx] = X[idx].mean(axis=0, keepdims=True)
|
105 |
-
y[idx] = y[idx].mean(axis=0, keepdims=True)
|
106 |
-
if np.random.uniform() < 0.02:
|
107 |
-
# inst
|
108 |
-
X[idx] = y[idx]
|
109 |
-
|
110 |
-
if np.random.uniform() < mixup_rate and i < len(perm) - 1:
|
111 |
-
lam = np.random.beta(mixup_alpha, mixup_alpha)
|
112 |
-
X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
|
113 |
-
y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]
|
114 |
-
|
115 |
-
return X, y
|
116 |
-
|
117 |
-
|
118 |
-
def make_padding(width, cropsize, offset):
|
119 |
-
left = offset
|
120 |
-
roi_size = cropsize - left * 2
|
121 |
-
if roi_size == 0:
|
122 |
-
roi_size = cropsize
|
123 |
-
right = roi_size - (width % roi_size) + left
|
124 |
-
|
125 |
-
return left, right, roi_size
|
126 |
-
|
127 |
-
|
128 |
-
def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
|
129 |
-
len_dataset = patches * len(filelist)
|
130 |
-
|
131 |
-
X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
|
132 |
-
y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
|
133 |
-
|
134 |
-
for i, (X_path, y_path) in enumerate(tqdm(filelist)):
|
135 |
-
X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
|
136 |
-
coef = np.max([np.abs(X).max(), np.abs(y).max()])
|
137 |
-
X, y = X / coef, y / coef
|
138 |
-
|
139 |
-
l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
|
140 |
-
X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
|
141 |
-
y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
|
142 |
-
|
143 |
-
starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
|
144 |
-
ends = starts + cropsize
|
145 |
-
for j in range(patches):
|
146 |
-
idx = i * patches + j
|
147 |
-
X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
|
148 |
-
y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]
|
149 |
-
|
150 |
-
return X_dataset, y_dataset
|
151 |
-
|
152 |
-
|
153 |
-
def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
|
154 |
-
patch_list = []
|
155 |
-
patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
|
156 |
-
cropsize, sr, hop_length, n_fft, offset
|
157 |
-
)
|
158 |
-
os.makedirs(patch_dir, exist_ok=True)
|
159 |
-
|
160 |
-
for i, (X_path, y_path) in enumerate(tqdm(filelist)):
|
161 |
-
basename = os.path.splitext(os.path.basename(X_path))[0]
|
162 |
-
|
163 |
-
X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
|
164 |
-
coef = np.max([np.abs(X).max(), np.abs(y).max()])
|
165 |
-
X, y = X / coef, y / coef
|
166 |
-
|
167 |
-
l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
|
168 |
-
X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
|
169 |
-
y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
|
170 |
-
|
171 |
-
len_dataset = int(np.ceil(X.shape[2] / roi_size))
|
172 |
-
for j in range(len_dataset):
|
173 |
-
outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
|
174 |
-
start = j * roi_size
|
175 |
-
if not os.path.exists(outpath):
|
176 |
-
np.savez(
|
177 |
-
outpath,
|
178 |
-
X=X_pad[:, :, start : start + cropsize],
|
179 |
-
y=y_pad[:, :, start : start + cropsize],
|
180 |
-
)
|
181 |
-
patch_list.append(outpath)
|
182 |
-
|
183 |
-
return VocalRemoverValidationSet(patch_list)
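For context, a rough usage sketch of the helpers in this removed dataset module. The directory layout, sample rate and crop settings below are assumed for illustration, not taken from this repository, and the module must be importable as part of its package because of the relative spec_utils import:

# Rough usage sketch for the removed dataset helpers (assumed values only).
# Expects dataset_dir to contain "mixtures/" and "instruments/" subfolders.
train_files, val_files = train_val_split(
    dataset_dir="data", split_mode="random", val_rate=0.1, val_filelist=[]
)
X_train, y_train = make_training_set(
    train_files, cropsize=256, patches=16, sr=44100,
    hop_length=1024, n_fft=2048, offset=0
)
val_dataset = make_validation_set(
    val_files, cropsize=256, sr=44100, hop_length=1024, n_fft=2048, offset=0
)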
|
spaces/AIConsultant/MusicGen/audiocraft/losses/stftloss.py
DELETED
@@ -1,207 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
# Adapted from MIT code under the original license
|
7 |
-
# Copyright 2019 Tomoki Hayashi
|
8 |
-
# MIT License (https://opensource.org/licenses/MIT)
|
9 |
-
import typing as tp
|
10 |
-
|
11 |
-
import torch
|
12 |
-
from torch import nn
|
13 |
-
from torch.nn import functional as F
|
14 |
-
|
15 |
-
|
16 |
-
# TODO: Replace with torchaudio.STFT?
|
17 |
-
def _stft(x: torch.Tensor, fft_size: int, hop_length: int, win_length: int,
|
18 |
-
window: tp.Optional[torch.Tensor], normalized: bool) -> torch.Tensor:
|
19 |
-
"""Perform STFT and convert to magnitude spectrogram.
|
20 |
-
|
21 |
-
Args:
|
22 |
-
x: Input signal tensor (B, C, T).
|
23 |
-
fft_size (int): FFT size.
|
24 |
-
hop_length (int): Hop size.
|
25 |
-
win_length (int): Window length.
|
26 |
-
window (torch.Tensor or None): Window function type.
|
27 |
-
normalized (bool): Whether to normalize the STFT or not.
|
28 |
-
|
29 |
-
Returns:
|
30 |
-
torch.Tensor: Magnitude spectrogram (B, C, #frames, fft_size // 2 + 1).
|
31 |
-
"""
|
32 |
-
B, C, T = x.shape
|
33 |
-
x_stft = torch.stft(
|
34 |
-
x.view(-1, T), fft_size, hop_length, win_length, window,
|
35 |
-
normalized=normalized, return_complex=True,
|
36 |
-
)
|
37 |
-
x_stft = x_stft.view(B, C, *x_stft.shape[1:])
|
38 |
-
real = x_stft.real
|
39 |
-
imag = x_stft.imag
|
40 |
-
|
41 |
-
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
|
42 |
-
return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
|
43 |
-
|
44 |
-
|
45 |
-
class SpectralConvergenceLoss(nn.Module):
|
46 |
-
"""Spectral convergence loss.
|
47 |
-
"""
|
48 |
-
def __init__(self, epsilon: float = torch.finfo(torch.float32).eps):
|
49 |
-
super().__init__()
|
50 |
-
self.epsilon = epsilon
|
51 |
-
|
52 |
-
def forward(self, x_mag: torch.Tensor, y_mag: torch.Tensor):
|
53 |
-
"""Calculate forward propagation.
|
54 |
-
|
55 |
-
Args:
|
56 |
-
x_mag: Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
|
57 |
-
y_mag: Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
|
58 |
-
Returns:
|
59 |
-
torch.Tensor: Spectral convergence loss value.
|
60 |
-
"""
|
61 |
-
return torch.norm(y_mag - x_mag, p="fro") / (torch.norm(y_mag, p="fro") + self.epsilon)
|
62 |
-
|
63 |
-
|
64 |
-
class LogSTFTMagnitudeLoss(nn.Module):
|
65 |
-
"""Log STFT magnitude loss.
|
66 |
-
|
67 |
-
Args:
|
68 |
-
epsilon (float): Epsilon value for numerical stability.
|
69 |
-
"""
|
70 |
-
def __init__(self, epsilon: float = torch.finfo(torch.float32).eps):
|
71 |
-
super().__init__()
|
72 |
-
self.epsilon = epsilon
|
73 |
-
|
74 |
-
def forward(self, x_mag: torch.Tensor, y_mag: torch.Tensor):
|
75 |
-
"""Calculate forward propagation.
|
76 |
-
|
77 |
-
Args:
|
78 |
-
x_mag (torch.Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
|
79 |
-
y_mag (torch.Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
|
80 |
-
Returns:
|
81 |
-
torch.Tensor: Log STFT magnitude loss value.
|
82 |
-
"""
|
83 |
-
return F.l1_loss(torch.log(self.epsilon + y_mag), torch.log(self.epsilon + x_mag))
|
84 |
-
|
85 |
-
|
86 |
-
class STFTLosses(nn.Module):
|
87 |
-
"""STFT losses.
|
88 |
-
|
89 |
-
Args:
|
90 |
-
n_fft (int): Size of FFT.
|
91 |
-
hop_length (int): Hop length.
|
92 |
-
win_length (int): Window length.
|
93 |
-
window (str): Window function type.
|
94 |
-
normalized (bool): Whether to use normalized STFT or not.
|
95 |
-
epsilon (float): Epsilon for numerical stability.
|
96 |
-
"""
|
97 |
-
def __init__(self, n_fft: int = 1024, hop_length: int = 120, win_length: int = 600,
|
98 |
-
window: str = "hann_window", normalized: bool = False,
|
99 |
-
epsilon: float = torch.finfo(torch.float32).eps):
|
100 |
-
super().__init__()
|
101 |
-
self.n_fft = n_fft
|
102 |
-
self.hop_length = hop_length
|
103 |
-
self.win_length = win_length
|
104 |
-
self.normalized = normalized
|
105 |
-
self.register_buffer("window", getattr(torch, window)(win_length))
|
106 |
-
self.spectral_convergenge_loss = SpectralConvergenceLoss(epsilon)
|
107 |
-
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss(epsilon)
|
108 |
-
|
109 |
-
def forward(self, x: torch.Tensor, y: torch.Tensor) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
110 |
-
"""Calculate forward propagation.
|
111 |
-
|
112 |
-
Args:
|
113 |
-
x (torch.Tensor): Predicted signal (B, T).
|
114 |
-
y (torch.Tensor): Groundtruth signal (B, T).
|
115 |
-
Returns:
|
116 |
-
torch.Tensor: Spectral convergence loss value.
|
117 |
-
torch.Tensor: Log STFT magnitude loss value.
|
118 |
-
"""
|
119 |
-
x_mag = _stft(x, self.n_fft, self.hop_length,
|
120 |
-
self.win_length, self.window, self.normalized) # type: ignore
|
121 |
-
y_mag = _stft(y, self.n_fft, self.hop_length,
|
122 |
-
self.win_length, self.window, self.normalized) # type: ignore
|
123 |
-
sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
|
124 |
-
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
|
125 |
-
|
126 |
-
return sc_loss, mag_loss
|
127 |
-
|
128 |
-
|
129 |
-
class STFTLoss(nn.Module):
|
130 |
-
"""Single Resolution STFT loss.
|
131 |
-
|
132 |
-
Args:
|
133 |
-
n_fft (int): Nb of FFT.
|
134 |
-
hop_length (int): Hop length.
|
135 |
-
win_length (int): Window length.
|
136 |
-
window (str): Window function type.
|
137 |
-
normalized (bool): Whether to use normalized STFT or not.
|
138 |
-
epsilon (float): Epsilon for numerical stability.
|
139 |
-
factor_sc (float): Coefficient for the spectral loss.
|
140 |
-
factor_mag (float): Coefficient for the magnitude loss.
|
141 |
-
"""
|
142 |
-
def __init__(self, n_fft: int = 1024, hop_length: int = 120, win_length: int = 600,
|
143 |
-
window: str = "hann_window", normalized: bool = False,
|
144 |
-
factor_sc: float = 0.1, factor_mag: float = 0.1,
|
145 |
-
epsilon: float = torch.finfo(torch.float32).eps):
|
146 |
-
super().__init__()
|
147 |
-
self.loss = STFTLosses(n_fft, hop_length, win_length, window, normalized, epsilon)
|
148 |
-
self.factor_sc = factor_sc
|
149 |
-
self.factor_mag = factor_mag
|
150 |
-
|
151 |
-
def forward(self, x: torch.Tensor, y: torch.Tensor) -> tp.Tuple[torch.Tensor, torch.Tensor]:
|
152 |
-
"""Calculate forward propagation.
|
153 |
-
|
154 |
-
Args:
|
155 |
-
x (torch.Tensor): Predicted signal (B, T).
|
156 |
-
y (torch.Tensor): Groundtruth signal (B, T).
|
157 |
-
Returns:
|
158 |
-
torch.Tensor: Single resolution STFT loss.
|
159 |
-
"""
|
160 |
-
sc_loss, mag_loss = self.loss(x, y)
|
161 |
-
return self.factor_sc * sc_loss + self.factor_mag * mag_loss
|
162 |
-
|
163 |
-
|
164 |
-
class MRSTFTLoss(nn.Module):
|
165 |
-
"""Multi resolution STFT loss.
|
166 |
-
|
167 |
-
Args:
|
168 |
-
n_ffts (Sequence[int]): Sequence of FFT sizes.
|
169 |
-
hop_lengths (Sequence[int]): Sequence of hop sizes.
|
170 |
-
win_lengths (Sequence[int]): Sequence of window lengths.
|
171 |
-
window (str): Window function type.
|
172 |
-
factor_sc (float): Coefficient for the spectral loss.
|
173 |
-
factor_mag (float): Coefficient for the magnitude loss.
|
174 |
-
normalized (bool): Whether to use normalized STFT or not.
|
175 |
-
epsilon (float): Epsilon for numerical stability.
|
176 |
-
"""
|
177 |
-
def __init__(self, n_ffts: tp.Sequence[int] = [1024, 2048, 512], hop_lengths: tp.Sequence[int] = [120, 240, 50],
|
178 |
-
win_lengths: tp.Sequence[int] = [600, 1200, 240], window: str = "hann_window",
|
179 |
-
factor_sc: float = 0.1, factor_mag: float = 0.1,
|
180 |
-
normalized: bool = False, epsilon: float = torch.finfo(torch.float32).eps):
|
181 |
-
super().__init__()
|
182 |
-
assert len(n_ffts) == len(hop_lengths) == len(win_lengths)
|
183 |
-
self.stft_losses = torch.nn.ModuleList()
|
184 |
-
for fs, ss, wl in zip(n_ffts, hop_lengths, win_lengths):
|
185 |
-
self.stft_losses += [STFTLosses(fs, ss, wl, window, normalized, epsilon)]
|
186 |
-
self.factor_sc = factor_sc
|
187 |
-
self.factor_mag = factor_mag
|
188 |
-
|
189 |
-
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
|
190 |
-
"""Calculate forward propagation.
|
191 |
-
|
192 |
-
Args:
|
193 |
-
x (torch.Tensor): Predicted signal (B, T).
|
194 |
-
y (torch.Tensor): Groundtruth signal (B, T).
|
195 |
-
Returns:
|
196 |
-
torch.Tensor: Multi resolution STFT loss.
|
197 |
-
"""
|
198 |
-
sc_loss = torch.Tensor([0.0])
|
199 |
-
mag_loss = torch.Tensor([0.0])
|
200 |
-
for f in self.stft_losses:
|
201 |
-
sc_l, mag_l = f(x, y)
|
202 |
-
sc_loss += sc_l
|
203 |
-
mag_loss += mag_l
|
204 |
-
sc_loss /= len(self.stft_losses)
|
205 |
-
mag_loss /= len(self.stft_losses)
|
206 |
-
|
207 |
-
return self.factor_sc * sc_loss + self.factor_mag * mag_loss
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/pytorch_utils.py
DELETED
@@ -1,251 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import time
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
|
6 |
-
|
7 |
-
def move_data_to_device(x, device):
|
8 |
-
if 'float' in str(x.dtype):
|
9 |
-
x = torch.Tensor(x)
|
10 |
-
elif 'int' in str(x.dtype):
|
11 |
-
x = torch.LongTensor(x)
|
12 |
-
else:
|
13 |
-
return x
|
14 |
-
|
15 |
-
return x.to(device)
|
16 |
-
|
17 |
-
|
18 |
-
def do_mixup(x, mixup_lambda):
|
19 |
-
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
|
20 |
-
(1, 3, 5, ...).
|
21 |
-
|
22 |
-
Args:
|
23 |
-
x: (batch_size * 2, ...)
|
24 |
-
mixup_lambda: (batch_size * 2,)
|
25 |
-
|
26 |
-
Returns:
|
27 |
-
out: (batch_size, ...)
|
28 |
-
"""
|
29 |
-
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
|
30 |
-
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
|
31 |
-
return out
|
32 |
-
|
33 |
-
|
34 |
-
def append_to_dict(dict, key, value):
|
35 |
-
if key in dict.keys():
|
36 |
-
dict[key].append(value)
|
37 |
-
else:
|
38 |
-
dict[key] = [value]
|
39 |
-
|
40 |
-
|
41 |
-
def forward(model, generator, return_input=False,
|
42 |
-
return_target=False):
|
43 |
-
"""Forward data to a model.
|
44 |
-
|
45 |
-
Args:
|
46 |
-
model: object
|
47 |
-
generator: object
|
48 |
-
return_input: bool
|
49 |
-
return_target: bool
|
50 |
-
|
51 |
-
Returns:
|
52 |
-
audio_name: (audios_num,)
|
53 |
-
clipwise_output: (audios_num, classes_num)
|
54 |
-
(ifexist) segmentwise_output: (audios_num, segments_num, classes_num)
|
55 |
-
(ifexist) framewise_output: (audios_num, frames_num, classes_num)
|
56 |
-
(optional) return_input: (audios_num, segment_samples)
|
57 |
-
(optional) return_target: (audios_num, classes_num)
|
58 |
-
"""
|
59 |
-
output_dict = {}
|
60 |
-
device = next(model.parameters()).device
|
61 |
-
time1 = time.time()
|
62 |
-
|
63 |
-
# Forward data to a model in mini-batches
|
64 |
-
for n, batch_data_dict in enumerate(generator):
|
65 |
-
print(n)
|
66 |
-
batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
|
67 |
-
|
68 |
-
with torch.no_grad():
|
69 |
-
model.eval()
|
70 |
-
batch_output = model(batch_waveform)
|
71 |
-
|
72 |
-
append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
|
73 |
-
|
74 |
-
append_to_dict(output_dict, 'clipwise_output',
|
75 |
-
batch_output['clipwise_output'].data.cpu().numpy())
|
76 |
-
|
77 |
-
if 'segmentwise_output' in batch_output.keys():
|
78 |
-
append_to_dict(output_dict, 'segmentwise_output',
|
79 |
-
batch_output['segmentwise_output'].data.cpu().numpy())
|
80 |
-
|
81 |
-
if 'framewise_output' in batch_output.keys():
|
82 |
-
append_to_dict(output_dict, 'framewise_output',
|
83 |
-
batch_output['framewise_output'].data.cpu().numpy())
|
84 |
-
|
85 |
-
if return_input:
|
86 |
-
append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
|
87 |
-
|
88 |
-
if return_target:
|
89 |
-
if 'target' in batch_data_dict.keys():
|
90 |
-
append_to_dict(output_dict, 'target', batch_data_dict['target'])
|
91 |
-
|
92 |
-
if n % 10 == 0:
|
93 |
-
print(' --- Inference time: {:.3f} s / 10 iterations ---'.format(
|
94 |
-
time.time() - time1))
|
95 |
-
time1 = time.time()
|
96 |
-
|
97 |
-
for key in output_dict.keys():
|
98 |
-
output_dict[key] = np.concatenate(output_dict[key], axis=0)
|
99 |
-
|
100 |
-
return output_dict
|
101 |
-
|
102 |
-
|
103 |
-
def interpolate(x, ratio):
|
104 |
-
"""Interpolate data in time domain. This is used to compensate the
|
105 |
-
resolution reduction in downsampling of a CNN.
|
106 |
-
|
107 |
-
Args:
|
108 |
-
x: (batch_size, time_steps, classes_num)
|
109 |
-
ratio: int, ratio to interpolate
|
110 |
-
|
111 |
-
Returns:
|
112 |
-
upsampled: (batch_size, time_steps * ratio, classes_num)
|
113 |
-
"""
|
114 |
-
(batch_size, time_steps, classes_num) = x.shape
|
115 |
-
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
|
116 |
-
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
|
117 |
-
return upsampled
|
118 |
-
|
119 |
-
|
120 |
-
def pad_framewise_output(framewise_output, frames_num):
|
121 |
-
"""Pad framewise_output to the same length as input frames. The pad value
|
122 |
-
is the same as the value of the last frame.
|
123 |
-
|
124 |
-
Args:
|
125 |
-
framewise_output: (batch_size, frames_num, classes_num)
|
126 |
-
frames_num: int, number of frames to pad
|
127 |
-
|
128 |
-
Outputs:
|
129 |
-
output: (batch_size, frames_num, classes_num)
|
130 |
-
"""
|
131 |
-
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
|
132 |
-
"""tensor for padding"""
|
133 |
-
|
134 |
-
output = torch.cat((framewise_output, pad), dim=1)
|
135 |
-
"""(batch_size, frames_num, classes_num)"""
|
136 |
-
|
137 |
-
return output
|
138 |
-
|
139 |
-
|
140 |
-
def count_parameters(model):
|
141 |
-
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
142 |
-
|
143 |
-
|
144 |
-
def count_flops(model, audio_length):
|
145 |
-
"""Count flops. Code modified from others' implementation.
|
146 |
-
"""
|
147 |
-
multiply_adds = True
|
148 |
-
list_conv2d=[]
|
149 |
-
def conv2d_hook(self, input, output):
|
150 |
-
batch_size, input_channels, input_height, input_width = input[0].size()
|
151 |
-
output_channels, output_height, output_width = output[0].size()
|
152 |
-
|
153 |
-
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
|
154 |
-
bias_ops = 1 if self.bias is not None else 0
|
155 |
-
|
156 |
-
params = output_channels * (kernel_ops + bias_ops)
|
157 |
-
flops = batch_size * params * output_height * output_width
|
158 |
-
|
159 |
-
list_conv2d.append(flops)
|
160 |
-
|
161 |
-
list_conv1d=[]
|
162 |
-
def conv1d_hook(self, input, output):
|
163 |
-
batch_size, input_channels, input_length = input[0].size()
|
164 |
-
output_channels, output_length = output[0].size()
|
165 |
-
|
166 |
-
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
|
167 |
-
bias_ops = 1 if self.bias is not None else 0
|
168 |
-
|
169 |
-
params = output_channels * (kernel_ops + bias_ops)
|
170 |
-
flops = batch_size * params * output_length
|
171 |
-
|
172 |
-
list_conv1d.append(flops)
|
173 |
-
|
174 |
-
list_linear=[]
|
175 |
-
def linear_hook(self, input, output):
|
176 |
-
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
|
177 |
-
|
178 |
-
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
|
179 |
-
bias_ops = self.bias.nelement()
|
180 |
-
|
181 |
-
flops = batch_size * (weight_ops + bias_ops)
|
182 |
-
list_linear.append(flops)
|
183 |
-
|
184 |
-
list_bn=[]
|
185 |
-
def bn_hook(self, input, output):
|
186 |
-
list_bn.append(input[0].nelement() * 2)
|
187 |
-
|
188 |
-
list_relu=[]
|
189 |
-
def relu_hook(self, input, output):
|
190 |
-
list_relu.append(input[0].nelement() * 2)
|
191 |
-
|
192 |
-
list_pooling2d=[]
|
193 |
-
def pooling2d_hook(self, input, output):
|
194 |
-
batch_size, input_channels, input_height, input_width = input[0].size()
|
195 |
-
output_channels, output_height, output_width = output[0].size()
|
196 |
-
|
197 |
-
kernel_ops = self.kernel_size * self.kernel_size
|
198 |
-
bias_ops = 0
|
199 |
-
params = output_channels * (kernel_ops + bias_ops)
|
200 |
-
flops = batch_size * params * output_height * output_width
|
201 |
-
|
202 |
-
list_pooling2d.append(flops)
|
203 |
-
|
204 |
-
list_pooling1d=[]
|
205 |
-
def pooling1d_hook(self, input, output):
|
206 |
-
batch_size, input_channels, input_length = input[0].size()
|
207 |
-
output_channels, output_length = output[0].size()
|
208 |
-
|
209 |
-
kernel_ops = self.kernel_size[0]
|
210 |
-
bias_ops = 0
|
211 |
-
|
212 |
-
params = output_channels * (kernel_ops + bias_ops)
|
213 |
-
flops = batch_size * params * output_length
|
214 |
-
|
215 |
-
list_pooling2d.append(flops)
|
216 |
-
|
217 |
-
def foo(net):
|
218 |
-
childrens = list(net.children())
|
219 |
-
if not childrens:
|
220 |
-
if isinstance(net, nn.Conv2d):
|
221 |
-
net.register_forward_hook(conv2d_hook)
|
222 |
-
elif isinstance(net, nn.Conv1d):
|
223 |
-
net.register_forward_hook(conv1d_hook)
|
224 |
-
elif isinstance(net, nn.Linear):
|
225 |
-
net.register_forward_hook(linear_hook)
|
226 |
-
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
|
227 |
-
net.register_forward_hook(bn_hook)
|
228 |
-
elif isinstance(net, nn.ReLU):
|
229 |
-
net.register_forward_hook(relu_hook)
|
230 |
-
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
|
231 |
-
net.register_forward_hook(pooling2d_hook)
|
232 |
-
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
|
233 |
-
net.register_forward_hook(pooling1d_hook)
|
234 |
-
else:
|
235 |
-
print('Warning: flop of module {} is not counted!'.format(net))
|
236 |
-
return
|
237 |
-
for c in childrens:
|
238 |
-
foo(c)
|
239 |
-
|
240 |
-
# Register hook
|
241 |
-
foo(model)
|
242 |
-
|
243 |
-
device = device = next(model.parameters()).device
|
244 |
-
input = torch.rand(1, audio_length).to(device)
|
245 |
-
|
246 |
-
out = model(input)
|
247 |
-
|
248 |
-
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
|
249 |
-
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
|
250 |
-
|
251 |
-
return total_flops
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Make An Audio
|
3 |
-
emoji: 😻
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.38.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/AiService.py
DELETED
@@ -1,40 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import requests
|
3 |
-
from ...typing import get_type_hints
|
4 |
-
|
5 |
-
url = "https://aiservice.vercel.app/api/chat/answer"
|
6 |
-
model = ['gpt-3.5-turbo']
|
7 |
-
supports_stream = False
|
8 |
-
needs_auth = False
|
9 |
-
|
10 |
-
|
11 |
-
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
|
12 |
-
base = ''
|
13 |
-
for message in messages:
|
14 |
-
base += '%s: %s\n' % (message['role'], message['content'])
|
15 |
-
base += 'assistant:'
|
16 |
-
|
17 |
-
headers = {
|
18 |
-
"accept": "*/*",
|
19 |
-
"content-type": "text/plain;charset=UTF-8",
|
20 |
-
"sec-fetch-dest": "empty",
|
21 |
-
"sec-fetch-mode": "cors",
|
22 |
-
"sec-fetch-site": "same-origin",
|
23 |
-
"Referer": "https://aiservice.vercel.app/chat",
|
24 |
-
}
|
25 |
-
data = {
|
26 |
-
"input": base
|
27 |
-
}
|
28 |
-
response = requests.post(url, headers=headers, json=data)
|
29 |
-
if response.status_code == 200:
|
30 |
-
_json = response.json()
|
31 |
-
yield _json['data']
|
32 |
-
else:
|
33 |
-
print(f"Error Occurred::{response.status_code}")
|
34 |
-
return None
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
|
39 |
-
'(%s)' % ', '.join(
|
40 |
-
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adr740/Hadith_AI_Explorer/__init__.py
DELETED
File without changes
|
spaces/AgentVerse/agentVerse/ui/run.sh
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
#npm install
|
2 |
-
npm run watch
|
3 |
-
#npm run dev
|
4 |
-
#npm run build
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/facebook/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
import Facebook from './Facebook';
|
2 |
-
import Base from '../base/Base';
|
3 |
-
|
4 |
-
export default function Factory(
|
5 |
-
config?: Base.IConfig
|
6 |
-
): Facebook;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/GetChartData.js
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
var GetChartData = function (datasetIndex, dataIndex) {
|
2 |
-
var dataset = this.getChartDataset(datasetIndex);
|
3 |
-
if (dataset === undefined) {
|
4 |
-
return undefined;
|
5 |
-
}
|
6 |
-
if (typeof (dataIndex) === 'string') {
|
7 |
-
var labels = this.chart.data.labels;
|
8 |
-
dataIndex = labels.indexOf(dataIndex);
|
9 |
-
if (dataIndex === -1) {
|
10 |
-
return undefined;
|
11 |
-
}
|
12 |
-
}
|
13 |
-
return dataset.data[dataIndex];
|
14 |
-
}
|
15 |
-
export default GetChartData;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlanMars/QYL-AI-Space/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: QYL AI Space
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.29.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: gpl-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alfasign/Midjourney_Prompt/app.py
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
from transformers import pipeline, set_seed
|
2 |
-
import gradio as grad
|
3 |
-
import random
|
4 |
-
import re
|
5 |
-
|
6 |
-
gpt2_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator')
|
7 |
-
|
8 |
-
with open("name.txt", "r") as f:
|
9 |
-
line = f.readlines()
|
10 |
-
|
11 |
-
|
12 |
-
def generate(starting_text):
|
13 |
-
for count in range(6):
|
14 |
-
seed = random.randint(100, 1000000)
|
15 |
-
set_seed(seed)
|
16 |
-
|
17 |
-
# If the text field is empty
|
18 |
-
if starting_text == "":
|
19 |
-
starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
|
20 |
-
starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
|
21 |
-
print(starting_text)
|
22 |
-
|
23 |
-
response = gpt2_pipe(starting_text, max_length=random.randint(60, 90), num_return_sequences=8)
|
24 |
-
response_list = []
|
25 |
-
for x in response:
|
26 |
-
resp = x['generated_text'].strip()
|
27 |
-
if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
|
28 |
-
response_list.append(resp)
|
29 |
-
|
30 |
-
response_end = "\n".join(response_list)
|
31 |
-
response_end = re.sub('[^ ]+\.[^ ]+','', response_end)
|
32 |
-
response_end = response_end.replace("<", "").replace(">", "")
|
33 |
-
if response_end != "":
|
34 |
-
return response_end
|
35 |
-
if count == 5:
|
36 |
-
return response_end
|
37 |
-
|
38 |
-
|
39 |
-
txt = grad.Textbox(lines=1, label="English", placeholder="English Text here")
|
40 |
-
out = grad.Textbox(lines=6, label="Generated Text")
|
41 |
-
examples = [["mythology of the Slavs"], ["All-seeing eye monitors these world"], ["astronaut dog"],
|
42 |
-
["A monochrome forest of ebony trees"], ["sad view of worker in office,"],
|
43 |
-
["Headshot photo portrait of John Lennon"], ["wide field with thousands of blue nemophila,"]]
|
44 |
-
title = "Midjourney Prompt Generator by ALF"
|
45 |
-
description = ""
|
46 |
-
article = "<div><center><img src='https://visitor-badge.glitch.me/badge?page_id=max_skobeev_prompt_generator_public' alt='visitor badge'></center></div>"
|
47 |
-
|
48 |
-
grad.Interface(fn=generate,
|
49 |
-
inputs=txt,
|
50 |
-
outputs=out,
|
51 |
-
examples=examples,
|
52 |
-
title=title,
|
53 |
-
description=description,
|
54 |
-
article=article,
|
55 |
-
allow_flagging='never',
|
56 |
-
cache_examples=False).queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/SingleChannel.py
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import clip
|
7 |
-
from PIL import Image
|
8 |
-
import copy
|
9 |
-
from manipulate import Manipulator
|
10 |
-
import argparse
|
11 |
-
|
12 |
-
def GetImgF(out,model,preprocess):
|
13 |
-
imgs=out
|
14 |
-
imgs1=imgs.reshape([-1]+list(imgs.shape[2:]))
|
15 |
-
|
16 |
-
tmp=[]
|
17 |
-
for i in range(len(imgs1)):
|
18 |
-
|
19 |
-
img=Image.fromarray(imgs1[i])
|
20 |
-
image = preprocess(img).unsqueeze(0).to(device)
|
21 |
-
tmp.append(image)
|
22 |
-
|
23 |
-
image=torch.cat(tmp)
|
24 |
-
with torch.no_grad():
|
25 |
-
image_features = model.encode_image(image)
|
26 |
-
|
27 |
-
image_features1=image_features.cpu().numpy()
|
28 |
-
image_features1=image_features1.reshape(list(imgs.shape[:2])+[512])
|
29 |
-
|
30 |
-
return image_features1
|
31 |
-
|
32 |
-
def GetFs(fs):
|
33 |
-
tmp=np.linalg.norm(fs,axis=-1)
|
34 |
-
fs1=fs/tmp[:,:,:,None]
|
35 |
-
fs2=fs1[:,:,1,:]-fs1[:,:,0,:] # 5*sigma - (-5)* sigma
|
36 |
-
fs3=fs2/np.linalg.norm(fs2,axis=-1)[:,:,None]
|
37 |
-
fs3=fs3.mean(axis=1)
|
38 |
-
fs3=fs3/np.linalg.norm(fs3,axis=-1)[:,None]
|
39 |
-
return fs3
|
40 |
-
|
41 |
-
#%%
|
42 |
-
if __name__ == "__main__":
|
43 |
-
parser = argparse.ArgumentParser(description='Process some integers.')
|
44 |
-
|
45 |
-
parser.add_argument('--dataset_name',type=str,default='cat',
|
46 |
-
help='name of dataset, for example, ffhq')
|
47 |
-
args = parser.parse_args()
|
48 |
-
dataset_name=args.dataset_name
|
49 |
-
|
50 |
-
#%%
|
51 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
52 |
-
model, preprocess = clip.load("ViT-B/32", device=device)
|
53 |
-
#%%
|
54 |
-
M=Manipulator(dataset_name=dataset_name)
|
55 |
-
np.set_printoptions(suppress=True)
|
56 |
-
print(M.dataset_name)
|
57 |
-
#%%
|
58 |
-
img_sindex=0
|
59 |
-
num_images=100
|
60 |
-
dlatents_o=[]
|
61 |
-
tmp=img_sindex*num_images
|
62 |
-
for i in range(len(M.dlatents)):
|
63 |
-
tmp1=M.dlatents[i][tmp:(tmp+num_images)]
|
64 |
-
dlatents_o.append(tmp1)
|
65 |
-
#%%
|
66 |
-
|
67 |
-
all_f=[]
|
68 |
-
M.alpha=[-5,5] #ffhq 5
|
69 |
-
M.step=2
|
70 |
-
M.num_images=num_images
|
71 |
-
select=np.array(M.mindexs)<=16 #below or equal to 128 resolution
|
72 |
-
mindexs2=np.array(M.mindexs)[select]
|
73 |
-
for lindex in mindexs2: #ignore ToRGB layers
|
74 |
-
print(lindex)
|
75 |
-
num_c=M.dlatents[lindex].shape[1]
|
76 |
-
for cindex in range(num_c):
|
77 |
-
|
78 |
-
M.dlatents=copy.copy(dlatents_o)
|
79 |
-
M.dlatents[lindex][:,cindex]=M.code_mean[lindex][cindex]
|
80 |
-
|
81 |
-
M.manipulate_layers=[lindex]
|
82 |
-
codes,out=M.EditOneC(cindex)
|
83 |
-
image_features1=GetImgF(out,model,preprocess)
|
84 |
-
all_f.append(image_features1)
|
85 |
-
|
86 |
-
all_f=np.array(all_f)
|
87 |
-
|
88 |
-
fs3=GetFs(all_f)
|
89 |
-
|
90 |
-
#%%
|
91 |
-
file_path='./npy/'+M.dataset_name+'/'
|
92 |
-
np.save(file_path+'fs3',fs3)
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/persistence.py
DELETED
@@ -1,251 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Facilities for pickling Python code alongside other data.
|
10 |
-
|
11 |
-
The pickled code is automatically imported into a separate Python module
|
12 |
-
during unpickling. This way, any previously exported pickles will remain
|
13 |
-
usable even if the original code is no longer available, or if the current
|
14 |
-
version of the code is not consistent with what was originally pickled."""
|
15 |
-
|
16 |
-
import sys
|
17 |
-
import pickle
|
18 |
-
import io
|
19 |
-
import inspect
|
20 |
-
import copy
|
21 |
-
import uuid
|
22 |
-
import types
|
23 |
-
import dnnlib
|
24 |
-
|
25 |
-
#----------------------------------------------------------------------------
|
26 |
-
|
27 |
-
_version = 6 # internal version number
|
28 |
-
_decorators = set() # {decorator_class, ...}
|
29 |
-
_import_hooks = [] # [hook_function, ...]
|
30 |
-
_module_to_src_dict = dict() # {module: src, ...}
|
31 |
-
_src_to_module_dict = dict() # {src: module, ...}
|
32 |
-
|
33 |
-
#----------------------------------------------------------------------------
|
34 |
-
|
35 |
-
def persistent_class(orig_class):
|
36 |
-
r"""Class decorator that extends a given class to save its source code
|
37 |
-
when pickled.
|
38 |
-
|
39 |
-
Example:
|
40 |
-
|
41 |
-
from torch_utils import persistence
|
42 |
-
|
43 |
-
@persistence.persistent_class
|
44 |
-
class MyNetwork(torch.nn.Module):
|
45 |
-
def __init__(self, num_inputs, num_outputs):
|
46 |
-
super().__init__()
|
47 |
-
self.fc = MyLayer(num_inputs, num_outputs)
|
48 |
-
...
|
49 |
-
|
50 |
-
@persistence.persistent_class
|
51 |
-
class MyLayer(torch.nn.Module):
|
52 |
-
...
|
53 |
-
|
54 |
-
When pickled, any instance of `MyNetwork` and `MyLayer` will save its
|
55 |
-
source code alongside other internal state (e.g., parameters, buffers,
|
56 |
-
and submodules). This way, any previously exported pickle will remain
|
57 |
-
usable even if the class definitions have been modified or are no
|
58 |
-
longer available.
|
59 |
-
|
60 |
-
The decorator saves the source code of the entire Python module
|
61 |
-
containing the decorated class. It does *not* save the source code of
|
62 |
-
any imported modules. Thus, the imported modules must be available
|
63 |
-
during unpickling, also including `torch_utils.persistence` itself.
|
64 |
-
|
65 |
-
It is ok to call functions defined in the same module from the
|
66 |
-
decorated class. However, if the decorated class depends on other
|
67 |
-
classes defined in the same module, they must be decorated as well.
|
68 |
-
This is illustrated in the above example in the case of `MyLayer`.
|
69 |
-
|
70 |
-
It is also possible to employ the decorator just-in-time before
|
71 |
-
calling the constructor. For example:
|
72 |
-
|
73 |
-
cls = MyLayer
|
74 |
-
if want_to_make_it_persistent:
|
75 |
-
cls = persistence.persistent_class(cls)
|
76 |
-
layer = cls(num_inputs, num_outputs)
|
77 |
-
|
78 |
-
As an additional feature, the decorator also keeps track of the
|
79 |
-
arguments that were used to construct each instance of the decorated
|
80 |
-
class. The arguments can be queried via `obj.init_args` and
|
81 |
-
`obj.init_kwargs`, and they are automatically pickled alongside other
|
82 |
-
object state. A typical use case is to first unpickle a previous
|
83 |
-
instance of a persistent class, and then upgrade it to use the latest
|
84 |
-
version of the source code:
|
85 |
-
|
86 |
-
with open('old_pickle.pkl', 'rb') as f:
|
87 |
-
old_net = pickle.load(f)
|
88 |
-
new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
|
89 |
-
misc.copy_params_and_buffers(old_net, new_net, require_all=True)
|
90 |
-
"""
|
91 |
-
assert isinstance(orig_class, type)
|
92 |
-
if is_persistent(orig_class):
|
93 |
-
return orig_class
|
94 |
-
|
95 |
-
assert orig_class.__module__ in sys.modules
|
96 |
-
orig_module = sys.modules[orig_class.__module__]
|
97 |
-
orig_module_src = _module_to_src(orig_module)
|
98 |
-
|
99 |
-
class Decorator(orig_class):
|
100 |
-
_orig_module_src = orig_module_src
|
101 |
-
_orig_class_name = orig_class.__name__
|
102 |
-
|
103 |
-
def __init__(self, *args, **kwargs):
|
104 |
-
super().__init__(*args, **kwargs)
|
105 |
-
self._init_args = copy.deepcopy(args)
|
106 |
-
self._init_kwargs = copy.deepcopy(kwargs)
|
107 |
-
assert orig_class.__name__ in orig_module.__dict__
|
108 |
-
_check_pickleable(self.__reduce__())
|
109 |
-
|
110 |
-
@property
|
111 |
-
def init_args(self):
|
112 |
-
return copy.deepcopy(self._init_args)
|
113 |
-
|
114 |
-
@property
|
115 |
-
def init_kwargs(self):
|
116 |
-
return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
|
117 |
-
|
118 |
-
def __reduce__(self):
|
119 |
-
fields = list(super().__reduce__())
|
120 |
-
fields += [None] * max(3 - len(fields), 0)
|
121 |
-
if fields[0] is not _reconstruct_persistent_obj:
|
122 |
-
meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
|
123 |
-
fields[0] = _reconstruct_persistent_obj # reconstruct func
|
124 |
-
fields[1] = (meta,) # reconstruct args
|
125 |
-
fields[2] = None # state dict
|
126 |
-
return tuple(fields)
|
127 |
-
|
128 |
-
Decorator.__name__ = orig_class.__name__
|
129 |
-
_decorators.add(Decorator)
|
130 |
-
return Decorator
|
131 |
-
|
132 |
-
#----------------------------------------------------------------------------
|
133 |
-
|
134 |
-
def is_persistent(obj):
|
135 |
-
r"""Test whether the given object or class is persistent, i.e.,
|
136 |
-
whether it will save its source code when pickled.
|
137 |
-
"""
|
138 |
-
try:
|
139 |
-
if obj in _decorators:
|
140 |
-
return True
|
141 |
-
except TypeError:
|
142 |
-
pass
|
143 |
-
return type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
|
144 |
-
|
145 |
-
#----------------------------------------------------------------------------
|
146 |
-
|
147 |
-
def import_hook(hook):
|
148 |
-
r"""Register an import hook that is called whenever a persistent object
|
149 |
-
is being unpickled. A typical use case is to patch the pickled source
|
150 |
-
code to avoid errors and inconsistencies when the API of some imported
|
151 |
-
module has changed.
|
152 |
-
|
153 |
-
The hook should have the following signature:
|
154 |
-
|
155 |
-
hook(meta) -> modified meta
|
156 |
-
|
157 |
-
`meta` is an instance of `dnnlib.EasyDict` with the following fields:
|
158 |
-
|
159 |
-
type: Type of the persistent object, e.g. `'class'`.
|
160 |
-
version: Internal version number of `torch_utils.persistence`.
|
161 |
-
module_src Original source code of the Python module.
|
162 |
-
class_name: Class name in the original Python module.
|
163 |
-
state: Internal state of the object.
|
164 |
-
|
165 |
-
Example:
|
166 |
-
|
167 |
-
@persistence.import_hook
|
168 |
-
def wreck_my_network(meta):
|
169 |
-
if meta.class_name == 'MyNetwork':
|
170 |
-
print('MyNetwork is being imported. I will wreck it!')
|
171 |
-
meta.module_src = meta.module_src.replace("True", "False")
|
172 |
-
return meta
|
173 |
-
"""
|
174 |
-
assert callable(hook)
|
175 |
-
_import_hooks.append(hook)
|
176 |
-
|
177 |
-
#----------------------------------------------------------------------------
|
178 |
-
|
179 |
-
def _reconstruct_persistent_obj(meta):
|
180 |
-
r"""Hook that is called internally by the `pickle` module to unpickle
|
181 |
-
a persistent object.
|
182 |
-
"""
|
183 |
-
meta = dnnlib.EasyDict(meta)
|
184 |
-
meta.state = dnnlib.EasyDict(meta.state)
|
185 |
-
for hook in _import_hooks:
|
186 |
-
meta = hook(meta)
|
187 |
-
assert meta is not None
|
188 |
-
|
189 |
-
assert meta.version == _version
|
190 |
-
module = _src_to_module(meta.module_src)
|
191 |
-
|
192 |
-
assert meta.type == 'class'
|
193 |
-
orig_class = module.__dict__[meta.class_name]
|
194 |
-
decorator_class = persistent_class(orig_class)
|
195 |
-
obj = decorator_class.__new__(decorator_class)
|
196 |
-
|
197 |
-
setstate = getattr(obj, '__setstate__', None)
|
198 |
-
if callable(setstate):
|
199 |
-
setstate(meta.state) # pylint: disable=not-callable
|
200 |
-
else:
|
201 |
-
obj.__dict__.update(meta.state)
|
202 |
-
return obj
|
203 |
-
|
204 |
-
#----------------------------------------------------------------------------
|
205 |
-
|
206 |
-
def _module_to_src(module):
|
207 |
-
r"""Query the source code of a given Python module.
|
208 |
-
"""
|
209 |
-
src = _module_to_src_dict.get(module, None)
|
210 |
-
if src is None:
|
211 |
-
src = inspect.getsource(module)
|
212 |
-
_module_to_src_dict[module] = src
|
213 |
-
_src_to_module_dict[src] = module
|
214 |
-
return src
|
215 |
-
|
216 |
-
def _src_to_module(src):
|
217 |
-
r"""Get or create a Python module for the given source code.
|
218 |
-
"""
|
219 |
-
module = _src_to_module_dict.get(src, None)
|
220 |
-
if module is None:
|
221 |
-
module_name = "_imported_module_" + uuid.uuid4().hex
|
222 |
-
module = types.ModuleType(module_name)
|
223 |
-
sys.modules[module_name] = module
|
224 |
-
_module_to_src_dict[module] = src
|
225 |
-
_src_to_module_dict[src] = module
|
226 |
-
exec(src, module.__dict__) # pylint: disable=exec-used
|
227 |
-
return module
|
228 |
-
|
229 |
-
#----------------------------------------------------------------------------
|
230 |
-
|
231 |
-
def _check_pickleable(obj):
|
232 |
-
r"""Check that the given object is pickleable, raising an exception if
|
233 |
-
it is not. This function is expected to be considerably more efficient
|
234 |
-
than actually pickling the object.
|
235 |
-
"""
|
236 |
-
def recurse(obj):
|
237 |
-
if isinstance(obj, (list, tuple, set)):
|
238 |
-
return [recurse(x) for x in obj]
|
239 |
-
if isinstance(obj, dict):
|
240 |
-
return [[recurse(x), recurse(y)] for x, y in obj.items()]
|
241 |
-
if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
|
242 |
-
return None # Python primitive types are pickleable.
|
243 |
-
if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor']:
|
244 |
-
return None # NumPy arrays and PyTorch tensors are pickleable.
|
245 |
-
if is_persistent(obj):
|
246 |
-
return None # Persistent objects are pickleable, by virtue of the constructor check.
|
247 |
-
return obj
|
248 |
-
with io.BytesIO() as f:
|
249 |
-
pickle.dump(recurse(obj), f)
|
250 |
-
|
251 |
-
#----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/tensoRF_decoder.py
DELETED
@@ -1,346 +0,0 @@
|
|
1 |
-
from .tensor_base import *
|
2 |
-
from .vgg import Encoder, Decoder, UNetDecoder, PlainDecoder
|
3 |
-
from src.style_module.style_module import LearnableIN, SimpleLinearStylizer
|
4 |
-
|
5 |
-
|
6 |
-
class TensorVMSplit(TensorBase):
|
7 |
-
def __init__(self, aabb, gridSize, device, **kargs):
|
8 |
-
super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs)
|
9 |
-
|
10 |
-
def change_to_feature_mod(self, feature_n_comp, device):
|
11 |
-
self.density_line.requires_grad_(False)
|
12 |
-
self.density_plane.requires_grad_(False)
|
13 |
-
self.app_line = None
|
14 |
-
self.app_plane = None
|
15 |
-
self.basis_mat = None
|
16 |
-
self.renderModule = None
|
17 |
-
|
18 |
-
# Both encoder and decoder do not require grad when initialized
|
19 |
-
self.encoder = Encoder().to(device)
|
20 |
-
self.decoder = PlainDecoder().to(device)
|
21 |
-
|
22 |
-
# We need to finetune decoder when training a feature grid
|
23 |
-
self.decoder.requires_grad_(True)
|
24 |
-
|
25 |
-
self.feature_n_comp = feature_n_comp
|
26 |
-
self.init_feature_svd(device)
|
27 |
-
|
28 |
-
def change_to_style_mod(self, device='cuda'):
|
29 |
-
assert self.feature_line is not None, 'Have to be trained in feature mod first!'
|
30 |
-
self.feature_line.requires_grad_(False)
|
31 |
-
self.feature_plane.requires_grad_(False)
|
32 |
-
self.feature_basis_mat.requires_grad_(False)
|
33 |
-
self.decoder.requires_grad_(True)
|
34 |
-
|
35 |
-
self.IN = LearnableIN().to(device)
|
36 |
-
|
37 |
-
# self.stylizer = NearestFeatureTransform()
|
38 |
-
# self.stylizer = LearnableIN(1,256,device)
|
39 |
-
# self.stylizer = AdaAttN(256, 256).to(device)
|
40 |
-
# self.stylizer = AdaAttN_woin(256, 256).to(device)
|
41 |
-
self.stylizer = SimpleLinearStylizer(256).to(device)
|
42 |
-
|
43 |
-
def init_svd_volume(self, res, device):
|
44 |
-
self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)
|
45 |
-
self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)
|
46 |
-
self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)
|
47 |
-
|
48 |
-
def init_feature_svd(self, device):
|
49 |
-
self.feature_plane, self.feature_line = self.init_one_svd(self.feature_n_comp, self.gridSize, 0.1, device)
|
50 |
-
self.feature_basis_mat = torch.nn.Linear(sum(self.feature_n_comp), 256, bias=False).to(device)
|
51 |
-
|
52 |
-
def init_one_svd(self, n_component, gridSize, scale, device):
|
53 |
-
plane_coef, line_coef = [], []
|
54 |
-
for i in range(len(self.vecMode)):
|
55 |
-
vec_id = self.vecMode[i]
|
56 |
-
mat_id_0, mat_id_1 = self.matMode[i]
|
57 |
-
plane_coef.append(torch.nn.Parameter(
|
58 |
-
scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0]))))
|
59 |
-
line_coef.append(
|
60 |
-
torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
|
61 |
-
|
62 |
-
return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)
|
63 |
-
|
64 |
-
def get_optparam_groups(self, lr_init_spatialxyz=0.02, lr_init_network=0.001):
|
65 |
-
grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz},
|
66 |
-
{'params': self.density_plane, 'lr': lr_init_spatialxyz},
|
67 |
-
{'params': self.app_line, 'lr': lr_init_spatialxyz},
|
68 |
-
{'params': self.app_plane, 'lr': lr_init_spatialxyz},
|
69 |
-
{'params': self.basis_mat.parameters(), 'lr': lr_init_network}]
|
70 |
-
if isinstance(self.renderModule, torch.nn.Module):
|
71 |
-
grad_vars += [{'params': self.renderModule.parameters(), 'lr': lr_init_network}]
|
72 |
-
return grad_vars
|
73 |
-
|
74 |
-
def get_optparam_groups_feature_mod(self, lr_init_spatialxyz, lr_init_network):
|
75 |
-
grad_vars = [{'params': self.feature_line, 'lr': lr_init_spatialxyz},
|
76 |
-
{'params': self.feature_plane, 'lr': lr_init_spatialxyz},
|
77 |
-
{'params': self.feature_basis_mat.parameters(), 'lr': lr_init_network},
|
78 |
-
{'params': self.decoder.parameters(), 'lr': lr_init_network}]
|
79 |
-
return grad_vars
|
80 |
-
|
81 |
-
def get_optparam_groups_style_mod(self, lr_init_network, lr_finetune):
|
82 |
-
grad_vars = [
|
83 |
-
{'params': self.stylizer.parameters(), 'lr': lr_init_network},
|
84 |
-
{'params': self.decoder.parameters(), 'lr': lr_finetune},
|
85 |
-
]
|
86 |
-
return grad_vars
|
87 |
-
|
88 |
-
def vectorDiffs(self, vector_comps):
|
89 |
-
total = 0
|
90 |
-
|
91 |
-
for idx in range(len(vector_comps)):
|
92 |
-
n_comp, n_size = vector_comps[idx].shape[1:-1]
|
93 |
-
|
94 |
-
dotp = torch.matmul(vector_comps[idx].view(n_comp, n_size),
|
95 |
-
vector_comps[idx].view(n_comp, n_size).transpose(-1, -2))
|
96 |
-
non_diagonal = dotp.view(-1)[1:].view(n_comp - 1, n_comp + 1)[..., :-1]
|
97 |
-
total = total + torch.mean(torch.abs(non_diagonal))
|
98 |
-
return total
|
99 |
-
|
100 |
-
def vector_comp_diffs(self):
|
101 |
-
return self.vectorDiffs(self.density_line) + self.vectorDiffs(self.app_line)
|
102 |
-
|
103 |
-
def density_L1(self):
|
104 |
-
total = 0
|
105 |
-
for idx in range(len(self.density_plane)):
|
106 |
-
total = total + torch.mean(torch.abs(self.density_plane[idx])) + torch.mean(torch.abs(self.density_line[
|
107 |
-
idx])) # + torch.mean(torch.abs(self.app_plane[idx])) + torch.mean(torch.abs(self.density_plane[idx]))
|
108 |
-
return total
|
109 |
-
|
110 |
-
def TV_loss_density(self, reg):
|
111 |
-
total = 0
|
112 |
-
for idx in range(len(self.density_plane)):
|
113 |
-
total = total + reg(self.density_plane[idx]) * 1e-2 # + reg(self.density_line[idx]) * 1e-3
|
114 |
-
return total
|
115 |
-
|
116 |
-
def TV_loss_app(self, reg):
|
117 |
-
total = 0
|
118 |
-
for idx in range(len(self.app_plane)):
|
119 |
-
total = total + reg(self.app_plane[idx]) * 1e-2 # + reg(self.app_line[idx]) * 1e-3
|
120 |
-
return total
|
121 |
-
|
122 |
-
def TV_loss_feature(self, reg):
|
123 |
-
total = 0
|
124 |
-
for idx in range(len(self.feature_plane)):
|
125 |
-
total = total + reg(self.feature_plane[idx]) + reg(self.feature_line[idx])
|
126 |
-
return total
|
127 |
-
|
128 |
-
def compute_densityfeature(self, xyz_sampled):
|
129 |
-
|
130 |
-
# plane + line basis
|
131 |
-
coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]],
|
132 |
-
xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
|
133 |
-
coordinate_line = torch.stack(
|
134 |
-
(xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
|
135 |
-
coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1,
|
136 |
-
1, 2)
|
137 |
-
|
138 |
-
sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
|
139 |
-
for idx_plane in range(len(self.density_plane)):
|
140 |
-
plane_coef_point = F.grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]],
|
141 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1])
|
142 |
-
line_coef_point = F.grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]],
|
143 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1])
|
144 |
-
sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
|
145 |
-
|
146 |
-
return sigma_feature
|
147 |
-
|
148 |
-
def compute_appfeature(self, xyz_sampled):
|
149 |
-
|
150 |
-
# plane + line basis
|
151 |
-
coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]],
|
152 |
-
xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
|
153 |
-
coordinate_line = torch.stack(
|
154 |
-
(xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
|
155 |
-
coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1,
|
156 |
-
1, 2)
|
157 |
-
|
158 |
-
plane_coef_point, line_coef_point = [], []
|
159 |
-
for idx_plane in range(len(self.app_plane)):
|
160 |
-
plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
|
161 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1]))
|
162 |
-
line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
|
163 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1]))
|
164 |
-
plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
|
165 |
-
|
166 |
-
return self.basis_mat((plane_coef_point * line_coef_point).T)
|
167 |
-
|
168 |
-
def compute_feature(self, xyz_sampled):
|
169 |
-
|
170 |
-
# plane + line basis
|
171 |
-
coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]],
|
172 |
-
xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
|
173 |
-
coordinate_line = torch.stack(
|
174 |
-
(xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
|
175 |
-
coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1,
|
176 |
-
1, 2)
|
177 |
-
|
178 |
-
plane_coef_point, line_coef_point = [], []
|
179 |
-
for idx_plane in range(len(self.feature_plane)):
|
180 |
-
plane_coef_point.append(F.grid_sample(self.feature_plane[idx_plane], coordinate_plane[[idx_plane]],
|
181 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1]))
|
182 |
-
line_coef_point.append(F.grid_sample(self.feature_line[idx_plane], coordinate_line[[idx_plane]],
|
183 |
-
align_corners=True).view(-1, *xyz_sampled.shape[:1]))
|
184 |
-
plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
|
185 |
-
|
186 |
-
return self.feature_basis_mat((plane_coef_point * line_coef_point).T)
|
187 |
-
|
188 |
-
def render_feature_map(self, rays_chunk, s_mean_std_mat=None, is_train=False, ndc_ray=False, N_samples=-1):
|
189 |
-
|
190 |
-
# sample points
|
191 |
-
viewdirs = rays_chunk[:, 3:6]
|
192 |
-
if ndc_ray:
|
193 |
-
xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,
|
194 |
-
N_samples=N_samples)
|
195 |
-
dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
|
196 |
-
rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
|
197 |
-
dists = dists * rays_norm
|
198 |
-
else:
|
199 |
-
xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,
|
200 |
-
N_samples=N_samples)
|
201 |
-
dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
|
202 |
-
|
203 |
-
if self.alphaMask is not None:
|
204 |
-
alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
|
205 |
-
alpha_mask = alphas > 0
|
206 |
-
ray_invalid = ~ray_valid
|
207 |
-
ray_invalid[ray_valid] |= (~alpha_mask)
|
208 |
-
ray_valid = ~ray_invalid
|
209 |
-
|
210 |
-
sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
|
211 |
-
if s_mean_std_mat is not None:
|
212 |
-
features = torch.zeros((*xyz_sampled.shape[:2], self.stylizer.embed_dim), device=xyz_sampled.device)
|
213 |
-
else:
|
214 |
-
features = torch.zeros((*xyz_sampled.shape[:2], 256), device=xyz_sampled.device)
|
215 |
-
|
216 |
-
if ray_valid.any():
|
217 |
-
xyz_sampled = self.normalize_coord(xyz_sampled)
|
218 |
-
sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
|
219 |
-
|
220 |
-
validsigma = self.feature2density(sigma_feature)
|
221 |
-
sigma[ray_valid] = validsigma
|
222 |
-
|
223 |
-
alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
|
224 |
-
|
225 |
-
app_mask = weight > self.rayMarch_weight_thres
|
226 |
-
|
227 |
-
if app_mask.any():
|
228 |
-
valid_features = self.compute_feature(
|
229 |
-
xyz_sampled[app_mask]) # [n_valid_points~40k if not specify nSamples, C=256]
|
230 |
-
|
231 |
-
# transform content on 3d
|
232 |
-
if s_mean_std_mat is not None:
|
233 |
-
valid_features = self.stylizer.transform_content_3D(valid_features.transpose(0, 1)[None, ...])
|
234 |
-
valid_features = valid_features.squeeze(0).transpose(0, 1)
|
235 |
-
|
236 |
-
features[app_mask] = valid_features
|
237 |
-
|
238 |
-
feature_map = torch.sum(weight[..., None] * features, -2)
|
239 |
-
acc_map = torch.sum(weight, -1)
|
240 |
-
|
241 |
-
# style transfer on 2d
|
242 |
-
if s_mean_std_mat is not None:
|
243 |
-
feature_map = self.stylizer.transfer_style_2D(s_mean_std_mat, feature_map.transpose(0, 1)[None, ...],
|
244 |
-
acc_map)
|
245 |
-
feature_map = feature_map.squeeze().transpose(0, 1)
|
246 |
-
|
247 |
-
return feature_map, acc_map
|
248 |
-
|
249 |
-
def render_depth_map(self, rays_chunk, is_train=False, ndc_ray=False, N_samples=-1):
|
250 |
-
|
251 |
-
# sample points
|
252 |
-
viewdirs = rays_chunk[:, 3:6]
|
253 |
-
if ndc_ray:
|
254 |
-
xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,
|
255 |
-
N_samples=N_samples)
|
256 |
-
dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
|
257 |
-
rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
|
258 |
-
dists = dists * rays_norm
|
259 |
-
else:
|
260 |
-
xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,
|
261 |
-
N_samples=N_samples)
|
262 |
-
dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
|
263 |
-
|
264 |
-
if self.alphaMask is not None:
|
265 |
-
alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
|
266 |
-
alpha_mask = alphas > 0
|
267 |
-
ray_invalid = ~ray_valid
|
268 |
-
ray_invalid[ray_valid] |= (~alpha_mask)
|
269 |
-
ray_valid = ~ray_invalid
|
270 |
-
|
271 |
-
sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
|
272 |
-
|
273 |
-
if ray_valid.any():
|
274 |
-
xyz_sampled = self.normalize_coord(xyz_sampled)
|
275 |
-
sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
|
276 |
-
validsigma = self.feature2density(sigma_feature)
|
277 |
-
sigma[ray_valid] = validsigma
|
278 |
-
|
279 |
-
alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
|
280 |
-
|
281 |
-
acc_map = torch.sum(weight, -1)
|
282 |
-
|
283 |
-
depth_map = torch.sum(weight * z_vals, -1)
|
284 |
-
depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
|
285 |
-
|
286 |
-
return depth_map # [n_rays]
|
287 |
-
|
288 |
-
@torch.no_grad()
|
289 |
-
def up_sampling_VM(self, plane_coef, line_coef, res_target):
|
290 |
-
|
291 |
-
for i in range(len(self.vecMode)):
|
292 |
-
vec_id = self.vecMode[i]
|
293 |
-
mat_id_0, mat_id_1 = self.matMode[i]
|
294 |
-
plane_coef[i] = torch.nn.Parameter(
|
295 |
-
F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
|
296 |
-
align_corners=True))
|
297 |
-
line_coef[i] = torch.nn.Parameter(
|
298 |
-
F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
|
299 |
-
|
300 |
-
return plane_coef, line_coef
|
301 |
-
|
302 |
-
@torch.no_grad()
|
303 |
-
def upsample_volume_grid(self, res_target):
|
304 |
-
self.app_plane, self.app_line = self.up_sampling_VM(self.app_plane, self.app_line, res_target)
|
305 |
-
self.density_plane, self.density_line = self.up_sampling_VM(self.density_plane, self.density_line, res_target)
|
306 |
-
|
307 |
-
self.update_stepSize(res_target)
|
308 |
-
print(f'upsamping to {res_target}')
|
309 |
-
|
310 |
-
@torch.no_grad()
|
311 |
-
def shrink(self, new_aabb):
|
312 |
-
print("====> shrinking ...")
|
313 |
-
xyz_min, xyz_max = new_aabb
|
314 |
-
t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
|
315 |
-
# print(new_aabb, self.aabb)
|
316 |
-
# print(t_l, b_r,self.alphaMask.alpha_volume.shape)
|
317 |
-
t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
|
318 |
-
b_r = torch.stack([b_r, self.gridSize]).amin(0)
|
319 |
-
|
320 |
-
for i in range(len(self.vecMode)):
|
321 |
-
mode0 = self.vecMode[i]
|
322 |
-
self.density_line[i] = torch.nn.Parameter(
|
323 |
-
self.density_line[i].data[..., t_l[mode0]:b_r[mode0], :]
|
324 |
-
)
|
325 |
-
self.app_line[i] = torch.nn.Parameter(
|
326 |
-
self.app_line[i].data[..., t_l[mode0]:b_r[mode0], :]
|
327 |
-
)
|
328 |
-
mode0, mode1 = self.matMode[i]
|
329 |
-
self.density_plane[i] = torch.nn.Parameter(
|
330 |
-
self.density_plane[i].data[..., t_l[mode1]:b_r[mode1], t_l[mode0]:b_r[mode0]]
|
331 |
-
)
|
332 |
-
self.app_plane[i] = torch.nn.Parameter(
|
333 |
-
self.app_plane[i].data[..., t_l[mode1]:b_r[mode1], t_l[mode0]:b_r[mode0]]
|
334 |
-
)
|
335 |
-
|
336 |
-
if not torch.all(self.alphaMask.gridSize == self.gridSize):
|
337 |
-
t_l_r, b_r_r = t_l / (self.gridSize - 1), (b_r - 1) / (self.gridSize - 1)
|
338 |
-
correct_aabb = torch.zeros_like(new_aabb)
|
339 |
-
correct_aabb[0] = (1 - t_l_r) * self.aabb[0] + t_l_r * self.aabb[1]
|
340 |
-
correct_aabb[1] = (1 - b_r_r) * self.aabb[0] + b_r_r * self.aabb[1]
|
341 |
-
print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
|
342 |
-
new_aabb = correct_aabb
|
343 |
-
|
344 |
-
newSize = b_r - t_l
|
345 |
-
self.aabb = new_aabb
|
346 |
-
self.update_stepSize((newSize[0], newSize[1], newSize[2]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/autoencoder_asym_kl.py
DELETED
@@ -1,180 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import apply_forward_hook
from .autoencoder_kl import AutoencoderKLOutput
from .modeling_utils import ModelMixin
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder, MaskConditionDecoder


class AsymmetricAutoencoderKL(ModelMixin, ConfigMixin):
    r"""
    Designing a Better Asymmetric VQGAN for StableDiffusion https://arxiv.org/abs/2306.04632 . A VAE model with KL loss
    for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        down_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of down block output channels.
        layers_per_down_block (`int`, *optional*, defaults to `1`):
            Number layers for down block.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        up_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of up block output channels.
        layers_per_up_block (`int`, *optional*, defaults to `1`):
            Number layers for up block.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        norm_num_groups (`int`, *optional*, defaults to `32`):
            Number of groups to use for the first normalization layer in ResNet blocks.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        down_block_out_channels: Tuple[int] = (64,),
        layers_per_down_block: int = 1,
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        up_block_out_channels: Tuple[int] = (64,),
        layers_per_up_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=down_block_out_channels,
            layers_per_block=layers_per_down_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = MaskConditionDecoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=up_block_out_channels,
            layers_per_block=layers_per_up_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(
        self,
        z: torch.FloatTensor,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        z = self.post_quant_conv(z)
        dec = self.decoder(z, image, mask)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self,
        z: torch.FloatTensor,
        image: Optional[torch.FloatTensor] = None,
        mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        decoded = self._decode(z, image, mask).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def forward(
        self,
        sample: torch.FloatTensor,
        mask: Optional[torch.FloatTensor] = None,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            mask (`torch.FloatTensor`, *optional*, defaults to `None`): Optional inpainting mask.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z, sample, mask).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
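
For reference, a minimal usage sketch of the class removed above. The import path follows the deleted file's location; the channel configuration, tensor shapes, and mask are illustrative assumptions, not taken from this commit.

import torch
from diffusers.models.autoencoder_asym_kl import AsymmetricAutoencoderKL  # path as in the deleted file

# Illustrative configuration (assumed, not from this commit): 4 resolution levels.
vae = AsymmetricAutoencoderKL(
    down_block_types=("DownEncoderBlock2D",) * 4,
    down_block_out_channels=(128, 256, 512, 512),
    up_block_types=("UpDecoderBlock2D",) * 4,
    up_block_out_channels=(128, 256, 512, 512),
    latent_channels=4,
)
image = torch.randn(1, 3, 256, 256)          # input image batch
mask = torch.ones(1, 1, 256, 256)            # inpainting mask fed to the mask-conditioned decoder
posterior = vae.encode(image).latent_dist    # DiagonalGaussianDistribution over latents
latents = posterior.sample()
decoded = vae.decode(latents, image=image, mask=mask).sample  # reconstructed image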
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py
DELETED
@@ -1 +0,0 @@
from .pipeline_latent_diffusion_uncond import LDMPipeline
spaces/AriaMei/TTSdemo/transforms.py
DELETED
@@ -1,193 +0,0 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):

    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet

def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
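
For reference, a minimal sketch of how the deleted spline transform would be called in a normalizing-flow setting. Tensor shapes and the linear-tails configuration below are illustrative assumptions, not taken from this commit.

import torch
# Assumes piecewise_rational_quadratic_transform from the deleted module above is in scope.
batch, num_bins = 4, 10
inputs = torch.rand(batch) * 2 - 1                   # values in [-1, 1], inside the tail bound
w = torch.randn(batch, num_bins)                     # unnormalized bin widths
h = torch.randn(batch, num_bins)                     # unnormalized bin heights
d = torch.randn(batch, num_bins - 1)                 # unnormalized knot derivatives (padded for linear tails)
outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs, w, h, d, inverse=False, tails="linear", tail_bound=1.0
)
# `outputs` is the warped value; `logabsdet` is log|dy/dx|, accumulated in flow likelihoods.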
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/models/unet.py
DELETED
@@ -1,437 +0,0 @@
# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py

import json
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging

from .resnet import InflatedConv3d
from .unet_blocks import (
    CrossAttnDownBlock3D,
    CrossAttnUpBlock3D,
    DownBlock3D,
    UNetMidBlock3DCrossAttn,
    UpBlock3D,
    get_down_block,
    get_up_block,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class UNet3DConditionOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet3DConditionModel(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        center_input_sample: bool = False,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ),
        mid_block_type: str = "UNetMidBlock3DCrossAttn",
        up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # input
        self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

        # mid
        if mid_block_type == "UNetMidBlock3DCrossAttn":
            self.mid_block = UNetMidBlock3DCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

        # count how many layers upsample the videos
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final layer
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=reversed_attention_head_dim[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_slicable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_slicable_dims(module)

        num_slicable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_slicable_layers * [1]

        slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children.
        # Any children which exposes the set_attention_slice method
        # gets the message
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
            module.gradient_checkpointing = value

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        class_labels: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[UNet3DConditionOutput, Tuple]:
        r"""
        Args:
            sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
            encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.

        Returns:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        # By default samples have to be AT least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).
        # However, the upsampling interpolation output size can be forced to fit any upsampling size
        # on the fly if necessary.
        default_overall_up_factor = 2**self.num_upsamplers

        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # prepare attention_mask
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        # pre-process
        sample = self.conv_in(sample)

        # down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # mid
        sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask)

        # up
        for i, upsample_block in enumerate(self.up_blocks):
            is_final_block = i == len(self.up_blocks) - 1

            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # if we have not reached the final block and need to forward the
            # upsample size, we do it here
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample = upsample_block(
                    hidden_states=sample,
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
                )
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if not return_dict:
            return (sample,)

        return UNet3DConditionOutput(sample=sample)

    @classmethod
    def from_pretrained_2d(cls, pretrained_model_path, subfolder=None):
        if subfolder is not None:
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)

        config_file = os.path.join(pretrained_model_path, "config.json")
        if not os.path.isfile(config_file):
            raise RuntimeError(f"{config_file} does not exist")
        with open(config_file, "r") as f:
            config = json.load(f)
        config["_class_name"] = cls.__name__
        config["down_block_types"] = [
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ]
        config["up_block_types"] = ["UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"]

        from diffusers.utils import WEIGHTS_NAME

        model = cls.from_config(config)
        model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
        if not os.path.isfile(model_file):
            raise RuntimeError(f"{model_file} does not exist")
        state_dict = torch.load(model_file, map_location="cpu")
        for k, v in model.state_dict().items():
            if "_temp." in k:
                state_dict.update({k: v})
        model.load_state_dict(state_dict)

        return model
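
For reference, a hedged sketch of how the deleted 3D UNet was typically constructed and driven. The checkpoint path is a placeholder and the 768-dim text embedding assumes an SD 1.x-style config; neither comes from this commit.

import torch
# Placeholder path; from_pretrained_2d reads the 2D UNet config and inflates the block types to 3D.
unet = UNet3DConditionModel.from_pretrained_2d("path/to/stable-diffusion-checkpoint", subfolder="unet")

latents = torch.randn(1, 4, 8, 64, 64)     # (batch, channels, frames, height, width) noisy video latents
timestep = torch.tensor([10])              # diffusion timestep, broadcast to the batch inside forward()
text_emb = torch.randn(1, 77, 768)         # encoder hidden states; 768 assumes an SD 1.x text encoder
noise_pred = unet(latents, timestep, encoder_hidden_states=text_emb).sample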
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/vl_utils.py
DELETED
@@ -1,100 +0,0 @@
import os
import random
from typing import List

import torch


def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
    """construct a map such that positive_map[i,j] = True iff box i is associated to token j
    Input:
        - tokenized:
            - input_ids: Tensor[1, ntokens]
            - attention_mask: Tensor[1, ntokens]
        - token_span: list with length num_boxes.
            - each item: [start_idx, end_idx]
    """
    positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)
    for j, tok_list in enumerate(token_span):
        for (beg, end) in tok_list:
            beg_pos = tokenized.char_to_token(beg)
            end_pos = tokenized.char_to_token(end - 1)
            if beg_pos is None:
                try:
                    beg_pos = tokenized.char_to_token(beg + 1)
                    if beg_pos is None:
                        beg_pos = tokenized.char_to_token(beg + 2)
                except:
                    beg_pos = None
            if end_pos is None:
                try:
                    end_pos = tokenized.char_to_token(end - 2)
                    if end_pos is None:
                        end_pos = tokenized.char_to_token(end - 3)
                except:
                    end_pos = None
            if beg_pos is None or end_pos is None:
                continue

            assert beg_pos is not None and end_pos is not None
            if os.environ.get("SHILONG_DEBUG_ONLY_ONE_POS", None) == "TRUE":
                positive_map[j, beg_pos] = 1
                break
            else:
                positive_map[j, beg_pos : end_pos + 1].fill_(1)

    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)


def build_captions_and_token_span(cat_list, force_lowercase):
    """
    Return:
        captions: str
        cat2tokenspan: dict
            {
                'dog': [[0, 2]],
                ...
            }
    """

    cat2tokenspan = {}
    captions = ""
    for catname in cat_list:
        class_name = catname
        if force_lowercase:
            class_name = class_name.lower()
        if "/" in class_name:
            class_name_list: List = class_name.strip().split("/")
            class_name_list.append(class_name)
            class_name: str = random.choice(class_name_list)

        tokens_positive_i = []
        subnamelist = [i.strip() for i in class_name.strip().split(" ")]
        for subname in subnamelist:
            if len(subname) == 0:
                continue
            if len(captions) > 0:
                captions = captions + " "
            strat_idx = len(captions)
            end_idx = strat_idx + len(subname)
            tokens_positive_i.append([strat_idx, end_idx])
            captions = captions + subname

        if len(tokens_positive_i) > 0:
            captions = captions + " ."
            cat2tokenspan[class_name] = tokens_positive_i

    return captions, cat2tokenspan


def build_id2posspan_and_caption(category_dict: dict):
    """Build id2pos_span and caption from category_dict

    Args:
        category_dict (dict): category_dict
    """
    cat_list = [item["name"].lower() for item in category_dict]
    id2catname = {item["id"]: item["name"].lower() for item in category_dict}
    caption, cat2posspan = build_captions_and_token_span(cat_list, force_lowercase=True)
    id2posspan = {catid: cat2posspan[catname] for catid, catname in id2catname.items()}
    return id2posspan, caption
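
For reference, a minimal sketch of the deleted phrase-grounding helpers in use. The BERT tokenizer choice and the category list are assumptions for illustration, not taken from this commit.

from transformers import AutoTokenizer
# Assumes the three helpers from the deleted module above are in scope.
categories = [{"id": 1, "name": "dog"}, {"id": 2, "name": "ice cream"}]
id2posspan, caption = build_id2posspan_and_caption(categories)
# caption -> "dog . ice cream ."; id2posspan maps each category id to character spans in the caption.

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")   # fast tokenizer, provides char_to_token
tokenized = tokenizer(caption, return_tensors="pt")
token_spans = [id2posspan[1], id2posspan[2]]
positive_map = create_positive_map_from_span(tokenized, token_spans, max_text_len=256)
# positive_map[i] is a normalized indicator over the tokens belonging to phrase i.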
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/connection.py
DELETED
@@ -1,572 +0,0 @@
from __future__ import absolute_import

import datetime
import logging
import os
import re
import socket
import warnings
from socket import error as SocketError
from socket import timeout as SocketTimeout

from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException  # noqa: F401
from .util.proxy import create_proxy_ssl_context

try:  # Compiled with SSL?
    import ssl

    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):  # Platform-specific: No SSL.
    ssl = None

    class BaseSSLError(BaseException):
        pass


try:
    # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
    ConnectionError = ConnectionError
except NameError:
    # Python 2
    class ConnectionError(Exception):
        pass


try:  # Python 3:
    # Not a no-op, we're adding this to the namespace so it can be imported.
    BrokenPipeError = BrokenPipeError
except NameError:  # Python 2:

    class BrokenPipeError(Exception):
        pass


from ._collections import HTTPHeaderDict  # noqa (historical, removed in v2)
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    NewConnectionError,
    SubjectAltNameWarning,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
from .util.ssl_ import (
    assert_fingerprint,
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname

log = logging.getLogger(__name__)

port_by_scheme = {"http": 80, "https": 443}

# When it comes time to update this value as a part of regular maintenance
# (ie test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2022, 1, 1)

_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")


class HTTPConnection(_HTTPConnection, object):
    """
    Based on :class:`http.client.HTTPConnection` but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

    - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.
    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
      defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
      Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

      For example, if you wish to enable TCP Keep Alive in addition to the defaults,
      you might pass:

      .. code-block:: python

         HTTPConnection.default_socket_options + [
             (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
         ]

      Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port = port_by_scheme["http"]

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    #: Whether this connection verifies the host's certificate.
    is_verified = False

    #: Whether this proxy connection (if used) verifies the proxy host's
    #: certificate.
    proxy_is_verified = None

    def __init__(self, *args, **kw):
        if not six.PY2:
            kw.pop("strict", None)

        # Pre-set source_address.
        self.source_address = kw.get("source_address")

        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop("socket_options", self.default_socket_options)

        # Proxy options provided by the user.
        self.proxy = kw.pop("proxy", None)
        self.proxy_config = kw.pop("proxy_config", None)

        _HTTPConnection.__init__(self, *args, **kw)

    @property
    def host(self):
        """
        Getter method to remove any trailing dots that indicate the hostname is an FQDN.

        In general, SSL certificates don't include the trailing dot indicating a
        fully-qualified domain name, and thus, they don't validate properly when
        checked against a domain name that includes the dot. In addition, some
        servers may not expect to receive the trailing dot when provided.

        However, the hostname with trailing dot is critical to DNS resolution; doing a
        lookup with the trailing dot will properly only resolve the appropriate FQDN,
        whereas a lookup without a trailing dot will search the system's search domain
        list. Thus, it's important to keep the original host around for use only in
        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
        actual TCP connection across which we're going to send HTTP requests).
        """
        return self._dns_host.rstrip(".")

    @host.setter
    def host(self, value):
        """
        Setter for the `host` property.

        We assume that only urllib3 uses the _dns_host attribute; httplib itself
        only uses `host`, and it seems reasonable that other libraries follow suit.
        """
        self._dns_host = value

    def _new_conn(self):
        """Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw["source_address"] = self.source_address

        if self.socket_options:
            extra_kw["socket_options"] = self.socket_options

        try:
            conn = connection.create_connection(
                (self._dns_host, self.port), self.timeout, **extra_kw
            )

        except SocketTimeout:
            raise ConnectTimeoutError(
                self,
                "Connection to %s timed out. (connect timeout=%s)"
                % (self.host, self.timeout),
            )

        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e
            )

        return conn

    def _is_using_tunnel(self):
        # Google App Engine's httplib does not define _tunnel_host
        return getattr(self, "_tunnel_host", None)

    def _prepare_conn(self, conn):
        self.sock = conn
        if self._is_using_tunnel():
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

    def putrequest(self, method, url, *args, **kwargs):
        """ """
        # Empty docstring because the indentation of CPython's implementation
        # is broken but we don't want this method in our documentation.
        match = _CONTAINS_CONTROL_CHAR_RE.search(method)
        if match:
            raise ValueError(
                "Method cannot contain non-token characters %r (found at least %r)"
                % (method, match.group())
            )

        return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)

    def putheader(self, header, *values):
        """ """
        if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
            _HTTPConnection.putheader(self, header, *values)
        elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
            raise ValueError(
                "urllib3.util.SKIP_HEADER only supports '%s'"
                % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
            )

    def request(self, method, url, body=None, headers=None):
        # Update the inner socket's timeout value to send the request.
        # This only triggers if the connection is re-used.
        if getattr(self, "sock", None) is not None:
            self.sock.settimeout(self.timeout)

        if headers is None:
            headers = {}
        else:
            # Avoid modifying the headers passed into .request()
            headers = headers.copy()
        if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
            headers["User-Agent"] = _get_default_user_agent()
        super(HTTPConnection, self).request(method, url, body=body, headers=headers)

    def request_chunked(self, method, url, body=None, headers=None):
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        headers = headers or {}
        header_keys = set([six.ensure_str(k.lower()) for k in headers])
        skip_accept_encoding = "accept-encoding" in header_keys
        skip_host = "host" in header_keys
        self.putrequest(
            method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
        )
        if "user-agent" not in header_keys:
            self.putheader("User-Agent", _get_default_user_agent())
        for header, value in headers.items():
            self.putheader(header, value)
        if "transfer-encoding" not in header_keys:
            self.putheader("Transfer-Encoding", "chunked")
        self.endheaders()

        if body is not None:
            stringish_types = six.string_types + (bytes,)
            if isinstance(body, stringish_types):
                body = (body,)
            for chunk in body:
                if not chunk:
                    continue
                if not isinstance(chunk, bytes):
                    chunk = chunk.encode("utf8")
                len_str = hex(len(chunk))[2:]
                to_send = bytearray(len_str.encode())
                to_send += b"\r\n"
                to_send += chunk
                to_send += b"\r\n"
                self.send(to_send)

        # After the if clause, to always have a closed body
        self.send(b"0\r\n\r\n")


class HTTPSConnection(HTTPConnection):
    """
    Many of the parameters to this constructor are passed to the underlying SSL
    socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
    """

    default_port = port_by_scheme["https"]

    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ca_cert_data = None
    ssl_version = None
    assert_fingerprint = None
    tls_in_tls_required = False

    def __init__(
        self,
        host,
        port=None,
        key_file=None,
        cert_file=None,
        key_password=None,
        strict=None,
        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
        ssl_context=None,
        server_hostname=None,
        **kw
    ):

        HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file
        self.key_password = key_password
        self.ssl_context = ssl_context
        self.server_hostname = server_hostname

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = "https"

    def set_cert(
        self,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        ca_cert_data=None,
    ):
        """
        This method should only be called once, before the connection is used.
        """
        # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
        # have an SSLContext object in which case we'll use its verify_mode.
        if cert_reqs is None:
            if self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode
            else:
                cert_reqs = resolve_cert_reqs(None)

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
        self.ca_cert_data = ca_cert_data

    def connect(self):
        # Add certificate verification
        self.sock = conn = self._new_conn()
        hostname = self.host
        tls_in_tls = False

        if self._is_using_tunnel():
            if self.tls_in_tls_required:
                self.sock = conn = self._connect_tls_proxy(hostname, conn)
                tls_in_tls = True

            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        server_hostname = hostname
        if self.server_hostname is not None:
            server_hostname = self.server_hostname

        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn(
                (
                    "System time is way off (before {0}). This will probably "
                    "lead to SSL verification errors"
                ).format(RECENT_DATE),
                SystemTimeWarning,
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        default_ssl_context = False
        if self.ssl_context is None:
            default_ssl_context = True
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)

        # Try to load OS default certs if none are given.
        # Works well on Windows (requires Python3.4+)
        if (
            not self.ca_certs
            and not self.ca_cert_dir
            and not self.ca_cert_data
            and default_ssl_context
            and hasattr(context, "load_default_certs")
        ):
            context.load_default_certs()

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            key_password=self.key_password,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            server_hostname=server_hostname,
            ssl_context=context,
            tls_in_tls=tls_in_tls,
        )

        # If we're using all defaults and the connection
        # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
        # for the host.
        if (
            default_ssl_context
            and self.ssl_version is None
            and hasattr(self.sock, "version")
            and self.sock.version() in {"TLSv1", "TLSv1.1"}
        ):
            warnings.warn(
                "Negotiating TLSv1/TLSv1.1 by default is deprecated "
                "and will be disabled in urllib3 v2.0.0. Connecting to "
                "'%s' with '%s' can be enabled by explicitly opting-in "
                "with 'ssl_version'" % (self.host, self.sock.version()),
                DeprecationWarning,
            )

        if self.assert_fingerprint:
            assert_fingerprint(
                self.sock.getpeercert(binary_form=True), self.assert_fingerprint
            )
        elif (
            context.verify_mode != ssl.CERT_NONE
            and not getattr(context, "check_hostname", False)
            and self.assert_hostname is not False
        ):
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get("subjectAltName", ()):
                warnings.warn(
                    (
                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
                        "`commonName` for now. This feature is being removed by major browsers and "
                        "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
                        "for details.)".format(hostname)
                    ),
                    SubjectAltNameWarning,
                )
            _match_hostname(cert, self.assert_hostname or server_hostname)

        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED
            or self.assert_fingerprint is not None
        )

    def _connect_tls_proxy(self, hostname, conn):
        """
        Establish a TLS connection to the proxy using the provided SSL context.
        """
        proxy_config = self.proxy_config
        ssl_context = proxy_config.ssl_context
        if ssl_context:
            # If the user provided a proxy context, we assume CA and client
            # certificates have already been set
            return ssl_wrap_socket(
                sock=conn,
                server_hostname=hostname,
                ssl_context=ssl_context,
            )

        ssl_context = create_proxy_ssl_context(
            self.ssl_version,
            self.cert_reqs,
            self.ca_certs,
            self.ca_cert_dir,
            self.ca_cert_data,
        )

        # If no cert was provided, use only the default options for server
        # certificate validation
        socket = ssl_wrap_socket(
            sock=conn,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            ca_cert_data=self.ca_cert_data,
            server_hostname=hostname,
            ssl_context=ssl_context,
        )

        if ssl_context.verify_mode != ssl.CERT_NONE and not getattr(
            ssl_context, "check_hostname", False
        ):
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = socket.getpeercert()
            if not cert.get("subjectAltName", ()):
                warnings.warn(
                    (
                        "Certificate for {0} has no `subjectAltName`, falling back to check for a "
                        "`commonName` for now. This feature is being removed by major browsers and "
                        "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
                        "for details.)".format(hostname)
|
527 |
-
),
|
528 |
-
SubjectAltNameWarning,
|
529 |
-
)
|
530 |
-
_match_hostname(cert, hostname)
|
531 |
-
|
532 |
-
self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED
|
533 |
-
return socket
|
534 |
-
|
535 |
-
|
536 |
-
def _match_hostname(cert, asserted_hostname):
|
537 |
-
# Our upstream implementation of ssl.match_hostname()
|
538 |
-
# only applies this normalization to IP addresses so it doesn't
|
539 |
-
# match DNS SANs so we do the same thing!
|
540 |
-
stripped_hostname = asserted_hostname.strip("u[]")
|
541 |
-
if is_ipaddress(stripped_hostname):
|
542 |
-
asserted_hostname = stripped_hostname
|
543 |
-
|
544 |
-
try:
|
545 |
-
match_hostname(cert, asserted_hostname)
|
546 |
-
except CertificateError as e:
|
547 |
-
log.warning(
|
548 |
-
"Certificate did not match expected hostname: %s. Certificate: %s",
|
549 |
-
asserted_hostname,
|
550 |
-
cert,
|
551 |
-
)
|
552 |
-
# Add cert to exception and reraise so client code can inspect
|
553 |
-
# the cert when catching the exception, if they want to
|
554 |
-
e._peer_cert = cert
|
555 |
-
raise
|
556 |
-
|
557 |
-
|
558 |
-
def _get_default_user_agent():
|
559 |
-
return "python-urllib3/%s" % __version__
|
560 |
-
|
561 |
-
|
562 |
-
class DummyConnection(object):
|
563 |
-
"""Used to detect a failed ConnectionCls import."""
|
564 |
-
|
565 |
-
pass
|
566 |
-
|
567 |
-
|
568 |
-
if not ssl:
|
569 |
-
HTTPSConnection = DummyConnection # noqa: F811
|
570 |
-
|
571 |
-
|
572 |
-
VerifiedHTTPSConnection = HTTPSConnection
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AtomdffAI/wechatgpt4atom/channel/channel.py
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Message sending channel abstract class
|
3 |
-
"""
|
4 |
-
|
5 |
-
from bridge.bridge import Bridge
|
6 |
-
|
7 |
-
class Channel(object):
|
8 |
-
def startup(self):
|
9 |
-
"""
|
10 |
-
init channel
|
11 |
-
"""
|
12 |
-
raise NotImplementedError
|
13 |
-
|
14 |
-
def handle(self, msg):
|
15 |
-
"""
|
16 |
-
process received msg
|
17 |
-
:param msg: message object
|
18 |
-
"""
|
19 |
-
raise NotImplementedError
|
20 |
-
|
21 |
-
def send(self, msg, receiver):
|
22 |
-
"""
|
23 |
-
send message to user
|
24 |
-
:param msg: message content
|
25 |
-
:param receiver: receiver channel account
|
26 |
-
:return:
|
27 |
-
"""
|
28 |
-
raise NotImplementedError
|
29 |
-
|
30 |
-
def build_reply_content(self, query, context=None):
|
31 |
-
return Bridge().fetch_reply_content(query, context)
spaces/AtomdffAI/wechatgpt4atom/docker/build.debian.sh
DELETED
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-CHATGPT_ON_WECHAT_TAG=1.0.2
-
-docker build -f Dockerfile.debian \
-             --build-arg CHATGPT_ON_WECHAT_VER=$CHATGPT_ON_WECHAT_TAG \
-             -t zhayujie/chatgpt-on-wechat .
-
-docker tag zhayujie/chatgpt-on-wechat zhayujie/chatgpt-on-wechat:$CHATGPT_ON_WECHAT_TAG-debian
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/checkpoint/__init__.py
DELETED
@@ -1,10 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# File:
-
-
-from . import catalog as _UNUSED  # register the handler
-from .detection_checkpoint import DetectionCheckpointer
-from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
-
-__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
spaces/BAAI/AltDiffusion-m9/app.py
DELETED
@@ -1,330 +0,0 @@
-import io
-import re
-import imp
-import time
-import json
-import base64
-import requests
-import gradio as gr
-import ui_functions as uifn
-from css_and_js import js, call_JS
-from PIL import Image, PngImagePlugin, ImageChops
-
-url_host = "https://flagstudio.baai.ac.cn"
-token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiMGY4M2QxMDg3N2MzMTFlZGFiYzYwZmU5ZGFjMTI1ZDMiLCJhcHBfbmFtZSI6IndlYiIsImlkZW50aXR5X3R5cGUiOiIyIiwidXNlcl9yb2xlIjoiMiIsImp0aSI6ImE3YTE1N2I3LTllNTItNDllMS04YzA0LWEzZmI5YjZiZjNlYSIsIm5iZiI6MTY3MDU5MTcwMSwiZXhwIjoxOTg1OTUxNzAxLCJpYXQiOjE2NzA1OTE3MDF9.OcfGayna-wr_5mo4LT6OJHSCokna8vqKSmmCftFUsx8"
-
-def read_content(file_path: str) -> str:
-    """read the content of target file
-    """
-    with open(file_path, 'r', encoding='utf-8') as f:
-        content = f.read()
-
-    return content
-
-def filter_content(raw_style: str):
-    if "(" in raw_style:
-        i = raw_style.index("(")
-    else :
-        i = -1
-
-    if i == -1:
-        return raw_style
-    else :
-        return raw_style[:i]
-
-def upload_image(img):
-    url = url_host + "/api/v1/image/get-upload-link"
-    headers = {"token": token}
-    r = requests.post(url, json={}, headers=headers)
-    if r.status_code != 200:
-        raise gr.Error(r.reason)
-    head_res = r.json()
-    if head_res["code"] != 0:
-        raise gr.Error("Unknown error")
-    image_id = head_res["data"]["image_id"]
-    image_url = head_res["data"]["url"]
-    image_headers = head_res["data"]["headers"]
-
-    imgBytes = io.BytesIO()
-    img.save(imgBytes, "PNG")
-    imgBytes = imgBytes.getvalue()
-
-    r = requests.put(image_url, data=imgBytes, headers=image_headers)
-    if r.status_code != 200:
-        raise gr.Error(r.reason)
-    return image_id, image_url
-
-def post_reqest(seed, prompt, width, height, image_num, img=None, mask=None):
-    data = {
-        "type": "gen-image",
-        "parameters": {
-            "width": width,  # output height width
-            "height": height,  # output image height
-            "prompts": [prompt],
-        }
-    }
-    data["parameters"]["seed"] = int(seed)
-    if img is not None:
-        # Upload image
-        image_id, image_url = upload_image(img)
-        data["parameters"]["init_image"] = {
-            "image_id": image_id,
-            "url": image_url,
-            "width": img.width,
-            "height": img.height,
-        }
-        if mask is not None:
-            # Upload mask
-            extrama = mask.convert("L").getextrema()
-            if extrama[1] > 0:
-                mask_id, mask_url = upload_image(mask)
-                data["parameters"]["mask_image"] = {
-                    "image_id": mask_id,
-                    "url": mask_url,
-                    "width": mask.width,
-                    "height": mask.height,
-                }
-    headers = {"token": token}
-
-    # Send create task request
-    all_task_data = []
-    url = url_host+"/api/v1/task/create"
-    for _ in range(image_num):
-        r = requests.post(url, json=data, headers=headers)
-        if r.status_code != 200:
-            raise gr.Error(r.reason)
-        create_res = r.json()
-        if create_res['code'] == 3002:
-            raise gr.Error("Inappropriate prompt detected.")
-        elif create_res['code'] != 0:
-            raise gr.Error("Unknown error")
-        all_task_data.append(create_res["data"])
-
-    # Get result
-    url = url_host+"/api/v1/task/status"
-    images = []
-    while True:
-        if len(all_task_data) <= 0:
-            return images
-        for i in range(len(all_task_data)-1, -1, -1):
-            data = all_task_data[i]
-            r = requests.post(url, json=data, headers=headers)
-            if r.status_code != 200:
-                raise gr.Error(r.reason)
-            res = r.json()
-            if res["code"] == 6002:
-                # Running
-                continue
-            if res["code"] == 6005:
-                raise gr.Error("NSFW image detected.")
-            elif res["code"] == 0:
-                # Finished
-                for img_info in res["data"]["images"]:
-                    img_res = requests.get(img_info["url"])
-                    images.append(Image.open(io.BytesIO(img_res.content)).convert("RGB"))
-                del all_task_data[i]
-            else:
-                raise gr.Error(f"Error code: {res['code']}")
-        time.sleep(1)
-
-def request_images(raw_text, class_draw, style_draw, batch_size, w, h, seed):
-    if filter_content(class_draw) != "国画":
-        if filter_content(class_draw) != "通用":
-            raw_text = raw_text + f",{filter_content(class_draw)}"
-
-        for sty in style_draw:
-            raw_text = raw_text + f",{filter_content(sty)}"
-    elif filter_content(class_draw) == "国画":
-        raw_text = raw_text + ",国画,水墨画,大作,黑白,高清,传统"
-    print(f"raw text is {raw_text}")
-
-    images = post_reqest(seed, raw_text, w, h, int(batch_size))
-
-    return images
-
-
-def img2img(prompt, image_and_mask):
-    if image_and_mask["image"].width <= image_and_mask["image"].height:
-        width = 512
-        height = int((width/image_and_mask["image"].width)*image_and_mask["image"].height)
-    else:
-        height = 512
-        width = int((height/image_and_mask["image"].height)*image_and_mask["image"].width)
-    return post_reqest(0, prompt, width, height, 1, image_and_mask["image"], image_and_mask["mask"])
-
-
-examples = [
-    '水墨蝴蝶和牡丹花,国画',
-    '苍劲有力的墨竹,国画',
-    '暴风雨中的灯塔',
-    '机械小松鼠,科学幻想',
-    '中国水墨山水画,国画',
-    "Lighthouse in the storm",
-    "A dog",
-    "Landscape by 张大千",
-    "A tiger 长了兔子耳朵",
-    "A baby bird 铅笔素描",
-]
-
-if __name__ == "__main__":
-    block = gr.Blocks(css=read_content('style.css'))
-
-    with block:
-        gr.HTML(read_content("header.html"))
-        with gr.Tabs(elem_id='tabss') as tabs:
-
-            with gr.TabItem("文生图(Text-to-img)", id='txt2img_tab'):
-
-                with gr.Group():
-                    with gr.Box():
-                        with gr.Row().style(mobile_collapse=False, equal_height=True):
-                            text = gr.Textbox(
-                                label="Prompt",
-                                show_label=False,
-                                max_lines=1,
-                                placeholder="Input text(输入文字)",
-                                interactive=True,
-                            ).style(
-                                border=(True, False, True, True),
-                                rounded=(True, False, False, True),
-                                container=False,
-                            )
-
-                            btn = gr.Button("Generate image").style(
-                                margin=False,
-                                rounded=(True, True, True, True),
-                            )
-                    with gr.Row().style(mobile_collapse=False, equal_height=True):
-                        class_draw = gr.Radio(choices=["通用(general)","国画(traditional Chinese painting)",], value="通用(general)", show_label=True, label='生成类型(type)')
-                        # class_draw = gr.Dropdown(["通用(general)", "国画(traditional Chinese painting)",
-                        #                           "照片,摄影(picture photography)", "油画(oil painting)",
-                        #                           "铅笔素描(pencil sketch)", "CG",
-                        #                           "水彩画(watercolor painting)", "水墨画(ink and wash)",
-                        #                           "插画(illustrations)", "3D", "图生图(img2img)"],
-                        #                          label="生成类型(type)",
-                        #                          show_label=True,
-                        #                          value="通用(general)")
-                    with gr.Row().style(mobile_collapse=False, equal_height=True):
-                        style_draw = gr.CheckboxGroup(["蒸汽朋克(steampunk)", "电影摄影风格(film photography)",
-                                                       "概念艺术(concept art)", "Warming lighting",
-                                                       "Dramatic lighting", "Natural lighting",
-                                                       "虚幻引擎(unreal engine)", "4k", "8k",
-                                                       "充满细节(full details)"],
-                                                      label="画面风格(style)",
-                                                      show_label=True,
-                                                      )
-                    with gr.Row().style(mobile_collapse=False, equal_height=True):
-                        # sample_size = gr.Slider(minimum=1,
-                        #                         maximum=4,
-                        #                         step=1,
-                        #                         label="生成数量(number)",
-                        #                         show_label=True,
-                        #                         interactive=True,
-                        #                         )
-                        sample_size = gr.Radio(choices=["1","2","3","4"], value="1", show_label=True, label='生成数量(number)')
-                        seed = gr.Number(0, label='seed', interactive=True)
-                    with gr.Row().style(mobile_collapse=False, equal_height=True):
-                        w = gr.Slider(512,1024,value=512, step=64, label="width")
-                        h = gr.Slider(512,1024,value=512, step=64, label="height")
-
-                gallery = gr.Gallery(
-                    label="Generated images", show_label=False, elem_id="gallery"
-                ).style(grid=[2,2])
-                gr.Examples(examples=examples, fn=request_images, inputs=text, outputs=gallery, examples_per_page=100)
-                with gr.Row().style(mobile_collapse=False, equal_height=True):
-                    img_choices = gr.Dropdown(["图片1(img1)"],label='请选择一张图片发送到图生图',show_label=True,value="图片1(img1)")
-                with gr.Row().style(mobile_collapse=False, equal_height=True):
-                    output_txt2img_copy_to_input_btn = gr.Button("发送图片到图生图(Sent the image to img2img)").style(
-                        margin=False,
-                        rounded=(True, True, True, True),
-                    )
-
-                with gr.Row():
-                    prompt = gr.Markdown("提示(Prompt):", visible=False)
-                with gr.Row():
-                    move_prompt_zh = gr.Markdown("请移至图生图部分进行编辑(拉到顶部)", visible=False)
-                with gr.Row():
-                    move_prompt_en = gr.Markdown("Please move to the img2img section for editing(Pull to the top)", visible=False)
-
-
-
-                text.submit(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery)
-                btn.click(request_images, inputs=[text, class_draw, style_draw, sample_size, w, h, seed], outputs=gallery)
-
-                sample_size.change(
-                    fn=uifn.change_img_choices,
-                    inputs=[sample_size],
-                    outputs=[img_choices]
-                )
-
-            with gr.TabItem("图生图(Img-to-Img)", id="img2img_tab"):
-                with gr.Row(elem_id="prompt_row"):
-                    img2img_prompt = gr.Textbox(label="Prompt",
-                                                elem_id='img2img_prompt_input',
-                                                placeholder="神奇的森林,流淌的河流.",
-                                                lines=1,
-                                                max_lines=1,
-                                                value="",
-                                                show_label=False).style()
-
-                    img2img_btn_mask = gr.Button("Generate", variant="primary", visible=False,
-                                                 elem_id="img2img_mask_btn")
-                    img2img_btn_editor = gr.Button("Generate", variant="primary", elem_id="img2img_edit_btn")
-                gr.Markdown('#### 输入图像')
-                with gr.Row().style(equal_height=False):
-                    #with gr.Column():
-                    img2img_image_mask = gr.Image(
-                        value=None,
-                        source="upload",
-                        interactive=True,
-                        tool="sketch",
-                        type='pil',
-                        elem_id="img2img_mask",
-                        image_mode="RGBA"
-                    )
-                gr.Markdown('#### 编辑后的图片')
-                with gr.Row():
-                    output_img2img_gallery = gr.Gallery(label="Images", elem_id="img2img_gallery_output").style(
-                        grid=[4,4,4] )
-                with gr.Row():
-                    gr.Markdown('提示(prompt):')
-                with gr.Row():
-                    gr.Markdown('请选择一张图像掩盖掉一部分区域,并输入文本描述')
-                with gr.Row():
-                    gr.Markdown('Please select an image to cover up a part of the area and enter a text description.')
-                gr.Markdown('# 编辑设置',visible=False)
-
-
-        output_txt2img_copy_to_input_btn.click(
-            uifn.copy_img_to_input,
-            [gallery, img_choices],
-            [tabs, img2img_image_mask, move_prompt_zh, move_prompt_en, prompt]
-        )
-
-
-        img2img_func = img2img
-        img2img_inputs = [img2img_prompt, img2img_image_mask]
-        img2img_outputs = [output_img2img_gallery]
-
-        img2img_btn_mask.click(
-            img2img_func,
-            img2img_inputs,
-            img2img_outputs
-        )
-
-        def img2img_submit_params():
-            return (img2img_func,
-                    img2img_inputs,
-                    img2img_outputs)
-
-        img2img_btn_editor.click(*img2img_submit_params())
-
-        # GENERATE ON ENTER
-        img2img_prompt.submit(None, None, None,
-                              _js=call_JS("clickFirstVisibleButton",
-                                          rowId="prompt_row"))
-
-        gr.HTML(read_content("footer.html"))
-        # gr.Image('./contributors.png')
-
-    block.queue(max_size=512, concurrency_count=256).launch()
spaces/BartPoint/VoiceChange/infer_pack/modules/F0Predictor/F0Predictor.py
DELETED
@@ -1,16 +0,0 @@
-class F0Predictor(object):
-    def compute_f0(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length]
-        """
-        pass
-
-    def compute_f0_uv(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
-        """
-        pass
spaces/Benson/text-generation/Examples/Asistente De Descarga.md
DELETED
@@ -1,75 +0,0 @@
-<br />
-<h1>¿Qué es una descarga de asistente? </h1>
-<p>Una descarga de asistente es un tipo de archivo que le ayuda a instalar o actualizar un programa de software o controlador de dispositivo en su computadora. [...]</p>
-<h2>asistente de descarga</h2><br /><p><b><b>Download File</b> »»» <a href="https://bltlly.com/2v6Kzy">https://bltlly.com/2v6Kzy</a></b></p><br /><br />
-<h2>Tipos de descargas de asistente</h2>
[...]
-<h3>Descargas de asistente para desarrollo de software</h3>
-<h4>Asistente - Una herramienta de gestión de documentos de código abierto</h4>
-<h4>Asistente de configuración - Una guía de instalación de descarga para BizTalk Server</h4>
[...]
-<h3>Descargas de asistente para actualizaciones de software y controladores</h3>
-<h4>Centro de descargas de Microsoft - La fuente oficial para actualizaciones de software de Microsoft y controladores</h4>
-<h4>Driver Wizard - Una herramienta para escanear, descargar y actualizar controladores automáticamente</h4>
[...]
-<h2>Cómo usar un asistente Descargar</h2>
[...]
-<h2>Beneficios y desventajas de las descargas del asistente</h2>
[...]
-<h2>Conclusión</h2>
-<h3>Preguntas frecuentes</h3>
[...]
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Bombsquad 1.4.153 Apk.md
DELETED
@@ -1,61 +0,0 @@
-<br />
-<h1>Bombsquad Full Mod APK: Un juego multijugador divertido y explosivo</h1>
[...]
-<h2>¿Qué es Bombsquad? </h2>
-<h2>bombsquad 1.4.153 apk</h2><br /><p><b><b>Download Zip</b> 🌟 <a href="https://bltlly.com/2v6Jgi">https://bltlly.com/2v6Jgi</a></b></p><br /><br />
-<h3>Características de Bombsquad</h3>
-<h4>- Multijugador local/en red de 8 jugadores</h4>
-<h4>- Varios modos de juego y mini-juegos</h4>
-<h4>- Avanzada ragdoll física y explosiones</h4>
-<h4>- Personajes y potenciadores personalizables</h4>
-<h4>- Soporte para pantallas táctiles y controladores</h4>
[...]
-<h2>¿Qué es Bombsquad Full Mod APK? </h2>
-<h3>Beneficios de Bombsquad Full Mod APK</h3>
-<h4>- Desbloqueado todos los caracteres y mapas</h4>
-<h4>- Entradas y bombas ilimitadas</h4>
-<h4>- No hay anuncios e interrupciones</h4>
-<h4>- Gráficos y rendimiento mejorados</h4>
[...]
-<h2>Cómo descargar e instalar Bombsquad Full Mod APK? </h2>
-<h3>Pasos para descargar e instalar Bombsquad Full Mod APK</h3>
-<h4>- Habilitar fuentes desconocidas en el dispositivo</h4>
-<h4>- Descargar el archivo Bombsquad Full Mod APK de una fuente de confianza</h4>
-<h4>- Localizar e instalar el archivo Bombsquad Full Mod APK en su dispositivo</h4>
-<h4>- Iniciar el juego y disfrutar de las características de mod</h4>
[...]
-<h2>Conclusión</h2>
-<h3>Preguntas frecuentes</h3>
[...]
-</table></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Blessin/movie-poster-generator/app.py
DELETED
@@ -1,58 +0,0 @@
-import gradio as gr
-from gradio import components
-import openai
-from PIL import Image, ImageDraw
-import requests
-from io import BytesIO
-
-genre_descriptions = {
-    "Horror": "Dark, eerie, and suspenseful ambiance.",
-    "Sci-Fi": "Futuristic, technological, and other-worldly themes.",
-    "Romance": "Warm, tender, and emotionally evocative imagery.",
-    "Adventure": "Exhilarating, action-packed, and exploratory vibes.",
-    "Comedy": "Light-hearted, whimsical, and humorous elements.",
-    "Drama": "Intense, emotional, and thought-provoking scenarios.",
-    "Fantasy": "Magical, mythical, and dream-like settings.",
-    "Action": "Dynamic, high-energy, and thrilling scenes.",
-    "Mystery": "Intriguing, puzzling, and enigmatic atmosphere.",
-    "Thriller": "Tense, exciting, and adrenaline-inducing compositions.",
-}
-
-def generate_poster(api_key, genre, title):
-    openai.api_key = api_key
-    genre_description = genre_descriptions.get(genre, "")
-    prompt = (f"Create a visually striking movie poster for a {genre} film titled '{title}'. "
-              f"The poster should encapsulate the essence and atmosphere of a typical {genre} movie, "
-              f"without including any text or titles. The design should be original, imaginative and "
-              f"should instantly communicate the genre to the viewer. {genre_description}")
-
-    response = openai.Image.create(
-        prompt=prompt,
-        n=1,
-        size="1024x1024"
-    )
-    image_url = response['data'][0]['url']
-    response = requests.get(image_url)
-    img = Image.open(BytesIO(response.content))
-
-    return img
-
-def main():
-    genre_options = [
-        "Horror", "Sci-Fi", "Romance", "Adventure",
-        "Comedy", "Drama", "Fantasy", "Action",
-        "Mystery", "Thriller"
-    ]
-    iface = gr.Interface(
-        fn=generate_poster,
-        inputs=[
-            components.Textbox(label="API Key", type="password"),
-            components.Dropdown(label="Genre", choices=genre_options),
-            components.Textbox(label="Title", placeholder="e.g., 'The Haunting Shadows'")
-        ],
-        outputs=components.Image(label="Generated Poster", type="pil"),
-    )
-    iface.launch()
-
-if __name__ == "__main__":
-    main()
spaces/Bong15/Rewrite/app.py
DELETED
@@ -1,60 +0,0 @@
-from parrot import Parrot
-import torch
-import warnings
-warnings.filterwarnings("ignore")
-#import nltk #next stage, when executing multiple sentences
-
-import streamlit as st
-
-# '''
-# uncomment to get reproducable paraphrase generations
-# def random_state(seed):
-#     torch.manual_seed(seed)
-#     if torch.cuda.is_available():
-#         torch.cuda.manual_seed_all(seed)
-
-# random_state(1234)
-# '''
-
-# #Init models (make sure you init ONLY once if you integrate this to your code)
-
-
-@st.cache(allow_output_mutation=True)
-def load_model():
-    # Fetch & load model
-    parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5")
-    return parrot
-parrot = load_model()
-
-st.title("Let's Rewrite your sentence!")
-
-
-input_phrase = st.text_input("Input your text here:")
-option = st.selectbox('Do you want to preserve some of the original words?',
-                      ('Yes', 'No'))
-if option == 'Yes':
-    oc = False
-else:
-    oc= True
-
-
-if st.button('Submit Text!'):
-    st.header('Input')
-    st.write(f" {input_phrase}")
-    st.text("--"*30)
-    st.header('Output')
-    output_phrases = parrot.augment(input_phrase=input_phrase,do_diverse=oc)
-    if output_phrases is not None:
-        for phrases in output_phrases:
-            score = phrases[1]
-            sentence = phrases[0]
-            if score > 0:
-                st.write(sentence)
-            else:
-                st.write("Sorry! No sentences were found with a good score!")
-    else:
-        st.write("Sorry! No sentences were found with a good score!")
-
-
-st.header("Feedback")
-st.write("[Kindly consider providing feedback!](https://forms.gle/97st7g2n9NNpqnXw5)")
spaces/BraydenMoore/MARCI-NFL-Betting/Dockerfile
DELETED
@@ -1,31 +0,0 @@
-
-# Use the official lightweight Python image.
-# https://hub.docker.com/_/python
-FROM python:3.11-slim
-
-# Allow statements and log messages to immediately appear in the logs
-ENV PYTHONUNBUFFERED True
-
-# Copy local code to the container image.
-ENV APP_HOME /app
-WORKDIR $APP_HOME
-COPY . ./
-
-# Install production dependencies.
-RUN pip install -r requirements.txt
-
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH
-
-WORKDIR $APP_HOME
-
-COPY --chown=user . $HOME/app
-
-# Run the web service on container startup. Here we use the gunicorn
-# webserver, with one worker process and 8 threads.
-# For environments with multiple CPU cores, increase the number of workers
-# to be equal to the cores available.
-# Timeout is set to 0 to disable the timeouts of the workers to allow Cloud Run to handle instance scaling.
-CMD exec gunicorn --bind 0.0.0.0:7860 --workers 4 --threads 8 --timeout 0 main:app
spaces/CVPR/LIVE/pybind11/tests/test_tagbased_polymorphic.py
DELETED
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-from pybind11_tests import tagbased_polymorphic as m
-
-
-def test_downcast():
-    zoo = m.create_zoo()
-    assert [type(animal) for animal in zoo] == [
-        m.Labrador, m.Dog, m.Chihuahua, m.Cat, m.Panther
-    ]
-    assert [animal.name for animal in zoo] == [
-        "Fido", "Ginger", "Hertzl", "Tiger", "Leo"
-    ]
-    zoo[1].sound = "woooooo"
-    assert [dog.bark() for dog in zoo[:3]] == [
-        "Labrador Fido goes WOOF!",
-        "Dog Ginger goes woooooo",
-        "Chihuahua Hertzl goes iyiyiyiyiyi and runs in circles"
-    ]
-    assert [cat.purr() for cat in zoo[3:]] == ["mrowr", "mrrrRRRRRR"]
-    zoo[0].excitement -= 1000
-    assert zoo[0].excitement == 14000
spaces/CVPR/LIVE/thrust/dependencies/cub/CONTRIBUTING.md
DELETED
@@ -1,366 +0,0 @@
-# Table of Contents
-
-1. [Contributing to CUB](#contributing-to-cub)
-1. [CMake Options](#cmake-options)
-1. [Development Model](#development-model)
-
-# Contributing to CUB
-
-CUB uses Github to manage all open-source development, including bug tracking,
-pull requests, and design discussions. This document details how to get
-started as a CUB contributor.
-
-An overview of this process is:
-
-1. [Clone the CUB repository](#clone-the-cub-repository)
-1. [Setup a fork of CUB](#setup-a-fork-of-cub)
-1. [Setup your environment](#setup-your-environment)
-1. [Create a development branch](#create-a-development-branch)
-1. [Local development loop](#local-development-loop)
-1. [Push development branch to your fork](#push-development-branch-to-your-fork)
-1. [Create pull request](#create-pull-request)
-1. [Address feedback and update pull request](#address-feedback-and-update-pull-request)
-1. [When your PR is approved...](#when-your-pr-is-approved)
-
-## Clone the CUB Repository
-
-To get started, clone the main repository to your local computer:
-
-```
-git clone https://github.com/thrust/cub.git
-cd cub
-```
-
-## Setup a Fork of CUB
-
-You'll need a fork of CUB on Github to create a pull request. To setup your
-fork:
-
-1. Create a Github account (if needed)
-2. Go to [the CUB Github page](https://github.com/thrust/cub)
-3. Click "Fork" and follow any prompts that appear.
-
-Once your fork is created, setup a new remote repo in your local CUB clone:
-
-```
-git remote add github-fork [email protected]:<GITHUB_USERNAME>/cub.git
-```
-
-## Setup Your Environment
-
-### Git Environment
-
-If you haven't already, this is a good time to tell git who you are. This
-information is used to fill out authorship information on your git commits.
-
-```
-git config --global user.name "John Doe"
-git config --global user.email [email protected]
-```
-
-### Configure CMake builds
-
-CUB uses [CMake](https://www.cmake.org) for its developer build system. To
-configure, build, and test your checkout of CUB with default settings:
-
-```
-# Create build directory:
-mkdir build
-cd build
-
-# Configure -- use one of the following:
-cmake .. # Command line interface.
-ccmake .. # ncurses GUI (Linux only)
-cmake-gui # Graphical UI, set source/build directories in the app
-
-# Build:
-cmake --build . -j <num jobs> # invokes make (or ninja, etc)
-
-# Run tests and examples:
-ctest
-```
-
-See [CMake Options](#cmake-options) for details on customizing the build.
-
-## Create a Development Branch
-
-All work should be done in a development branch (also called a "topic branch")
-and not directly in the `master` branch. This makes it easier to manage multiple
-in-progress patches at once, and provides a descriptive label for your patch
-as it passes through the review system.
-
-To create a new branch based on the current `master`:
-
-```
-# Checkout local master branch:
-cd /path/to/cub/sources
-git checkout master
-
-# Sync local master branch with github:
-git pull
-
-# Create a new branch named `my_descriptive_branch_name` based on master:
-git checkout -b my_descriptive_branch_name
-
-# Verify that the branch has been created and is currently checked out:
-git branch
-```
-
-CUB branch names should follow a particular pattern:
-
-- For new features, name the branch `feature/<name>`
-- For bugfixes associated with a github issue, use `bug/github/<bug-description>-<bug-id>`
-- Internal nvidia and gitlab bugs should use `nvidia` or `gitlab` in place of
-  `github`.
-
-## Local Development Loop
-
-### Edit, Build, Test, Repeat
-
-Once the topic branch is created, you're all set to start working on CUB
-code. Make some changes, then build and test them:
-
-```
-# Implement changes:
-cd /path/to/cub/sources
-emacs cub/some_file.cuh # or whatever editor you prefer
-
-# Create / update a unit test for your changes:
-emacs tests/some_test.cu
-
-# Check that everything builds and tests pass:
-cd /path/to/cub/build/directory
-cmake --build . -j <num_jobs> # or make, ninja, etc
-ctest
-```
-
-### Creating a Commit
-
-Once you're satisfied with your patch, commit your changes:
-
-```
-# Manually add changed files and create a commit:
-cd /path/to/cub
-git add cub/some_file.cuh
-git add tests/some_test.cu
-git commit
-
-# Or, if possible, use git-gui to review your changes while building your patch:
-git gui
-```
-
-#### Writing a Commit Message
-
-Your commit message will communicate the purpose and rationale behind your
-patch to other developers, and will be used to populate the initial description
-of your Github pull request.
-
-When writing a commit message, the following standard format should be used,
-since tools in the git ecosystem are designed to parse this correctly:
-
-```
-First line of commit message is a short summary (<80 char)
-<Second line left blank>
-Detailed description of change begins on third line. This portion can
-span multiple lines, try to manually wrap them at something reasonable.
-
-Blank lines can be used to separate multiple paragraphs in the description.
-
-If your patch is associated with another pull request or issue in the main
-CUB repository, you should reference it with a `#` symbol, e.g.
-#1023 for issue 1023.
-
-For issues / pull requests in a different github repo, reference them using
-the full syntax, e.g. thrust/thrust#4 for issue 4 in the thrust/thrust repo.
-
-Markdown is recommended for formatting more detailed messages, as these will
-be nicely rendered on Github, etc.
-```
-
-## Push Development Branch to your Fork
-
-Once you've committed your changes to a local development branch, it's time to
-push them to your fork:
-
-```
-cd /path/to/cub/checkout
-git checkout my_descriptive_branch_name # if not already checked out
-git push --set-upstream github-fork my_descriptive_branch_name
-```
-
-`--set-upstream github-fork` tells git that future pushes/pulls on this branch
-should target your `github-fork` remote by default.
-
-## Create Pull Request
-
-To create a pull request for your freshly pushed branch, open your github fork
-in a browser by going to `https://www.github.com/<GITHUB_USERNAME>/cub`. A
-prompt may automatically appear asking you to create a pull request if you've
-recently pushed a branch.
-
-If there's no prompt, go to "Code" > "Branches" and click the appropriate
-"New pull request" button for your branch.
-
-If you would like a specific developer to review your patch, feel free to
-request them as a reviewer at this time.
-
-The CUB team will review your patch, test it on NVIDIA's internal CI, and
-provide feedback.
-
-## Address Feedback and Update Pull Request
-
-If the reviewers request changes to your patch, use the following process to
-update the pull request:
-
-```
-# Make changes:
-cd /path/to/cub/sources
-git checkout my_descriptive_branch_name
-emacs cub/some_file.cuh
-emacs tests/some_test.cu
-
-# Build + test
-cd /path/to/thrust/build/directory
-cmake --build . -j <num jobs>
-ctest
-
-# Amend commit:
-cd /path/to/cub/sources
-git add cub/some_file.cuh
-git add tests/some_test.cu
-git commit --amend
-# Or
-git gui # Check the "Amend Last Commit" box
-
-# Update the branch on your fork:
-git push -f
-```
-
-At this point, the pull request should show your recent changes.
-
-## When Your PR is Approved
-
-Once your pull request is approved by the CUB team, no further action is
-needed from you. We will handle integrating it since we must coordinate changes
-to `master` with NVIDIA's internal perforce repository.
-
-# CMake Options
-
-A CUB build is configured using CMake options. These may be passed to CMake
-using
-
-```
-cmake -D<option_name>=<value> /path/to/cub/sources
-```
-
-or configured interactively with the `ccmake` or `cmake-gui` interfaces.
-
-The configuration options for CUB are:
-
-- `CMAKE_BUILD_TYPE={Release, Debug, RelWithDebInfo, MinSizeRel}`
-  - Standard CMake build option. Default: `RelWithDebInfo`
-- `CUB_ENABLE_HEADER_TESTING={ON, OFF}`
-  - Whether to test compile public headers. Default is `ON`.
-- `CUB_ENABLE_TESTING={ON, OFF}`
-  - Whether to build unit tests. Default is `ON`.
-- `CUB_ENABLE_EXAMPLES={ON, OFF}`
-  - Whether to build examples. Default is `ON`.
-- `CUB_ENABLE_DIALECT_CPPXX={ON, OFF}`
-  - Toggle whether a specific C++ dialect will be targeted.
-  - Multiple dialects may be targeted in a single build.
-  - Possible values of `XX` are `{11, 14, 17}`.
-  - By default, only C++14 is enabled.
-- `CUB_ENABLE_COMPUTE_XX={ON, OFF}`
-  - Controls the targeted CUDA architecture(s)
-  - Multiple options may be selected when using NVCC as the CUDA compiler.
-  - Valid values of `XX` are:
-    `{35, 37, 50, 52, 53, 60, 61, 62, 70, 72, 75, 80}`
-  - Default value depends on `CUB_DISABLE_ARCH_BY_DEFAULT`:
-- `CUB_ENABLE_COMPUTE_FUTURE={ON, OFF}`
-  - If enabled, CUDA objects will target the most recent virtual architecture
-    in addition to the real architectures specified by the
-    `CUB_ENABLE_COMPUTE_XX` options.
-  - Default value depends on `CUB_DISABLE_ARCH_BY_DEFAULT`:
-- `CUB_DISABLE_ARCH_BY_DEFAULT={ON, OFF}`
-  - When `ON`, all `CUB_ENABLE_COMPUTE_*` options are initially `OFF`.
-  - Default: `OFF` (meaning all architectures are enabled by default)
-- `CUB_ENABLE_TESTS_WITH_RDC={ON, OFF}`
-  - Whether to enable Relocatable Device Code when building tests.
-    Default is `OFF`.
-- `CUB_ENABLE_EXAMPLES_WITH_RDC={ON, OFF}`
-  - Whether to enable Relocatable Device Code when building examples.
-    Default is `OFF`.
-
-# Development Model
-
-The following is a description of the basic development process that CUB follows. This is a living
-document that will evolve as our process evolves.
-
-CUB is distributed in three ways:
-
-* On GitHub.
-* In the NVIDIA HPC SDK.
-* In the CUDA Toolkit.
-
-## Trunk Based Development
-
-CUB uses [trunk based development](https://trunkbaseddevelopment.com). There is a single long-lived
-branch called `master`. Engineers may create branches for feature development. Such branches always
-merge into `master`. There are no release branches. Releases are produced by taking a snapshot of
-`master` ("snapping"). After a release has been snapped from `master`, it will never be changed.
-
-## Repositories
-
-As CUB is developed both on GitHub and internally at NVIDIA, there are three main places where code lives:
-
-* The Source of Truth, the [public CUB repository](https://github.com/thrust/cub), referred to as
-  `github` later in this document.
-* An internal GitLab repository, referred to as `gitlab` later in this document.
-* An internal Perforce repository, referred to as `perforce` later in this document.
-
-## Versioning
-
-CUB has its own versioning system for releases, independent of the versioning scheme of the NVIDIA
-HPC SDK or the CUDA Toolkit.
-
-Today, CUB version numbers have a specific [semantic meaning](https://semver.org/).
-Releases prior to 1.10.0 largely, but not strictly, followed these semantic meanings.
-
-The version number for a CUB release uses the following format: `MMM.mmm.ss-ppp`, where:
-
-* `CUB_VERSION_MAJOR`/`MMM`: Major version, up to 3 decimal digits. It is incremented
-  when the fundamental nature of the library evolves, leading to widespread changes across the
-  entire library interface with no guarantee of API, ABI, or semantic compatibility with former
-  versions.
-* `CUB_VERSION_MINOR`/`mmm`: Minor version, up to 3 decimal digits. It is incremented when
-  breaking API, ABI, or semantic changes are made.
-* `CUB_VERSION_SUBMINOR`/`ss`: Subminor version, up to 2 decimal digits. It is incremented
-  when notable new features or bug fixes or features that are API, ABI, and semantic backwards
-  compatible are added.
-* `CUB_PATCH_NUMBER`/`ppp`: Patch number, up to 3 decimal digits. It is incremented if any
-  change in the repo whatsoever is made and no other version component has been incremented.
-
-The `<cub/version.h>` header defines `CUB_*` macros for all of the version components mentioned
-above. Additionally, a `CUB_VERSION` macro is defined, which is an integer literal containing all
-of the version components except for `CUB_PATCH_NUMBER`.
-
-## Branches and Tags
-
-The following tag names are used in the CUB project:
-
-* `github/nvhpc-X.Y`: the tag that directly corresponds to what has been shipped in the NVIDIA HPC SDK release X.Y.
-* `github/cuda-X.Y`: the tag that directly corresponds to what has been shipped in the CUDA Toolkit release X.Y.
-* `github/A.B.C`: the tag that directly corresponds to a CUB version A.B.C.
-
-The following branch names are used in the CUB project:
-
-* `github/master`: the Source of Truth development branch of CUB.
-* `github/old-master`: the old Source of Truth branch, before unification of public and internal repositories.
-* `github/feature/<name>`: feature branch for a feature under development.
-* `github/bug/<bug-system>/<bug-description>-<bug-id>`: bug fix branch, where `bug-system` is `github` or `nvidia`.
-* `gitlab/master`: mirror of `github/master`.
-* `perforce/private`: mirrored `github/master`, plus files necessary for internal NVIDIA testing systems.
-
-On the rare occasion that we cannot do work in the open, for example when developing a change specific to an
-unreleased product, these branches may exist on `gitlab` instead of `github`. By default, everything should be
-in the open on `github` unless there is a strong motivation for it to not be open.
spaces/CVPR/LIVE/thrust/thrust/detail/allocator/malloc_allocator.h
DELETED
@@ -1,52 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/type_traits/pointer_traits.h>
-#include <thrust/detail/allocator/tagged_allocator.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-template<typename T, typename System, typename Pointer>
-  class malloc_allocator
-    : public thrust::detail::tagged_allocator<
-        T, System, Pointer
-      >
-{
-  private:
-    typedef thrust::detail::tagged_allocator<
-      T, System, Pointer
-    > super_t;
-
-  public:
-    typedef typename super_t::pointer pointer;
-    typedef typename super_t::size_type size_type;
-
-    pointer allocate(size_type cnt);
-
-    void deallocate(pointer p, size_type n);
-};
-
-} // end detail
-} // end thrust
-
-#include <thrust/detail/allocator/malloc_allocator.inl>
-
spaces/CVPR/LIVE/thrust/thrust/detail/select_system.h
DELETED
@@ -1,85 +0,0 @@
-/*
- * Copyright 2008-2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/cpp11_required.h>
-
-#if THRUST_CPP_DIALECT >= 2011
-
-#include <thrust/detail/type_deduction.h>
-#include <thrust/type_traits/remove_cvref.h>
-#include <thrust/system/detail/generic/select_system.h>
-
-namespace thrust
-{
-
-namespace detail
-{
-
-// We need a way to compute the return type of `select_system`, which is found
-// by using `thrust::system::detail::generic::select_system` and then making an
-// ADL call. We have no trait that defines the return type. With the
-// limitations of C++11 return type deduction, we need to be able to stick all
-// of that into `decltype`. So, we put the using statement into a detail
-// namespace, and then implement the generic dispatch function in that
-// namespace.
-
-namespace select_system_detail
-{
-
-using thrust::system::detail::generic::select_system;
-
-struct select_system_fn final
-{
-  __thrust_exec_check_disable__
-  template <typename DerivedPolicy0>
-  __host__ __device__
-  auto operator()(
-    thrust::detail::execution_policy_base<DerivedPolicy0> const& exec0
-  ) const
-  THRUST_DECLTYPE_RETURNS(
-    select_system(
-      thrust::detail::derived_cast(thrust::detail::strip_const(exec0))
-    )
-  )
-
-  __thrust_exec_check_disable__
-  template <typename DerivedPolicy0, typename DerivedPolicy1>
-  __host__ __device__
-  auto operator()(
-    thrust::detail::execution_policy_base<DerivedPolicy0> const& exec0
-  , thrust::detail::execution_policy_base<DerivedPolicy1> const& exec1
-  ) const
-  THRUST_DECLTYPE_RETURNS(
-    select_system(
-      thrust::detail::derived_cast(thrust::detail::strip_const(exec0))
-    , thrust::detail::derived_cast(thrust::detail::strip_const(exec1))
-    )
-  )
-};
-
-} // namespace select_system_detail
-
-THRUST_INLINE_CONSTANT select_system_detail::select_system_fn select_system{};
-
-} // detail
-
-} // end namespace thrust
-
-#endif // THRUST_CPP_DIALECT >= 2011
-
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/time_counter.py
DELETED
@@ -1,62 +0,0 @@
-import json
-import time
-
-
-class TimeCounter:
-    def __init__(self) -> None:
-        pass
-
-    def clear(self):
-        self.timedict = {}
-        self.basetime = time.perf_counter()
-
-    def timeit(self, name):
-        nowtime = time.perf_counter() - self.basetime
-        self.timedict[name] = nowtime
-        self.basetime = time.perf_counter()
-
-
-class TimeHolder:
-    def __init__(self) -> None:
-        self.timedict = {}
-
-    def update(self, _timedict: dict):
-        for k, v in _timedict.items():
-            if k not in self.timedict:
-                self.timedict[k] = AverageMeter(name=k, val_only=True)
-            self.timedict[k].update(val=v)
-
-    def final_res(self):
-        return {k: v.avg for k, v in self.timedict.items()}
-
-    def __str__(self):
-        return json.dumps(self.final_res(), indent=2)
-
-
-class AverageMeter(object):
-    """Computes and stores the average and current value"""
-
-    def __init__(self, name, fmt=":f", val_only=False):
-        self.name = name
-        self.fmt = fmt
-        self.val_only = val_only
-        self.reset()
-
-    def reset(self):
-        self.val = 0
-        self.avg = 0
-        self.sum = 0
-        self.count = 0
-
-    def update(self, val, n=1):
-        self.val = val
-        self.sum += val * n
-        self.count += n
-        self.avg = self.sum / self.count
-
-    def __str__(self):
-        if self.val_only:
-            fmtstr = "{name} {val" + self.fmt + "}"
-        else:
-            fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
-        return fmtstr.format(**self.__dict__)
spaces/CikeyQI/Yunzai/Yunzai/lib/puppeteer/puppeteer.js
DELETED
@@ -1,23 +0,0 @@
-import Renderer from '../renderer/loader.js'
-
-/**
- * Temporarily kept for compatibility with manually importing puppeteer.js
- * Will be gradually deprecated later
- * Only provides screenshot and multi-page (sliced) screenshot functions
- */
-let renderer = Renderer.getRenderer()
-renderer.screenshot = async (name, data) => {
-  let img = await renderer.render(name, data)
-  return img ? segment.image(img) : img
-}
-renderer.screenshots = async (name, data) => {
-  data.multiPage = true
-  let imgs = await renderer.render(name, data) || []
-  let ret = []
-  for (let img of imgs) {
-    ret.push(img ? segment.image(img) : img)
-  }
-  return ret.length > 0 ? ret : false
-}
-
-export default renderer
spaces/CognitiveLabs/GPT-4-Vision-Chat/README.md
DELETED
@@ -1,10 +0,0 @@
----
-title: GPT 4 Vision Chat
-emoji: 🏞️ 💬
-colorFrom: green
-colorTo: purple
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-0ee118f2.js
DELETED
@@ -1,2 +0,0 @@
import{S as M,e as N,s as O,N as z,k,O as Q,K as S,p as j,o as w,M as E,ap as T,Q as A,z as C,v as B,A as q,x as P,a1 as L,B as V,am as W,P as X,R as Y,F as g,ak as r,E as Z,ae as y,h as D,j as F,q as p,r as x,t as K}from"./index-3370be2a.js";/* empty css */import{B as $}from"./Button-89624748.js";import{B as ee}from"./BlockTitle-bcf8c05e.js";import"./Info-5611e10f.js";function te(t){let e;return{c(){e=X(t[1])},m(l,s){j(l,e,s)},p(l,s){s&2&&Y(e,l[1])},d(l){l&&q(e)}}}function le(t){let e,l,s,a,o,c,h;return l=new ee({props:{show_label:t[4],info:t[2],$$slots:{default:[te]},$$scope:{ctx:t}}}),{c(){e=z("label"),k(l.$$.fragment),s=Q(),a=z("input"),S(a,"type","color"),a.disabled=t[3],S(a,"class","svelte-56zyyb"),S(e,"class","block")},m(_,f){j(_,e,f),w(l,e,null),E(e,s),E(e,a),T(a,t[0]),o=!0,c||(h=[A(a,"blur",t[6]),A(a,"input",t[7])],c=!0)},p(_,[f]){const m={};f&16&&(m.show_label=_[4]),f&4&&(m.info=_[2]),f&1026&&(m.$$scope={dirty:f,ctx:_}),l.$set(m),(!o||f&8)&&(a.disabled=_[3]),f&1&&T(a,_[0])},i(_){o||(C(l.$$.fragment,_),o=!0)},o(_){B(l.$$.fragment,_),o=!1},d(_){_&&q(e),P(l),c=!1,L(h)}}}function se(t,e,l){let{value:s="#000000"}=e,{value_is_output:a=!1}=e,{label:o}=e,{info:c=void 0}=e,{disabled:h=!1}=e,{show_label:_=!0}=e;const f=V();function m(){f("change",s),a||f("input")}W(()=>{l(5,a=!1)});function d(u){g.call(this,t,u)}function n(){s=this.value,l(0,s)}return t.$$set=u=>{"value"in u&&l(0,s=u.value),"value_is_output"in u&&l(5,a=u.value_is_output),"label"in u&&l(1,o=u.label),"info"in u&&l(2,c=u.info),"disabled"in u&&l(3,h=u.disabled),"show_label"in u&&l(4,_=u.show_label)},t.$$.update=()=>{t.$$.dirty&1&&m()},[s,o,c,h,_,a,d,n]}class ie extends M{constructor(e){super(),N(this,e,se,le,O,{value:0,value_is_output:5,label:1,info:2,disabled:3,show_label:4})}}function ne(t){let e,l,s,a,o,c;const h=[t[11]];let _={};for(let n=0;n<h.length;n+=1)_=Z(_,h[n]);e=new y({props:_});function f(n){t[13](n)}function m(n){t[14](n)}let d={label:t[2],info:t[3],show_label:t[7],disabled:t[12]==="static"};return t[0]!==void 0&&(d.value=t[0]),t[1]!==void 0&&(d.value_is_output=t[1]),s=new ie({props:d}),D.push(()=>F(s,"value",f)),D.push(()=>F(s,"value_is_output",m)),s.$on("change",t[15]),s.$on("input",t[16]),s.$on("submit",t[17]),s.$on("blur",t[18]),{c(){k(e.$$.fragment),l=Q(),k(s.$$.fragment)},m(n,u){w(e,n,u),j(n,l,u),w(s,n,u),c=!0},p(n,u){const v=u&2048?p(h,[x(n[11])]):{};e.$set(v);const b={};u&4&&(b.label=n[2]),u&8&&(b.info=n[3]),u&128&&(b.show_label=n[7]),u&4096&&(b.disabled=n[12]==="static"),!a&&u&1&&(a=!0,b.value=n[0],K(()=>a=!1)),!o&&u&2&&(o=!0,b.value_is_output=n[1],K(()=>o=!1)),s.$set(b)},i(n){c||(C(e.$$.fragment,n),C(s.$$.fragment,n),c=!0)},o(n){B(e.$$.fragment,n),B(s.$$.fragment,n),c=!1},d(n){n&&q(l),P(e,n),P(s,n)}}}function ae(t){let e,l;return e=new $({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],container:t[8],scale:t[9],min_width:t[10],$$slots:{default:[ne]},$$scope:{ctx:t}}}),{c(){k(e.$$.fragment)},m(s,a){w(e,s,a),l=!0},p(s,[a]){const o={};a&64&&(o.visible=s[6]),a&16&&(o.elem_id=s[4]),a&32&&(o.elem_classes=s[5]),a&256&&(o.container=s[8]),a&512&&(o.scale=s[9]),a&1024&&(o.min_width=s[10]),a&530575&&(o.$$scope={dirty:a,ctx:s}),e.$set(o)},i(s){l||(C(e.$$.fragment,s),l=!0)},o(s){B(e.$$.fragment,s),l=!1},d(s){P(e,s)}}}function ue(t,e,l){let{label:s="ColorPicker"}=e,{info:a=void 0}=e,{elem_id:o=""}=e,{elem_classes:c=[]}=e,{visible:h=!0}=e,{value:_}=e,{value_is_output:f=!1}=e,{show_label:m}=e,{container:d=!0}=e,{scale:n=null}=e,{min_width:u=void 0}=e,{loading_status:v}=e,{mode:b}=e;function R(i){_=i,l(0,_)}function 
U(i){f=i,l(1,f)}function G(i){g.call(this,t,i)}function H(i){g.call(this,t,i)}function I(i){g.call(this,t,i)}function J(i){g.call(this,t,i)}return t.$$set=i=>{"label"in i&&l(2,s=i.label),"info"in i&&l(3,a=i.info),"elem_id"in i&&l(4,o=i.elem_id),"elem_classes"in i&&l(5,c=i.elem_classes),"visible"in i&&l(6,h=i.visible),"value"in i&&l(0,_=i.value),"value_is_output"in i&&l(1,f=i.value_is_output),"show_label"in i&&l(7,m=i.show_label),"container"in i&&l(8,d=i.container),"scale"in i&&l(9,n=i.scale),"min_width"in i&&l(10,u=i.min_width),"loading_status"in i&&l(11,v=i.loading_status),"mode"in i&&l(12,b=i.mode)},[_,f,s,a,o,c,h,m,d,n,u,v,b,R,U,G,H,I,J]}class _e extends M{constructor(e){super(),N(this,e,ue,ae,O,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,show_label:7,container:8,scale:9,min_width:10,loading_status:11,mode:12})}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),r()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),r()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),r()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),r()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),r()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),r()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),r()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),r()}get container(){return this.$$.ctx[8]}set container(e){this.$$set({container:e}),r()}get scale(){return this.$$.ctx[9]}set scale(e){this.$$set({scale:e}),r()}get min_width(){return this.$$.ctx[10]}set min_width(e){this.$$set({min_width:e}),r()}get loading_status(){return this.$$.ctx[11]}set loading_status(e){this.$$set({loading_status:e}),r()}get mode(){return this.$$.ctx[12]}set mode(e){this.$$set({mode:e}),r()}}const me=_e,be=["static","dynamic"],de=t=>({type:{payload:"string"},description:{payload:"hex color code"},example_data:t.value??"#000000"});export{me as Component,de as document,be as modes};
//# sourceMappingURL=index-0ee118f2.js.map
spaces/Dinoking/Guccio-AI-Designer/netdissect/tool/lightbox.html
DELETED
@@ -1,59 +0,0 @@
-<!DOCTYPE html>
-<html>
-<!--
-+lightbox.html, a page for automatically showing all images in a
-directory on an Apache server. Just copy it into the directory.
-Works by scraping the default directory HTML at "./" - David Bau.
--->
-<head>
-<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/vue.js"
-        integrity="sha256-CMMTrj5gGwOAXBeFi7kNokqowkzbeL8ydAJy39ewjkQ="
-        crossorigin="anonymous"></script>
-<script src="https://cdn.jsdelivr.net/npm/[email protected]/lodash.js"
-        integrity="sha256-qwbDmNVLiCqkqRBpF46q5bjYH11j5cd+K+Y6D3/ja28="
-        crossorigin="anonymous"></script>
-<script
-  src="https://code.jquery.com/jquery-3.3.1.js"
-  integrity="sha256-2Kok7MbOyxpgUVvAk/HJ2jigOSYS2auK4Pfzbm7uH60="
-  crossorigin="anonymous"></script>
-<script src="https://cdnjs.cloudflare.com/ajax/libs/lity/2.3.1/lity.js"
-        integrity="sha256-28JiZvE/RethQIYCwkMdtSMHgI//KoTLeB2tSm10trs="
-        crossorigin="anonymous"></script>
-<link rel="stylesheet"
-      href="https://cdnjs.cloudflare.com/ajax/libs/lity/2.3.1/lity.css"
-      integrity="sha256-76wKiAXVBs5Kyj7j0T43nlBCbvR6pqdeeZmXI4ATnY0="
-      crossorigin="anonymous" />
-<style>
-.thumb { display: inline-block; margin: 1px; text-align: center; }
-.thumb img { max-width: 150px; }
-</style>
-</head>
-<body>
-<div id="app" v-if="images">
-  <h3>Images in <a :href="directory">{{ directory }}</a></h3>
-  <div v-for="r in images" class="thumb">
-    <div>{{ r }}</div>
-    <a :href="r" data-lity><img :src="r"></a>
-  </div>
-</div><!--app-->
-</body>
-<script>
-var theapp = new Vue({
-  el: '#app',
-  data: {
-    directory: window.location.pathname.replace(/[^\/]*$/, ''),
-    images: null
-  },
-  created: function() {
-    var self = this;
-    $.get('./?' + Math.random(), function(d) {
-      var imgurls = $.map($(d).find('a'),
-          x => x.href).filter(
-          x => x.match(/\.(jpg|jpeg|png|gif)$/i)).map(
-          x => x.replace(/.*\//, ''));
-      self.images = imgurls;
-    }, 'html');
-  }
-})
-</script>
-</html>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|