Commit
·
c19f181
1
Parent(s):
e74953a
Update parquet files (step 92 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1368565466ki/Satdia/text/__init__.py +0 -57
- spaces/1acneusushi/gradio-2dmoleculeeditor/Alice Madness Returns Xpadder Game Profile.md +0 -102
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodesk 3ds Max 2009 Activation Code Download Tips and Tricks for Successful Installation.md +0 -140
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Faronics.Deep.free __LINK__ze.Standard.v6 62 020 3058 Incl Key Crack.md +0 -135
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Reader Pro Full Download How to Create Edit and Share PDFs Like a Pro.md +0 -33
- spaces/1gistliPinn/ChatGPT4/Examples/American Sniper Full FREE Movie In Hindi Download Kickass.md +0 -11
- spaces/1gistliPinn/ChatGPT4/Examples/Creative Sound Blaster X Fi Mb [REPACK] Cracked.22.md +0 -42
- spaces/1gistliPinn/ChatGPT4/Examples/Desktop Reminder 2 Pro Activation Key Crack __HOT__.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/EaseUS Partition Master 13.8 Crack WORK Key With License Code 2020.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Ecm Titanium 1.73 Rarbfdcm [NEW].md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Finalfantasy7remakepcserialnumber [UPDATED].md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bar HTML Snippets Copy and Paste Code for Your Web Pages.md +0 -89
- spaces/1phancelerku/anime-remove-background/Anime Go APK An Easy and Reliable App for Anime Streaming.md +0 -210
- spaces/2023Liu2023/bingo/src/components/learn-more.tsx +0 -39
- spaces/2ndelement/voicevox/voicevox_engine/preset/PresetError.py +0 -2
- spaces/A00001/bingothoo/src/lib/bots/bing/types.ts +0 -259
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_block.py +0 -129
- spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/plms.py +0 -236
- spaces/AISuperheroes/10SL-RealTimeDSDashboard-Live-AIUIUX/app.py +0 -92
- spaces/Adapter/CoAdapter/ldm/modules/extra_condition/utils.py +0 -72
- spaces/Aloento/9Nine-PITS/mel_processing.py +0 -123
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/Inference.py +0 -106
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/tfutil.py +0 -262
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/styleclip_mapper.py +0 -76
- spaces/AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT/app.py +0 -3
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py +0 -1510
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_flax.py +0 -919
- spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +0 -237
- spaces/Andy1621/uniformer_image_segmentation/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py +0 -9
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/progressbar.py +0 -208
- spaces/ArkanDash/rvc-models/infer_pack/transforms.py +0 -209
- spaces/Armandoliv/gpt2-tweets-generation-app/README.md +0 -12
- spaces/Artples/Named-Entity-Recognition/app.py +0 -3
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/auth.py +0 -315
- spaces/Bart92/RVC_HF/train/process_ckpt.py +0 -259
- spaces/Benson/text-generation/Examples/Apkadmin Fuego Libre Mx Diamante Hack.md +0 -39
- spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Estrellas Para Pc.md +0 -105
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/index.py +0 -508
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py +0 -353
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h +0 -130
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/scan.h +0 -122
- spaces/Candeloro/anime-remove-background/README.md +0 -14
- spaces/CikeyQI/meme-api/meme_generator/memes/hammer/__init__.py +0 -30
- spaces/Cpp4App/Cpp4App/CDM/detect_text/ocr.py +0 -43
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/cd.py +0 -390
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_a_n_k_r.py +0 -14
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-1cf9680f.js +0 -0
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/__init__.py +0 -139
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/_cli_utils.py +0 -63
spaces/1368565466ki/Satdia/text/__init__.py
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
""" from https://github.com/keithito/tacotron """
|
2 |
-
from text import cleaners
|
3 |
-
from text.symbols import symbols
|
4 |
-
|
5 |
-
|
6 |
-
# Mappings from symbol to numeric ID and vice versa:
|
7 |
-
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
|
8 |
-
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
|
9 |
-
|
10 |
-
|
11 |
-
def text_to_sequence(text, symbols, cleaner_names):
|
12 |
-
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
|
13 |
-
Args:
|
14 |
-
text: string to convert to a sequence
|
15 |
-
cleaner_names: names of the cleaner functions to run the text through
|
16 |
-
Returns:
|
17 |
-
List of integers corresponding to the symbols in the text
|
18 |
-
'''
|
19 |
-
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
|
20 |
-
sequence = []
|
21 |
-
|
22 |
-
clean_text = _clean_text(text, cleaner_names)
|
23 |
-
for symbol in clean_text:
|
24 |
-
if symbol not in _symbol_to_id.keys():
|
25 |
-
continue
|
26 |
-
symbol_id = _symbol_to_id[symbol]
|
27 |
-
sequence += [symbol_id]
|
28 |
-
return sequence, clean_text
|
29 |
-
|
30 |
-
|
31 |
-
def cleaned_text_to_sequence(cleaned_text):
|
32 |
-
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
|
33 |
-
Args:
|
34 |
-
text: string to convert to a sequence
|
35 |
-
Returns:
|
36 |
-
List of integers corresponding to the symbols in the text
|
37 |
-
'''
|
38 |
-
sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
|
39 |
-
return sequence
|
40 |
-
|
41 |
-
|
42 |
-
def sequence_to_text(sequence):
|
43 |
-
'''Converts a sequence of IDs back to a string'''
|
44 |
-
result = ''
|
45 |
-
for symbol_id in sequence:
|
46 |
-
s = _id_to_symbol[symbol_id]
|
47 |
-
result += s
|
48 |
-
return result
|
49 |
-
|
50 |
-
|
51 |
-
def _clean_text(text, cleaner_names):
|
52 |
-
for name in cleaner_names:
|
53 |
-
cleaner = getattr(cleaners, name)
|
54 |
-
if not cleaner:
|
55 |
-
raise Exception('Unknown cleaner: %s' % name)
|
56 |
-
text = cleaner(text)
|
57 |
-
return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/Alice Madness Returns Xpadder Game Profile.md
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
## Alice Madness Returns Xpadder Game Profile
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
**Download ✅ [https://jinyurl.com/2tzZY1](https://jinyurl.com/2tzZY1)**
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
# How to Play Alice: Madness Returns with a Gamepad on PC
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
Alice: Madness Returns is a dark and twisted sequel to the classic Alice in Wonderland game. It was released in 2011 for PC, Xbox 360 and PlayStation 3. However, many PC players have reported issues with the game's controller support, especially when using Xpadder, a software that allows you to map keyboard and mouse inputs to a gamepad.
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
In this article, we will show you how to play Alice: Madness Returns with a gamepad on PC using Xpadder. We will also provide you with a link to download a ready-made Xpadder game profile for Alice: Madness Returns that includes the first Alice game redone for gamepad too.
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
## What is Xpadder?
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
Xpadder is a software that allows you to use a gamepad with any PC game that does not have native controller support. You can create your own custom profiles for different games and assign keyboard and mouse inputs to your gamepad buttons, triggers, sticks and d-pad. You can also use Xpadder to emulate mouse movements, adjust sensitivity, add turbo functions, create macros and more.
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
Xpadder is compatible with most gamepads, including Xbox 360, PlayStation 3, PlayStation 4, Steam Controller and more. You can download Xpadder from its official website for $9.99 or find a free version online.
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
## How to Play Alice: Madness Returns with a Gamepad on PC using Xpadder
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
To play Alice: Madness Returns with a gamepad on PC using Xpadder, you will need to follow these steps:
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
1. Download and install Xpadder on your PC.
|
58 |
-
|
59 |
-
2. Connect your gamepad to your PC and launch Xpadder.
|
60 |
-
|
61 |
-
3. Select your gamepad from the list of detected devices and choose an image for it.
|
62 |
-
|
63 |
-
4. Click on each button, trigger, stick and d-pad on your gamepad and assign a keyboard or mouse input to it. You can use the default settings or customize them according to your preferences.
|
64 |
-
|
65 |
-
5. Save your profile by clicking on the floppy disk icon at the top right corner of the Xpadder window.
|
66 |
-
|
67 |
-
6. Download the Alice: Madness Returns Xpadder game profile from [here](https://xpadder.com/forum4/viewtopic.php?t=4116). This profile was created by user Primal Fear from the Xpadder Forum and it includes the first Alice game redone for gamepad too.
|
68 |
-
|
69 |
-
7. Extract the zip file and copy the .xpadderprofile file to your Xpadder folder.
|
70 |
-
|
71 |
-
8. Launch Alice: Madness Returns on your PC and go to the options menu. Disable mouse smoothing and set the keyboard layout to QWERTY.
|
72 |
-
|
73 |
-
9. Alt-tab to Xpadder and load the Alice: Madness Returns profile by clicking on the folder icon at the top right corner of the Xpadder window.
|
74 |
-
|
75 |
-
10. Enjoy playing Alice: Madness Returns with a gamepad on PC!
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
## Tips and Tricks
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
- You can switch between the first Alice game and Alice: Madness Returns by pressing the Back button on your gamepad.
|
84 |
-
|
85 |
-
- You can access the in-game menu by pressing the Start button on your gamepad.
|
86 |
-
|
87 |
-
- You can lock onto enemies by pressing the Right Trigger on your gamepad.
|
88 |
-
|
89 |
-
- You can use different weapons by pressing the Left Bumper or Right Bumper on your gamepad.
|
90 |
-
|
91 |
-
- You can shrink or grow by pressing the Left Trigger on your gamepad.
|
92 |
-
|
93 |
-
- You can dodge by pressing the A button on your gamepad.
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
145887f19f
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodesk 3ds Max 2009 Activation Code Download Tips and Tricks for Successful Installation.md
DELETED
@@ -1,140 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Autodesk 3ds Max 2009 Activation Code Download: A Complete Guide</h1>
|
3 |
-
<p>If you are looking for a powerful and versatile software for creating 3D animations, models, games, and graphics, you might want to consider Autodesk 3ds Max 2009. This software is one of the most popular and widely used tools for professional and amateur artists, designers, and developers. But how can you download and activate this software on your computer? In this article, we will show you a complete guide on how to do that. Read on to find out more.</p>
|
4 |
-
<h2>autodesk 3ds max 2009 activation code download</h2><br /><p><b><b>Download</b> ✶ <a href="https://byltly.com/2uKvCr">https://byltly.com/2uKvCr</a></b></p><br /><br />
|
5 |
-
<h2>What is Autodesk 3ds Max 2009?</h2>
|
6 |
-
<p>Autodesk 3ds Max 2009 is a software application that allows you to create and edit 3D content. It was released in April 2008 by Autodesk, a leading company in the field of design and engineering software. Autodesk 3ds Max 2009 is the ninth version of the software, which was formerly known as 3D Studio Max. It is compatible with Windows XP, Vista, and 7 operating systems.</p>
|
7 |
-
<h3>Features and benefits of Autodesk 3ds Max 2009</h3>
|
8 |
-
<p>Autodesk 3ds Max 2009 offers a range of features and benefits that make it a powerful and versatile software for creating 3D content. Some of these features and benefits are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It supports various file formats, such as DWG, DXF, OBJ, FBX, STL, VRML, and more.</li>
|
11 |
-
<li>It has a user-friendly interface that allows you to customize your workspace and tools according to your preferences.</li>
|
12 |
-
<li>It has a comprehensive set of modeling tools that enable you to create complex and realistic shapes, surfaces, and textures.</li>
|
13 |
-
<li>It has a robust animation system that allows you to create dynamic and expressive animations for characters, objects, and scenes.</li>
|
14 |
-
<li>It has a powerful rendering engine that allows you to produce high-quality images and videos with realistic lighting, shadows, reflections, and effects.</li>
|
15 |
-
<li>It has a rich library of materials, maps, lights, cameras, modifiers, and plugins that enhance your creativity and productivity.</li>
|
16 |
-
<li>It has a flexible workflow that allows you to integrate with other Autodesk products, such as AutoCAD, Maya, Revit, Inventor, and more.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>System requirements for Autodesk 3ds Max 2009</h3>
|
19 |
-
<p>To run Autodesk 3ds Max 2009 smoothly on your computer, you need to meet the following minimum system requirements:</p>
|
20 |
-
<table>
|
21 |
-
<tr>
|
22 |
-
<th>Component</th>
|
23 |
-
<th>Requirement</th>
|
24 |
-
</tr>
|
25 |
-
<tr>
|
26 |
-
<td>Operating system</td>
|
27 |
-
<td>Windows XP (SP2 or higher), Vista (SP1 or higher), or Windows7 (32-bit or64-bit)</td>
|
28 |
-
</tr>
|
29 |
-
<tr>
|
30 |
-
<td>Processor</td>
|
31 |
-
<td>Intel Pentium IV or higher; AMD Athlon XP or higher</td>
|
32 |
-
</tr>
|
33 |
-
<tr>
|
34 |
-
<td>Memory</td>
|
35 |
-
<td>1 GB RAM (2 GB recommended)</td>
|
36 |
-
</tr>
|
37 |
-
<tr>
|
38 |
-
<td>Hard disk space</td>
|
39 |
-
<td>1 GB free disk space for installation; additional space required for working files</td>
|
40 |
-
</tr>
|
41 |
-
<tr>
|
42 |
-
<td>Graphics card</td>
|
43 |
-
<td>DirectX®-compatible graphics card with at least128 MB RAM; OpenGL-compatible graphics card recommended for advanced features</td>
|
44 |
-
</tr>
|
45 |
-
<tr>
|
46 |
-
<td>Display resolution</td>
|
47 |
-
<td>1024 x768 pixels or higher; true color (32-bit) recommended</td>
|
48 |
-
</tr>
|
49 |
-
<tr>
|
50 |
-
<td>Internet connection</td>
|
51 |
-
<td>Required for online activation; broadband connection recommended for downloading updates and accessing online resources</td>
|
52 |
-
</tr>
|
53 |
-
<tr>
|
54 |
-
<td>DVD-ROM drive</td>
|
55 |
-
<td>Required for installation from DVD media; not required for installation from electronic download</td>
|
56 |
-
</tr>
|
57 |
-
</table>
|
58 |
-
<h2>How to download Autodesk 3ds Max 2009?</h2>
|
59 |
-
<p>To download Autodesk 3ds Max 2009 on your computer, you need to follow these steps:</p>
|
60 |
-
<p>autodesk 3ds max 2009 crack download<br />
|
61 |
-
autodesk 3ds max 2009 serial number download<br />
|
62 |
-
autodesk 3ds max 2009 keygen download<br />
|
63 |
-
autodesk 3ds max 2009 license download<br />
|
64 |
-
autodesk 3ds max 2009 product key download<br />
|
65 |
-
autodesk 3ds max 2009 full version download<br />
|
66 |
-
autodesk 3ds max 2009 free download with crack<br />
|
67 |
-
autodesk 3ds max 2009 free download with keygen<br />
|
68 |
-
autodesk 3ds max 2009 free download with serial number<br />
|
69 |
-
autodesk 3ds max 2009 free download with license<br />
|
70 |
-
autodesk 3ds max 2009 free download with product key<br />
|
71 |
-
autodesk 3ds max 2009 free download with activation code<br />
|
72 |
-
how to activate autodesk 3ds max 2009<br />
|
73 |
-
how to crack autodesk 3ds max 2009<br />
|
74 |
-
how to install autodesk 3ds max 2009<br />
|
75 |
-
how to use autodesk 3ds max 2009<br />
|
76 |
-
how to download autodesk 3ds max 2009<br />
|
77 |
-
how to get autodesk 3ds max 2009 for free<br />
|
78 |
-
how to get autodesk 3ds max 2009 activation code<br />
|
79 |
-
how to get autodesk 3ds max 2009 serial number<br />
|
80 |
-
how to get autodesk 3ds max 2009 license<br />
|
81 |
-
how to get autodesk 3ds max 2009 product key<br />
|
82 |
-
how to get autodesk 3ds max 2009 keygen<br />
|
83 |
-
how to get autodesk 3ds max 2009 crack<br />
|
84 |
-
where to download autodesk 3ds max 2009<br />
|
85 |
-
where to find autodesk 3ds max 2009 activation code<br />
|
86 |
-
where to find autodesk 3ds max 2009 serial number<br />
|
87 |
-
where to find autodesk 3ds max 2009 license<br />
|
88 |
-
where to find autodesk 3ds max 2009 product key<br />
|
89 |
-
where to find autodesk 3ds max 2009 keygen<br />
|
90 |
-
where to find autodesk 3ds max 2009 crack<br />
|
91 |
-
best site to download autodesk 3ds max 2009<br />
|
92 |
-
best site to get autodesk 3ds max 2009 activation code<br />
|
93 |
-
best site to get autodesk 3ds max 2009 serial number<br />
|
94 |
-
best site to get autodesk 3ds max 2009 license<br />
|
95 |
-
best site to get autodesk 3ds max 2009 product key<br />
|
96 |
-
best site to get autodesk 3ds max</p>
|
97 |
-
<h3>Step1: Visit the official website of Autodesk </h3>
|
98 |
-
<p>The first step is to visit the official website of Autodesk at <a href="https://www.autodesk.com/">https://www.autodesk.com/</a>. This is where you can find all the products and services offered by Autodesk. You can also access various resources, such as tutorials, forums, blogs, support, and more.</p>
|
99 |
-
<h3>Step2: Create an account or sign in </h3>
|
100 |
-
<p>The next step is to create an account or sign in to your existing account on the website. You need an account to access the download page of Autodesk products. To create an account, click on the "Sign In" button at the top right corner of the website. Then click on "Create Account" and fill in your details. To sign in to your account, enter your email address and password.</p>
|
101 |
-
<h3>Step3: Select the product and version </h3>
|
102 |
-
<h3>Step 4: Choose the language and operating system </h3>
|
103 |
-
<p>The fourth step is to choose the language and operating system that you want to download. To do this, click on the "Download" button next to the product name. You will see a pop-up window that shows you the available options. Select the language and operating system that match your computer. Then click on "Next".</p>
|
104 |
-
<h3>Step 5: Download the installer file </h3>
|
105 |
-
<p>The fifth and final step is to download the installer file on your computer. To do this, click on the "Browser Download" button. You will see a dialog box that asks you to save the file. Choose a location where you want to save the file and click on "Save". The download will start automatically. Depending on your internet speed and file size, it may take some time to complete. Once the download is finished, you will have the installer file on your computer.</p>
|
106 |
-
<h2>How to activate Autodesk 3ds Max 2009?</h2>
|
107 |
-
<p>To activate Autodesk 3ds Max 2009 on your computer, you need to follow these steps:</p>
|
108 |
-
<h3>Step 1: Run the installer file and follow the instructions </h3>
|
109 |
-
<p>The first step is to run the installer file that you downloaded on your computer. To do this, double-click on the file or right-click and select "Open". You will see a welcome screen that asks you to accept the terms and conditions of the software. Click on "I Accept" and then click on "Next". You will see a screen that asks you to choose the type of installation. You can choose between "Typical", "Custom", or "Complete". We recommend choosing "Typical" for most users. Then click on "Next". You will see a screen that shows you the installation progress. Wait until the installation is completed.</p>
|
110 |
-
<h3>Step 2: Enter the serial number and product key </h3>
|
111 |
-
<p>The second step is to enter the serial number and product key that you received when you purchased or subscribed to Autodesk 3ds Max 2009. To do this, open the software by clicking on its icon on your desktop or start menu. You will see a screen that asks you to activate your product. Click on "Activate" and then click on "Next". You will see a screen that asks you to enter your serial number and product key. You can find these numbers in your email confirmation, invoice, or online account. Enter them in the corresponding fields and click on "Next".</p>
|
112 |
-
<h3>Step 3: Request an activation code online or by phone </h3>
|
113 |
-
<p>The third step is to request an activation code online or by phone. To do this, you will see a screen that shows you two options: "Activate Online" or "Activate by Phone". If you have an internet connection, we recommend choosing "Activate Online". This is the fastest and easiest way to activate your product. To do this, click on "Activate Online" and then click on "Next". You will see a screen that shows you your request code. Copy this code and paste it in a text document or write it down somewhere. Then click on "Next". You will be redirected to a web page where you need to sign in to your Autodesk account or create one if you don't have one already. Then follow the instructions on the web page to get your activation code.</p>
|
114 |
-
<p>If you don't have an internet connection or prefer to activate by phone, you can choose "Activate by Phone". This is an alternative way to activate your product. To do this, click on "Activate by Phone" and then click on "Next". You will see a screen that shows you your request code and a phone number for your region. Call this number and follow the voice prompts to get your activation code.</p>
|
115 |
-
<h3>Step 4: Enter the activation code and complete the process </h3>
|
116 |
-
<p>The fourth and final step is to enter the activation code and complete the process. To do this, go back to the software activation screen and enter the activation code that you received online or by phone in the corresponding field. Then click on "Next". You will see a screen that confirms that your product has been activated successfully. Click on "Finish" to close the screen. Congratulations! You have successfully downloaded and activated Autodesk 3ds Max 2009 on your computer.</p>
|
117 |
-
<h2>Conclusion</h2>
|
118 |
-
<p>In this article, we have shown you how to download and activate Autodesk 3ds Max 2009 on your computer. We hope that this guide has been helpful and informative for you. Autodesk 3ds Max 2009 is a powerful and versatile software for creating 3D content. It offers a range of features and benefits that make it a great tool for professional and amateur artists, designers, and developers. If you want to learn more about Autodesk 3ds Max 2009, you can visit its official website at <a href="https://www.autodesk.com/products/3ds-max/overview">https://www.autodesk.com/products/3ds-max/overview</a>. There you can find more resources, such as tutorials, forums, blogs, support, and more.</p>
|
119 |
-
<h2>FAQs</h2>
|
120 |
-
<p>Here are some frequently asked questions about Autodesk 3ds Max 2009:</p>
|
121 |
-
<ol>
|
122 |
-
<li><b>What is the difference between Autodesk 3ds Max 2009 and Autodesk Maya?</b></li>
|
123 |
-
<p>Autodesk 3ds Max 2009 and Autodesk Maya are both software applications for creating 3D content. They are both products of Autodesk, but they have different features, strengths, and workflows. Generally speaking, Autodesk 3ds Max 2009 is more focused on modeling, animation, rendering, and game development, while Autodesk Maya is more focused on visual effects, simulation, rigging, scripting, and compositing.</p>
|
124 |
-
<li><b>How much does Autodesk 3ds Max 2009 cost?</b></li>
|
125 |
-
<p>Autodesk 3ds Max 2009 is no longer available for purchase or subscription from Autodesk. The latest version of Autodesk 3ds Max is Autodesk 3ds Max2022, which costs $1,620 per year or $205 per month for a subscription plan.</p>
|
126 |
-
<li><b>Can I use Autodesk 3ds Max 2009 for free?</b></li>
|
127 |
-
<p>No, you cannot use Autodesk 3ds Max 2009 for free legally. However, if you are a student or educator, you can get access to Autodesk products for free for educational purposes through <a href="https://www.autodesk.com/education/home">https://www.autodesk.com/education/home</a>. There you can find more information about how to apply for a free license.</p>
|
128 |
-
<li><b>Can I use Autodesk 3ds Max 2009 on Mac?</b></li>
|
129 |
-
<p>No, you cannot use Autodesk 3ds Max 2009 on Mac natively. Autodesk 3ds Max only supports Windows operating systems. However, if you have a Mac with an Intel processor, you can use Boot Camp or Parallels Desktop software to run Windows applications on your Mac.</p>
|
130 |
-
<li><b>Where can I find tutorials for Autodesk 3ds Max 2009?</b></li>
|
131 |
-
<p>You can find tutorials for Autodesk 3ds Max 2009 on various websites and platforms online. Some of them are:</p>
|
132 |
-
<ul>
|
133 |
-
<li><a href="https://knowledge.autodesk.com/support/3ds-max/getting-started">https://knowledge.autodesk.com/support/3ds-max/getting-started</a>: This is the official website of Autodesk where you can find basic tutorials for getting started with Autodesk products.</li>
|
134 |
-
<li><a href="https://www.youtube.com/results?search_query=autodesk+3ds+max+2009+tutorial">https://www.youtube.com/results?search_query=autodesk+3ds+max+2009+tutorial</a>: This is YouTube where you can find various video tutorials for different topics and levels of difficulty.</li>
|
135 |
-
<li><a href="https://www.lynda.com/learning-paths/Design/become-a-3d-artist">https://www.lynda.com/learning-paths/Design/become-a-3d-artist</a>: This is Lynda where you can find comprehensive courses for learning different aspects of Autodesk products.</li>
|
136 |
-
</ul>
|
137 |
-
</ol>
|
138 |
-
</p> 0a6ba089eb<br />
|
139 |
-
<br />
|
140 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Faronics.Deep.free __LINK__ze.Standard.v6 62 020 3058 Incl Key Crack.md
DELETED
@@ -1,135 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Faronics Deep Freeze Standard v6 62 020 3058 Incl Key Crack: A Review</h1>
|
3 |
-
<p>If you are looking for a way to protect your computer from unwanted changes, malware, and system errors, you might have come across Faronics Deep Freeze Standard. This is a software that can freeze your system configuration and restore it to its original state with a simple reboot. But what if you don't want to pay for the software and instead use a crack to activate it? Is it worth the risk? In this article, we will review Faronics Deep Freeze Standard, explain what a crack is and why people use it, and show you how to get the software legally and safely.</p>
|
4 |
-
<h2>Faronics.Deep.Freeze.Standard.v6 62 020 3058 Incl Key crack</h2><br /><p><b><b>DOWNLOAD</b> » <a href="https://byltly.com/2uKvXu">https://byltly.com/2uKvXu</a></b></p><br /><br />
|
5 |
-
<h2>What is Faronics Deep Freeze Standard?</h2>
|
6 |
-
<p>Faronics Deep Freeze Standard is a patented reboot-to-restore software that can make your computer indestructible. It works by redirecting any changes made to your hard drive to a virtual partition, leaving the original data intact. This way, you can provide unrestricted access to your computer without worrying about permanent configuration changes, malware infections, or system errors. Any unwanted changes can be reversed with a simple reboot, restoring your computer to its desired state.</p>
|
7 |
-
<h3>Features and benefits of Faronics Deep Freeze Standard</h3>
|
8 |
-
<p>Some of the features and benefits of Faronics Deep Freeze Standard are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>It can protect your computer from phishing, ransomware, zero-day threats, and other malicious attacks.</li>
|
11 |
-
<li>It can prevent configuration drifts and ensure compliance with software licenses.</li>
|
12 |
-
<li>It can retain critical data even if there is no separate physical partition available on your computer.</li>
|
13 |
-
<li>It can increase productivity by reducing troubleshooting time and IT support costs.</li>
|
14 |
-
<li>It can enhance security by protecting the master boot record from rootkit injections and other alterations.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to install and use Faronics Deep Freeze Standard</h3>
|
17 |
-
<p>To install and use Faronics Deep Freeze Standard, you need to follow these steps:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Download the software from the official website or from a trusted source.</li>
|
20 |
-
<li>Run the installer and follow the instructions on the screen.</li>
|
21 |
-
<li>Select the drives or partitions that you want to freeze and configure the settings according to your preferences.</li>
|
22 |
-
<li>Enter the license key that you received after purchasing the software or request a free trial.</li>
|
23 |
-
<li>Restart your computer to activate the software.</li>
|
24 |
-
<li>To make any changes to your frozen system, you need to enter the password that you set during installation and select thawed mode. This will allow you to make temporary changes that will be discarded after rebooting.</li>
|
25 |
-
<li>To make permanent changes to your frozen system, you need to enter the password that you set during installation and select maintenance mode. This will allow you to make persistent changes that will be retained after rebooting.</li>
|
26 |
-
</ol>
|
27 |
-
<h2>What is a crack and why do people use it?</h2>
|
28 |
-
<p>A crack is a program or a file that can modify or bypass the security features of another software. People use cracks to activate or unlock software that they have not paid for or that they have obtained illegally. For example, some people might use a crack to activate Faronics Deep Freeze Standard without buying a license key or requesting a free trial.</p>
|
29 |
-
<h3>The risks and disadvantages of using a crack</h3>
|
30 |
-
<p>Using a crack might seem like an easy way to get software for free, but it comes with many risks and disadvantages. Some of them are:</p>
|
31 |
-
<ul>
|
32 |
-
<li>You might download a fake or malicious crack that can harm your computer or steal your personal information.</li>
|
33 |
-
<li>You might expose your computer to viruses, spyware, adware, trojans, worms, or other malware that can compromise your security and privacy.</li>
|
34 |
-
<li>You might experience performance issues, system errors, crashes, or data loss due to incompatible or corrupted files.</li>
|
35 |
-
<li>You might not be able to access updates, patches, bug fixes, or new features that are available for the original software.</li>
|
36 |
-
<li>You might not be able to get technical support or customer service from the software developer or vendor.</li>
|
37 |
-
</ul>
|
38 |
-
<h3>The legal and ethical implications of using a crack</h3>
|
39 |
-
<p>Besides the technical risks and disadvantages, using a crack also has legal and ethical implications. Some of them are:</p>
|
40 |
-
<p>How to install Faronics Deep Freeze Standard v6.62 on Windows 10<br />
|
41 |
-
Faronics Deep Freeze Standard v6.62 review and features<br />
|
42 |
-
Faronics Deep Freeze Standard v6.62 license key generator<br />
|
43 |
-
Faronics Deep Freeze Standard v6.62 vs Enterprise edition<br />
|
44 |
-
Faronics Deep Freeze Standard v6.62 download link and crack<br />
|
45 |
-
How to uninstall Faronics Deep Freeze Standard v6.62 from your PC<br />
|
46 |
-
How to use Faronics Deep Freeze Standard v6.62 for PC restore and protection<br />
|
47 |
-
Faronics Deep Freeze Standard v6.62 release notes and updates<br />
|
48 |
-
How to activate Faronics Deep Freeze Standard v6.62 with key<br />
|
49 |
-
How to bypass Faronics Deep Freeze Standard v6.62 password<br />
|
50 |
-
How to configure Faronics Deep Freeze Standard v6.62 settings and options<br />
|
51 |
-
How to create virtual partitions with Faronics Deep Freeze Standard v6.62<br />
|
52 |
-
How to fix common issues with Faronics Deep Freeze Standard v6.62<br />
|
53 |
-
How to get support for Faronics Deep Freeze Standard v6.62<br />
|
54 |
-
How to integrate Faronics Deep Freeze Standard v6.62 with other Faronics products<br />
|
55 |
-
How to manage multiple computers with Faronics Deep Freeze Standard v6.62<br />
|
56 |
-
How to protect your Master Boot Record with Faronics Deep Freeze Standard v6.62<br />
|
57 |
-
How to recover data from frozen computers with Faronics Deep Freeze Standard v6.62<br />
|
58 |
-
How to revert changes with a simple reboot using Faronics Deep Freeze Standard v6.62<br />
|
59 |
-
How to secure your POS computers from malware with Faronics Deep Freeze Standard v6.62<br />
|
60 |
-
How to update Faronics Deep Freeze Standard v6.62 to the latest version<br />
|
61 |
-
Is Faronics Deep Freeze Standard v6.62 compatible with Windows 11<br />
|
62 |
-
Pros and cons of using Faronics Deep Freeze Standard v6.62<br />
|
63 |
-
The best alternatives to Faronics Deep Freeze Standard v6.62<br />
|
64 |
-
The difference between Reboot-to-Restore and Reboot-to-Repair technologies<br />
|
65 |
-
The history and evolution of Faronics Deep Freeze Standard<br />
|
66 |
-
The impact of Faronics Deep Freeze Standard v6.62 on system performance and resources<br />
|
67 |
-
The most common use cases for Faronics Deep Freeze Standard v6.62<br />
|
68 |
-
The non-restrictive Reboot-to-Restore concept of Faronics Deep Freeze Standard v6.62<br />
|
69 |
-
The patented technology behind Faronics Deep Freeze Standard v6.62<br />
|
70 |
-
Tips and tricks for using Faronics Deep Freeze Standard v6.62 effectively<br />
|
71 |
-
What are the benefits of using Faronics Deep Freeze Standard v6.62 for education, government, healthcare, and retail sectors<br />
|
72 |
-
What are the drawbacks of using Faronics Deep Freeze Standard v6.62<br />
|
73 |
-
What are the minimum system requirements for installing and running Faronics Deep Freeze Standard v6.62<br />
|
74 |
-
What are the pricing and licensing options for Faronics Deep Freeze Standard v6.62<br />
|
75 |
-
What are the security features of Faronics Deep Freeze Standard v6.62<br />
|
76 |
-
What is the difference between freezing and thawing your computer with Faronics Deep Freeze Standard v6.62<br />
|
77 |
-
What is the difference between standard mode and stealth mode in Faronics Deep Freeze Standard v6.62<br />
|
78 |
-
What is the difference between workstation installer and seed installer in Faronics Deep Freeze Standard v6.62</p>
|
79 |
-
<ul>
|
80 |
-
<li>You might violate the intellectual property rights of the software developer or vendor and face legal consequences such as fines or lawsuits.</li>
|
81 |
-
<li>You might contribute to software piracy and harm the software industry by reducing its revenue and innovation.</li>
|
82 |
-
<li>You might disrespect the hard work and creativity of the software developer or vendor by not paying for their product or service.</li>
|
83 |
-
<li>You might lose your credibility and reputation as a professional or a user by engaging in unethical behavior.</li>
|
84 |
-
</ul>
|
85 |
-
<h2>How to get Faronics Deep Freeze Standard legally and safely</h2>
|
86 |
-
<p>The best way to get Faronics Deep Freeze Standard legally and safely is to buy a licensed version of the software from the official website or from an authorized reseller. This way, you can enjoy all the features and benefits of the software without risking your computer, security, privacy, or reputation.</p>
|
87 |
-
<h3>The advantages of buying a licensed version of Faronics Deep Freeze Standard</h3>
|
88 |
-
<p>Some of the advantages of buying a licensed version of Faronics Deep Freeze Standard are:</p>
|
89 |
-
<ul>
|
90 |
-
<li>You can get access to updates, patches, bug fixes, or new features that are available for the software.</li>
|
91 |
-
<li>You can get technical support or customer service from the software developer or vendor in case you encounter any issues or have any questions.</li>
|
92 |
-
<li>You can protect your computer from unwanted changes, malware, and system errors with confidence and peace of mind.</li>
|
93 |
-
<li>You can support the software developer or vendor by paying for their product or service and encouraging their continued development and improvement.</li>
|
94 |
-
<li>You can demonstrate your professionalism and integrity by complying with the intellectual property rights of the software developer or vendor and respecting their hard work and creativity.</li>
|
95 |
-
</ul>
|
96 |
-
<h3>The steps to purchase and activate Faronics Deep Freeze Standard</h3>
|
97 |
-
<p>To purchase and activate Faronics Deep Freeze Standard legally and safely, you need to follow these steps:</p>
|
98 |
-
<ol>
|
99 |
-
<li>Visit <a href="https://www.faronics.com/products/deep-freeze/standard">the official website</a> of Faronics Deep Freeze Standard or contact an authorized reseller near you.</li>
|
100 |
-
<li>Select the number of licenses that you need based on how many computers you want to install the software on. You can also request a quote for volume discounts if you need more than 10 licenses.</li>
|
101 |
-
<li>Fill out your personal information such as name, email address, phone number, country, etc. You can also opt-in for newsletters or special offers if you wish.</li>
|
102 |
-
<li>Select your preferred payment method such as credit card, PayPal, wire transfer, etc. You can also apply a coupon code if you have one.</li>
|
103 |
-
<li>Complete your payment process by following the instructions on the screen. You will receive an email confirmation with your order details and invoice.</li>
|
104 |
-
<li>Download the software from <a href="https://www.faronics.com/document-library/document/deep-freeze-standard-release-notes">the download page</a> using the link provided in your email confirmation. You will also receive your license key in your email confirmation.</li>
|
105 |
-
<li>Install and activate the software using <a href="https://www.faronics.com/document-library/document/deep-freeze-standard-user-guide">the user guide</a>. You will need to enter your license key during installation or activation.</li>
|
106 |
-
<h2>Conclusion</h2>
|
107 |
-
<p>Faronics Deep Freeze Standard is a powerful reboot-to-restore software that can protect your computer from unwanted changes, malware, and system errors. However, using a crack to activate it is not only risky but also illegal and unethical. Therefore, we recommend that you buy a licensed version of the from the official website or from an authorized reseller. This way, you can enjoy all the benefits and features of the software without compromising your computer, security, privacy, or reputation.</p>
|
108 |
-
<h3>Summary of the main points</h3>
|
109 |
-
<p>In this article, we have reviewed Faronics Deep Freeze Standard, a reboot-to-restore software that can make your computer indestructible. We have also explained what a crack is and why people use it to activate software illegally. We have shown you the risks and disadvantages of using a crack, as well as the legal and ethical implications of doing so. Finally, we have shown you how to get Faronics Deep Freeze Standard legally and safely by buying a licensed version of the software from the official website or from an authorized reseller.</p>
|
110 |
-
<h3>Call to action</h3>
|
111 |
-
<p>If you are interested in Faronics Deep Freeze Standard and want to protect your computer from unwanted changes, malware, and system errors, don't hesitate to buy a licensed version of the software today. You can visit <a href="https://www.faronics.com/products/deep-freeze/standard">the official website</a> of Faronics Deep Freeze Standard or contact an authorized reseller near you to place your order. You can also request a free trial if you want to test the software before buying it. Don't risk your computer, security, privacy, or reputation by using a crack. Get Faronics Deep Freeze Standard legally and safely and enjoy its features and benefits with confidence and peace of mind.</p>
|
112 |
-
<h2>FAQs</h2>
|
113 |
-
<p>Here are some frequently asked questions about Faronics Deep Freeze Standard and cracks:</p>
|
114 |
-
<ul>
|
115 |
-
<li><b>Q: How much does Faronics Deep Freeze Standard cost?</b></li>
|
116 |
-
<li>A: The price of Faronics Deep Freeze Standard depends on how many licenses you need and how long you want to use the software. You can check <a href="https://www.faronics.com/products/deep-freeze/standard">the pricing page</a> for more details or request a quote for volume discounts.</li>
|
117 |
-
<li><b>Q: How long does the free trial of Faronics Deep Freeze Standard last?</b></li>
|
118 |
-
<li>A: The free trial of Faronics Deep Freeze Standard lasts for 30 days. You can request a free trial <a href="https://www.faronics.com/free-trial">here</a>.</li>
|
119 |
-
<li><b>Q: What are the system requirements for Faronics Deep Freeze Standard?</b></li>
|
120 |
-
<li>A: The system requirements for Faronics Deep Freeze Standard are:</li>
|
121 |
-
<ul>
|
122 |
-
<li>Windows 10 up to version 21H1</li>
|
123 |
-
<li>Windows 8.1 (32 & 64 Bit)</li>
|
124 |
-
<li>Windows 7 (32 & 64 Bit)</li>
|
125 |
-
<li>10% free hard drive space</li>
|
126 |
-
<li>The hardware requirements are the same as the recommended requirements for the host operating system.</li>
|
127 |
-
</ul>
|
128 |
-
<li><b>Q: What is the difference between Faronics Deep Freeze Standard and Faronics Deep Freeze Enterprise?</b></li>
|
129 |
-
<li>A: Faronics Deep Freeze Standard is designed for single-user computers, while Faronics Deep Freeze Enterprise is designed for multi-user computers. Faronics Deep Freeze Enterprise has more features and options than Faronics Deep Freeze Standard, such as centralized management console, remote deployment and control, scheduled maintenance and updates, etc.</li>
|
130 |
-
<li><b>Q: Where can I find more information about Faronics Deep Freeze Standard?</b></li>
|
131 |
-
<li>A: You can find more information about Faronics Deep Freeze Standard on <a href="https://www.faronics.com/products/deep-freeze/standard">the official website</a>, <a href="https://www.faronics.com/document-library/document/deep-freeze-standard-user-guide">the user guide</a>, <a href="https://www.faronics.com/document-library/document/deep-freeze-standard-release-notes">the release notes</a>, or <a href="https://www.faronics.com/support">the support page</a>. You can also contact <a href="https://www.faronics.com/contact-us">the customer service</a> if you have any questions or issues.</li>
|
132 |
-
</ul>
|
133 |
-
</p> 0a6ba089eb<br />
|
134 |
-
<br />
|
135 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Reader Pro Full Download How to Create Edit and Share PDFs Like a Pro.md
DELETED
@@ -1,33 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Foxit Reader Pro Full: How to Get the Best PDF Reader and Editor for Windows</h1>
|
3 |
-
<p>If you are looking for a free and powerful PDF reader and editor for Windows, you may want to consider Foxit Reader Pro. Foxit Reader Pro is a premium version of Foxit Reader, a popular PDF editor that offers a range of features and tools to help you view, annotate, fill out, sign, and protect your PDF documents. In this article, we will show you how to get the full version of Foxit Reader Pro and what benefits it can bring to your work.</p>
|
4 |
-
<h2>download foxit reader pro full + crack</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://byltly.com/2uKweU">https://byltly.com/2uKweU</a></b></p><br /><br />
|
5 |
-
<h2>What is Foxit Reader Pro?</h2>
|
6 |
-
<p>Foxit Reader Pro is a PDF reader and editor that works like a word processor, allowing you to manage text, paragraphs, images, and other elements in your PDF documents. You can also create new PDF files from scratch or from various sources, such as scanners, web pages, or other applications. Foxit Reader Pro supports Optical Character Recognition (OCR), which means it can recognize text from scanned or image-based PDFs and let you edit them as you wish.</p>
|
7 |
-
<p>One of the main advantages of Foxit Reader Pro is its collaboration features. You can connect to online cloud storage and Content Management Systems (CMS) to access and share your PDF files with your team members. You can also add comments, annotations, stamps, and signatures to your PDFs and track the changes made by others. Foxit Reader Pro also lets you protect your PDFs with passwords, encryption, certificates, redaction, and digital rights management (DRM).</p>
|
8 |
-
<p>Foxit Reader Pro also offers some exclusive features that are not available in the free version of Foxit Reader. These include:</p>
|
9 |
-
<p></p>
|
10 |
-
<ul>
|
11 |
-
<li>PDF/A/E/X compliance verification and creation</li>
|
12 |
-
<li>PDF index creation and searching</li>
|
13 |
-
<li>PDF conversion from Microsoft Office documents</li>
|
14 |
-
<li>PDF optimization and compression</li>
|
15 |
-
<li>PDF editing with advanced object editing tools</li>
|
16 |
-
<li>PDF form design with form field recognition</li>
|
17 |
-
<li>PDF portfolio creation and editing</li>
|
18 |
-
<li>PDF redaction with pattern search</li>
|
19 |
-
<li>PDF bates numbering</li>
|
20 |
-
<li>PDF watermarking</li>
|
21 |
-
<li>PDF signing with digital signatures</li>
|
22 |
-
<li>PDF encryption with Microsoft IRM protection</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to get Foxit Reader Pro full download?</h2>
|
25 |
-
<p>To get the full version of Foxit Reader Pro, you need to purchase a subscription from the official website of Foxit Software. There are two subscription levels you can choose from: the standard PDF reader and the pro PDF reader. The standard PDF reader provides all the essential features for viewing and annotating PDFs, while the pro PDF reader adds advanced editing and security tools.</p>
|
26 |
-
<p>The subscription prices vary depending on the duration and the number of users. You can choose between monthly or yearly plans and between single-user or multi-user licenses. You can also get a free trial for 14 days to test the features of Foxit Reader Pro before buying it.</p>
|
27 |
-
<p>Once you purchase a subscription, you will receive an email with a download link and a license key. You need to download the offline installer file from the link and run it on your computer. Then, you need to enter the license key when prompted to activate your subscription. After that, you can start using Foxit Reader Pro offline on your PC.</p>
|
28 |
-
<h2>Conclusion</h2>
|
29 |
-
<p>Foxit Reader Pro is a comprehensive PDF reader and editor that can help you view, edit, share, sign, and protect your PDF documents with ease. It offers a range of features and tools that go beyond basic viewing and annotating. You can also collaborate with your team members online and access your PDF files from various cloud storage and CMS platforms.</p>
|
30 |
-
<p>To get the full version of Foxit Reader Pro, you need to purchase a subscription from the official website of Foxit Software. You can choose between two subscription levels: the standard PDF reader and the pro PDF reader. You can also get a free trial for 14 days to try out the features of Foxit Reader Pro.</p>
|
31 |
-
<p>If you are looking for a reliable and versatile PDF reader and editor for Windows, Foxit Reader Pro is a great option to consider.</p> ddb901b051<br />
|
32 |
-
<br />
|
33 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/American Sniper Full FREE Movie In Hindi Download Kickass.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>download hd mp4 720p 480p chad michael collins,danay garcia,billy zane,tom berenger,joe lando,pedro jos pallares,jaime correa,lucho velasco, 3 october 2017dvdrip mp4mobilemovies hon3yhd 3gpmobilemovies 02cinema downloadhub</p>
|
3 |
-
<h2>american sniper full movie in hindi download kickass</h2><br /><p><b><b>Download</b> ⚹⚹⚹ <a href="https://imgfil.com/2uxXyv">https://imgfil.com/2uxXyv</a></b></p><br /><br />
|
4 |
-
<p>download hd pc mp4 720p 480p chad michael collins,danay garcia,billy zane,tom berenger,joe lando,pedro jos pallares,jaime correa,lucho velasco, 3 october 2017dvdrip mp4mobilemovies hon3yhd 3gpmobilemovies 02cinema downloadhub</p>
|
5 |
-
<p>download hd pc mp4 720p 480p chad michael collins,danay garcia,billy zane,tom berenger,joe lando,pedro jos pallares,jaime correa,lucho velasco, 3 october 2017dvdrip mp4mobilemovies hon3yhd 3gpmobilemovies 02cinema downloadhub </p>
|
6 |
-
<p>10 oct 2017 download full movie in hindi language torrent hd [bbc documentary. download full movie in hindi language torrent hd [bbc documentary. https://coub.com/stories/3215817-i-am-full-movie-in-hindi-dubbed-hd-1080p-best. .com/stories/3155095-tajpuri-movie-download-kickass-hd-5-3gp-torrent-free. </p>
|
7 |
-
<p>https://coub.com/stories/3215817-i-am-full-movie-in-hindi-dubbed-hd-1080p-best. story(3).. https://coub.com/stories/3215913-to-and-from-full-movie-in-hindi-dubbed-hd-720p-torrent-free. .com/stories/3070224-jai-movie-full-movie-in-hindi-dubbed-1080p-torrent-free.com/stories/3068299-karachi-movie-full-movie-in-hindi-dubbed-hd-1080p-torrent-free.com/stories/3048499-fool-n-final-movie-full-movie-in-hindi-dubbed-1080p-wetwonn. </p>
|
8 |
-
<p></p>
|
9 |
-
<p>https://coub.com/stories/3215817-i-am-full-movie-in-hindi-dubbed-hd-1080p-best. https://coub.com/stories/3215913-to-and-from-full-movie-in-hindi-dubbed-hd-720p-torrent-free.com/stories/3148388-paint-full-movie-in-hindi-dubbed-torrent-hd.com/stories/3070224-jai-movie-full-movie-in-hindi-dubbed-1080p-torrent-free.com/stories/3068299-karachi-movie-full-movie-in-hindi-dubbed-hd-1080p-torrent-free.com/stories/3048499-fool-n-final-movie-full-movie-in-hindi-dubbed-1080p-wetwonn. </p> 899543212b<br />
|
10 |
-
<br />
|
11 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Creative Sound Blaster X Fi Mb [REPACK] Cracked.22.md
DELETED
@@ -1,42 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Install Creative Sound Blaster X-Fi MB on Realtek HD Audio for Free</h1>
|
3 |
-
<p>If you want to enhance your audio quality and experience on your PC, you might be interested in installing Creative Sound Blaster X-Fi MB software. This software provides premium audio effects and features for PC systems equipped with only basic onboard audio. However, this software is not free and requires a license key to activate.</p>
|
4 |
-
<p>Fortunately, there is a way to install Creative Sound Blaster X-Fi MB on Realtek HD audio for free, without paying for a license key. This method involves downloading a cracked version of the software and modifying some registry files. In this article, we will show you how to do it step by step.</p>
|
5 |
-
<h2>Creative Sound Blaster X Fi Mb Cracked.22</h2><br /><p><b><b>Download File</b> ☆☆☆ <a href="https://imgfil.com/2uxYAs">https://imgfil.com/2uxYAs</a></b></p><br /><br />
|
6 |
-
<h2>Disclaimer</h2>
|
7 |
-
<p>Before we proceed, we want to make it clear that this method is not legal and may violate the terms and conditions of Creative Technology Ltd. We do not condone or encourage piracy or illegal use of software. This article is for educational and informational purposes only. Use this method at your own risk.</p>
|
8 |
-
<h2>Requirements</h2>
|
9 |
-
<p>To install Creative Sound Blaster X-Fi MB on Realtek HD audio for free, you will need the following:</p>
|
10 |
-
<ul>
|
11 |
-
<li>A PC with Windows XP/XP Professional/Vista/7/8/10/11 operating system and Realtek HD audio driver installed.</li>
|
12 |
-
<li>A stable internet connection.</li>
|
13 |
-
<li>A zip file extractor such as WinRAR or 7-Zip.</li>
|
14 |
-
<li>A registry editor such as Regedit or Reg Organizer.</li>
|
15 |
-
<li>A cracked version of Creative Sound Blaster X-Fi MB software. You can download it from here[^3^].</li>
|
16 |
-
</ul>
|
17 |
-
<h2>Steps</h2>
|
18 |
-
<p>Follow these steps to install Creative Sound Blaster X-Fi MB on Realtek HD audio for free:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Download the cracked version of Creative Sound Blaster X-Fi MB software from the link above and extract it to a folder on your desktop.</li>
|
21 |
-
<li>Open the folder and run the setup.exe file as administrator. Follow the instructions to install the software.</li>
|
22 |
-
<li>After the installation is complete, do not run the software yet. Instead, go to the folder where you extracted the zip file and open the "Instructions" folder.</li>
|
23 |
-
<li>Open the "Take Ownership.reg" file and click "Yes" to add it to your registry. This will allow you to modify some system files later.</li>
|
24 |
-
<li>Open the "Realtek HD Audio Driver" folder and copy all the files inside it.</li>
|
25 |
-
<li>Paste the files into the following location: C:\Windows\System32\drivers\</li>
|
26 |
-
<li>If prompted to replace any existing files, click "Yes".</li>
|
27 |
-
<li>Open the "Creative Sound Blaster X-Fi MB" folder and copy all the files inside it.</li>
|
28 |
-
<li>Paste the files into the following location: C:\Program Files (x86)\Creative\Sound Blaster X-Fi MB\</li>
|
29 |
-
<li>If prompted to replace any existing files, click "Yes".</li>
|
30 |
-
<li>Open the "Registry" folder and double-click on the "SBXFIMB5.reg" file. Click "Yes" to add it to your registry.</li>
|
31 |
-
<li>Restart your PC.</li>
|
32 |
-
<li>After your PC restarts, run the Creative Sound Blaster X-Fi MB software from your desktop or start menu. You should see a message saying that your software is activated.</li>
|
33 |
-
<li>Enjoy your enhanced audio quality and experience!</li>
|
34 |
-
</ol>
|
35 |
-
<h2>Troubleshooting</h2>
|
36 |
-
<p>If you encounter any problems while installing or running Creative Sound Blaster X-Fi MB on Realtek HD audio for free, you can try these troubleshooting tips:</p>
|
37 |
-
<ul>
|
38 |
-
<li>If you get an error message saying that your software is not activated or that your license key is invalid, make sure that you copied and pasted all the files correctly and that you added the registry entries properly.</li>
|
39 |
-
<li>If you get a blue screen of death (BSOD) or your PC crashes while installing or running Creative Sound Blaster X-Fi MB on Realtek HD audio for free, make sure that you have a compatible version of Windows and Realtek HD audio driver installed. You can also try updating your drivers or reinstalling them.</li>
|
40 |
-
<li</p> d5da3c52bf<br />
|
41 |
-
<br />
|
42 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Desktop Reminder 2 Pro Activation Key Crack __HOT__.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>desktop reminder 2 pro activation key crack</h2><br /><p><b><b>Download</b> ——— <a href="https://imgfil.com/2uxX4X">https://imgfil.com/2uxX4X</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
5 Mar. 2015 - 1 mindesktop-reminder pro activation key desktop-reminder pro desktop-reminder 2 pro with crack .. Desktop-. Reminder, free and safe download. 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/EaseUS Partition Master 13.8 Crack WORK Key With License Code 2020.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>Because of the EaseUS Partition Master 12.9 Crack you can easily manage the system partition. You can easily manage the file system of your hard disk. You can easily manage the partition easily. You can easily manage the data of your hard disk. You can easily delete the partitions by this tool. By the help of this tool you can easily make copies of your partition easily. This tool is beneficial for the beginners because it is very easy to use this tool for the beginners.</p>
|
3 |
-
<h2>EaseUS Partition Master 13.8 Crack Key With License Code 2020</h2><br /><p><b><b>Download Zip</b> ⇒⇒⇒ <a href="https://imgfil.com/2uy0oP">https://imgfil.com/2uy0oP</a></b></p><br /><br />
|
4 |
-
<p>There is no need to install any other tool in your computer for the partitioning. Every partition of your computer can be managed with the help of this tool. In order to manage the hard disk, you need to start the partition of the hard disk. You can easily set the boot option if your computer is not booting properly. You can easily increase or decrease the size of the partition easily. By the help of this tool you can easily copy partition by this tool. By the help of this tool, you can easily merge and convert the system partitions easily.</p>
|
5 |
-
<p>The EASEUS Partition Master is computer disk management software that integrates several tools to those which are involved in disk management, such as disk copying, disk handling, partition management, and data recovery. It also offers to the extent of flexibility of a partition, the clarity of the software to the user interface, and its extremely affordable and straightforward structure. You can handle a partition in the absence of any instructions or be the hard disk management wizard. You can save time by not having to contact your computer technician or a computer specialist to manage your computer if you can’t buy more hard disk space, or if you have partitioned your hard disk. The latest feature of EASEUS Partition Master 15.3 Key is that it can be opened from any volume of the operating system on the computer. Additionally, It’s very easy to use and partition disks, and there is no formal learning curve needed to use it. All of the options of EASEUS Partition Master 15.2 Crack are available for both disk that is primary and secondary hard disk drives. It allows you to create a bootable disk in case of system boot failure. Also, you can extend partition (especially for system drive) and easily manage disk space easily.</p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Ecm Titanium 1.73 Rarbfdcm [NEW].md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>README.txt: Ecm Titanium 1.73 Rarbfdcm... Download WinAVI Video Converter Pro version 11.6.1.4734 Windows..... bfdcm crack -> DOWNLOAD bfdcm wine... como puadav como yuta como el fin de podaas o las riendas de oro de la virginidad española la historia de... Dimple Dots Sucks 3D Game, Free Download in... Eye of the Beholder 2: The Legend of Dark Lake v1.5.0 [v1.1.1 in English only] Free Download (DNFUp) eyeofthebeholder 2 the... JQuery or similar data can then be used to... Download WinAVI Video Converter Pro version 11.6.1.4734 Windows..... bfdcm crack -> DOWNLOAD bfdcm wine... como puadav como yuta como el fin de podaas o las riendas de oro de la virginidad española la historia de... Dimple Dots Sucks 3D Game, Free Download in... Eye of the Beholder 2: The Legend of Dark Lake v1.5.0 [v1.1.1 in English only] Free Download (DNFUp) eyeofthebeholder 2 the... JQuery or similar data can then be used to...</p>
|
3 |
-
<h2>Ecm Titanium 1.73 Rarbfdcm</h2><br /><p><b><b>Download Zip</b> ✏ <a href="https://imgfil.com/2uy05p">https://imgfil.com/2uy05p</a></b></p><br /><br />
|
4 |
-
<p>programa de descargar juegos para windows 10 o pro acuerdo de azar slot freezoe5.com (puedes consultar al. Ecm Titanium 1.73 Rarbfdcm DOWNLOAD Free Game Acceleration Free Relauncher.rarbfdcm DOWNLOAD Free Game Acceleration Free Relauncher.rarbfdcm Downloa...</p>
|
5 |
-
<p>Otros casos de bfdcm crack...... con la bfdcm para Crack, Descartar Up3d, y muchos mas... WinAVI Video Converter 11.6.1 Keygen.rarbfdcm ->->->->-> DOWNLOAD WinAVI Video Converter 11.6.1.4734 Portable Full Version Free Download. WinAVI.. Surcode Dts Encoder Serial Key Keygen talapatra nidhi book in telugu pdf free... UPD Winavi Video Converter 11.6.1 Keygen.rarbfdcm 2020.08.17 22:33... Rufus 2.10.973 Portable [Latest] crackingpatching.unblock2.club <br></p> 899543212b<br />
|
6 |
-
<br />
|
7 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Finalfantasy7remakepcserialnumber [UPDATED].md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>finalfantasy7remakepcserialnumber</h2><br /><p><b><b>DOWNLOAD</b> »»» <a href="https://imgfil.com/2uy1vC">https://imgfil.com/2uy1vC</a></b></p><br /><br />
|
2 |
-
|
3 |
-
finalfantasy7remakepcserialnumber · crack topsolid v6.12 · HD Online Player (the bourne identity english subtitle). Disciplines. French and ... 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bar HTML Snippets Copy and Paste Code for Your Web Pages.md
DELETED
@@ -1,89 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Create a Download Bar in HTML</h1>
|
3 |
-
<p>A download bar is a graphical element that shows the progress of a file download from a website or an app. It can help users to estimate the time and speed of the download, as well as to cancel or pause it if needed. In this article, we will show you how to create a download bar in HTML using two different methods: using HTML5 features and using JavaScript and AJAX.</p>
|
4 |
-
<h2>What is a Download Bar?</h2>
|
5 |
-
<p>A download bar is a graphical element that shows the progress of a file download from a website or an app. It usually consists of two parts: a background bar that represents the maximum size of the file, and a foreground bar that represents the current size of the file. The foreground bar grows as more data is downloaded from the server, until it reaches the end of the background bar, indicating that the download is complete.</p>
|
6 |
-
<h2>download bar html</h2><br /><p><b><b>Download</b> ===== <a href="https://urlin.us/2uSWfW">https://urlin.us/2uSWfW</a></b></p><br /><br />
|
7 |
-
<p>A download bar can also have other features, such as:</p>
|
8 |
-
<ul>
|
9 |
-
<li>A text label that shows the percentage or amount of data downloaded.</li>
|
10 |
-
<li>A cancel button that allows users to stop the download.</li>
|
11 |
-
<li>A pause button that allows users to pause and resume the download.</li>
|
12 |
-
<li>An icon or image that indicates the type or name of the file.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>Why Use a Download Bar in HTML?</h2>
|
15 |
-
<p>Using a download bar in HTML can enhance the user experience and functionality of your website or app. Some of the benefits are:</p>
|
16 |
-
<ul>
|
17 |
-
<li>It provides feedback and information to the users about the status and speed of the download, which can reduce frustration and uncertainty.</li>
|
18 |
-
<li>It allows users to control the download process, which can increase satisfaction and trust.</li>
|
19 |
-
<li>It makes your website or app more accessible and responsive, as it does not require any additional plugins or scripts to work.</li>
|
20 |
-
<li>It can improve the performance and efficiency of your website or app, as it can reduce the bandwidth and server load by allowing users to cancel or pause unnecessary downloads.</li>
|
21 |
-
</ul>
|
22 |
-
<h2>How to Use HTML5 Features to Create a Download Bar?</h2>
|
23 |
-
<p>HTML5 is the latest version of HTML, which is the standard markup language for creating web pages. HTML5 introduces many new features and elements that can help you to create a download bar in HTML with minimal code. In this section, we will show you how to use two of them: the progress element and the download attribute.</p>
|
24 |
-
<h3>The Progress Element</h3>
|
25 |
-
<p>The progress element is a new HTML5 feature that allows you to create a simple download bar with minimal code. It has two attributes: value and max, which represent the current and the maximum value of the download progress, respectively. The value attribute is optional, but if you omit it, the progress element will display an indeterminate progress bar, which means that the download progress is unknown. The max attribute is also optional, but if you omit it, it will default to 1. The value of both attributes must be a valid floating-point number.</p>
|
26 |
-
<p>Here is an example of how to use the progress element:</p>
|
27 |
-
<code><pre><!-- A download bar with a value of 0.5 and a max of 1 --> <progress value="0.5" max="1"></progress> <!-- A download bar with a value of 50 and a max of 100 --> <progress value="50" max="100"></progress> <!-- An indeterminate download bar --> <progress></progress></pre></code>
|
28 |
-
<p>You can style the progress element with CSS to change its appearance and behavior. For example, you can use the width, height, border, background, color, and font properties to modify its size, shape, color, and text. You can also use the ::-webkit-progress-bar and ::-webkit-progress-value pseudo-elements to target the background and foreground bars separately. Here is an example of how to style the progress element with CSS:</p>
|
29 |
-
<code><pre><style> /* The progress element */ progress width: 300px; height: 20px; border: none; background: lightgray; /* The background bar */ ::-webkit-progress-bar border-radius: 10px; /* The foreground bar */ ::-webkit-progress-value border-radius: 10px; background: linear-gradient(to right, green, yellow, red); /* The text label */ progress::after content: attr(value) "%"; color: white; font-weight: bold; </style></pre></code>
|
30 |
-
<h3>The Download Attribute</h3>
|
31 |
-
<p>The download attribute is another new HTML5 feature that allows you to specify that a link will trigger a file download instead of opening a new page. It has one optional attribute: value, which represents the name of the downloaded file. If you omit the value attribute, the browser will use the original name of the file. The value of the attribute must be a valid filename.</p>
|
32 |
-
<p>Here is an example of how to use the download attribute:</p>
|
33 |
-
<p>How to create a download link with HTML<br />
|
34 |
-
Progress bar download using HTML 5<br />
|
35 |
-
CSS navigation bar with download button<br />
|
36 |
-
HTML download attribute for download links<br />
|
37 |
-
How to style a download bar with CSS<br />
|
38 |
-
Download bar HTML code examples<br />
|
39 |
-
How to use JavaScript to show download progress<br />
|
40 |
-
HTML 5 download bar tutorial<br />
|
41 |
-
Best practices for download bar design<br />
|
42 |
-
How to make a responsive download bar with Bootstrap<br />
|
43 |
-
Download bar HTML templates free download<br />
|
44 |
-
How to add icons and images to download links<br />
|
45 |
-
How to use AJAX to create a dynamic download bar<br />
|
46 |
-
HTML 5 download bar animation effects<br />
|
47 |
-
How to customize the filename of the downloaded file<br />
|
48 |
-
Download bar HTML generator online<br />
|
49 |
-
How to create a download bar with jQuery UI<br />
|
50 |
-
How to handle errors and interruptions in download progress<br />
|
51 |
-
How to use SVG for download bar graphics<br />
|
52 |
-
HTML 5 download bar accessibility tips<br />
|
53 |
-
How to create a download bar with React JS<br />
|
54 |
-
How to use PHP to create a download bar<br />
|
55 |
-
How to test and debug a download bar with HTML 5<br />
|
56 |
-
How to create a download bar with Angular JS<br />
|
57 |
-
How to use CSS transitions and transformations for download bar animation<br />
|
58 |
-
Download bar HTML best practices checklist<br />
|
59 |
-
How to create a download bar with Vue JS<br />
|
60 |
-
How to use ASP.NET to create a download bar<br />
|
61 |
-
How to optimize the performance of a download bar with HTML 5<br />
|
62 |
-
How to create a download bar with WordPress<br />
|
63 |
-
How to use HTML 5 canvas for download bar graphics<br />
|
64 |
-
How to use CSS flexbox and grid for download bar layout<br />
|
65 |
-
How to create a download bar with Laravel<br />
|
66 |
-
How to use Python to create a download bar<br />
|
67 |
-
How to use HTML 5 web workers for download progress<br />
|
68 |
-
How to create a download bar with Ruby on Rails<br />
|
69 |
-
How to use CSS variables and custom properties for download bar styling<br />
|
70 |
-
How to create a download bar with Node.js<br />
|
71 |
-
How to use HTML 5 local storage and session storage for download progress<br />
|
72 |
-
How to create a download bar with Drupal</p>
|
73 |
-
<code><pre><!-- A link that will download a file named "example.pdf" --> <a href="example.pdf" download>Download Example PDF</a> <!-- A link that will download a file named "report.pdf" --> <a href="example.pdf" download="report.pdf">Download Report PDF</a></pre></code>
|
74 |
-
<p>You can use the download attribute with the progress element to create a download button with a download bar. To do this, you need to add some JavaScript code that will update the value of the progress element according to the data loaded from the server. We will show you how to do this in the next section.</p>
|
75 |
-
<h2>How to Use JavaScript and AJAX to Create a Download Bar?</h2>
|
76 |
-
<p>JavaScript is a scripting language that allows you to add interactivity and functionality to your web pages. AJAX is a technique that uses JavaScript and XML (or JSON) to communicate with a server without reloading the page. Using JavaScript and AJAX, you can create a dynamic and interactive download bar that can show real-time data from the server. In this section, we will show you how to use two of them: the XMLHttpRequest object and the Fetch API.</p>
|
77 |
-
<h3>The XMLHttpRequest Object</h3>
|
78 |
-
<p>The XMLHttpRequest object is a JavaScript object that allows you to send and receive data from a server without reloading the page. It has several properties and methods that can help you to create a dynamic and interactive download bar. One of them is the onprogress event, which fires periodically while the data is being transferred from the server. You can use the onprogress event to update the value of the progress element according to the data loaded from the server.</p>
|
79 |
-
<p>Here is an example of how to use the XMLHttpRequest object and the onprogress event:</p>
|
80 |
-
<code><pre><!-- A link that will download a file named "example.pdf" --> <a href="example.pdf" download id="download-link">Download Example PDF</a> <!-- A progress element that will show the download progress --> <progress id="download-progress" value="0" max="100"></progress> <script> // Get the link element by its id var link = document.getElementById("download-link"); // Get the progress element by its id var progress = document.getElementById("download-progress"); // Add a click event listener to the link element link.addEventListener("click", function(event) // Prevent the default behavior of the link element event.preventDefault(); // Create a new XMLHttpRequest object var xhr = new XMLHttpRequest(); // Open a GET request to the file URL xhr.open("GET", link.href, true); // Set the response type to blob, which is a binary data type xhr.responseType = "blob"; // Add an onprogress event listener to the xhr object xhr.onprogress = function(event) // Check if the event has total and loaded properties if (event.lengthComputable) // Calculate the percentage of the download progress var percent = Math.round((event.loaded / event.total) * 100); // Update the value and text of the progress element progress.value = percent; progress.innerHTML = percent + "%"; ; // Add an onload event listener to the xhr object xhr.onload = function() // Check if the status code is 200, which means OK if (xhr.status === 200) // Create a new URL object from the response blob var url = URL.createObjectURL(xhr.response); // Create a new anchor element var a = document.createElement("a"); // Set the href attribute to the blob URL a.href = url; // Set the download attribute to the file name a.download = link.download; // Append the anchor element to the document body document.body.appendChild(a); // Simulate a click on the anchor element a.click(); // Remove the anchor element from the document body document.body.removeChild(a); ; // Send the request to the server xhr.send(); ); 
</script></pre></code>
|
81 |
-
<h3>The Fetch API</h3>
|
82 |
-
<p>The Fetch API is a newer JavaScript feature that provides an easier and more modern way to fetch data from a server. It returns a promise, which is an object that represents an asynchronous operation that can either succeed or fail. You can use the then method of the promise to handle the response from the server, and the catch method to handle any errors. You can also use the body property of the response object, which is a readable stream that allows you to read chunks of data as they arrive from the server. You can use the getReader method of the body property to get a reader object, which has a read method that returns another promise with each chunk of data. You can use the read method to update the value of the progress element according to the data loaded from the server.</p>
|
83 |
-
<p>Here is an example of how to use the Fetch API and the readable stream:</p>
|
84 |
-
<code><pre><!-- A link that will download a file named "example.pdf" --> <a href="example.pdf" download id="download-link">Download Example PDF</a> <!-- A progress element that will show the download progress --> <progress id="download-progress" value="0" max="100"></progress> <script> // Get the link element by its id var link = document.getElementById("download-link"); // Get the progress element by its id var progress = document.getElementById("download-progress"); // Add a click event listener to the link element link.addEventListener("click", function(event) { // Prevent the default behavior of the link element event.preventDefault(); // Use fetch to send a GET request to JavaScript, and AJAX. These are some of the most popular and widely used web development technologies that can help you to create amazing websites and apps.</li>
|
85 |
-
<li><strong>Q: How can I get feedback or help on my download bar in HTML?</strong></li>
|
86 |
-
<li>A: You can use online platforms like <a href="">Stack Overflow</a>, <a href="">Reddit</a>, or <a href="">CodePen</a> to share your code and get feedback or help from other web developers. You can also join online communities or forums that are related to web development, such as <a href="">Dev.to</a>, <a href="">FreeCodeCamp</a>, or <a href="">CodeNewbie</a>.</li>
|
87 |
-
</ul></p> 197e85843d<br />
|
88 |
-
<br />
|
89 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Anime Go APK An Easy and Reliable App for Anime Streaming.md
DELETED
@@ -1,210 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download APK Anime Go</h1>
|
3 |
-
<p>If you are an anime fan who loves to watch your favorite shows and movies online, you might have heard of APK Anime Go. This is a popular app that allows you to stream and download thousands of anime titles on your device. But how do you download APK Anime Go? And what are its features and benefits? In this article, we will answer these questions and more. We will show you how to download APK Anime Go for Android devices and other devices using emulators. We will also guide you on how to use the app and enjoy its amazing functions. So let's get started!</p>
|
4 |
-
<h2>What is APK Anime Go?</h2>
|
5 |
-
<p>APK Anime Go is an app that lets you watch online anime videos, episodes, movies, and more. It has a huge collection of anime from various genres, categories, years, and sources. You can find anime from popular series like Naruto, One Piece, Dragon Ball, One Punch Man, etc. as well as anime from lesser-known or niche titles. You can also discover new anime based on your preferences and recommendations.</p>
|
6 |
-
<h2>download apk anime go</h2><br /><p><b><b>Download File</b> 🗹 <a href="https://jinyurl.com/2uNQAm">https://jinyurl.com/2uNQAm</a></b></p><br /><br />
|
7 |
-
<p>APK Anime Go has a simple and user-friendly interface that makes it easy to navigate and use. You can search for anime by name, genre, category, tag, or filter. You can also browse through the latest releases, trending titles, top-rated shows, etc. You can watch anime in high-quality resolution with subtitles in different languages. You can also download anime for offline viewing or save them to your favorites list.</p>
|
8 |
-
<p>APK Anime Go is a free app that does not require any registration or subscription. You can watch unlimited anime without any ads or interruptions. You can also update the app regularly to get new features and bug fixes.</p>
|
9 |
-
<h2>Why Download APK Anime Go?</h2>
|
10 |
-
<p>You might be wondering why you should download APK Anime Go when there are so many other anime streaming services and platforms available. Well, here are some reasons why APK Anime Go is better than most of them:</p>
|
11 |
-
<ul>
|
12 |
-
<li>APK Anime Go has a larger and more diverse collection of anime than most other apps. You can find anime from all genres, categories, years, sources etc.</li>
|
13 |
-
<li>APK Anime Go has <li>APK Anime Go has a faster and smoother streaming experience than most other apps. You can watch anime without any buffering, lagging, or crashing issues.</li>
|
14 |
-
<li>APK Anime Go has a more flexible and convenient downloading feature than most other apps. You can download anime in different formats, resolutions, and sizes. You can also choose the storage location and manage your downloads easily.</li>
|
15 |
-
<li>APK Anime Go has a more interactive and engaging community than most other apps. You can rate, review, comment, and share your opinions on anime with other users. You can also get feedback, suggestions, and support from the app developers and moderators.</li>
|
16 |
-
</ul>
|
17 |
-
<p>As you can see, APK Anime Go is a great app for anime lovers who want to watch and download anime on their devices. It has many advantages and benefits over other anime streaming services and platforms. So why not give it a try and see for yourself?</p>
|
18 |
-
<h2>How to Download APK Anime Go for Android Devices</h2>
|
19 |
-
<p>If you have an Android device, such as a smartphone or tablet, you can easily download APK Anime Go on your device. Here are the steps you need to follow:</p>
|
20 |
-
<h3>Requirements for Downloading APK Anime Go</h3>
|
21 |
-
<p>Before you download APK Anime Go on your Android device, you need to make sure that your device meets the following requirements:</p>
|
22 |
-
<p>download anime go app apk<br />
|
23 |
-
anime go apk free download for android<br />
|
24 |
-
anime go streaming app apk download<br />
|
25 |
-
download anime go latest version apk<br />
|
26 |
-
anime go apk download 2023<br />
|
27 |
-
how to download anime go apk<br />
|
28 |
-
anime go mod apk download<br />
|
29 |
-
anime go pro apk download<br />
|
30 |
-
download anime go apk for pc<br />
|
31 |
-
anime go apk download no ads<br />
|
32 |
-
anime go premium apk download<br />
|
33 |
-
anime go apk download uptodown<br />
|
34 |
-
anime go apk download apkpure<br />
|
35 |
-
anime go plus apk download<br />
|
36 |
-
anime go hd apk download<br />
|
37 |
-
anime go 5.0 apk download<br />
|
38 |
-
anime go 4k apk download<br />
|
39 |
-
anime go offline apk download<br />
|
40 |
-
anime go english sub apk download<br />
|
41 |
-
anime go english dub apk download<br />
|
42 |
-
best anime go apk download<br />
|
43 |
-
new anime go apk download<br />
|
44 |
-
old anime go apk download<br />
|
45 |
-
original anime go apk download<br />
|
46 |
-
safe anime go apk download<br />
|
47 |
-
secure anime go apk download<br />
|
48 |
-
trusted anime go apk download<br />
|
49 |
-
verified anime go apk download<br />
|
50 |
-
working anime go apk download<br />
|
51 |
-
updated anime go apk download<br />
|
52 |
-
fast anime go apk download<br />
|
53 |
-
easy anime go apk download<br />
|
54 |
-
simple anime go apk download<br />
|
55 |
-
quick anime go apk download<br />
|
56 |
-
direct anime go apk download<br />
|
57 |
-
unlimited anime go apk download<br />
|
58 |
-
full anime go apk download<br />
|
59 |
-
cracked anime go apk download<br />
|
60 |
-
hacked anime go apk download<br />
|
61 |
-
unlocked anime go apk download<br />
|
62 |
-
ad-free anime go apk download<br />
|
63 |
-
virus-free anime go apk download<br />
|
64 |
-
malware-free anime go apk download<br />
|
65 |
-
bug-free anime go apk download<br />
|
66 |
-
error-free anime go apk download<br />
|
67 |
-
high-quality anime go apk download<br />
|
68 |
-
low-size anime go apk download<br />
|
69 |
-
smooth anime go apk download<br />
|
70 |
-
stable anime go apk download</p>
|
71 |
-
<ul>
|
72 |
-
<li>Your device must have Android 4.1 or higher version installed.</li>
|
73 |
-
<li>Your device must have at least 100 MB of free storage space available.</li>
|
74 |
-
<li>Your device must have a stable internet connection.</li>
|
75 |
-
<li>Your device must allow the installation of apps from unknown sources. To enable this option, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
76 |
-
</ul>
|
77 |
-
<h3>Sources for Downloading APK Anime Go</h3>
|
78 |
-
<p>There are many sources where you can download APK Anime Go on your Android device. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and choose a trusted source for downloading the app. Here is a table of the best and safest sources for downloading APK Anime Go, with ratings, reviews, and URLs:</p>
|
79 |
-
<table>
|
80 |
-
<tr>
|
81 |
-
<th>Source</th>
|
82 |
-
<th>Rating</th>
|
83 |
-
<th>Review</th>
|
84 |
-
<th>URL</th>
|
85 |
-
</tr>
|
86 |
-
<tr>
|
87 |
-
<td>APKPure</td>
|
88 |
-
<td>4.8/5</td>
|
89 |
-
<td>This is one of the most popular and reputable sources for downloading APK files. It has a large database of apps and games, including APK Anime Go. It also has a fast and secure download process, with no ads or pop-ups.</td>
|
90 |
-
<td><a href="https://apkpure.com/apk-anime-go/com.apkanimego">https://apkpure.com/apk-anime-go/com.apkanimego</a></td>
|
91 |
-
</tr>
|
92 |
-
<tr>
|
93 |
-
<td>APKMirror</td>
|
94 |
-
<td>4.6/5</td>
|
95 |
-
<td>This is another well-known and trusted source for downloading APK files. It has a wide range of apps and games, including APK Anime Go. It also has a simple and user-friendly interface, with no registration or subscription required.</td>
|
96 |
-
<td><a href="https://www.apkmirror.com/apk/apk-anime-go">https://www.apkmirror.com/apk/apk-anime-go</a></td>
|
97 |
-
</tr>
|
98 |
-
<tr>
|
99 |
-
<td>Uptodown</td>
|
100 |
-
<td>4.4/5</td>
|
101 |
-
<td>This is a reliable and safe source for downloading APK files. It has a huge collection of apps and games, including APK Anime Go. It also has a fast and easy download process, with no malware or viruses.</td>
|
102 |
-
<td><a href="https://apk-anime-go.en.uptodown.com/android">https://apk-anime-go.en.uptodown.com/android</a></td>
|
103 |
-
</tr>
|
104 |
-
<tr>
|
105 |
-
<td>APKMonk</td>
|
106 |
-
<td>4.2/5</td>
|
107 |
-
<td>This is a decent source for downloading APK files. It has a good selection of apps and games, including APK Anime Go. It also has a smooth and secure download process, with no spam or fraud.</td>
|
108 |
-
<td><a href="https://www.apkmonk.com/app/com.apkanimego">https://www.apkmonk.com/app/com.apkanimego</a></td>
|
109 |
-
</tr> <h3>Steps for Downloading APK Anime Go</h3>
|
110 |
-
<p>Once you have chosen a source for downloading APK Anime Go, you can follow these steps to download and install the app on your Android device:</p>
|
111 |
-
<ol>
|
112 |
-
<li>Open the source website on your device's browser and search for APK Anime Go.</li>
|
113 |
-
<li>Click on the download button and wait for the APK file to be downloaded on your device.</li>
|
114 |
-
<li>Locate the downloaded APK file on your device's file manager and tap on it to open it.</li>
|
115 |
-
<li>Click on the install button and wait for the app to be installed on your device.</li>
|
116 |
-
<li>Once the installation is complete, you can launch the app from your device's app drawer or home screen.</li>
|
117 |
-
</ol>
|
118 |
-
<p>Congratulations! You have successfully downloaded APK Anime Go on your Android device. You can now enjoy watching and downloading anime on your device.</p>
|
119 |
-
<h2>How to Download APK Anime Go for Other Devices</h2>
|
120 |
-
<p>If you don't have an Android device, don't worry. You can still download APK Anime Go on other devices, such as Windows PC, Mac, iOS, Firestick, etc. However, you will need to use an emulator to run the app on these devices. An emulator is a software that simulates an Android environment on your device and allows you to run Android apps and games on it.</p>
|
121 |
-
<h3>Emulators for Downloading APK Anime Go</h3>
|
122 |
-
<p>There are many emulators available for different devices and platforms. However, not all of them are compatible and efficient for running APK Anime Go. Here is a list of the best and most compatible emulators for downloading APK Anime Go, with features, pros, and cons:</p>
|
123 |
-
<table>
|
124 |
-
<tr>
|
125 |
-
<th>Emulator</th>
|
126 |
-
<th>Device/Platform</th>
|
127 |
-
<th>Features</th>
|
128 |
-
<th>Pros</th>
|
129 |
-
<th>Cons</th>
|
130 |
-
</tr>
|
131 |
-
<tr>
|
132 |
-
<td>BlueStacks</td>
|
133 |
-
<td>Windows PC, Mac</td>
|
134 |
-
<td>- The most popular and widely used emulator for Android apps and games<br>- Supports high-performance gaming and streaming<br>- Has a user-friendly interface and customizable settings<br>- Has a built-in Google Play Store and App Center<br>- Has a multi-instance feature that allows you to run multiple apps at once</td>
|
135 |
-
<td>- Easy to download and install<br>- Compatible with most Android apps and games<br>- Offers a smooth and fast experience<br>- Has a large and active community of users and developers</td>
|
136 |
-
<td>- Requires a lot of RAM and CPU power<br>- May cause some compatibility issues with some apps or games<br>- May show some ads or pop-ups</td>
|
137 |
-
</tr>
|
138 |
-
<tr>
|
139 |
-
<td>NoxPlayer</td>
|
140 |
-
<td>Windows PC, Mac</td>
|
141 |
-
<td>- A powerful and stable emulator for Android apps and games<br>- Supports high-resolution gaming and streaming<br>- Has a simple and elegant interface and flexible settings<br>- Has a built-in Google Play Store and File Manager<br>- Has a keyboard and mouse mapping feature that allows you to control the app with ease</td>
|
142 |
-
<td>- Easy to download and install<br>- Compatible with most Android apps and games<br>- Offers a smooth and fast experience<br>- Has a low system requirement and consumes less RAM and CPU power</td>
|
143 |
-
<td>- May cause some lagging or crashing issues with some apps or games<br>- May show some ads or pop-ups</td>
|
144 |
-
</tr>
|
145 |
-
<tr>
|
146 |
-
<td>MEmu Play</td>
|
147 |
-
<td>Windows PC</td>
|
148 |
-
<td>- A powerful and reliable emulator for Android apps and games<br>- Supports high-performance gaming and streaming<br>- Has a user-friendly interface and customizable settings<br>- Has a built-in Google Play Store and File Manager<br>- Has a multi-instance feature that allows you to run multiple apps at once</td>
|
149 |
-
<td>- Easy to download and install<br>- Compatible with most Android apps and games<br>- Offers a smooth and fast experience<br>- Has a low system requirement and consumes less RAM and CPU power</td>
|
150 |
-
<td>- Only available for Windows PC<br>- May cause some compatibility issues with some apps or games<br>- May show some ads or pop-ups</td>
|
151 |
-
</tr> <tr>
|
152 |
-
<td>iOS Emulator</td>
|
153 |
-
<td>iOS</td>
|
154 |
-
<td>- A simple and lightweight emulator for Android apps and games<br>- Supports basic gaming and streaming<br>- Has a minimal and easy-to-use interface and settings<br>- Has a built-in Google Play Store and File Manager</td>
|
155 |
-
<td>- Easy to download and install<br>- Compatible with some Android apps and games<br>- Offers a decent and smooth experience<br>- Does not require a lot of RAM and CPU power</td>
|
156 |
-
<td>- Only available for iOS devices<br>- Does not support high-resolution gaming and streaming<br>- May cause some lagging or crashing issues with some apps or games<br>- May show some ads or pop-ups</td>
|
157 |
-
</tr>
|
158 |
-
<tr>
|
159 |
-
<td>Firestick Emulator</td>
|
160 |
-
<td>Firestick</td>
|
161 |
-
<td>- A dedicated and optimized emulator for Android apps and games on Firestick devices<br>- Supports high-quality gaming and streaming on TV screens<br>- Has a user-friendly interface and customizable settings<br>- Has a built-in Google Play Store and File Manager<br>- Has a remote control feature that allows you to control the app with your Firestick remote</td>
|
162 |
-
<td>- Easy to download and install<br>- Compatible with most Android apps and games<br>- Offers a smooth and fast experience<br>- Does not require a lot of RAM and CPU power</td>
|
163 |
-
<td>- Only available for Firestick devices<br>- May cause some compatibility issues with some apps or games<br>- May show some ads or pop-ups</td>
|
164 |
-
</tr>
|
165 |
-
</table>
|
166 |
-
<h3>Steps for Downloading APK Anime Go Using Emulators</h3>
|
167 |
-
<p>Once you have chosen an emulator for downloading APK Anime Go, you can follow these steps to download and install the app on your device using the emulator:</p>
|
168 |
-
<ol>
|
169 |
-
<li>Download and install the emulator on your device from its official website or app store.</li>
|
170 |
-
<li>Launch the emulator on your device and sign in with your Google account.</li>
|
171 |
-
<li>Open the Google Play Store or any other source website on the emulator's browser and search for APK Anime Go.</li>
|
172 |
-
<li>Click on the download button and wait for the APK file to be downloaded on the emulator.</li>
|
173 |
-
<li>Locate the downloaded APK file on the emulator's file manager and tap on it to open it.</li>
|
174 |
-
<li>Click on the install button and wait for the app to be installed on the emulator.</li>
|
175 |
-
<li>Once the installation is complete, you can launch the app from the emulator's app drawer or home screen.</li>
|
176 |
-
</ol>
|
177 |
-
<p>Congratulations! You have successfully downloaded APK Anime Go on your device using an emulator. You can now enjoy watching and downloading anime on your device.</p>
|
178 |
-
<h2>How to Use APK Anime Go</h2>
|
179 |
-
<p>Now that you have downloaded APK Anime Go on your device, you might be wondering how to use it. Don't worry, it's very easy and fun. Here are some tips on how to use the app, its interface, functions, settings, and features:</p>
|
180 |
-
<h3>How to Search for Anime</h3>
|
181 |
-
<p>If you want to find anime of your choice, you can use the search bar at the top of the app's home screen. You can type in the name of the anime or any keyword related to it. You can also use filters, categories, genres, and tags to narrow down your search results. For example, you can filter by year, season, status, type, etc. You can also browse by category, such as action, adventure, comedy, drama, etc. You can also browse by tag, such as romance, school, fantasy, horror, etc. You can also browse by source, such as Crunchyroll, Funimation, Netflix, etc.</p>
|
182 |
-
<h3>How to Watch Anime</h3>
|
183 |
-
<p>If you want to watch anime online, you can simply tap on the anime title that you want to watch. You will see a list of episodes or movies available for that anime. You can tap on the episode or movie that you want to watch. You will see a video player that will start playing the anime. You can also use various options and settings while watching anime. For example, you can play, pause, resume, skip, rewind, fast forward, adjust volume, brightness, subtitles, etc. You can also switch between different video quality options, such as 360p, 480p, 720p, 1080p etc.</p>
|
184 |
-
<h3>How to Download Anime</h3>
|
185 |
-
<p>If you want to download anime for offline viewing, you can follow these steps:</p>
|
186 |
-
<ol>
|
187 |
-
<li>Tap on the anime title that you want to download.</li>
|
188 |
-
<li>Tap on the episode or movie that you want to download.</li>
|
189 |
-
<li>Tap on the download icon at the bottom right corner of the video player.</li>
|
190 |
-
<li>Select the video quality option that you want to download.</li>
|
191 |
-
<li <li>Wait for the download to finish. You can see the progress and status of your downloads on the app's download manager.</li>
|
192 |
-
<li>Once the download is complete, you can access your downloaded anime on the app's offline library. You can watch them anytime and anywhere without an internet connection.</li>
|
193 |
-
</ol>
|
194 |
-
<p>That's how you can download anime for offline viewing using APK Anime Go. You can also choose the storage location and manage your downloads easily on the app.</p>
|
195 |
-
<h2>Conclusion</h2>
|
196 |
-
<p>In this article, we have shown you how to download APK Anime Go on your device. We have also explained what APK Anime Go is, why you should download it, and how to use it. APK Anime Go is a great app for anime lovers who want to watch and download anime on their devices. It has a huge collection of anime from various genres, categories, years, and sources. It has a simple and user-friendly interface that makes it easy to navigate and use. It has a fast and smooth streaming experience that lets you watch anime without any buffering, lagging, or crashing issues. It has a flexible and convenient downloading feature that lets you download anime in different formats, resolutions, and sizes. It has an interactive and engaging community that lets you rate, review, comment, and share your opinions on anime with other users.</p>
|
197 |
-
<p>If you are an anime fan who loves to watch your favorite shows and movies online, you should definitely download APK Anime Go on your device. It is a free app that does not require any registration or subscription. You can watch unlimited anime without any ads or interruptions. You can also update the app regularly to get new features and bug fixes.</p>
|
198 |
-
<p>So what are you waiting for? Download APK Anime Go now and enjoy watching and downloading anime on your device. You will not regret it!</p>
|
199 |
-
<p>Thank you for reading this article. We hope you found it helpful and informative. If you have any questions or feedback, please feel free to leave them in the comments section below. We would love to hear from you!</p>
|
200 |
-
<h2>FAQs</h2>
|
201 |
-
<p>Here are some frequently asked questions about APK Anime Go:</p>
|
202 |
-
<ul>
|
203 |
-
<li><b>Is APK Anime Go safe and legal?</b><br>Yes, APK Anime Go is safe and legal to use. It does not contain any viruses, malware, or spyware that can harm your device or steal your personal information. It also does not host or distribute any illegal or pirated content. It only provides links to third-party sources that host the anime videos.</li>
|
204 |
-
<li><b>Is APK Anime Go compatible with my device?</b><br>APK Anime Go is compatible with most Android devices that have Android 4.1 or higher version installed. It is also compatible with other devices, such as Windows PC, Mac, iOS, Firestick, etc., using emulators.</li>
|
205 |
-
<li><b>How do I update APK Anime Go?</b><br>You can update APK Anime Go by downloading and installing the latest version of the app from the same source that you downloaded it from. You can also check for updates on the app's settings menu.</li>
|
206 |
-
<li><b>How do I uninstall APK Anime Go?</b><br>You can uninstall APK Anime Go by going to your device's settings menu > apps > APK Anime Go > uninstall. You can also delete the app's icon from your device's app drawer or home screen.</li>
|
207 |
-
<li><b>How do I contact APK Anime Go?</b><br>You can contact APK Anime Go by sending an email to [email protected]. You can also visit their official website at https://apkanimego.com/ or follow them on their social media accounts at https://www.facebook.com/apkanimego/ and https://twitter.com/apkanimego/.</li>
|
208 |
-
</ul></p> 401be4b1e0<br />
|
209 |
-
<br />
|
210 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2023Liu2023/bingo/src/components/learn-more.tsx
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
import React from 'react'
|
2 |
-
import { SourceAttribution } from '@/lib/bots/bing/types'
|
3 |
-
|
4 |
-
export interface LearnMoreProps {
|
5 |
-
sourceAttributions?: SourceAttribution[]
|
6 |
-
}
|
7 |
-
|
8 |
-
export function LearnMore({ sourceAttributions }: LearnMoreProps) {
|
9 |
-
if (!sourceAttributions?.length) {
|
10 |
-
return null
|
11 |
-
}
|
12 |
-
|
13 |
-
return (
|
14 |
-
<div className="learn-more-root" role="list" aria-label="了解详细信息:">
|
15 |
-
<div className="learn-more">了解详细信息:</div>
|
16 |
-
<div className="attribution-container">
|
17 |
-
<div className="attribution-items">
|
18 |
-
{sourceAttributions.map((attribution, index) => {
|
19 |
-
const { providerDisplayName, seeMoreUrl } = attribution
|
20 |
-
const { host } = new URL(seeMoreUrl)
|
21 |
-
return (
|
22 |
-
<a
|
23 |
-
key={index}
|
24 |
-
className="attribution-item"
|
25 |
-
target="_blank"
|
26 |
-
role="listitem"
|
27 |
-
href={seeMoreUrl}
|
28 |
-
title={providerDisplayName}
|
29 |
-
tabIndex={index}
|
30 |
-
>
|
31 |
-
{index + 1}. {host}
|
32 |
-
</a>
|
33 |
-
)
|
34 |
-
})}
|
35 |
-
</div>
|
36 |
-
</div>
|
37 |
-
</div>
|
38 |
-
)
|
39 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/preset/PresetError.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
class PresetError(Exception):
|
2 |
-
pass
|
|
|
|
|
|
spaces/A00001/bingothoo/src/lib/bots/bing/types.ts
DELETED
@@ -1,259 +0,0 @@
|
|
1 |
-
export type Author = 'user' | 'system' | 'bot'
|
2 |
-
|
3 |
-
export type BotId = 'bing'
|
4 |
-
|
5 |
-
export enum BingConversationStyle {
|
6 |
-
Creative = 'Creative',
|
7 |
-
Balanced = 'Balanced',
|
8 |
-
Precise = 'Precise'
|
9 |
-
}
|
10 |
-
|
11 |
-
export enum ErrorCode {
|
12 |
-
CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
|
13 |
-
BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
|
14 |
-
BING_FORBIDDEN = 'BING_FORBIDDEN',
|
15 |
-
BING_CAPTCHA = 'BING_CAPTCHA',
|
16 |
-
THROTTLE_LIMIT = 'THROTTLE_LIMIT',
|
17 |
-
NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
|
18 |
-
UNKOWN_ERROR = 'UNKOWN_ERROR',
|
19 |
-
NETWORK_ERROR = 'NETWORK_ERROR',
|
20 |
-
}
|
21 |
-
|
22 |
-
export class ChatError extends Error {
|
23 |
-
code: ErrorCode
|
24 |
-
constructor(message: string, code: ErrorCode) {
|
25 |
-
super(message)
|
26 |
-
this.code = code
|
27 |
-
}
|
28 |
-
}
|
29 |
-
|
30 |
-
export type ChatMessageModel = {
|
31 |
-
id: string
|
32 |
-
author: Author
|
33 |
-
text: string
|
34 |
-
error?: ChatError
|
35 |
-
throttling?: Throttling
|
36 |
-
sourceAttributions?: SourceAttribution[]
|
37 |
-
suggestedResponses?: SuggestedResponse[]
|
38 |
-
}
|
39 |
-
|
40 |
-
export interface ConversationModel {
|
41 |
-
messages: ChatMessageModel[]
|
42 |
-
}
|
43 |
-
|
44 |
-
export type Event =
|
45 |
-
| {
|
46 |
-
type: 'UPDATE_ANSWER'
|
47 |
-
data: {
|
48 |
-
text: string
|
49 |
-
spokenText?: string
|
50 |
-
sourceAttributions?: SourceAttribution[]
|
51 |
-
suggestedResponses?: SuggestedResponse[]
|
52 |
-
throttling?: Throttling
|
53 |
-
}
|
54 |
-
}
|
55 |
-
| {
|
56 |
-
type: 'DONE'
|
57 |
-
}
|
58 |
-
| {
|
59 |
-
type: 'ERROR'
|
60 |
-
error: ChatError
|
61 |
-
}
|
62 |
-
|
63 |
-
export interface SendMessageParams<T> {
|
64 |
-
prompt: string
|
65 |
-
imageUrl?: string
|
66 |
-
options: T
|
67 |
-
onEvent: (event: Event) => void
|
68 |
-
signal?: AbortSignal
|
69 |
-
}
|
70 |
-
|
71 |
-
export interface ConversationResponse {
|
72 |
-
conversationId: string
|
73 |
-
clientId: string
|
74 |
-
conversationSignature: string
|
75 |
-
result: {
|
76 |
-
value: string
|
77 |
-
message?: string
|
78 |
-
}
|
79 |
-
}
|
80 |
-
|
81 |
-
export interface Telemetry {
|
82 |
-
metrics?: null
|
83 |
-
startTime: string
|
84 |
-
}
|
85 |
-
|
86 |
-
export interface ChatUpdateArgument {
|
87 |
-
messages?: ChatResponseMessage[]
|
88 |
-
throttling?: Throttling
|
89 |
-
requestId: string
|
90 |
-
result: null
|
91 |
-
}
|
92 |
-
|
93 |
-
export type ChatUpdateCompleteResponse = {
|
94 |
-
type: 2
|
95 |
-
invocationId: string
|
96 |
-
item: ChatResponseItem
|
97 |
-
} | {
|
98 |
-
type: 1
|
99 |
-
target: string
|
100 |
-
arguments: ChatUpdateArgument[]
|
101 |
-
} | {
|
102 |
-
type: 3
|
103 |
-
invocationId: string
|
104 |
-
} | {
|
105 |
-
type: 6 | 7
|
106 |
-
}
|
107 |
-
|
108 |
-
export interface ChatRequestResult {
|
109 |
-
value: string
|
110 |
-
serviceVersion: string
|
111 |
-
error?: string
|
112 |
-
}
|
113 |
-
|
114 |
-
export interface ChatResponseItem {
|
115 |
-
messages: ChatResponseMessage[]
|
116 |
-
firstNewMessageIndex: number
|
117 |
-
suggestedResponses: null
|
118 |
-
conversationId: string
|
119 |
-
requestId: string
|
120 |
-
conversationExpiryTime: string
|
121 |
-
telemetry: Telemetry
|
122 |
-
result: ChatRequestResult
|
123 |
-
throttling: Throttling
|
124 |
-
}
|
125 |
-
export enum InvocationEventType {
|
126 |
-
Invocation = 1,
|
127 |
-
StreamItem = 2,
|
128 |
-
Completion = 3,
|
129 |
-
StreamInvocation = 4,
|
130 |
-
CancelInvocation = 5,
|
131 |
-
Ping = 6,
|
132 |
-
Close = 7,
|
133 |
-
}
|
134 |
-
|
135 |
-
// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
|
136 |
-
|
137 |
-
export interface ConversationInfo {
|
138 |
-
conversationId: string
|
139 |
-
clientId: string
|
140 |
-
conversationSignature: string
|
141 |
-
invocationId: number
|
142 |
-
conversationStyle: BingConversationStyle
|
143 |
-
prompt: string
|
144 |
-
imageUrl?: string
|
145 |
-
}
|
146 |
-
|
147 |
-
export interface BingChatResponse {
|
148 |
-
conversationSignature: string
|
149 |
-
conversationId: string
|
150 |
-
clientId: string
|
151 |
-
invocationId: number
|
152 |
-
conversationExpiryTime: Date
|
153 |
-
response: string
|
154 |
-
details: ChatResponseMessage
|
155 |
-
}
|
156 |
-
|
157 |
-
export interface Throttling {
|
158 |
-
maxNumLongDocSummaryUserMessagesInConversation: number
|
159 |
-
maxNumUserMessagesInConversation: number
|
160 |
-
numLongDocSummaryUserMessagesInConversation: number
|
161 |
-
numUserMessagesInConversation: number
|
162 |
-
}
|
163 |
-
|
164 |
-
export interface ChatResponseMessage {
|
165 |
-
text: string
|
166 |
-
spokenText?: string
|
167 |
-
author: string
|
168 |
-
createdAt: Date
|
169 |
-
timestamp: Date
|
170 |
-
messageId: string
|
171 |
-
requestId: string
|
172 |
-
offense: string
|
173 |
-
adaptiveCards: AdaptiveCard[]
|
174 |
-
sourceAttributions: SourceAttribution[]
|
175 |
-
feedback: Feedback
|
176 |
-
contentOrigin: string
|
177 |
-
messageType?: string
|
178 |
-
contentType?: string
|
179 |
-
privacy: null
|
180 |
-
suggestedResponses: SuggestedResponse[]
|
181 |
-
}
|
182 |
-
|
183 |
-
export interface AdaptiveCard {
|
184 |
-
type: string
|
185 |
-
version: string
|
186 |
-
body: Body[]
|
187 |
-
}
|
188 |
-
|
189 |
-
export interface Body {
|
190 |
-
type: string
|
191 |
-
text: string
|
192 |
-
wrap: boolean
|
193 |
-
size?: string
|
194 |
-
}
|
195 |
-
|
196 |
-
export interface Feedback {
|
197 |
-
tag: null
|
198 |
-
updatedOn: null
|
199 |
-
type: string
|
200 |
-
}
|
201 |
-
|
202 |
-
export interface SourceAttribution {
|
203 |
-
providerDisplayName: string
|
204 |
-
seeMoreUrl: string
|
205 |
-
searchQuery: string
|
206 |
-
}
|
207 |
-
|
208 |
-
export interface SuggestedResponse {
|
209 |
-
text: string
|
210 |
-
author?: Author
|
211 |
-
createdAt?: Date
|
212 |
-
timestamp?: Date
|
213 |
-
messageId?: string
|
214 |
-
messageType?: string
|
215 |
-
offense?: string
|
216 |
-
feedback?: Feedback
|
217 |
-
contentOrigin?: string
|
218 |
-
privacy?: null
|
219 |
-
}
|
220 |
-
|
221 |
-
export interface KBlobRequest {
|
222 |
-
knowledgeRequest: KnowledgeRequestContext
|
223 |
-
imageBase64?: string
|
224 |
-
}
|
225 |
-
|
226 |
-
export interface KBlobResponse {
|
227 |
-
blobId: string
|
228 |
-
processedBlobId?: string
|
229 |
-
}
|
230 |
-
|
231 |
-
export interface KnowledgeRequestContext {
|
232 |
-
imageInfo: ImageInfo;
|
233 |
-
knowledgeRequest: KnowledgeRequest;
|
234 |
-
}
|
235 |
-
|
236 |
-
export interface ImageInfo {
|
237 |
-
url?: string;
|
238 |
-
}
|
239 |
-
|
240 |
-
export interface KnowledgeRequest {
|
241 |
-
invokedSkills: string[];
|
242 |
-
subscriptionId: string;
|
243 |
-
invokedSkillsRequestData: InvokedSkillsRequestData;
|
244 |
-
convoData: ConvoData;
|
245 |
-
}
|
246 |
-
|
247 |
-
export interface ConvoData {
|
248 |
-
convoid: string;
|
249 |
-
convotone: BingConversationStyle;
|
250 |
-
}
|
251 |
-
|
252 |
-
export interface InvokedSkillsRequestData {
|
253 |
-
enableFaceBlur: boolean;
|
254 |
-
}
|
255 |
-
|
256 |
-
export interface FileItem {
|
257 |
-
url: string;
|
258 |
-
status?: 'loading' | 'error' | 'loaded'
|
259 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/layers/residual_block.py
DELETED
@@ -1,129 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
"""Residual block module in WaveNet.
|
4 |
-
|
5 |
-
This code is modified from https://github.com/r9y9/wavenet_vocoder.
|
6 |
-
|
7 |
-
"""
|
8 |
-
|
9 |
-
import math
|
10 |
-
|
11 |
-
import torch
|
12 |
-
import torch.nn.functional as F
|
13 |
-
|
14 |
-
|
15 |
-
class Conv1d(torch.nn.Conv1d):
|
16 |
-
"""Conv1d module with customized initialization."""
|
17 |
-
|
18 |
-
def __init__(self, *args, **kwargs):
|
19 |
-
"""Initialize Conv1d module."""
|
20 |
-
super(Conv1d, self).__init__(*args, **kwargs)
|
21 |
-
|
22 |
-
def reset_parameters(self):
|
23 |
-
"""Reset parameters."""
|
24 |
-
torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
|
25 |
-
if self.bias is not None:
|
26 |
-
torch.nn.init.constant_(self.bias, 0.0)
|
27 |
-
|
28 |
-
|
29 |
-
class Conv1d1x1(Conv1d):
|
30 |
-
"""1x1 Conv1d with customized initialization."""
|
31 |
-
|
32 |
-
def __init__(self, in_channels, out_channels, bias):
|
33 |
-
"""Initialize 1x1 Conv1d module."""
|
34 |
-
super(Conv1d1x1, self).__init__(in_channels, out_channels,
|
35 |
-
kernel_size=1, padding=0,
|
36 |
-
dilation=1, bias=bias)
|
37 |
-
|
38 |
-
|
39 |
-
class ResidualBlock(torch.nn.Module):
|
40 |
-
"""Residual block module in WaveNet."""
|
41 |
-
|
42 |
-
def __init__(self,
|
43 |
-
kernel_size=3,
|
44 |
-
residual_channels=64,
|
45 |
-
gate_channels=128,
|
46 |
-
skip_channels=64,
|
47 |
-
aux_channels=80,
|
48 |
-
dropout=0.0,
|
49 |
-
dilation=1,
|
50 |
-
bias=True,
|
51 |
-
use_causal_conv=False
|
52 |
-
):
|
53 |
-
"""Initialize ResidualBlock module.
|
54 |
-
|
55 |
-
Args:
|
56 |
-
kernel_size (int): Kernel size of dilation convolution layer.
|
57 |
-
residual_channels (int): Number of channels for residual connection.
|
58 |
-
skip_channels (int): Number of channels for skip connection.
|
59 |
-
aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
|
60 |
-
dropout (float): Dropout probability.
|
61 |
-
dilation (int): Dilation factor.
|
62 |
-
bias (bool): Whether to add bias parameter in convolution layers.
|
63 |
-
use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution.
|
64 |
-
|
65 |
-
"""
|
66 |
-
super(ResidualBlock, self).__init__()
|
67 |
-
self.dropout = dropout
|
68 |
-
# no future time stamps available
|
69 |
-
if use_causal_conv:
|
70 |
-
padding = (kernel_size - 1) * dilation
|
71 |
-
else:
|
72 |
-
assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
|
73 |
-
padding = (kernel_size - 1) // 2 * dilation
|
74 |
-
self.use_causal_conv = use_causal_conv
|
75 |
-
|
76 |
-
# dilation conv
|
77 |
-
self.conv = Conv1d(residual_channels, gate_channels, kernel_size,
|
78 |
-
padding=padding, dilation=dilation, bias=bias)
|
79 |
-
|
80 |
-
# local conditioning
|
81 |
-
if aux_channels > 0:
|
82 |
-
self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
|
83 |
-
else:
|
84 |
-
self.conv1x1_aux = None
|
85 |
-
|
86 |
-
# conv output is split into two groups
|
87 |
-
gate_out_channels = gate_channels // 2
|
88 |
-
self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias)
|
89 |
-
self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias)
|
90 |
-
|
91 |
-
def forward(self, x, c):
|
92 |
-
"""Calculate forward propagation.
|
93 |
-
|
94 |
-
Args:
|
95 |
-
x (Tensor): Input tensor (B, residual_channels, T).
|
96 |
-
c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).
|
97 |
-
|
98 |
-
Returns:
|
99 |
-
Tensor: Output tensor for residual connection (B, residual_channels, T).
|
100 |
-
Tensor: Output tensor for skip connection (B, skip_channels, T).
|
101 |
-
|
102 |
-
"""
|
103 |
-
residual = x
|
104 |
-
x = F.dropout(x, p=self.dropout, training=self.training)
|
105 |
-
x = self.conv(x)
|
106 |
-
|
107 |
-
# remove future time steps if use_causal_conv conv
|
108 |
-
x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x
|
109 |
-
|
110 |
-
# split into two part for gated activation
|
111 |
-
splitdim = 1
|
112 |
-
xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim)
|
113 |
-
|
114 |
-
# local conditioning
|
115 |
-
if c is not None:
|
116 |
-
assert self.conv1x1_aux is not None
|
117 |
-
c = self.conv1x1_aux(c)
|
118 |
-
ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
|
119 |
-
xa, xb = xa + ca, xb + cb
|
120 |
-
|
121 |
-
x = torch.tanh(xa) * torch.sigmoid(xb)
|
122 |
-
|
123 |
-
# for skip connection
|
124 |
-
s = self.conv1x1_skip(x)
|
125 |
-
|
126 |
-
# for residual connection
|
127 |
-
x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5)
|
128 |
-
|
129 |
-
return x, s
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/plms.py
DELETED
@@ -1,236 +0,0 @@
|
|
1 |
-
"""SAMPLING ONLY."""
|
2 |
-
|
3 |
-
import torch
|
4 |
-
import numpy as np
|
5 |
-
from tqdm import tqdm
|
6 |
-
from functools import partial
|
7 |
-
|
8 |
-
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
|
9 |
-
|
10 |
-
|
11 |
-
class PLMSSampler(object):
|
12 |
-
def __init__(self, model, schedule="linear", **kwargs):
|
13 |
-
super().__init__()
|
14 |
-
self.model = model
|
15 |
-
self.ddpm_num_timesteps = model.num_timesteps
|
16 |
-
self.schedule = schedule
|
17 |
-
|
18 |
-
def register_buffer(self, name, attr):
|
19 |
-
if type(attr) == torch.Tensor:
|
20 |
-
if attr.device != torch.device("cuda"):
|
21 |
-
attr = attr.to(torch.device("cuda"))
|
22 |
-
setattr(self, name, attr)
|
23 |
-
|
24 |
-
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
|
25 |
-
if ddim_eta != 0:
|
26 |
-
raise ValueError('ddim_eta must be 0 for PLMS')
|
27 |
-
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
|
28 |
-
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
|
29 |
-
alphas_cumprod = self.model.alphas_cumprod
|
30 |
-
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
|
31 |
-
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
|
32 |
-
|
33 |
-
self.register_buffer('betas', to_torch(self.model.betas))
|
34 |
-
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
|
35 |
-
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
|
36 |
-
|
37 |
-
# calculations for diffusion q(x_t | x_{t-1}) and others
|
38 |
-
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
|
39 |
-
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
|
40 |
-
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
|
41 |
-
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
|
42 |
-
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
|
43 |
-
|
44 |
-
# ddim sampling parameters
|
45 |
-
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
|
46 |
-
ddim_timesteps=self.ddim_timesteps,
|
47 |
-
eta=ddim_eta,verbose=verbose)
|
48 |
-
self.register_buffer('ddim_sigmas', ddim_sigmas)
|
49 |
-
self.register_buffer('ddim_alphas', ddim_alphas)
|
50 |
-
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
|
51 |
-
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
|
52 |
-
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
|
53 |
-
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
|
54 |
-
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
|
55 |
-
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
|
56 |
-
|
57 |
-
@torch.no_grad()
|
58 |
-
def sample(self,
|
59 |
-
S,
|
60 |
-
batch_size,
|
61 |
-
shape,
|
62 |
-
conditioning=None,
|
63 |
-
callback=None,
|
64 |
-
normals_sequence=None,
|
65 |
-
img_callback=None,
|
66 |
-
quantize_x0=False,
|
67 |
-
eta=0.,
|
68 |
-
mask=None,
|
69 |
-
x0=None,
|
70 |
-
temperature=1.,
|
71 |
-
noise_dropout=0.,
|
72 |
-
score_corrector=None,
|
73 |
-
corrector_kwargs=None,
|
74 |
-
verbose=True,
|
75 |
-
x_T=None,
|
76 |
-
log_every_t=100,
|
77 |
-
unconditional_guidance_scale=1.,
|
78 |
-
unconditional_conditioning=None,
|
79 |
-
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
|
80 |
-
**kwargs
|
81 |
-
):
|
82 |
-
if conditioning is not None:
|
83 |
-
if isinstance(conditioning, dict):
|
84 |
-
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
|
85 |
-
if cbs != batch_size:
|
86 |
-
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
|
87 |
-
else:
|
88 |
-
if conditioning.shape[0] != batch_size:
|
89 |
-
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
|
90 |
-
|
91 |
-
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
|
92 |
-
# sampling
|
93 |
-
C, H, W = shape
|
94 |
-
size = (batch_size, C, H, W)
|
95 |
-
print(f'Data shape for PLMS sampling is {size}')
|
96 |
-
|
97 |
-
samples, intermediates = self.plms_sampling(conditioning, size,
|
98 |
-
callback=callback,
|
99 |
-
img_callback=img_callback,
|
100 |
-
quantize_denoised=quantize_x0,
|
101 |
-
mask=mask, x0=x0,
|
102 |
-
ddim_use_original_steps=False,
|
103 |
-
noise_dropout=noise_dropout,
|
104 |
-
temperature=temperature,
|
105 |
-
score_corrector=score_corrector,
|
106 |
-
corrector_kwargs=corrector_kwargs,
|
107 |
-
x_T=x_T,
|
108 |
-
log_every_t=log_every_t,
|
109 |
-
unconditional_guidance_scale=unconditional_guidance_scale,
|
110 |
-
unconditional_conditioning=unconditional_conditioning,
|
111 |
-
)
|
112 |
-
return samples, intermediates
|
113 |
-
|
114 |
-
@torch.no_grad()
|
115 |
-
def plms_sampling(self, cond, shape,
|
116 |
-
x_T=None, ddim_use_original_steps=False,
|
117 |
-
callback=None, timesteps=None, quantize_denoised=False,
|
118 |
-
mask=None, x0=None, img_callback=None, log_every_t=100,
|
119 |
-
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
|
120 |
-
unconditional_guidance_scale=1., unconditional_conditioning=None,):
|
121 |
-
device = self.model.betas.device
|
122 |
-
b = shape[0]
|
123 |
-
if x_T is None:
|
124 |
-
img = torch.randn(shape, device=device)
|
125 |
-
else:
|
126 |
-
img = x_T
|
127 |
-
|
128 |
-
if timesteps is None:
|
129 |
-
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
|
130 |
-
elif timesteps is not None and not ddim_use_original_steps:
|
131 |
-
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
|
132 |
-
timesteps = self.ddim_timesteps[:subset_end]
|
133 |
-
|
134 |
-
intermediates = {'x_inter': [img], 'pred_x0': [img]}
|
135 |
-
time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
|
136 |
-
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
|
137 |
-
print(f"Running PLMS Sampling with {total_steps} timesteps")
|
138 |
-
|
139 |
-
iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
|
140 |
-
old_eps = []
|
141 |
-
|
142 |
-
for i, step in enumerate(iterator):
|
143 |
-
index = total_steps - i - 1
|
144 |
-
ts = torch.full((b,), step, device=device, dtype=torch.long)
|
145 |
-
ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
|
146 |
-
|
147 |
-
if mask is not None:
|
148 |
-
assert x0 is not None
|
149 |
-
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
|
150 |
-
img = img_orig * mask + (1. - mask) * img
|
151 |
-
|
152 |
-
outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
|
153 |
-
quantize_denoised=quantize_denoised, temperature=temperature,
|
154 |
-
noise_dropout=noise_dropout, score_corrector=score_corrector,
|
155 |
-
corrector_kwargs=corrector_kwargs,
|
156 |
-
unconditional_guidance_scale=unconditional_guidance_scale,
|
157 |
-
unconditional_conditioning=unconditional_conditioning,
|
158 |
-
old_eps=old_eps, t_next=ts_next)
|
159 |
-
img, pred_x0, e_t = outs
|
160 |
-
old_eps.append(e_t)
|
161 |
-
if len(old_eps) >= 4:
|
162 |
-
old_eps.pop(0)
|
163 |
-
if callback: callback(i)
|
164 |
-
if img_callback: img_callback(pred_x0, i)
|
165 |
-
|
166 |
-
if index % log_every_t == 0 or index == total_steps - 1:
|
167 |
-
intermediates['x_inter'].append(img)
|
168 |
-
intermediates['pred_x0'].append(pred_x0)
|
169 |
-
|
170 |
-
return img, intermediates
|
171 |
-
|
172 |
-
@torch.no_grad()
|
173 |
-
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
|
174 |
-
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
|
175 |
-
unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
|
176 |
-
b, *_, device = *x.shape, x.device
|
177 |
-
|
178 |
-
def get_model_output(x, t):
|
179 |
-
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
|
180 |
-
e_t = self.model.apply_model(x, t, c)
|
181 |
-
else:
|
182 |
-
x_in = torch.cat([x] * 2)
|
183 |
-
t_in = torch.cat([t] * 2)
|
184 |
-
c_in = torch.cat([unconditional_conditioning, c])
|
185 |
-
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
|
186 |
-
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
|
187 |
-
|
188 |
-
if score_corrector is not None:
|
189 |
-
assert self.model.parameterization == "eps"
|
190 |
-
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
|
191 |
-
|
192 |
-
return e_t
|
193 |
-
|
194 |
-
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
|
195 |
-
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
|
196 |
-
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
|
197 |
-
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
|
198 |
-
|
199 |
-
def get_x_prev_and_pred_x0(e_t, index):
|
200 |
-
# select parameters corresponding to the currently considered timestep
|
201 |
-
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
|
202 |
-
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
|
203 |
-
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
|
204 |
-
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
|
205 |
-
|
206 |
-
# current prediction for x_0
|
207 |
-
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
|
208 |
-
if quantize_denoised:
|
209 |
-
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
|
210 |
-
# direction pointing to x_t
|
211 |
-
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
|
212 |
-
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
|
213 |
-
if noise_dropout > 0.:
|
214 |
-
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
|
215 |
-
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
|
216 |
-
return x_prev, pred_x0
|
217 |
-
|
218 |
-
e_t = get_model_output(x, t)
|
219 |
-
if len(old_eps) == 0:
|
220 |
-
# Pseudo Improved Euler (2nd order)
|
221 |
-
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
|
222 |
-
e_t_next = get_model_output(x_prev, t_next)
|
223 |
-
e_t_prime = (e_t + e_t_next) / 2
|
224 |
-
elif len(old_eps) == 1:
|
225 |
-
# 2nd order Pseudo Linear Multistep (Adams-Bashforth)
|
226 |
-
e_t_prime = (3 * e_t - old_eps[-1]) / 2
|
227 |
-
elif len(old_eps) == 2:
|
228 |
-
# 3nd order Pseudo Linear Multistep (Adams-Bashforth)
|
229 |
-
e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
|
230 |
-
elif len(old_eps) >= 3:
|
231 |
-
# 4nd order Pseudo Linear Multistep (Adams-Bashforth)
|
232 |
-
e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
|
233 |
-
|
234 |
-
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
|
235 |
-
|
236 |
-
return x_prev, pred_x0, e_t
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AISuperheroes/10SL-RealTimeDSDashboard-Live-AIUIUX/app.py
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
import time # to simulate a real time data, time loop
|
2 |
-
|
3 |
-
import numpy as np # np mean, np random
|
4 |
-
import pandas as pd # read csv, df manipulation
|
5 |
-
import plotly.express as px # interactive charts
|
6 |
-
import streamlit as st # 🎈 data web app development
|
7 |
-
|
8 |
-
st.set_page_config(
|
9 |
-
page_title="Real-Time Data Science Dashboard",
|
10 |
-
page_icon="✅",
|
11 |
-
layout="wide",
|
12 |
-
)
|
13 |
-
|
14 |
-
# read csv from a github repo
|
15 |
-
dataset_url = "https://raw.githubusercontent.com/Lexie88rus/bank-marketing-analysis/master/bank.csv"
|
16 |
-
|
17 |
-
# read csv from a URL
|
18 |
-
@st.experimental_memo
|
19 |
-
def get_data() -> pd.DataFrame:
|
20 |
-
return pd.read_csv(dataset_url)
|
21 |
-
|
22 |
-
df = get_data()
|
23 |
-
|
24 |
-
# dashboard title
|
25 |
-
st.title("Real-Time / Live Data Science Dashboard")
|
26 |
-
|
27 |
-
# top-level filters
|
28 |
-
job_filter = st.selectbox("Select the Job", pd.unique(df["job"]))
|
29 |
-
|
30 |
-
# creating a single-element container
|
31 |
-
placeholder = st.empty()
|
32 |
-
|
33 |
-
# dataframe filter
|
34 |
-
df = df[df["job"] == job_filter]
|
35 |
-
|
36 |
-
# near real-time / live feed simulation
|
37 |
-
for seconds in range(200):
|
38 |
-
|
39 |
-
df["age_new"] = df["age"] * np.random.choice(range(1, 5))
|
40 |
-
df["balance_new"] = df["balance"] * np.random.choice(range(1, 5))
|
41 |
-
|
42 |
-
# creating KPIs
|
43 |
-
avg_age = np.mean(df["age_new"])
|
44 |
-
|
45 |
-
count_married = int(
|
46 |
-
df[(df["marital"] == "married")]["marital"].count()
|
47 |
-
+ np.random.choice(range(1, 30))
|
48 |
-
)
|
49 |
-
|
50 |
-
balance = np.mean(df["balance_new"])
|
51 |
-
|
52 |
-
with placeholder.container():
|
53 |
-
|
54 |
-
# create three columns
|
55 |
-
kpi1, kpi2, kpi3 = st.columns(3)
|
56 |
-
|
57 |
-
# fill in those three columns with respective metrics or KPIs
|
58 |
-
kpi1.metric(
|
59 |
-
label="Age ⏳",
|
60 |
-
value=round(avg_age),
|
61 |
-
delta=round(avg_age) - 10,
|
62 |
-
)
|
63 |
-
|
64 |
-
kpi2.metric(
|
65 |
-
label="Married Count 💍",
|
66 |
-
value=int(count_married),
|
67 |
-
delta=-10 + count_married,
|
68 |
-
)
|
69 |
-
|
70 |
-
kpi3.metric(
|
71 |
-
label="A/C Balance $",
|
72 |
-
value=f"$ {round(balance,2)} ",
|
73 |
-
delta=-round(balance / count_married) * 100,
|
74 |
-
)
|
75 |
-
|
76 |
-
# create two columns for charts
|
77 |
-
fig_col1, fig_col2 = st.columns(2)
|
78 |
-
with fig_col1:
|
79 |
-
st.markdown("### First Chart")
|
80 |
-
fig = px.density_heatmap(
|
81 |
-
data_frame=df, y="age_new", x="marital"
|
82 |
-
)
|
83 |
-
st.write(fig)
|
84 |
-
|
85 |
-
with fig_col2:
|
86 |
-
st.markdown("### Second Chart")
|
87 |
-
fig2 = px.histogram(data_frame=df, x="age_new")
|
88 |
-
st.write(fig2)
|
89 |
-
|
90 |
-
st.markdown("### Detailed Data View")
|
91 |
-
st.dataframe(df)
|
92 |
-
time.sleep(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/utils.py
DELETED
@@ -1,72 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
|
5 |
-
skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10],
|
6 |
-
[1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]]
|
7 |
-
|
8 |
-
pose_kpt_color = [[51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0],
|
9 |
-
[255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0],
|
10 |
-
[0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0]]
|
11 |
-
|
12 |
-
pose_link_color = [[0, 255, 0], [0, 255, 0], [255, 128, 0], [255, 128, 0],
|
13 |
-
[51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0], [255, 128, 0],
|
14 |
-
[0, 255, 0], [255, 128, 0], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255],
|
15 |
-
[51, 153, 255], [51, 153, 255], [51, 153, 255]]
|
16 |
-
|
17 |
-
|
18 |
-
def imshow_keypoints(img,
|
19 |
-
pose_result,
|
20 |
-
kpt_score_thr=0.1,
|
21 |
-
radius=2,
|
22 |
-
thickness=2):
|
23 |
-
"""Draw keypoints and links on an image.
|
24 |
-
|
25 |
-
Args:
|
26 |
-
img (ndarry): The image to draw poses on.
|
27 |
-
pose_result (list[kpts]): The poses to draw. Each element kpts is
|
28 |
-
a set of K keypoints as an Kx3 numpy.ndarray, where each
|
29 |
-
keypoint is represented as x, y, score.
|
30 |
-
kpt_score_thr (float, optional): Minimum score of keypoints
|
31 |
-
to be shown. Default: 0.3.
|
32 |
-
thickness (int): Thickness of lines.
|
33 |
-
"""
|
34 |
-
|
35 |
-
img_h, img_w, _ = img.shape
|
36 |
-
img = np.zeros(img.shape)
|
37 |
-
|
38 |
-
for idx, kpts in enumerate(pose_result):
|
39 |
-
if idx > 1:
|
40 |
-
continue
|
41 |
-
kpts = kpts['keypoints']
|
42 |
-
# print(kpts)
|
43 |
-
kpts = np.array(kpts, copy=False)
|
44 |
-
|
45 |
-
# draw each point on image
|
46 |
-
assert len(pose_kpt_color) == len(kpts)
|
47 |
-
|
48 |
-
for kid, kpt in enumerate(kpts):
|
49 |
-
x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
|
50 |
-
|
51 |
-
if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
|
52 |
-
# skip the point that should not be drawn
|
53 |
-
continue
|
54 |
-
|
55 |
-
color = tuple(int(c) for c in pose_kpt_color[kid])
|
56 |
-
cv2.circle(img, (int(x_coord), int(y_coord)), radius, color, -1)
|
57 |
-
|
58 |
-
# draw links
|
59 |
-
|
60 |
-
for sk_id, sk in enumerate(skeleton):
|
61 |
-
pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
|
62 |
-
pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
|
63 |
-
|
64 |
-
if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 or pos1[1] >= img_h or pos2[0] <= 0
|
65 |
-
or pos2[0] >= img_w or pos2[1] <= 0 or pos2[1] >= img_h or kpts[sk[0], 2] < kpt_score_thr
|
66 |
-
or kpts[sk[1], 2] < kpt_score_thr or pose_link_color[sk_id] is None):
|
67 |
-
# skip the link that should not be drawn
|
68 |
-
continue
|
69 |
-
color = tuple(int(c) for c in pose_link_color[sk_id])
|
70 |
-
cv2.line(img, pos1, pos2, color, thickness=thickness)
|
71 |
-
|
72 |
-
return img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aloento/9Nine-PITS/mel_processing.py
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
# from https://github.com/jaywalnut310/vits
|
2 |
-
import torch
|
3 |
-
import torch.utils.data
|
4 |
-
from librosa.filters import mel as librosa_mel_fn
|
5 |
-
from torch.cuda.amp import autocast
|
6 |
-
|
7 |
-
|
8 |
-
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
|
9 |
-
"""
|
10 |
-
PARAMS
|
11 |
-
------
|
12 |
-
C: compression factor
|
13 |
-
"""
|
14 |
-
return torch.log(torch.clamp(x, min=clip_val) * C)
|
15 |
-
|
16 |
-
|
17 |
-
def dynamic_range_decompression_torch(x, C=1):
|
18 |
-
"""
|
19 |
-
PARAMS
|
20 |
-
------
|
21 |
-
C: compression factor used to compress
|
22 |
-
"""
|
23 |
-
return torch.exp(x) / C
|
24 |
-
|
25 |
-
|
26 |
-
def spectral_normalize_torch(magnitudes):
|
27 |
-
output = dynamic_range_compression_torch(magnitudes)
|
28 |
-
return output
|
29 |
-
|
30 |
-
|
31 |
-
def spectral_de_normalize_torch(magnitudes):
|
32 |
-
output = dynamic_range_decompression_torch(magnitudes)
|
33 |
-
return output
|
34 |
-
|
35 |
-
|
36 |
-
mel_basis = {}
|
37 |
-
hann_window = {}
|
38 |
-
|
39 |
-
|
40 |
-
def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
|
41 |
-
if torch.min(y) < -1.:
|
42 |
-
print('min value is ', torch.min(y))
|
43 |
-
if torch.max(y) > 1.:
|
44 |
-
print('max value is ', torch.max(y))
|
45 |
-
|
46 |
-
global hann_window
|
47 |
-
dtype_device = str(y.dtype) + '_' + str(y.device)
|
48 |
-
wnsize_dtype_device = str(win_size) + '_' + dtype_device
|
49 |
-
if wnsize_dtype_device not in hann_window:
|
50 |
-
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
|
51 |
-
|
52 |
-
y = torch.nn.functional.pad(y.unsqueeze(1), (
|
53 |
-
int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
|
54 |
-
y = y.squeeze(1)
|
55 |
-
with autocast(enabled=False):
|
56 |
-
y = y.float()
|
57 |
-
spec = torch.stft(
|
58 |
-
y,
|
59 |
-
n_fft,
|
60 |
-
hop_length=hop_size,
|
61 |
-
win_length=win_size,
|
62 |
-
window=hann_window[wnsize_dtype_device],
|
63 |
-
center=center,
|
64 |
-
pad_mode='reflect',
|
65 |
-
normalized=False,
|
66 |
-
onesided=True
|
67 |
-
)
|
68 |
-
|
69 |
-
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
|
70 |
-
return spec
|
71 |
-
|
72 |
-
|
73 |
-
def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
|
74 |
-
global mel_basis
|
75 |
-
dtype_device = str(spec.dtype) + '_' + str(spec.device)
|
76 |
-
fmax_dtype_device = str(fmax) + '_' + dtype_device
|
77 |
-
if fmax_dtype_device not in mel_basis:
|
78 |
-
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
|
79 |
-
mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
|
80 |
-
spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
|
81 |
-
spec = spectral_normalize_torch(spec)
|
82 |
-
return spec
|
83 |
-
|
84 |
-
|
85 |
-
def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
|
86 |
-
if torch.min(y) < -1.:
|
87 |
-
print('min value is ', torch.min(y))
|
88 |
-
if torch.max(y) > 1.:
|
89 |
-
print('max value is ', torch.max(y))
|
90 |
-
|
91 |
-
global mel_basis, hann_window
|
92 |
-
dtype_device = str(y.dtype) + '_' + str(y.device)
|
93 |
-
fmax_dtype_device = str(fmax) + '_' + dtype_device
|
94 |
-
wnsize_dtype_device = str(win_size) + '_' + dtype_device
|
95 |
-
if fmax_dtype_device not in mel_basis:
|
96 |
-
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
|
97 |
-
mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
|
98 |
-
if wnsize_dtype_device not in hann_window:
|
99 |
-
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
|
100 |
-
|
101 |
-
y = torch.nn.functional.pad(y.unsqueeze(1), (
|
102 |
-
int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect')
|
103 |
-
y = y.squeeze(1)
|
104 |
-
with autocast(enabled=False):
|
105 |
-
y = y.float()
|
106 |
-
spec = torch.stft(
|
107 |
-
y,
|
108 |
-
n_fft,
|
109 |
-
hop_length=hop_size,
|
110 |
-
win_length=win_size,
|
111 |
-
window=hann_window[wnsize_dtype_device],
|
112 |
-
center=center,
|
113 |
-
pad_mode='reflect',
|
114 |
-
normalized=False,
|
115 |
-
onesided=True
|
116 |
-
)
|
117 |
-
|
118 |
-
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
|
119 |
-
|
120 |
-
spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
|
121 |
-
spec = spectral_normalize_torch(spec)
|
122 |
-
|
123 |
-
return spec
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/Inference.py
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
from manipulate import Manipulator
|
4 |
-
import tensorflow as tf
|
5 |
-
import numpy as np
|
6 |
-
import torch
|
7 |
-
import clip
|
8 |
-
from MapTS import GetBoundary,GetDt
|
9 |
-
|
10 |
-
class StyleCLIP():
|
11 |
-
|
12 |
-
def __init__(self,dataset_name='ffhq'):
|
13 |
-
print('load clip')
|
14 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
15 |
-
self.model, preprocess = clip.load("ViT-B/32", device=device)
|
16 |
-
self.LoadData(dataset_name)
|
17 |
-
|
18 |
-
def LoadData(self, dataset_name):
|
19 |
-
tf.keras.backend.clear_session()
|
20 |
-
M=Manipulator(dataset_name=dataset_name)
|
21 |
-
np.set_printoptions(suppress=True)
|
22 |
-
fs3=np.load('./npy/'+dataset_name+'/fs3.npy')
|
23 |
-
|
24 |
-
self.M=M
|
25 |
-
self.fs3=fs3
|
26 |
-
|
27 |
-
w_plus=np.load('./data/'+dataset_name+'/w_plus.npy')
|
28 |
-
self.M.dlatents=M.W2S(w_plus)
|
29 |
-
|
30 |
-
if dataset_name=='ffhq':
|
31 |
-
self.c_threshold=20
|
32 |
-
else:
|
33 |
-
self.c_threshold=100
|
34 |
-
self.SetInitP()
|
35 |
-
|
36 |
-
def SetInitP(self):
|
37 |
-
self.M.alpha=[3]
|
38 |
-
self.M.num_images=1
|
39 |
-
|
40 |
-
self.target=''
|
41 |
-
self.neutral=''
|
42 |
-
self.GetDt2()
|
43 |
-
img_index=0
|
44 |
-
self.M.dlatent_tmp=[tmp[img_index:(img_index+1)] for tmp in self.M.dlatents]
|
45 |
-
|
46 |
-
|
47 |
-
def GetDt2(self):
|
48 |
-
classnames=[self.target,self.neutral]
|
49 |
-
dt=GetDt(classnames,self.model)
|
50 |
-
|
51 |
-
self.dt=dt
|
52 |
-
num_cs=[]
|
53 |
-
betas=np.arange(0.1,0.3,0.01)
|
54 |
-
for i in range(len(betas)):
|
55 |
-
boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=betas[i])
|
56 |
-
print(betas[i])
|
57 |
-
num_cs.append(num_c)
|
58 |
-
|
59 |
-
num_cs=np.array(num_cs)
|
60 |
-
select=num_cs>self.c_threshold
|
61 |
-
|
62 |
-
if sum(select)==0:
|
63 |
-
self.beta=0.1
|
64 |
-
else:
|
65 |
-
self.beta=betas[select][-1]
|
66 |
-
|
67 |
-
|
68 |
-
def GetCode(self):
|
69 |
-
boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=self.beta)
|
70 |
-
codes=self.M.MSCode(self.M.dlatent_tmp,boundary_tmp2)
|
71 |
-
return codes
|
72 |
-
|
73 |
-
def GetImg(self):
|
74 |
-
|
75 |
-
codes=self.GetCode()
|
76 |
-
out=self.M.GenerateImg(codes)
|
77 |
-
img=out[0,0]
|
78 |
-
return img
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
#%%
|
84 |
-
if __name__ == "__main__":
|
85 |
-
style_clip=StyleCLIP()
|
86 |
-
self=style_clip
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/tfutil.py
DELETED
@@ -1,262 +0,0 @@
|
|
1 |
-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Miscellaneous helper utils for Tensorflow."""
|
10 |
-
|
11 |
-
import os
|
12 |
-
import numpy as np
|
13 |
-
import tensorflow as tf
|
14 |
-
|
15 |
-
# Silence deprecation warnings from TensorFlow 1.13 onwards
|
16 |
-
import logging
|
17 |
-
logging.getLogger('tensorflow').setLevel(logging.ERROR)
|
18 |
-
import tensorflow.contrib # requires TensorFlow 1.x!
|
19 |
-
tf.contrib = tensorflow.contrib
|
20 |
-
|
21 |
-
from typing import Any, Iterable, List, Union
|
22 |
-
|
23 |
-
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
|
24 |
-
"""A type that represents a valid Tensorflow expression."""
|
25 |
-
|
26 |
-
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
|
27 |
-
"""A type that can be converted to a valid Tensorflow expression."""
|
28 |
-
|
29 |
-
|
30 |
-
def run(*args, **kwargs) -> Any:
|
31 |
-
"""Run the specified ops in the default session."""
|
32 |
-
assert_tf_initialized()
|
33 |
-
return tf.get_default_session().run(*args, **kwargs)
|
34 |
-
|
35 |
-
|
36 |
-
def is_tf_expression(x: Any) -> bool:
|
37 |
-
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
|
38 |
-
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
|
39 |
-
|
40 |
-
|
41 |
-
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
|
42 |
-
"""Convert a Tensorflow shape to a list of ints. Retained for backwards compatibility -- use TensorShape.as_list() in new code."""
|
43 |
-
return [dim.value for dim in shape]
|
44 |
-
|
45 |
-
|
46 |
-
def flatten(x: TfExpressionEx) -> TfExpression:
|
47 |
-
"""Shortcut function for flattening a tensor."""
|
48 |
-
with tf.name_scope("Flatten"):
|
49 |
-
return tf.reshape(x, [-1])
|
50 |
-
|
51 |
-
|
52 |
-
def log2(x: TfExpressionEx) -> TfExpression:
|
53 |
-
"""Logarithm in base 2."""
|
54 |
-
with tf.name_scope("Log2"):
|
55 |
-
return tf.log(x) * np.float32(1.0 / np.log(2.0))
|
56 |
-
|
57 |
-
|
58 |
-
def exp2(x: TfExpressionEx) -> TfExpression:
|
59 |
-
"""Exponent in base 2."""
|
60 |
-
with tf.name_scope("Exp2"):
|
61 |
-
return tf.exp(x * np.float32(np.log(2.0)))
|
62 |
-
|
63 |
-
|
64 |
-
def erfinv(y: TfExpressionEx) -> TfExpression:
|
65 |
-
"""Inverse of the error function."""
|
66 |
-
# pylint: disable=no-name-in-module
|
67 |
-
from tensorflow.python.ops.distributions import special_math
|
68 |
-
return special_math.erfinv(y)
|
69 |
-
|
70 |
-
|
71 |
-
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
|
72 |
-
"""Linear interpolation."""
|
73 |
-
with tf.name_scope("Lerp"):
|
74 |
-
return a + (b - a) * t
|
75 |
-
|
76 |
-
|
77 |
-
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
|
78 |
-
"""Linear interpolation with clip."""
|
79 |
-
with tf.name_scope("LerpClip"):
|
80 |
-
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
|
81 |
-
|
82 |
-
|
83 |
-
def absolute_name_scope(scope: str) -> tf.name_scope:
|
84 |
-
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
|
85 |
-
return tf.name_scope(scope + "/")
|
86 |
-
|
87 |
-
|
88 |
-
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
|
89 |
-
"""Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
|
90 |
-
return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
|
91 |
-
|
92 |
-
|
93 |
-
def _sanitize_tf_config(config_dict: dict = None) -> dict:
|
94 |
-
# Defaults.
|
95 |
-
cfg = dict()
|
96 |
-
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
|
97 |
-
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
|
98 |
-
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
|
99 |
-
cfg["env.HDF5_USE_FILE_LOCKING"] = "FALSE" # Disable HDF5 file locking to avoid concurrency issues with network shares.
|
100 |
-
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
|
101 |
-
cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
|
102 |
-
|
103 |
-
# Remove defaults for environment variables that are already set.
|
104 |
-
for key in list(cfg):
|
105 |
-
fields = key.split(".")
|
106 |
-
if fields[0] == "env":
|
107 |
-
assert len(fields) == 2
|
108 |
-
if fields[1] in os.environ:
|
109 |
-
del cfg[key]
|
110 |
-
|
111 |
-
# User overrides.
|
112 |
-
if config_dict is not None:
|
113 |
-
cfg.update(config_dict)
|
114 |
-
return cfg
|
115 |
-
|
116 |
-
|
117 |
-
def init_tf(config_dict: dict = None) -> None:
|
118 |
-
"""Initialize TensorFlow session using good default settings."""
|
119 |
-
# Skip if already initialized.
|
120 |
-
if tf.get_default_session() is not None:
|
121 |
-
return
|
122 |
-
|
123 |
-
# Setup config dict and random seeds.
|
124 |
-
cfg = _sanitize_tf_config(config_dict)
|
125 |
-
np_random_seed = cfg["rnd.np_random_seed"]
|
126 |
-
if np_random_seed is not None:
|
127 |
-
np.random.seed(np_random_seed)
|
128 |
-
tf_random_seed = cfg["rnd.tf_random_seed"]
|
129 |
-
if tf_random_seed == "auto":
|
130 |
-
tf_random_seed = np.random.randint(1 << 31)
|
131 |
-
if tf_random_seed is not None:
|
132 |
-
tf.set_random_seed(tf_random_seed)
|
133 |
-
|
134 |
-
# Setup environment variables.
|
135 |
-
for key, value in cfg.items():
|
136 |
-
fields = key.split(".")
|
137 |
-
if fields[0] == "env":
|
138 |
-
assert len(fields) == 2
|
139 |
-
os.environ[fields[1]] = str(value)
|
140 |
-
|
141 |
-
# Create default TensorFlow session.
|
142 |
-
create_session(cfg, force_as_default=True)
|
143 |
-
|
144 |
-
|
145 |
-
def assert_tf_initialized():
|
146 |
-
"""Check that TensorFlow session has been initialized."""
|
147 |
-
if tf.get_default_session() is None:
|
148 |
-
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
|
149 |
-
|
150 |
-
|
151 |
-
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
|
152 |
-
"""Create tf.Session based on config dict."""
|
153 |
-
# Setup TensorFlow config proto.
|
154 |
-
cfg = _sanitize_tf_config(config_dict)
|
155 |
-
config_proto = tf.ConfigProto()
|
156 |
-
for key, value in cfg.items():
|
157 |
-
fields = key.split(".")
|
158 |
-
if fields[0] not in ["rnd", "env"]:
|
159 |
-
obj = config_proto
|
160 |
-
for field in fields[:-1]:
|
161 |
-
obj = getattr(obj, field)
|
162 |
-
setattr(obj, fields[-1], value)
|
163 |
-
|
164 |
-
# Create session.
|
165 |
-
session = tf.Session(config=config_proto)
|
166 |
-
if force_as_default:
|
167 |
-
# pylint: disable=protected-access
|
168 |
-
session._default_session = session.as_default()
|
169 |
-
session._default_session.enforce_nesting = False
|
170 |
-
session._default_session.__enter__()
|
171 |
-
return session
|
172 |
-
|
173 |
-
|
174 |
-
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
|
175 |
-
"""Initialize all tf.Variables that have not already been initialized.
|
176 |
-
|
177 |
-
Equivalent to the following, but more efficient and does not bloat the tf graph:
|
178 |
-
tf.variables_initializer(tf.report_uninitialized_variables()).run()
|
179 |
-
"""
|
180 |
-
assert_tf_initialized()
|
181 |
-
if target_vars is None:
|
182 |
-
target_vars = tf.global_variables()
|
183 |
-
|
184 |
-
test_vars = []
|
185 |
-
test_ops = []
|
186 |
-
|
187 |
-
with tf.control_dependencies(None): # ignore surrounding control_dependencies
|
188 |
-
for var in target_vars:
|
189 |
-
assert is_tf_expression(var)
|
190 |
-
|
191 |
-
try:
|
192 |
-
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
|
193 |
-
except KeyError:
|
194 |
-
# Op does not exist => variable may be uninitialized.
|
195 |
-
test_vars.append(var)
|
196 |
-
|
197 |
-
with absolute_name_scope(var.name.split(":")[0]):
|
198 |
-
test_ops.append(tf.is_variable_initialized(var))
|
199 |
-
|
200 |
-
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
|
201 |
-
run([var.initializer for var in init_vars])
|
202 |
-
|
203 |
-
|
204 |
-
def set_vars(var_to_value_dict: dict) -> None:
|
205 |
-
"""Set the values of given tf.Variables.
|
206 |
-
|
207 |
-
Equivalent to the following, but more efficient and does not bloat the tf graph:
|
208 |
-
tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()]
|
209 |
-
"""
|
210 |
-
assert_tf_initialized()
|
211 |
-
ops = []
|
212 |
-
feed_dict = {}
|
213 |
-
|
214 |
-
for var, value in var_to_value_dict.items():
|
215 |
-
assert is_tf_expression(var)
|
216 |
-
|
217 |
-
try:
|
218 |
-
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
|
219 |
-
except KeyError:
|
220 |
-
with absolute_name_scope(var.name.split(":")[0]):
|
221 |
-
with tf.control_dependencies(None): # ignore surrounding control_dependencies
|
222 |
-
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
|
223 |
-
|
224 |
-
ops.append(setter)
|
225 |
-
feed_dict[setter.op.inputs[1]] = value
|
226 |
-
|
227 |
-
run(ops, feed_dict)
|
228 |
-
|
229 |
-
|
230 |
-
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
|
231 |
-
"""Create tf.Variable with large initial value without bloating the tf graph."""
|
232 |
-
assert_tf_initialized()
|
233 |
-
assert isinstance(initial_value, np.ndarray)
|
234 |
-
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
|
235 |
-
var = tf.Variable(zeros, *args, **kwargs)
|
236 |
-
set_vars({var: initial_value})
|
237 |
-
return var
|
238 |
-
|
239 |
-
|
240 |
-
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
|
241 |
-
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
|
242 |
-
Can be used as an input transformation for Network.run().
|
243 |
-
"""
|
244 |
-
images = tf.cast(images, tf.float32)
|
245 |
-
if nhwc_to_nchw:
|
246 |
-
images = tf.transpose(images, [0, 3, 1, 2])
|
247 |
-
return images * ((drange[1] - drange[0]) / 255) + drange[0]
|
248 |
-
|
249 |
-
|
250 |
-
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
|
251 |
-
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
|
252 |
-
Can be used as an output transformation for Network.run().
|
253 |
-
"""
|
254 |
-
images = tf.cast(images, tf.float32)
|
255 |
-
if shrink > 1:
|
256 |
-
ksize = [1, 1, shrink, shrink]
|
257 |
-
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
|
258 |
-
if nchw_to_nhwc:
|
259 |
-
images = tf.transpose(images, [0, 2, 3, 1])
|
260 |
-
scale = 255 / (drange[1] - drange[0])
|
261 |
-
images = images * scale + (0.5 - drange[0] * scale)
|
262 |
-
return tf.saturate_cast(images, tf.uint8)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/styleclip_mapper.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
from models.StyleCLIP.mapper import latent_mappers
|
4 |
-
from models.StyleCLIP.models.stylegan2.model import Generator
|
5 |
-
|
6 |
-
|
7 |
-
def get_keys(d, name):
|
8 |
-
if 'state_dict' in d:
|
9 |
-
d = d['state_dict']
|
10 |
-
d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
|
11 |
-
return d_filt
|
12 |
-
|
13 |
-
|
14 |
-
class StyleCLIPMapper(nn.Module):
|
15 |
-
|
16 |
-
def __init__(self, opts, run_id):
|
17 |
-
super(StyleCLIPMapper, self).__init__()
|
18 |
-
self.opts = opts
|
19 |
-
# Define architecture
|
20 |
-
self.mapper = self.set_mapper()
|
21 |
-
self.run_id = run_id
|
22 |
-
|
23 |
-
self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
|
24 |
-
# Load weights if needed
|
25 |
-
self.load_weights()
|
26 |
-
|
27 |
-
def set_mapper(self):
|
28 |
-
if self.opts.mapper_type == 'SingleMapper':
|
29 |
-
mapper = latent_mappers.SingleMapper(self.opts)
|
30 |
-
elif self.opts.mapper_type == 'LevelsMapper':
|
31 |
-
mapper = latent_mappers.LevelsMapper(self.opts)
|
32 |
-
else:
|
33 |
-
raise Exception('{} is not a valid mapper'.format(self.opts.mapper_type))
|
34 |
-
return mapper
|
35 |
-
|
36 |
-
def load_weights(self):
|
37 |
-
if self.opts.checkpoint_path is not None:
|
38 |
-
print('Loading from checkpoint: {}'.format(self.opts.checkpoint_path))
|
39 |
-
ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
|
40 |
-
self.mapper.load_state_dict(get_keys(ckpt, 'mapper'), strict=True)
|
41 |
-
|
42 |
-
def set_G(self, new_G):
|
43 |
-
self.decoder = new_G
|
44 |
-
|
45 |
-
def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,
|
46 |
-
inject_latent=None, return_latents=False, alpha=None):
|
47 |
-
if input_code:
|
48 |
-
codes = x
|
49 |
-
else:
|
50 |
-
codes = self.mapper(x)
|
51 |
-
|
52 |
-
if latent_mask is not None:
|
53 |
-
for i in latent_mask:
|
54 |
-
if inject_latent is not None:
|
55 |
-
if alpha is not None:
|
56 |
-
codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
|
57 |
-
else:
|
58 |
-
codes[:, i] = inject_latent[:, i]
|
59 |
-
else:
|
60 |
-
codes[:, i] = 0
|
61 |
-
|
62 |
-
input_is_latent = not input_code
|
63 |
-
images = self.decoder.synthesis(codes, noise_mode='const')
|
64 |
-
result_latent = None
|
65 |
-
# images, result_latent = self.decoder([codes],
|
66 |
-
# input_is_latent=input_is_latent,
|
67 |
-
# randomize_noise=randomize_noise,
|
68 |
-
# return_latents=return_latents)
|
69 |
-
|
70 |
-
if resize:
|
71 |
-
images = self.face_pool(images)
|
72 |
-
|
73 |
-
if return_latents:
|
74 |
-
return images, result_latent
|
75 |
-
else:
|
76 |
-
return images
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/emilyalsentzer/Bio_ClinicalBERT").launch()
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_diffedit.py
DELETED
@@ -1,1510 +0,0 @@
|
|
1 |
-
# Copyright 2023 DiffEdit Authors and Pix2Pix Zero Authors and The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import inspect
|
16 |
-
import warnings
|
17 |
-
from dataclasses import dataclass
|
18 |
-
from typing import Any, Callable, Dict, List, Optional, Union
|
19 |
-
|
20 |
-
import numpy as np
|
21 |
-
import PIL
|
22 |
-
import torch
|
23 |
-
from packaging import version
|
24 |
-
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
25 |
-
|
26 |
-
from ...configuration_utils import FrozenDict
|
27 |
-
from ...image_processor import VaeImageProcessor
|
28 |
-
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
|
29 |
-
from ...models import AutoencoderKL, UNet2DConditionModel
|
30 |
-
from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers
|
31 |
-
from ...utils import (
|
32 |
-
PIL_INTERPOLATION,
|
33 |
-
BaseOutput,
|
34 |
-
deprecate,
|
35 |
-
is_accelerate_available,
|
36 |
-
is_accelerate_version,
|
37 |
-
logging,
|
38 |
-
randn_tensor,
|
39 |
-
replace_example_docstring,
|
40 |
-
)
|
41 |
-
from ..pipeline_utils import DiffusionPipeline
|
42 |
-
from . import StableDiffusionPipelineOutput
|
43 |
-
from .safety_checker import StableDiffusionSafetyChecker
|
44 |
-
|
45 |
-
|
46 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
47 |
-
|
48 |
-
|
49 |
-
@dataclass
|
50 |
-
class DiffEditInversionPipelineOutput(BaseOutput):
|
51 |
-
"""
|
52 |
-
Output class for Stable Diffusion pipelines.
|
53 |
-
|
54 |
-
Args:
|
55 |
-
latents (`torch.FloatTensor`)
|
56 |
-
inverted latents tensor
|
57 |
-
images (`List[PIL.Image.Image]` or `np.ndarray`)
|
58 |
-
List of denoised PIL images of length `num_timesteps * batch_size` or numpy array of shape `(num_timesteps,
|
59 |
-
batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the
|
60 |
-
diffusion pipeline.
|
61 |
-
"""
|
62 |
-
|
63 |
-
latents: torch.FloatTensor
|
64 |
-
images: Union[List[PIL.Image.Image], np.ndarray]
|
65 |
-
|
66 |
-
|
67 |
-
EXAMPLE_DOC_STRING = """
|
68 |
-
|
69 |
-
```py
|
70 |
-
>>> import PIL
|
71 |
-
>>> import requests
|
72 |
-
>>> import torch
|
73 |
-
>>> from io import BytesIO
|
74 |
-
|
75 |
-
>>> from diffusers import StableDiffusionDiffEditPipeline
|
76 |
-
|
77 |
-
|
78 |
-
>>> def download_image(url):
|
79 |
-
... response = requests.get(url)
|
80 |
-
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
81 |
-
|
82 |
-
|
83 |
-
>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
|
84 |
-
|
85 |
-
>>> init_image = download_image(img_url).resize((768, 768))
|
86 |
-
|
87 |
-
>>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
|
88 |
-
... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
|
89 |
-
... )
|
90 |
-
>>> pipe = pipe.to("cuda")
|
91 |
-
|
92 |
-
>>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
|
93 |
-
>>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
|
94 |
-
>>> pipeline.enable_model_cpu_offload()
|
95 |
-
|
96 |
-
>>> mask_prompt = "A bowl of fruits"
|
97 |
-
>>> prompt = "A bowl of pears"
|
98 |
-
|
99 |
-
>>> mask_image = pipe.generate_mask(image=init_image, source_prompt=prompt, target_prompt=mask_prompt)
|
100 |
-
>>> image_latents = pipe.invert(image=init_image, prompt=mask_prompt).latents
|
101 |
-
>>> image = pipe(prompt=prompt, mask_image=mask_image, image_latents=image_latents).images[0]
|
102 |
-
```
|
103 |
-
"""
|
104 |
-
|
105 |
-
EXAMPLE_INVERT_DOC_STRING = """
|
106 |
-
```py
|
107 |
-
>>> import PIL
|
108 |
-
>>> import requests
|
109 |
-
>>> import torch
|
110 |
-
>>> from io import BytesIO
|
111 |
-
|
112 |
-
>>> from diffusers import StableDiffusionDiffEditPipeline
|
113 |
-
|
114 |
-
|
115 |
-
>>> def download_image(url):
|
116 |
-
... response = requests.get(url)
|
117 |
-
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
|
118 |
-
|
119 |
-
|
120 |
-
>>> img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png"
|
121 |
-
|
122 |
-
>>> init_image = download_image(img_url).resize((768, 768))
|
123 |
-
|
124 |
-
>>> pipe = StableDiffusionDiffEditPipeline.from_pretrained(
|
125 |
-
... "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
|
126 |
-
... )
|
127 |
-
>>> pipe = pipe.to("cuda")
|
128 |
-
|
129 |
-
>>> pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
|
130 |
-
>>> pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config)
|
131 |
-
>>> pipeline.enable_model_cpu_offload()
|
132 |
-
|
133 |
-
>>> prompt = "A bowl of fruits"
|
134 |
-
|
135 |
-
>>> inverted_latents = pipe.invert(image=init_image, prompt=prompt).latents
|
136 |
-
```
|
137 |
-
"""
|
138 |
-
|
139 |
-
|
140 |
-
def auto_corr_loss(hidden_states, generator=None):
|
141 |
-
reg_loss = 0.0
|
142 |
-
for i in range(hidden_states.shape[0]):
|
143 |
-
for j in range(hidden_states.shape[1]):
|
144 |
-
noise = hidden_states[i : i + 1, j : j + 1, :, :]
|
145 |
-
while True:
|
146 |
-
roll_amount = torch.randint(noise.shape[2] // 2, (1,), generator=generator).item()
|
147 |
-
reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=2)).mean() ** 2
|
148 |
-
reg_loss += (noise * torch.roll(noise, shifts=roll_amount, dims=3)).mean() ** 2
|
149 |
-
|
150 |
-
if noise.shape[2] <= 8:
|
151 |
-
break
|
152 |
-
noise = torch.nn.functional.avg_pool2d(noise, kernel_size=2)
|
153 |
-
return reg_loss
|
154 |
-
|
155 |
-
|
156 |
-
def kl_divergence(hidden_states):
|
157 |
-
return hidden_states.var() + hidden_states.mean() ** 2 - 1 - torch.log(hidden_states.var() + 1e-7)
|
158 |
-
|
159 |
-
|
160 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
|
161 |
-
def preprocess(image):
|
162 |
-
warnings.warn(
|
163 |
-
"The preprocess method is deprecated and will be removed in a future version. Please"
|
164 |
-
" use VaeImageProcessor.preprocess instead",
|
165 |
-
FutureWarning,
|
166 |
-
)
|
167 |
-
if isinstance(image, torch.Tensor):
|
168 |
-
return image
|
169 |
-
elif isinstance(image, PIL.Image.Image):
|
170 |
-
image = [image]
|
171 |
-
|
172 |
-
if isinstance(image[0], PIL.Image.Image):
|
173 |
-
w, h = image[0].size
|
174 |
-
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
175 |
-
|
176 |
-
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
|
177 |
-
image = np.concatenate(image, axis=0)
|
178 |
-
image = np.array(image).astype(np.float32) / 255.0
|
179 |
-
image = image.transpose(0, 3, 1, 2)
|
180 |
-
image = 2.0 * image - 1.0
|
181 |
-
image = torch.from_numpy(image)
|
182 |
-
elif isinstance(image[0], torch.Tensor):
|
183 |
-
image = torch.cat(image, dim=0)
|
184 |
-
return image
|
185 |
-
|
186 |
-
|
187 |
-
def preprocess_mask(mask, batch_size: int = 1):
|
188 |
-
if not isinstance(mask, torch.Tensor):
|
189 |
-
# preprocess mask
|
190 |
-
if isinstance(mask, PIL.Image.Image) or isinstance(mask, np.ndarray):
|
191 |
-
mask = [mask]
|
192 |
-
|
193 |
-
if isinstance(mask, list):
|
194 |
-
if isinstance(mask[0], PIL.Image.Image):
|
195 |
-
mask = [np.array(m.convert("L")).astype(np.float32) / 255.0 for m in mask]
|
196 |
-
if isinstance(mask[0], np.ndarray):
|
197 |
-
mask = np.stack(mask, axis=0) if mask[0].ndim < 3 else np.concatenate(mask, axis=0)
|
198 |
-
mask = torch.from_numpy(mask)
|
199 |
-
elif isinstance(mask[0], torch.Tensor):
|
200 |
-
mask = torch.stack(mask, dim=0) if mask[0].ndim < 3 else torch.cat(mask, dim=0)
|
201 |
-
|
202 |
-
# Batch and add channel dim for single mask
|
203 |
-
if mask.ndim == 2:
|
204 |
-
mask = mask.unsqueeze(0).unsqueeze(0)
|
205 |
-
|
206 |
-
# Batch single mask or add channel dim
|
207 |
-
if mask.ndim == 3:
|
208 |
-
# Single batched mask, no channel dim or single mask not batched but channel dim
|
209 |
-
if mask.shape[0] == 1:
|
210 |
-
mask = mask.unsqueeze(0)
|
211 |
-
|
212 |
-
# Batched masks no channel dim
|
213 |
-
else:
|
214 |
-
mask = mask.unsqueeze(1)
|
215 |
-
|
216 |
-
# Check mask shape
|
217 |
-
if batch_size > 1:
|
218 |
-
if mask.shape[0] == 1:
|
219 |
-
mask = torch.cat([mask] * batch_size)
|
220 |
-
elif mask.shape[0] > 1 and mask.shape[0] != batch_size:
|
221 |
-
raise ValueError(
|
222 |
-
f"`mask_image` with batch size {mask.shape[0]} cannot be broadcasted to batch size {batch_size} "
|
223 |
-
f"inferred by prompt inputs"
|
224 |
-
)
|
225 |
-
|
226 |
-
if mask.shape[1] != 1:
|
227 |
-
raise ValueError(f"`mask_image` must have 1 channel, but has {mask.shape[1]} channels")
|
228 |
-
|
229 |
-
# Check mask is in [0, 1]
|
230 |
-
if mask.min() < 0 or mask.max() > 1:
|
231 |
-
raise ValueError("`mask_image` should be in [0, 1] range")
|
232 |
-
|
233 |
-
# Binarize mask
|
234 |
-
mask[mask < 0.5] = 0
|
235 |
-
mask[mask >= 0.5] = 1
|
236 |
-
|
237 |
-
return mask
|
238 |
-
|
239 |
-
|
240 |
-
class StableDiffusionDiffEditPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
|
241 |
-
r"""
|
242 |
-
<Tip warning={true}>
|
243 |
-
|
244 |
-
This is an experimental feature!
|
245 |
-
|
246 |
-
</Tip>
|
247 |
-
|
248 |
-
Pipeline for text-guided image inpainting using Stable Diffusion and DiffEdit.
|
249 |
-
|
250 |
-
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
251 |
-
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
252 |
-
|
253 |
-
In addition the pipeline inherits the following loading methods:
|
254 |
-
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
|
255 |
-
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
|
256 |
-
|
257 |
-
as well as the following saving methods:
|
258 |
-
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
|
259 |
-
|
260 |
-
Args:
|
261 |
-
vae ([`AutoencoderKL`]):
|
262 |
-
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
263 |
-
text_encoder ([`CLIPTextModel`]):
|
264 |
-
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
265 |
-
tokenizer (`CLIPTokenizer`):
|
266 |
-
A [`~transformers.CLIPTokenizer`] to tokenize text.
|
267 |
-
unet ([`UNet2DConditionModel`]):
|
268 |
-
A [`UNet2DConditionModel`] to denoise the encoded image latents.
|
269 |
-
scheduler ([`SchedulerMixin`]):
|
270 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
|
271 |
-
inverse_scheduler (`[DDIMInverseScheduler]`):
|
272 |
-
A scheduler to be used in combination with `unet` to fill in the unmasked part of the input latents.
|
273 |
-
safety_checker ([`StableDiffusionSafetyChecker`]):
|
274 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
275 |
-
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
276 |
-
about a model's potential harms.
|
277 |
-
feature_extractor ([`CLIPImageProcessor`]):
|
278 |
-
A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`.
|
279 |
-
"""
|
280 |
-
_optional_components = ["safety_checker", "feature_extractor", "inverse_scheduler"]
|
281 |
-
|
282 |
-
def __init__(
|
283 |
-
self,
|
284 |
-
vae: AutoencoderKL,
|
285 |
-
text_encoder: CLIPTextModel,
|
286 |
-
tokenizer: CLIPTokenizer,
|
287 |
-
unet: UNet2DConditionModel,
|
288 |
-
scheduler: KarrasDiffusionSchedulers,
|
289 |
-
safety_checker: StableDiffusionSafetyChecker,
|
290 |
-
feature_extractor: CLIPImageProcessor,
|
291 |
-
inverse_scheduler: DDIMInverseScheduler,
|
292 |
-
requires_safety_checker: bool = True,
|
293 |
-
):
|
294 |
-
super().__init__()
|
295 |
-
|
296 |
-
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
|
297 |
-
deprecation_message = (
|
298 |
-
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
299 |
-
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
300 |
-
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
301 |
-
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
302 |
-
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
303 |
-
" file"
|
304 |
-
)
|
305 |
-
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
306 |
-
new_config = dict(scheduler.config)
|
307 |
-
new_config["steps_offset"] = 1
|
308 |
-
scheduler._internal_dict = FrozenDict(new_config)
|
309 |
-
|
310 |
-
if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
|
311 |
-
deprecation_message = (
|
312 |
-
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
|
313 |
-
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
|
314 |
-
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
|
315 |
-
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
|
316 |
-
" Hub, it would be very nice if you could open a Pull request for the"
|
317 |
-
" `scheduler/scheduler_config.json` file"
|
318 |
-
)
|
319 |
-
deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
|
320 |
-
new_config = dict(scheduler.config)
|
321 |
-
new_config["skip_prk_steps"] = True
|
322 |
-
scheduler._internal_dict = FrozenDict(new_config)
|
323 |
-
|
324 |
-
if safety_checker is None and requires_safety_checker:
|
325 |
-
logger.warning(
|
326 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
327 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
328 |
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
329 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
330 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
331 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
332 |
-
)
|
333 |
-
|
334 |
-
if safety_checker is not None and feature_extractor is None:
|
335 |
-
raise ValueError(
|
336 |
-
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
337 |
-
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
338 |
-
)
|
339 |
-
|
340 |
-
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
|
341 |
-
version.parse(unet.config._diffusers_version).base_version
|
342 |
-
) < version.parse("0.9.0.dev0")
|
343 |
-
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
344 |
-
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
345 |
-
deprecation_message = (
|
346 |
-
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
347 |
-
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
|
348 |
-
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
349 |
-
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
|
350 |
-
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
351 |
-
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
352 |
-
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
353 |
-
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
354 |
-
" the `unet/config.json` file"
|
355 |
-
)
|
356 |
-
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
357 |
-
new_config = dict(unet.config)
|
358 |
-
new_config["sample_size"] = 64
|
359 |
-
unet._internal_dict = FrozenDict(new_config)
|
360 |
-
|
361 |
-
self.register_modules(
|
362 |
-
vae=vae,
|
363 |
-
text_encoder=text_encoder,
|
364 |
-
tokenizer=tokenizer,
|
365 |
-
unet=unet,
|
366 |
-
scheduler=scheduler,
|
367 |
-
safety_checker=safety_checker,
|
368 |
-
feature_extractor=feature_extractor,
|
369 |
-
inverse_scheduler=inverse_scheduler,
|
370 |
-
)
|
371 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
372 |
-
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
373 |
-
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
374 |
-
|
375 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
|
376 |
-
def enable_vae_slicing(self):
|
377 |
-
r"""
|
378 |
-
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
|
379 |
-
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
|
380 |
-
"""
|
381 |
-
self.vae.enable_slicing()
|
382 |
-
|
383 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
|
384 |
-
def disable_vae_slicing(self):
|
385 |
-
r"""
|
386 |
-
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
|
387 |
-
computing decoding in one step.
|
388 |
-
"""
|
389 |
-
self.vae.disable_slicing()
|
390 |
-
|
391 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
|
392 |
-
def enable_vae_tiling(self):
|
393 |
-
r"""
|
394 |
-
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
|
395 |
-
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
|
396 |
-
processing larger images.
|
397 |
-
"""
|
398 |
-
self.vae.enable_tiling()
|
399 |
-
|
400 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
|
401 |
-
def disable_vae_tiling(self):
|
402 |
-
r"""
|
403 |
-
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
|
404 |
-
computing decoding in one step.
|
405 |
-
"""
|
406 |
-
self.vae.disable_tiling()
|
407 |
-
|
408 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
|
409 |
-
def enable_model_cpu_offload(self, gpu_id=0):
|
410 |
-
r"""
|
411 |
-
Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
|
412 |
-
time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
|
413 |
-
Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
|
414 |
-
iterative execution of the `unet`.
|
415 |
-
"""
|
416 |
-
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
417 |
-
from accelerate import cpu_offload_with_hook
|
418 |
-
else:
|
419 |
-
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
|
420 |
-
|
421 |
-
device = torch.device(f"cuda:{gpu_id}")
|
422 |
-
|
423 |
-
if self.device.type != "cpu":
|
424 |
-
self.to("cpu", silence_dtype_warnings=True)
|
425 |
-
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
426 |
-
|
427 |
-
hook = None
|
428 |
-
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
|
429 |
-
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
430 |
-
|
431 |
-
if self.safety_checker is not None:
|
432 |
-
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
|
433 |
-
|
434 |
-
# We'll offload the last model manually.
|
435 |
-
self.final_offload_hook = hook
|
436 |
-
|
437 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
438 |
-
def _encode_prompt(
|
439 |
-
self,
|
440 |
-
prompt,
|
441 |
-
device,
|
442 |
-
num_images_per_prompt,
|
443 |
-
do_classifier_free_guidance,
|
444 |
-
negative_prompt=None,
|
445 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
446 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
447 |
-
lora_scale: Optional[float] = None,
|
448 |
-
):
|
449 |
-
r"""
|
450 |
-
Encodes the prompt into text encoder hidden states.
|
451 |
-
|
452 |
-
Args:
|
453 |
-
prompt (`str` or `List[str]`, *optional*):
|
454 |
-
prompt to be encoded
|
455 |
-
device: (`torch.device`):
|
456 |
-
torch device
|
457 |
-
num_images_per_prompt (`int`):
|
458 |
-
number of images that should be generated per prompt
|
459 |
-
do_classifier_free_guidance (`bool`):
|
460 |
-
whether to use classifier free guidance or not
|
461 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
462 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
463 |
-
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
464 |
-
less than `1`).
|
465 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
466 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
467 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
468 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
469 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
470 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
471 |
-
argument.
|
472 |
-
lora_scale (`float`, *optional*):
|
473 |
-
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
474 |
-
"""
|
475 |
-
# set lora scale so that monkey patched LoRA
|
476 |
-
# function of text encoder can correctly access it
|
477 |
-
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
478 |
-
self._lora_scale = lora_scale
|
479 |
-
|
480 |
-
if prompt is not None and isinstance(prompt, str):
|
481 |
-
batch_size = 1
|
482 |
-
elif prompt is not None and isinstance(prompt, list):
|
483 |
-
batch_size = len(prompt)
|
484 |
-
else:
|
485 |
-
batch_size = prompt_embeds.shape[0]
|
486 |
-
|
487 |
-
if prompt_embeds is None:
|
488 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
489 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
490 |
-
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
491 |
-
|
492 |
-
text_inputs = self.tokenizer(
|
493 |
-
prompt,
|
494 |
-
padding="max_length",
|
495 |
-
max_length=self.tokenizer.model_max_length,
|
496 |
-
truncation=True,
|
497 |
-
return_tensors="pt",
|
498 |
-
)
|
499 |
-
text_input_ids = text_inputs.input_ids
|
500 |
-
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
501 |
-
|
502 |
-
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
503 |
-
text_input_ids, untruncated_ids
|
504 |
-
):
|
505 |
-
removed_text = self.tokenizer.batch_decode(
|
506 |
-
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
507 |
-
)
|
508 |
-
logger.warning(
|
509 |
-
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
510 |
-
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
511 |
-
)
|
512 |
-
|
513 |
-
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
514 |
-
attention_mask = text_inputs.attention_mask.to(device)
|
515 |
-
else:
|
516 |
-
attention_mask = None
|
517 |
-
|
518 |
-
prompt_embeds = self.text_encoder(
|
519 |
-
text_input_ids.to(device),
|
520 |
-
attention_mask=attention_mask,
|
521 |
-
)
|
522 |
-
prompt_embeds = prompt_embeds[0]
|
523 |
-
|
524 |
-
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
525 |
-
|
526 |
-
bs_embed, seq_len, _ = prompt_embeds.shape
|
527 |
-
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
528 |
-
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
529 |
-
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
530 |
-
|
531 |
-
# get unconditional embeddings for classifier free guidance
|
532 |
-
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
533 |
-
uncond_tokens: List[str]
|
534 |
-
if negative_prompt is None:
|
535 |
-
uncond_tokens = [""] * batch_size
|
536 |
-
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
537 |
-
raise TypeError(
|
538 |
-
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
539 |
-
f" {type(prompt)}."
|
540 |
-
)
|
541 |
-
elif isinstance(negative_prompt, str):
|
542 |
-
uncond_tokens = [negative_prompt]
|
543 |
-
elif batch_size != len(negative_prompt):
|
544 |
-
raise ValueError(
|
545 |
-
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
546 |
-
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
547 |
-
" the batch size of `prompt`."
|
548 |
-
)
|
549 |
-
else:
|
550 |
-
uncond_tokens = negative_prompt
|
551 |
-
|
552 |
-
# textual inversion: procecss multi-vector tokens if necessary
|
553 |
-
if isinstance(self, TextualInversionLoaderMixin):
|
554 |
-
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
555 |
-
|
556 |
-
max_length = prompt_embeds.shape[1]
|
557 |
-
uncond_input = self.tokenizer(
|
558 |
-
uncond_tokens,
|
559 |
-
padding="max_length",
|
560 |
-
max_length=max_length,
|
561 |
-
truncation=True,
|
562 |
-
return_tensors="pt",
|
563 |
-
)
|
564 |
-
|
565 |
-
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
566 |
-
attention_mask = uncond_input.attention_mask.to(device)
|
567 |
-
else:
|
568 |
-
attention_mask = None
|
569 |
-
|
570 |
-
negative_prompt_embeds = self.text_encoder(
|
571 |
-
uncond_input.input_ids.to(device),
|
572 |
-
attention_mask=attention_mask,
|
573 |
-
)
|
574 |
-
negative_prompt_embeds = negative_prompt_embeds[0]
|
575 |
-
|
576 |
-
if do_classifier_free_guidance:
|
577 |
-
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
578 |
-
seq_len = negative_prompt_embeds.shape[1]
|
579 |
-
|
580 |
-
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
|
581 |
-
|
582 |
-
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
583 |
-
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
584 |
-
|
585 |
-
# For classifier free guidance, we need to do two forward passes.
|
586 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
587 |
-
# to avoid doing two forward passes
|
588 |
-
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
589 |
-
|
590 |
-
return prompt_embeds
|
591 |
-
|
592 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
593 |
-
def run_safety_checker(self, image, device, dtype):
|
594 |
-
if self.safety_checker is None:
|
595 |
-
has_nsfw_concept = None
|
596 |
-
else:
|
597 |
-
if torch.is_tensor(image):
|
598 |
-
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
599 |
-
else:
|
600 |
-
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
601 |
-
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
602 |
-
image, has_nsfw_concept = self.safety_checker(
|
603 |
-
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
604 |
-
)
|
605 |
-
return image, has_nsfw_concept
|
606 |
-
|
607 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
608 |
-
def prepare_extra_step_kwargs(self, generator, eta):
|
609 |
-
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
610 |
-
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
611 |
-
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
|
612 |
-
# and should be between [0, 1]
|
613 |
-
|
614 |
-
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
615 |
-
extra_step_kwargs = {}
|
616 |
-
if accepts_eta:
|
617 |
-
extra_step_kwargs["eta"] = eta
|
618 |
-
|
619 |
-
# check if the scheduler accepts generator
|
620 |
-
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
621 |
-
if accepts_generator:
|
622 |
-
extra_step_kwargs["generator"] = generator
|
623 |
-
return extra_step_kwargs
|
624 |
-
|
625 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
626 |
-
def decode_latents(self, latents):
|
627 |
-
warnings.warn(
|
628 |
-
"The decode_latents method is deprecated and will be removed in a future version. Please"
|
629 |
-
" use VaeImageProcessor instead",
|
630 |
-
FutureWarning,
|
631 |
-
)
|
632 |
-
latents = 1 / self.vae.config.scaling_factor * latents
|
633 |
-
image = self.vae.decode(latents, return_dict=False)[0]
|
634 |
-
image = (image / 2 + 0.5).clamp(0, 1)
|
635 |
-
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
636 |
-
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
637 |
-
return image
|
638 |
-
|
639 |
-
def check_inputs(
|
640 |
-
self,
|
641 |
-
prompt,
|
642 |
-
strength,
|
643 |
-
callback_steps,
|
644 |
-
negative_prompt=None,
|
645 |
-
prompt_embeds=None,
|
646 |
-
negative_prompt_embeds=None,
|
647 |
-
):
|
648 |
-
if (strength is None) or (strength is not None and (strength < 0 or strength > 1)):
|
649 |
-
raise ValueError(
|
650 |
-
f"The value of `strength` should in [0.0, 1.0] but is, but is {strength} of type {type(strength)}."
|
651 |
-
)
|
652 |
-
|
653 |
-
if (callback_steps is None) or (
|
654 |
-
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
655 |
-
):
|
656 |
-
raise ValueError(
|
657 |
-
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
658 |
-
f" {type(callback_steps)}."
|
659 |
-
)
|
660 |
-
|
661 |
-
if prompt is not None and prompt_embeds is not None:
|
662 |
-
raise ValueError(
|
663 |
-
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
664 |
-
" only forward one of the two."
|
665 |
-
)
|
666 |
-
elif prompt is None and prompt_embeds is None:
|
667 |
-
raise ValueError(
|
668 |
-
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
669 |
-
)
|
670 |
-
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
671 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
672 |
-
|
673 |
-
if negative_prompt is not None and negative_prompt_embeds is not None:
|
674 |
-
raise ValueError(
|
675 |
-
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
676 |
-
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
677 |
-
)
|
678 |
-
|
679 |
-
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
680 |
-
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
681 |
-
raise ValueError(
|
682 |
-
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
683 |
-
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
684 |
-
f" {negative_prompt_embeds.shape}."
|
685 |
-
)
|
686 |
-
|
687 |
-
def check_source_inputs(
|
688 |
-
self,
|
689 |
-
source_prompt=None,
|
690 |
-
source_negative_prompt=None,
|
691 |
-
source_prompt_embeds=None,
|
692 |
-
source_negative_prompt_embeds=None,
|
693 |
-
):
|
694 |
-
if source_prompt is not None and source_prompt_embeds is not None:
|
695 |
-
raise ValueError(
|
696 |
-
f"Cannot forward both `source_prompt`: {source_prompt} and `source_prompt_embeds`: {source_prompt_embeds}."
|
697 |
-
" Please make sure to only forward one of the two."
|
698 |
-
)
|
699 |
-
elif source_prompt is None and source_prompt_embeds is None:
|
700 |
-
raise ValueError(
|
701 |
-
"Provide either `source_image` or `source_prompt_embeds`. Cannot leave all both of the arguments undefined."
|
702 |
-
)
|
703 |
-
elif source_prompt is not None and (
|
704 |
-
not isinstance(source_prompt, str) and not isinstance(source_prompt, list)
|
705 |
-
):
|
706 |
-
raise ValueError(f"`source_prompt` has to be of type `str` or `list` but is {type(source_prompt)}")
|
707 |
-
|
708 |
-
if source_negative_prompt is not None and source_negative_prompt_embeds is not None:
|
709 |
-
raise ValueError(
|
710 |
-
f"Cannot forward both `source_negative_prompt`: {source_negative_prompt} and `source_negative_prompt_embeds`:"
|
711 |
-
f" {source_negative_prompt_embeds}. Please make sure to only forward one of the two."
|
712 |
-
)
|
713 |
-
|
714 |
-
if source_prompt_embeds is not None and source_negative_prompt_embeds is not None:
|
715 |
-
if source_prompt_embeds.shape != source_negative_prompt_embeds.shape:
|
716 |
-
raise ValueError(
|
717 |
-
"`source_prompt_embeds` and `source_negative_prompt_embeds` must have the same shape when passed"
|
718 |
-
f" directly, but got: `source_prompt_embeds` {source_prompt_embeds.shape} !="
|
719 |
-
f" `source_negative_prompt_embeds` {source_negative_prompt_embeds.shape}."
|
720 |
-
)
|
721 |
-
|
722 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
723 |
-
def get_timesteps(self, num_inference_steps, strength, device):
|
724 |
-
# get the original timestep using init_timestep
|
725 |
-
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
726 |
-
|
727 |
-
t_start = max(num_inference_steps - init_timestep, 0)
|
728 |
-
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
729 |
-
|
730 |
-
return timesteps, num_inference_steps - t_start
|
731 |
-
|
732 |
-
def get_inverse_timesteps(self, num_inference_steps, strength, device):
|
733 |
-
# get the original timestep using init_timestep
|
734 |
-
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
735 |
-
|
736 |
-
t_start = max(num_inference_steps - init_timestep, 0)
|
737 |
-
|
738 |
-
# safety for t_start overflow to prevent empty timsteps slice
|
739 |
-
if t_start == 0:
|
740 |
-
return self.inverse_scheduler.timesteps, num_inference_steps
|
741 |
-
timesteps = self.inverse_scheduler.timesteps[:-t_start]
|
742 |
-
|
743 |
-
return timesteps, num_inference_steps - t_start
|
744 |
-
|
745 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
746 |
-
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
747 |
-
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
748 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
749 |
-
raise ValueError(
|
750 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
751 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
752 |
-
)
|
753 |
-
|
754 |
-
if latents is None:
|
755 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
756 |
-
else:
|
757 |
-
latents = latents.to(device)
|
758 |
-
|
759 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
760 |
-
latents = latents * self.scheduler.init_noise_sigma
|
761 |
-
return latents
|
762 |
-
|
763 |
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_pix2pix_zero.StableDiffusionPix2PixZeroPipeline.prepare_image_latents
|
764 |
-
def prepare_image_latents(self, image, batch_size, dtype, device, generator=None):
|
765 |
-
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
766 |
-
raise ValueError(
|
767 |
-
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
768 |
-
)
|
769 |
-
|
770 |
-
image = image.to(device=device, dtype=dtype)
|
771 |
-
|
772 |
-
if image.shape[1] == 4:
|
773 |
-
latents = image
|
774 |
-
|
775 |
-
else:
|
776 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
777 |
-
raise ValueError(
|
778 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
779 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
780 |
-
)
|
781 |
-
|
782 |
-
if isinstance(generator, list):
|
783 |
-
latents = [
|
784 |
-
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
|
785 |
-
]
|
786 |
-
latents = torch.cat(latents, dim=0)
|
787 |
-
else:
|
788 |
-
latents = self.vae.encode(image).latent_dist.sample(generator)
|
789 |
-
|
790 |
-
latents = self.vae.config.scaling_factor * latents
|
791 |
-
|
792 |
-
if batch_size != latents.shape[0]:
|
793 |
-
if batch_size % latents.shape[0] == 0:
|
794 |
-
# expand image_latents for batch_size
|
795 |
-
deprecation_message = (
|
796 |
-
f"You have passed {batch_size} text prompts (`prompt`), but only {latents.shape[0]} initial"
|
797 |
-
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
|
798 |
-
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
|
799 |
-
" your script to pass as many initial images as text prompts to suppress this warning."
|
800 |
-
)
|
801 |
-
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
|
802 |
-
additional_latents_per_image = batch_size // latents.shape[0]
|
803 |
-
latents = torch.cat([latents] * additional_latents_per_image, dim=0)
|
804 |
-
else:
|
805 |
-
raise ValueError(
|
806 |
-
f"Cannot duplicate `image` of batch size {latents.shape[0]} to {batch_size} text prompts."
|
807 |
-
)
|
808 |
-
else:
|
809 |
-
latents = torch.cat([latents], dim=0)
|
810 |
-
|
811 |
-
return latents
|
812 |
-
|
813 |
-
def get_epsilon(self, model_output: torch.Tensor, sample: torch.Tensor, timestep: int):
|
814 |
-
pred_type = self.inverse_scheduler.config.prediction_type
|
815 |
-
alpha_prod_t = self.inverse_scheduler.alphas_cumprod[timestep]
|
816 |
-
|
817 |
-
beta_prod_t = 1 - alpha_prod_t
|
818 |
-
|
819 |
-
if pred_type == "epsilon":
|
820 |
-
return model_output
|
821 |
-
elif pred_type == "sample":
|
822 |
-
return (sample - alpha_prod_t ** (0.5) * model_output) / beta_prod_t ** (0.5)
|
823 |
-
elif pred_type == "v_prediction":
|
824 |
-
return (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
|
825 |
-
else:
|
826 |
-
raise ValueError(
|
827 |
-
f"prediction_type given as {pred_type} must be one of `epsilon`, `sample`, or `v_prediction`"
|
828 |
-
)
|
829 |
-
|
830 |
-
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def generate_mask(
    self,
    image: Union[torch.FloatTensor, PIL.Image.Image] = None,
    target_prompt: Optional[Union[str, List[str]]] = None,
    target_negative_prompt: Optional[Union[str, List[str]]] = None,
    target_prompt_embeds: Optional[torch.FloatTensor] = None,
    target_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    source_prompt: Optional[Union[str, List[str]]] = None,
    source_negative_prompt: Optional[Union[str, List[str]]] = None,
    source_prompt_embeds: Optional[torch.FloatTensor] = None,
    source_negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    num_maps_per_mask: Optional[int] = 10,
    mask_encode_strength: Optional[float] = 0.5,
    mask_thresholding_ratio: Optional[float] = 3.0,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    output_type: Optional[str] = "np",
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
):
    r"""
    Generate a latent mask given a mask prompt, a target prompt, and an image.

    Args:
        image (`PIL.Image.Image`):
            `Image` or tensor representing an image batch to be used for computing the mask.
        target_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide semantic mask generation. If not defined, you need to pass
            `prompt_embeds`.
        target_negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide what to not include in image generation. If not defined, you need to
            pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
        target_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `prompt` input argument.
        target_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
            not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
        source_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide semantic mask generation using DiffEdit. If not defined, you need to
            pass `source_prompt_embeds` or `source_image` instead.
        source_negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide semantic mask generation away from using DiffEdit. If not defined, you
            need to pass `source_negative_prompt_embeds` or `source_image` instead.
        source_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings to guide the semantic mask generation. Can be used to easily tweak text
            inputs (prompt weighting). If not provided, text embeddings are generated from `source_prompt` input
            argument.
        source_negative_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings to negatively guide the semantic mask generation. Can be used to easily
            tweak text inputs (prompt weighting). If not provided, text embeddings are generated from
            `source_negative_prompt` input argument.
        num_maps_per_mask (`int`, *optional*, defaults to 10):
            The number of noise maps sampled to generate the semantic mask using DiffEdit.
        mask_encode_strength (`float`, *optional*, defaults to 0.5):
            The strength of the noise maps sampled to generate the semantic mask using DiffEdit. Must be between 0
            and 1.
        mask_thresholding_ratio (`float`, *optional*, defaults to 3.0):
            The maximum multiple of the mean absolute difference used to clamp the semantic guidance map before
            mask binarization.
        num_inference_steps (`int`, *optional*, defaults to 50):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference.
        guidance_scale (`float`, *optional*, defaults to 7.5):
            A higher guidance scale value encourages the model to generate images closely linked to the text
            `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
        generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        output_type (`str`, *optional*, defaults to `"np"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        cross_attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
            [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).

    Examples:

    Returns:
        `List[PIL.Image.Image]` or `np.array`:
            When returning a `List[PIL.Image.Image]`, the list consists of a batch of single-channel binary images
            with dimensions `(height // self.vae_scale_factor, width // self.vae_scale_factor)`. If it's
            `np.array`, the shape is `(batch_size, height // self.vae_scale_factor, width //
            self.vae_scale_factor)`.
    """

    # 1. Check inputs (Provide dummy argument for callback_steps)
    self.check_inputs(
        target_prompt,
        mask_encode_strength,
        1,
        target_negative_prompt,
        target_prompt_embeds,
        target_negative_prompt_embeds,
    )

    self.check_source_inputs(
        source_prompt,
        source_negative_prompt,
        source_prompt_embeds,
        source_negative_prompt_embeds,
    )

    # `isinstance(None, int)` is False, so a single check covers None,
    # wrong type, and non-positive values.
    if not isinstance(num_maps_per_mask, int) or num_maps_per_mask <= 0:
        raise ValueError(
            f"`num_maps_per_mask` has to be a positive integer but is {num_maps_per_mask} of type"
            f" {type(num_maps_per_mask)}."
        )

    if mask_thresholding_ratio is None or mask_thresholding_ratio <= 0:
        raise ValueError(
            f"`mask_thresholding_ratio` has to be positive but is {mask_thresholding_ratio} of type"
            f" {type(mask_thresholding_ratio)}."
        )

    # 2. Define call parameters
    if target_prompt is not None and isinstance(target_prompt, str):
        batch_size = 1
    elif target_prompt is not None and isinstance(target_prompt, list):
        batch_size = len(target_prompt)
    else:
        batch_size = target_prompt_embeds.shape[0]
    if cross_attention_kwargs is None:
        cross_attention_kwargs = {}

    device = self._execution_device
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Encode input prompts
    # NOTE(review): a leftover no-op expression reading `cross_attention_kwargs["scale"]`
    # and discarding the result was removed here; it had no effect.
    target_prompt_embeds = self._encode_prompt(
        target_prompt,
        device,
        num_maps_per_mask,
        do_classifier_free_guidance,
        target_negative_prompt,
        prompt_embeds=target_prompt_embeds,
        negative_prompt_embeds=target_negative_prompt_embeds,
    )

    source_prompt_embeds = self._encode_prompt(
        source_prompt,
        device,
        num_maps_per_mask,
        do_classifier_free_guidance,
        source_negative_prompt,
        prompt_embeds=source_prompt_embeds,
        negative_prompt_embeds=source_negative_prompt_embeds,
    )

    # 4. Preprocess image: duplicate each image once per sampled noise map.
    image = self.image_processor.preprocess(image).repeat_interleave(num_maps_per_mask, dim=0)

    # 5. Set timesteps and pick the single encode timestep that matches
    # `mask_encode_strength`.
    self.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps, _ = self.get_timesteps(num_inference_steps, mask_encode_strength, device)
    encode_timestep = timesteps[0]

    # 6. Prepare image latents and add noise with specified strength
    image_latents = self.prepare_image_latents(
        image, batch_size * num_maps_per_mask, self.vae.dtype, device, generator
    )
    noise = randn_tensor(image_latents.shape, generator=generator, device=device, dtype=self.vae.dtype)
    image_latents = self.scheduler.add_noise(image_latents, noise, encode_timestep)

    # With CFG we need 4 copies (source uncond/cond + target uncond/cond),
    # otherwise 2 (source + target).
    latent_model_input = torch.cat([image_latents] * (4 if do_classifier_free_guidance else 2))
    latent_model_input = self.scheduler.scale_model_input(latent_model_input, encode_timestep)

    # 7. Predict the noise residual
    prompt_embeds = torch.cat([source_prompt_embeds, target_prompt_embeds])
    noise_pred = self.unet(
        latent_model_input,
        encode_timestep,
        encoder_hidden_states=prompt_embeds,
        cross_attention_kwargs=cross_attention_kwargs,
    ).sample

    if do_classifier_free_guidance:
        noise_pred_neg_src, noise_pred_source, noise_pred_uncond, noise_pred_target = noise_pred.chunk(4)
        noise_pred_source = noise_pred_neg_src + guidance_scale * (noise_pred_source - noise_pred_neg_src)
        noise_pred_target = noise_pred_uncond + guidance_scale * (noise_pred_target - noise_pred_uncond)
    else:
        noise_pred_source, noise_pred_target = noise_pred.chunk(2)

    # 8. Compute the mask from the absolute difference of predicted noise residuals
    # TODO: Consider smoothing mask guidance map
    mask_guidance_map = (
        torch.abs(noise_pred_target - noise_pred_source)
        .reshape(batch_size, num_maps_per_mask, *noise_pred_target.shape[-3:])
        .mean([1, 2])  # average over sampled noise maps and channels
    )
    clamp_magnitude = mask_guidance_map.mean() * mask_thresholding_ratio
    semantic_mask_image = mask_guidance_map.clamp(0, clamp_magnitude) / clamp_magnitude
    semantic_mask_image = torch.where(semantic_mask_image <= 0.5, 0, 1)
    mask_image = semantic_mask_image.cpu().numpy()

    # 9. Convert to Numpy array or PIL.
    if output_type == "pil":
        mask_image = self.image_processor.numpy_to_pil(mask_image)

    # Offload last model to CPU
    if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
        self.final_offload_hook.offload()

    return mask_image
@torch.no_grad()
@replace_example_docstring(EXAMPLE_INVERT_DOC_STRING)
def invert(
    self,
    prompt: Optional[Union[str, List[str]]] = None,
    image: Union[torch.FloatTensor, PIL.Image.Image] = None,
    num_inference_steps: int = 50,
    inpaint_strength: float = 0.8,
    guidance_scale: float = 7.5,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    decode_latents: bool = False,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: Optional[int] = 1,
    cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    lambda_auto_corr: float = 20.0,
    lambda_kl: float = 20.0,
    num_reg_steps: int = 0,
    num_auto_corr_rolls: int = 5,
):
    r"""
    Generate inverted latents given a prompt and image.

    Args:
        prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
        image (`PIL.Image.Image`):
            `Image` or tensor representing an image batch to produce the inverted latents guided by `prompt`.
        inpaint_strength (`float`, *optional*, defaults to 0.8):
            Indicates extent of the noising process to run latent inversion. Must be between 0 and 1. When
            `strength` is 1, the inversion process is run for the full number of iterations specified in
            `num_inference_steps`. `image` is used as a reference for the inversion process, adding more noise the
            larger the `strength`. If `strength` is 0, no inpainting occurs.
        num_inference_steps (`int`, *optional*, defaults to 50):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference.
        guidance_scale (`float`, *optional*, defaults to 7.5):
            A higher guidance scale value encourages the model to generate images closely linked to the text
            `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
        negative_prompt (`str` or `List[str]`, *optional*):
            The prompt or prompts to guide what to not include in image generation. If not defined, you need to
            pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
        generator (`torch.Generator`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
            provided, text embeddings are generated from the `prompt` input argument.
        negative_prompt_embeds (`torch.FloatTensor`, *optional*):
            Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
            not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
        decode_latents (`bool`, *optional*, defaults to `False`):
            Whether or not to decode the inverted latents into a generated image. Setting this argument to `True`
            decodes all inverted latents for each timestep into a list of generated images.
        output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`~pipelines.stable_diffusion.DiffEditInversionPipelineOutput`] instead of a
            plain tuple.
        callback (`Callable`, *optional*):
            A function that calls every `callback_steps` steps during inference. The function is called with the
            following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
        callback_steps (`int`, *optional*, defaults to 1):
            The frequency at which the `callback` function is called. If not specified, the callback is called at
            every step.
        cross_attention_kwargs (`dict`, *optional*):
            A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
            [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
        lambda_auto_corr (`float`, *optional*, defaults to 20.0):
            Lambda parameter to control auto correction.
        lambda_kl (`float`, *optional*, defaults to 20.0):
            Lambda parameter to control Kullback–Leibler divergence output.
        num_reg_steps (`int`, *optional*, defaults to 0):
            Number of regularization loss steps.
        num_auto_corr_rolls (`int`, *optional*, defaults to 5):
            Number of auto correction roll steps.

    Examples:

    Returns:
        [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] or
        `tuple`:
            If `return_dict` is `True`,
            [`~pipelines.stable_diffusion.pipeline_stable_diffusion_diffedit.DiffEditInversionPipelineOutput`] is
            returned, otherwise a `tuple` is returned where the first element is the inverted latents tensors
            ordered by increasing noise, and the second is the corresponding decoded images if `decode_latents` is
            `True`, otherwise `None`.
    """

    # 1. Check inputs
    self.check_inputs(
        prompt,
        inpaint_strength,
        callback_steps,
        negative_prompt,
        prompt_embeds,
        negative_prompt_embeds,
    )

    if image is None:
        raise ValueError("`image` input cannot be undefined.")

    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]
    if cross_attention_kwargs is None:
        cross_attention_kwargs = {}

    device = self._execution_device
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Preprocess image
    image = self.image_processor.preprocess(image)

    # 4. Prepare latent variables
    num_images_per_prompt = 1
    latents = self.prepare_image_latents(
        image, batch_size * num_images_per_prompt, self.vae.dtype, device, generator
    )

    # 5. Encode input prompt
    prompt_embeds = self._encode_prompt(
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
    )

    # 6. Prepare timesteps (the inverse scheduler runs the noising direction)
    self.inverse_scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps, num_inference_steps = self.get_inverse_timesteps(num_inference_steps, inpaint_strength, device)

    # 7. Noising loop where we obtain the intermediate noised latent image for each timestep.
    num_warmup_steps = len(timesteps) - num_inference_steps * self.inverse_scheduler.order
    inverted_latents = []
    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.inverse_scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(
                latent_model_input,
                t,
                encoder_hidden_states=prompt_embeds,
                cross_attention_kwargs=cross_attention_kwargs,
            ).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # regularization of the noise prediction (not in original code or paper but borrowed from Pix2PixZero)
            if num_reg_steps > 0:
                with torch.enable_grad():
                    for _ in range(num_reg_steps):
                        if lambda_auto_corr > 0:
                            for _ in range(num_auto_corr_rolls):
                                var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)

                                # Derive epsilon from model output before regularizing to IID standard normal
                                var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)

                                l_ac = auto_corr_loss(var_epsilon, generator=generator)
                                l_ac.backward()

                                # average the gradient over the rolls
                                grad = var.grad.detach() / num_auto_corr_rolls
                                noise_pred = noise_pred - lambda_auto_corr * grad

                        if lambda_kl > 0:
                            var = torch.autograd.Variable(noise_pred.detach().clone(), requires_grad=True)

                            # Derive epsilon from model output before regularizing to IID standard normal
                            var_epsilon = self.get_epsilon(var, latent_model_input.detach(), t)

                            l_kld = kl_divergence(var_epsilon)
                            l_kld.backward()

                            grad = var.grad.detach()
                            noise_pred = noise_pred - lambda_kl * grad

                        noise_pred = noise_pred.detach()

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.inverse_scheduler.step(noise_pred, t, latents).prev_sample
            inverted_latents.append(latents.detach().clone())

            # call the callback, if provided
            if i == len(timesteps) - 1 or (
                (i + 1) > num_warmup_steps and (i + 1) % self.inverse_scheduler.order == 0
            ):
                progress_bar.update()
                if callback is not None and i % callback_steps == 0:
                    callback(i, t, latents)

    assert len(inverted_latents) == len(timesteps)
    # Stack along dim 1 so the trajectory is ordered by increasing noise.
    latents = torch.stack(list(reversed(inverted_latents)), 1)

    # 8. Post-processing
    image = None
    if decode_latents:
        image = self.decode_latents(latents.flatten(0, 1))

    # 9. Convert to PIL.
    if decode_latents and output_type == "pil":
        image = self.image_processor.numpy_to_pil(image)

    # Offload last model to CPU
    if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
        self.final_offload_hook.offload()

    if not return_dict:
        return (latents, image)

    return DiffEditInversionPipelineOutput(latents=latents, images=image)
@torch.no_grad()
|
1275 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
1276 |
-
def __call__(
|
1277 |
-
self,
|
1278 |
-
prompt: Optional[Union[str, List[str]]] = None,
|
1279 |
-
mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
1280 |
-
image_latents: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
1281 |
-
inpaint_strength: Optional[float] = 0.8,
|
1282 |
-
num_inference_steps: int = 50,
|
1283 |
-
guidance_scale: float = 7.5,
|
1284 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
1285 |
-
num_images_per_prompt: Optional[int] = 1,
|
1286 |
-
eta: float = 0.0,
|
1287 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
1288 |
-
latents: Optional[torch.FloatTensor] = None,
|
1289 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
1290 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
1291 |
-
output_type: Optional[str] = "pil",
|
1292 |
-
return_dict: bool = True,
|
1293 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
1294 |
-
callback_steps: int = 1,
|
1295 |
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
1296 |
-
):
|
1297 |
-
r"""
|
1298 |
-
The call function to the pipeline for generation.
|
1299 |
-
|
1300 |
-
Args:
|
1301 |
-
prompt (`str` or `List[str]`, *optional*):
|
1302 |
-
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
1303 |
-
mask_image (`PIL.Image.Image`):
|
1304 |
-
`Image` or tensor representing an image batch to mask the generated image. White pixels in the mask are
|
1305 |
-
repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
|
1306 |
-
single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
1307 |
-
instead of 3, so the expected shape would be `(B, 1, H, W)`.
|
1308 |
-
image_latents (`PIL.Image.Image` or `torch.FloatTensor`):
|
1309 |
-
Partially noised image latents from the inversion process to be used as inputs for image generation.
|
1310 |
-
inpaint_strength (`float`, *optional*, defaults to 0.8):
|
1311 |
-
Indicates extent to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the
|
1312 |
-
denoising process is run on the masked area for the full number of iterations specified in
|
1313 |
-
`num_inference_steps`. `image_latents` is used as a reference for the masked area, adding more noise to
|
1314 |
-
that region the larger the `strength`. If `strength` is 0, no inpainting occurs.
|
1315 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
1316 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
1317 |
-
expense of slower inference.
|
1318 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
1319 |
-
A higher guidance scale value encourages the model to generate images closely linked to the text
|
1320 |
-
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
1321 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
1322 |
-
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
1323 |
-
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
1324 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
1325 |
-
The number of images to generate per prompt.
|
1326 |
-
eta (`float`, *optional*, defaults to 0.0):
|
1327 |
-
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
|
1328 |
-
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
1329 |
-
generator (`torch.Generator`, *optional*):
|
1330 |
-
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
1331 |
-
generation deterministic.
|
1332 |
-
latents (`torch.FloatTensor`, *optional*):
|
1333 |
-
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
1334 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
1335 |
-
tensor is generated by sampling using the supplied random `generator`.
|
1336 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
1337 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
1338 |
-
provided, text embeddings are generated from the `prompt` input argument.
|
1339 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
1340 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
1341 |
-
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
1342 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
1343 |
-
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
1344 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
1345 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
1346 |
-
plain tuple.
|
1347 |
-
callback (`Callable`, *optional*):
|
1348 |
-
A function that calls every `callback_steps` steps during inference. The function is called with the
|
1349 |
-
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
1350 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
1351 |
-
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
1352 |
-
every step.
|
1353 |
-
cross_attention_kwargs (`dict`, *optional*):
|
1354 |
-
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
1355 |
-
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
1356 |
-
|
1357 |
-
Examples:
|
1358 |
-
|
1359 |
-
Returns:
|
1360 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
1361 |
-
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
1362 |
-
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
1363 |
-
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
1364 |
-
"not-safe-for-work" (nsfw) content.
|
1365 |
-
"""
|
1366 |
-
|
1367 |
-
# 1. Check inputs
|
1368 |
-
self.check_inputs(
|
1369 |
-
prompt,
|
1370 |
-
inpaint_strength,
|
1371 |
-
callback_steps,
|
1372 |
-
negative_prompt,
|
1373 |
-
prompt_embeds,
|
1374 |
-
negative_prompt_embeds,
|
1375 |
-
)
|
1376 |
-
|
1377 |
-
if mask_image is None:
|
1378 |
-
raise ValueError(
|
1379 |
-
"`mask_image` input cannot be undefined. Use `generate_mask()` to compute `mask_image` from text prompts."
|
1380 |
-
)
|
1381 |
-
if image_latents is None:
|
1382 |
-
raise ValueError(
|
1383 |
-
"`image_latents` input cannot be undefined. Use `invert()` to compute `image_latents` from input images."
|
1384 |
-
)
|
1385 |
-
|
1386 |
-
# 2. Define call parameters
|
1387 |
-
if prompt is not None and isinstance(prompt, str):
|
1388 |
-
batch_size = 1
|
1389 |
-
elif prompt is not None and isinstance(prompt, list):
|
1390 |
-
batch_size = len(prompt)
|
1391 |
-
else:
|
1392 |
-
batch_size = prompt_embeds.shape[0]
|
1393 |
-
if cross_attention_kwargs is None:
|
1394 |
-
cross_attention_kwargs = {}
|
1395 |
-
|
1396 |
-
device = self._execution_device
|
1397 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
1398 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
1399 |
-
# corresponds to doing no classifier free guidance.
|
1400 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
1401 |
-
|
1402 |
-
# 3. Encode input prompt
|
1403 |
-
text_encoder_lora_scale = (
|
1404 |
-
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
1405 |
-
)
|
1406 |
-
prompt_embeds = self._encode_prompt(
|
1407 |
-
prompt,
|
1408 |
-
device,
|
1409 |
-
num_images_per_prompt,
|
1410 |
-
do_classifier_free_guidance,
|
1411 |
-
negative_prompt,
|
1412 |
-
prompt_embeds=prompt_embeds,
|
1413 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
1414 |
-
lora_scale=text_encoder_lora_scale,
|
1415 |
-
)
|
1416 |
-
|
1417 |
-
# 4. Preprocess mask
|
1418 |
-
mask_image = preprocess_mask(mask_image, batch_size)
|
1419 |
-
latent_height, latent_width = mask_image.shape[-2:]
|
1420 |
-
mask_image = torch.cat([mask_image] * num_images_per_prompt)
|
1421 |
-
mask_image = mask_image.to(device=device, dtype=prompt_embeds.dtype)
|
1422 |
-
|
1423 |
-
# 5. Set timesteps
|
1424 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
1425 |
-
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, inpaint_strength, device)
|
1426 |
-
|
1427 |
-
# 6. Preprocess image latents
|
1428 |
-
if isinstance(image_latents, list) and any(isinstance(l, torch.Tensor) and l.ndim == 5 for l in image_latents):
|
1429 |
-
image_latents = torch.cat(image_latents).detach()
|
1430 |
-
elif isinstance(image_latents, torch.Tensor) and image_latents.ndim == 5:
|
1431 |
-
image_latents = image_latents.detach()
|
1432 |
-
else:
|
1433 |
-
image_latents = self.image_processor.preprocess(image_latents).detach()
|
1434 |
-
|
1435 |
-
latent_shape = (self.vae.config.latent_channels, latent_height, latent_width)
|
1436 |
-
if image_latents.shape[-3:] != latent_shape:
|
1437 |
-
raise ValueError(
|
1438 |
-
f"Each latent image in `image_latents` must have shape {latent_shape}, "
|
1439 |
-
f"but has shape {image_latents.shape[-3:]}"
|
1440 |
-
)
|
1441 |
-
if image_latents.ndim == 4:
|
1442 |
-
image_latents = image_latents.reshape(batch_size, len(timesteps), *latent_shape)
|
1443 |
-
if image_latents.shape[:2] != (batch_size, len(timesteps)):
|
1444 |
-
raise ValueError(
|
1445 |
-
f"`image_latents` must have batch size {batch_size} with latent images from {len(timesteps)}"
|
1446 |
-
f" timesteps, but has batch size {image_latents.shape[0]} with latent images from"
|
1447 |
-
f" {image_latents.shape[1]} timesteps."
|
1448 |
-
)
|
1449 |
-
image_latents = image_latents.transpose(0, 1).repeat_interleave(num_images_per_prompt, dim=1)
|
1450 |
-
image_latents = image_latents.to(device=device, dtype=prompt_embeds.dtype)
|
1451 |
-
|
1452 |
-
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
1453 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
1454 |
-
|
1455 |
-
# 8. Denoising loop
|
1456 |
-
latents = image_latents[0].clone()
|
1457 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
1458 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
1459 |
-
for i, t in enumerate(timesteps):
|
1460 |
-
# expand the latents if we are doing classifier free guidance
|
1461 |
-
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
1462 |
-
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
1463 |
-
|
1464 |
-
# predict the noise residual
|
1465 |
-
noise_pred = self.unet(
|
1466 |
-
latent_model_input,
|
1467 |
-
t,
|
1468 |
-
encoder_hidden_states=prompt_embeds,
|
1469 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
1470 |
-
).sample
|
1471 |
-
|
1472 |
-
# perform guidance
|
1473 |
-
if do_classifier_free_guidance:
|
1474 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
1475 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
1476 |
-
|
1477 |
-
# compute the previous noisy sample x_t -> x_t-1
|
1478 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
1479 |
-
|
1480 |
-
# mask with inverted latents from appropriate timestep - use original image latent for last step
|
1481 |
-
latents = latents * mask_image + image_latents[i] * (1 - mask_image)
|
1482 |
-
|
1483 |
-
# call the callback, if provided
|
1484 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
1485 |
-
progress_bar.update()
|
1486 |
-
if callback is not None and i % callback_steps == 0:
|
1487 |
-
callback(i, t, latents)
|
1488 |
-
|
1489 |
-
if not output_type == "latent":
|
1490 |
-
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
1491 |
-
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
1492 |
-
else:
|
1493 |
-
image = latents
|
1494 |
-
has_nsfw_concept = None
|
1495 |
-
|
1496 |
-
if has_nsfw_concept is None:
|
1497 |
-
do_denormalize = [True] * image.shape[0]
|
1498 |
-
else:
|
1499 |
-
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
1500 |
-
|
1501 |
-
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
1502 |
-
|
1503 |
-
# Offload last model to CPU
|
1504 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
1505 |
-
self.final_offload_hook.offload()
|
1506 |
-
|
1507 |
-
if not return_dict:
|
1508 |
-
return (image, has_nsfw_concept)
|
1509 |
-
|
1510 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_flax.py
DELETED
@@ -1,919 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import inspect
|
16 |
-
import tempfile
|
17 |
-
import unittest
|
18 |
-
from typing import Dict, List, Tuple
|
19 |
-
|
20 |
-
from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler
|
21 |
-
from diffusers.utils import is_flax_available
|
22 |
-
from diffusers.utils.testing_utils import require_flax
|
23 |
-
|
24 |
-
|
25 |
-
if is_flax_available():
|
26 |
-
import jax
|
27 |
-
import jax.numpy as jnp
|
28 |
-
from jax import random
|
29 |
-
|
30 |
-
jax_device = jax.default_backend()
|
31 |
-
|
32 |
-
|
33 |
-
@require_flax
|
34 |
-
class FlaxSchedulerCommonTest(unittest.TestCase):
|
35 |
-
scheduler_classes = ()
|
36 |
-
forward_default_kwargs = ()
|
37 |
-
|
38 |
-
@property
|
39 |
-
def dummy_sample(self):
|
40 |
-
batch_size = 4
|
41 |
-
num_channels = 3
|
42 |
-
height = 8
|
43 |
-
width = 8
|
44 |
-
|
45 |
-
key1, key2 = random.split(random.PRNGKey(0))
|
46 |
-
sample = random.uniform(key1, (batch_size, num_channels, height, width))
|
47 |
-
|
48 |
-
return sample, key2
|
49 |
-
|
50 |
-
@property
|
51 |
-
def dummy_sample_deter(self):
|
52 |
-
batch_size = 4
|
53 |
-
num_channels = 3
|
54 |
-
height = 8
|
55 |
-
width = 8
|
56 |
-
|
57 |
-
num_elems = batch_size * num_channels * height * width
|
58 |
-
sample = jnp.arange(num_elems)
|
59 |
-
sample = sample.reshape(num_channels, height, width, batch_size)
|
60 |
-
sample = sample / num_elems
|
61 |
-
return jnp.transpose(sample, (3, 0, 1, 2))
|
62 |
-
|
63 |
-
def get_scheduler_config(self):
|
64 |
-
raise NotImplementedError
|
65 |
-
|
66 |
-
def dummy_model(self):
|
67 |
-
def model(sample, t, *args):
|
68 |
-
return sample * t / (t + 1)
|
69 |
-
|
70 |
-
return model
|
71 |
-
|
72 |
-
def check_over_configs(self, time_step=0, **config):
|
73 |
-
kwargs = dict(self.forward_default_kwargs)
|
74 |
-
|
75 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
76 |
-
|
77 |
-
for scheduler_class in self.scheduler_classes:
|
78 |
-
sample, key = self.dummy_sample
|
79 |
-
residual = 0.1 * sample
|
80 |
-
|
81 |
-
scheduler_config = self.get_scheduler_config(**config)
|
82 |
-
scheduler = scheduler_class(**scheduler_config)
|
83 |
-
state = scheduler.create_state()
|
84 |
-
|
85 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
86 |
-
scheduler.save_config(tmpdirname)
|
87 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
88 |
-
|
89 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
90 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
91 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
92 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
93 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
94 |
-
|
95 |
-
output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample
|
96 |
-
new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample
|
97 |
-
|
98 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
99 |
-
|
100 |
-
def check_over_forward(self, time_step=0, **forward_kwargs):
|
101 |
-
kwargs = dict(self.forward_default_kwargs)
|
102 |
-
kwargs.update(forward_kwargs)
|
103 |
-
|
104 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
105 |
-
|
106 |
-
for scheduler_class in self.scheduler_classes:
|
107 |
-
sample, key = self.dummy_sample
|
108 |
-
residual = 0.1 * sample
|
109 |
-
|
110 |
-
scheduler_config = self.get_scheduler_config()
|
111 |
-
scheduler = scheduler_class(**scheduler_config)
|
112 |
-
state = scheduler.create_state()
|
113 |
-
|
114 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
115 |
-
scheduler.save_config(tmpdirname)
|
116 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
117 |
-
|
118 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
119 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
120 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
121 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
122 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
123 |
-
|
124 |
-
output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample
|
125 |
-
new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample
|
126 |
-
|
127 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
128 |
-
|
129 |
-
def test_from_save_pretrained(self):
|
130 |
-
kwargs = dict(self.forward_default_kwargs)
|
131 |
-
|
132 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
133 |
-
|
134 |
-
for scheduler_class in self.scheduler_classes:
|
135 |
-
sample, key = self.dummy_sample
|
136 |
-
residual = 0.1 * sample
|
137 |
-
|
138 |
-
scheduler_config = self.get_scheduler_config()
|
139 |
-
scheduler = scheduler_class(**scheduler_config)
|
140 |
-
state = scheduler.create_state()
|
141 |
-
|
142 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
143 |
-
scheduler.save_config(tmpdirname)
|
144 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
145 |
-
|
146 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
147 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
148 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
149 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
150 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
151 |
-
|
152 |
-
output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample
|
153 |
-
new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample
|
154 |
-
|
155 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
156 |
-
|
157 |
-
def test_step_shape(self):
|
158 |
-
kwargs = dict(self.forward_default_kwargs)
|
159 |
-
|
160 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
161 |
-
|
162 |
-
for scheduler_class in self.scheduler_classes:
|
163 |
-
scheduler_config = self.get_scheduler_config()
|
164 |
-
scheduler = scheduler_class(**scheduler_config)
|
165 |
-
state = scheduler.create_state()
|
166 |
-
|
167 |
-
sample, key = self.dummy_sample
|
168 |
-
residual = 0.1 * sample
|
169 |
-
|
170 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
171 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
172 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
173 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
174 |
-
|
175 |
-
output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample
|
176 |
-
output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample
|
177 |
-
|
178 |
-
self.assertEqual(output_0.shape, sample.shape)
|
179 |
-
self.assertEqual(output_0.shape, output_1.shape)
|
180 |
-
|
181 |
-
def test_scheduler_outputs_equivalence(self):
|
182 |
-
def set_nan_tensor_to_zero(t):
|
183 |
-
return t.at[t != t].set(0)
|
184 |
-
|
185 |
-
def recursive_check(tuple_object, dict_object):
|
186 |
-
if isinstance(tuple_object, (List, Tuple)):
|
187 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
|
188 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
189 |
-
elif isinstance(tuple_object, Dict):
|
190 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
|
191 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
192 |
-
elif tuple_object is None:
|
193 |
-
return
|
194 |
-
else:
|
195 |
-
self.assertTrue(
|
196 |
-
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
|
197 |
-
msg=(
|
198 |
-
"Tuple and dict output are not equal. Difference:"
|
199 |
-
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
|
200 |
-
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
|
201 |
-
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
|
202 |
-
),
|
203 |
-
)
|
204 |
-
|
205 |
-
kwargs = dict(self.forward_default_kwargs)
|
206 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
207 |
-
|
208 |
-
for scheduler_class in self.scheduler_classes:
|
209 |
-
scheduler_config = self.get_scheduler_config()
|
210 |
-
scheduler = scheduler_class(**scheduler_config)
|
211 |
-
state = scheduler.create_state()
|
212 |
-
|
213 |
-
sample, key = self.dummy_sample
|
214 |
-
residual = 0.1 * sample
|
215 |
-
|
216 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
217 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
218 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
219 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
220 |
-
|
221 |
-
outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs)
|
222 |
-
|
223 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
224 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
225 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
226 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
227 |
-
|
228 |
-
outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs)
|
229 |
-
|
230 |
-
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
|
231 |
-
|
232 |
-
def test_deprecated_kwargs(self):
|
233 |
-
for scheduler_class in self.scheduler_classes:
|
234 |
-
has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters
|
235 |
-
has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0
|
236 |
-
|
237 |
-
if has_kwarg_in_model_class and not has_deprecated_kwarg:
|
238 |
-
raise ValueError(
|
239 |
-
f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated"
|
240 |
-
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if"
|
241 |
-
" there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
|
242 |
-
" [<deprecated_argument>]`"
|
243 |
-
)
|
244 |
-
|
245 |
-
if not has_kwarg_in_model_class and has_deprecated_kwarg:
|
246 |
-
raise ValueError(
|
247 |
-
f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated"
|
248 |
-
" kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`"
|
249 |
-
f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the"
|
250 |
-
" deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`"
|
251 |
-
)
|
252 |
-
|
253 |
-
|
254 |
-
@require_flax
|
255 |
-
class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest):
|
256 |
-
scheduler_classes = (FlaxDDPMScheduler,)
|
257 |
-
|
258 |
-
def get_scheduler_config(self, **kwargs):
|
259 |
-
config = {
|
260 |
-
"num_train_timesteps": 1000,
|
261 |
-
"beta_start": 0.0001,
|
262 |
-
"beta_end": 0.02,
|
263 |
-
"beta_schedule": "linear",
|
264 |
-
"variance_type": "fixed_small",
|
265 |
-
"clip_sample": True,
|
266 |
-
}
|
267 |
-
|
268 |
-
config.update(**kwargs)
|
269 |
-
return config
|
270 |
-
|
271 |
-
def test_timesteps(self):
|
272 |
-
for timesteps in [1, 5, 100, 1000]:
|
273 |
-
self.check_over_configs(num_train_timesteps=timesteps)
|
274 |
-
|
275 |
-
def test_betas(self):
|
276 |
-
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
|
277 |
-
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
|
278 |
-
|
279 |
-
def test_schedules(self):
|
280 |
-
for schedule in ["linear", "squaredcos_cap_v2"]:
|
281 |
-
self.check_over_configs(beta_schedule=schedule)
|
282 |
-
|
283 |
-
def test_variance_type(self):
|
284 |
-
for variance in ["fixed_small", "fixed_large", "other"]:
|
285 |
-
self.check_over_configs(variance_type=variance)
|
286 |
-
|
287 |
-
def test_clip_sample(self):
|
288 |
-
for clip_sample in [True, False]:
|
289 |
-
self.check_over_configs(clip_sample=clip_sample)
|
290 |
-
|
291 |
-
def test_time_indices(self):
|
292 |
-
for t in [0, 500, 999]:
|
293 |
-
self.check_over_forward(time_step=t)
|
294 |
-
|
295 |
-
def test_variance(self):
|
296 |
-
scheduler_class = self.scheduler_classes[0]
|
297 |
-
scheduler_config = self.get_scheduler_config()
|
298 |
-
scheduler = scheduler_class(**scheduler_config)
|
299 |
-
state = scheduler.create_state()
|
300 |
-
|
301 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5
|
302 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5
|
303 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5
|
304 |
-
|
305 |
-
def test_full_loop_no_noise(self):
|
306 |
-
scheduler_class = self.scheduler_classes[0]
|
307 |
-
scheduler_config = self.get_scheduler_config()
|
308 |
-
scheduler = scheduler_class(**scheduler_config)
|
309 |
-
state = scheduler.create_state()
|
310 |
-
|
311 |
-
num_trained_timesteps = len(scheduler)
|
312 |
-
|
313 |
-
model = self.dummy_model()
|
314 |
-
sample = self.dummy_sample_deter
|
315 |
-
key1, key2 = random.split(random.PRNGKey(0))
|
316 |
-
|
317 |
-
for t in reversed(range(num_trained_timesteps)):
|
318 |
-
# 1. predict noise residual
|
319 |
-
residual = model(sample, t)
|
320 |
-
|
321 |
-
# 2. predict previous mean of sample x_t-1
|
322 |
-
output = scheduler.step(state, residual, t, sample, key1)
|
323 |
-
pred_prev_sample = output.prev_sample
|
324 |
-
state = output.state
|
325 |
-
key1, key2 = random.split(key2)
|
326 |
-
|
327 |
-
# if t > 0:
|
328 |
-
# noise = self.dummy_sample_deter
|
329 |
-
# variance = scheduler.get_variance(t) ** (0.5) * noise
|
330 |
-
#
|
331 |
-
# sample = pred_prev_sample + variance
|
332 |
-
sample = pred_prev_sample
|
333 |
-
|
334 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
335 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
336 |
-
|
337 |
-
if jax_device == "tpu":
|
338 |
-
assert abs(result_sum - 255.0714) < 1e-2
|
339 |
-
assert abs(result_mean - 0.332124) < 1e-3
|
340 |
-
else:
|
341 |
-
assert abs(result_sum - 255.1113) < 1e-2
|
342 |
-
assert abs(result_mean - 0.332176) < 1e-3
|
343 |
-
|
344 |
-
|
345 |
-
@require_flax
|
346 |
-
class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest):
|
347 |
-
scheduler_classes = (FlaxDDIMScheduler,)
|
348 |
-
forward_default_kwargs = (("num_inference_steps", 50),)
|
349 |
-
|
350 |
-
def get_scheduler_config(self, **kwargs):
|
351 |
-
config = {
|
352 |
-
"num_train_timesteps": 1000,
|
353 |
-
"beta_start": 0.0001,
|
354 |
-
"beta_end": 0.02,
|
355 |
-
"beta_schedule": "linear",
|
356 |
-
}
|
357 |
-
|
358 |
-
config.update(**kwargs)
|
359 |
-
return config
|
360 |
-
|
361 |
-
def full_loop(self, **config):
|
362 |
-
scheduler_class = self.scheduler_classes[0]
|
363 |
-
scheduler_config = self.get_scheduler_config(**config)
|
364 |
-
scheduler = scheduler_class(**scheduler_config)
|
365 |
-
state = scheduler.create_state()
|
366 |
-
key1, key2 = random.split(random.PRNGKey(0))
|
367 |
-
|
368 |
-
num_inference_steps = 10
|
369 |
-
|
370 |
-
model = self.dummy_model()
|
371 |
-
sample = self.dummy_sample_deter
|
372 |
-
|
373 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
374 |
-
|
375 |
-
for t in state.timesteps:
|
376 |
-
residual = model(sample, t)
|
377 |
-
output = scheduler.step(state, residual, t, sample)
|
378 |
-
sample = output.prev_sample
|
379 |
-
state = output.state
|
380 |
-
key1, key2 = random.split(key2)
|
381 |
-
|
382 |
-
return sample
|
383 |
-
|
384 |
-
def check_over_configs(self, time_step=0, **config):
|
385 |
-
kwargs = dict(self.forward_default_kwargs)
|
386 |
-
|
387 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
388 |
-
|
389 |
-
for scheduler_class in self.scheduler_classes:
|
390 |
-
sample, _ = self.dummy_sample
|
391 |
-
residual = 0.1 * sample
|
392 |
-
|
393 |
-
scheduler_config = self.get_scheduler_config(**config)
|
394 |
-
scheduler = scheduler_class(**scheduler_config)
|
395 |
-
state = scheduler.create_state()
|
396 |
-
|
397 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
398 |
-
scheduler.save_config(tmpdirname)
|
399 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
400 |
-
|
401 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
402 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
403 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
404 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
405 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
406 |
-
|
407 |
-
output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
|
408 |
-
new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
|
409 |
-
|
410 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
411 |
-
|
412 |
-
def test_from_save_pretrained(self):
|
413 |
-
kwargs = dict(self.forward_default_kwargs)
|
414 |
-
|
415 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
416 |
-
|
417 |
-
for scheduler_class in self.scheduler_classes:
|
418 |
-
sample, _ = self.dummy_sample
|
419 |
-
residual = 0.1 * sample
|
420 |
-
|
421 |
-
scheduler_config = self.get_scheduler_config()
|
422 |
-
scheduler = scheduler_class(**scheduler_config)
|
423 |
-
state = scheduler.create_state()
|
424 |
-
|
425 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
426 |
-
scheduler.save_config(tmpdirname)
|
427 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
428 |
-
|
429 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
430 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
431 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
432 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
433 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
434 |
-
|
435 |
-
output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
|
436 |
-
new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample
|
437 |
-
|
438 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
439 |
-
|
440 |
-
def check_over_forward(self, time_step=0, **forward_kwargs):
|
441 |
-
kwargs = dict(self.forward_default_kwargs)
|
442 |
-
kwargs.update(forward_kwargs)
|
443 |
-
|
444 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
445 |
-
|
446 |
-
for scheduler_class in self.scheduler_classes:
|
447 |
-
sample, _ = self.dummy_sample
|
448 |
-
residual = 0.1 * sample
|
449 |
-
|
450 |
-
scheduler_config = self.get_scheduler_config()
|
451 |
-
scheduler = scheduler_class(**scheduler_config)
|
452 |
-
state = scheduler.create_state()
|
453 |
-
|
454 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
455 |
-
scheduler.save_config(tmpdirname)
|
456 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
457 |
-
|
458 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
459 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
460 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
|
461 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
462 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
463 |
-
|
464 |
-
output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
|
465 |
-
new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
|
466 |
-
|
467 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
468 |
-
|
469 |
-
def test_scheduler_outputs_equivalence(self):
|
470 |
-
def set_nan_tensor_to_zero(t):
|
471 |
-
return t.at[t != t].set(0)
|
472 |
-
|
473 |
-
def recursive_check(tuple_object, dict_object):
|
474 |
-
if isinstance(tuple_object, (List, Tuple)):
|
475 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
|
476 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
477 |
-
elif isinstance(tuple_object, Dict):
|
478 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
|
479 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
480 |
-
elif tuple_object is None:
|
481 |
-
return
|
482 |
-
else:
|
483 |
-
self.assertTrue(
|
484 |
-
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
|
485 |
-
msg=(
|
486 |
-
"Tuple and dict output are not equal. Difference:"
|
487 |
-
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
|
488 |
-
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
|
489 |
-
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
|
490 |
-
),
|
491 |
-
)
|
492 |
-
|
493 |
-
kwargs = dict(self.forward_default_kwargs)
|
494 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
495 |
-
|
496 |
-
for scheduler_class in self.scheduler_classes:
|
497 |
-
scheduler_config = self.get_scheduler_config()
|
498 |
-
scheduler = scheduler_class(**scheduler_config)
|
499 |
-
state = scheduler.create_state()
|
500 |
-
|
501 |
-
sample, _ = self.dummy_sample
|
502 |
-
residual = 0.1 * sample
|
503 |
-
|
504 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
505 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
506 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
507 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
508 |
-
|
509 |
-
outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
|
510 |
-
|
511 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
512 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
513 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
514 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
515 |
-
|
516 |
-
outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
|
517 |
-
|
518 |
-
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
|
519 |
-
|
520 |
-
def test_step_shape(self):
|
521 |
-
kwargs = dict(self.forward_default_kwargs)
|
522 |
-
|
523 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
524 |
-
|
525 |
-
for scheduler_class in self.scheduler_classes:
|
526 |
-
scheduler_config = self.get_scheduler_config()
|
527 |
-
scheduler = scheduler_class(**scheduler_config)
|
528 |
-
state = scheduler.create_state()
|
529 |
-
|
530 |
-
sample, _ = self.dummy_sample
|
531 |
-
residual = 0.1 * sample
|
532 |
-
|
533 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
534 |
-
state = scheduler.set_timesteps(state, num_inference_steps)
|
535 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
536 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
537 |
-
|
538 |
-
output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample
|
539 |
-
output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
|
540 |
-
|
541 |
-
self.assertEqual(output_0.shape, sample.shape)
|
542 |
-
self.assertEqual(output_0.shape, output_1.shape)
|
543 |
-
|
544 |
-
def test_timesteps(self):
|
545 |
-
for timesteps in [100, 500, 1000]:
|
546 |
-
self.check_over_configs(num_train_timesteps=timesteps)
|
547 |
-
|
548 |
-
def test_steps_offset(self):
|
549 |
-
for steps_offset in [0, 1]:
|
550 |
-
self.check_over_configs(steps_offset=steps_offset)
|
551 |
-
|
552 |
-
scheduler_class = self.scheduler_classes[0]
|
553 |
-
scheduler_config = self.get_scheduler_config(steps_offset=1)
|
554 |
-
scheduler = scheduler_class(**scheduler_config)
|
555 |
-
state = scheduler.create_state()
|
556 |
-
state = scheduler.set_timesteps(state, 5)
|
557 |
-
assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all()
|
558 |
-
|
559 |
-
def test_betas(self):
|
560 |
-
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
|
561 |
-
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
|
562 |
-
|
563 |
-
def test_schedules(self):
|
564 |
-
for schedule in ["linear", "squaredcos_cap_v2"]:
|
565 |
-
self.check_over_configs(beta_schedule=schedule)
|
566 |
-
|
567 |
-
def test_time_indices(self):
|
568 |
-
for t in [1, 10, 49]:
|
569 |
-
self.check_over_forward(time_step=t)
|
570 |
-
|
571 |
-
def test_inference_steps(self):
|
572 |
-
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
|
573 |
-
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
|
574 |
-
|
575 |
-
def test_variance(self):
|
576 |
-
scheduler_class = self.scheduler_classes[0]
|
577 |
-
scheduler_config = self.get_scheduler_config()
|
578 |
-
scheduler = scheduler_class(**scheduler_config)
|
579 |
-
state = scheduler.create_state()
|
580 |
-
|
581 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5
|
582 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5
|
583 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5
|
584 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5
|
585 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5
|
586 |
-
assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5
|
587 |
-
|
588 |
-
def test_full_loop_no_noise(self):
|
589 |
-
sample = self.full_loop()
|
590 |
-
|
591 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
592 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
593 |
-
|
594 |
-
assert abs(result_sum - 172.0067) < 1e-2
|
595 |
-
assert abs(result_mean - 0.223967) < 1e-3
|
596 |
-
|
597 |
-
def test_full_loop_with_set_alpha_to_one(self):
|
598 |
-
# We specify different beta, so that the first alpha is 0.99
|
599 |
-
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
|
600 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
601 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
602 |
-
|
603 |
-
if jax_device == "tpu":
|
604 |
-
assert abs(result_sum - 149.8409) < 1e-2
|
605 |
-
assert abs(result_mean - 0.1951) < 1e-3
|
606 |
-
else:
|
607 |
-
assert abs(result_sum - 149.8295) < 1e-2
|
608 |
-
assert abs(result_mean - 0.1951) < 1e-3
|
609 |
-
|
610 |
-
def test_full_loop_with_no_set_alpha_to_one(self):
|
611 |
-
# We specify different beta, so that the first alpha is 0.99
|
612 |
-
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
|
613 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
614 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
615 |
-
|
616 |
-
if jax_device == "tpu":
|
617 |
-
pass
|
618 |
-
# FIXME: both result_sum and result_mean are nan on TPU
|
619 |
-
# assert jnp.isnan(result_sum)
|
620 |
-
# assert jnp.isnan(result_mean)
|
621 |
-
else:
|
622 |
-
assert abs(result_sum - 149.0784) < 1e-2
|
623 |
-
assert abs(result_mean - 0.1941) < 1e-3
|
624 |
-
|
625 |
-
def test_prediction_type(self):
|
626 |
-
for prediction_type in ["epsilon", "sample", "v_prediction"]:
|
627 |
-
self.check_over_configs(prediction_type=prediction_type)
|
628 |
-
|
629 |
-
|
630 |
-
@require_flax
|
631 |
-
class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest):
|
632 |
-
scheduler_classes = (FlaxPNDMScheduler,)
|
633 |
-
forward_default_kwargs = (("num_inference_steps", 50),)
|
634 |
-
|
635 |
-
def get_scheduler_config(self, **kwargs):
|
636 |
-
config = {
|
637 |
-
"num_train_timesteps": 1000,
|
638 |
-
"beta_start": 0.0001,
|
639 |
-
"beta_end": 0.02,
|
640 |
-
"beta_schedule": "linear",
|
641 |
-
}
|
642 |
-
|
643 |
-
config.update(**kwargs)
|
644 |
-
return config
|
645 |
-
|
646 |
-
def check_over_configs(self, time_step=0, **config):
|
647 |
-
kwargs = dict(self.forward_default_kwargs)
|
648 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
649 |
-
sample, _ = self.dummy_sample
|
650 |
-
residual = 0.1 * sample
|
651 |
-
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
|
652 |
-
|
653 |
-
for scheduler_class in self.scheduler_classes:
|
654 |
-
scheduler_config = self.get_scheduler_config(**config)
|
655 |
-
scheduler = scheduler_class(**scheduler_config)
|
656 |
-
state = scheduler.create_state()
|
657 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
658 |
-
# copy over dummy past residuals
|
659 |
-
state = state.replace(ets=dummy_past_residuals[:])
|
660 |
-
|
661 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
662 |
-
scheduler.save_config(tmpdirname)
|
663 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
664 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
|
665 |
-
# copy over dummy past residuals
|
666 |
-
new_state = new_state.replace(ets=dummy_past_residuals[:])
|
667 |
-
|
668 |
-
(prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
|
669 |
-
(new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
|
670 |
-
|
671 |
-
assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical"
|
672 |
-
|
673 |
-
output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
|
674 |
-
new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
|
675 |
-
|
676 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
677 |
-
|
678 |
-
def test_from_save_pretrained(self):
|
679 |
-
pass
|
680 |
-
|
681 |
-
def test_scheduler_outputs_equivalence(self):
|
682 |
-
def set_nan_tensor_to_zero(t):
|
683 |
-
return t.at[t != t].set(0)
|
684 |
-
|
685 |
-
def recursive_check(tuple_object, dict_object):
|
686 |
-
if isinstance(tuple_object, (List, Tuple)):
|
687 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
|
688 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
689 |
-
elif isinstance(tuple_object, Dict):
|
690 |
-
for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
|
691 |
-
recursive_check(tuple_iterable_value, dict_iterable_value)
|
692 |
-
elif tuple_object is None:
|
693 |
-
return
|
694 |
-
else:
|
695 |
-
self.assertTrue(
|
696 |
-
jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
|
697 |
-
msg=(
|
698 |
-
"Tuple and dict output are not equal. Difference:"
|
699 |
-
f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:"
|
700 |
-
f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has"
|
701 |
-
f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}."
|
702 |
-
),
|
703 |
-
)
|
704 |
-
|
705 |
-
kwargs = dict(self.forward_default_kwargs)
|
706 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
707 |
-
|
708 |
-
for scheduler_class in self.scheduler_classes:
|
709 |
-
scheduler_config = self.get_scheduler_config()
|
710 |
-
scheduler = scheduler_class(**scheduler_config)
|
711 |
-
state = scheduler.create_state()
|
712 |
-
|
713 |
-
sample, _ = self.dummy_sample
|
714 |
-
residual = 0.1 * sample
|
715 |
-
|
716 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
717 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
718 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
719 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
720 |
-
|
721 |
-
outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
|
722 |
-
|
723 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
724 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
725 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
726 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
727 |
-
|
728 |
-
outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
|
729 |
-
|
730 |
-
recursive_check(outputs_tuple[0], outputs_dict.prev_sample)
|
731 |
-
|
732 |
-
def check_over_forward(self, time_step=0, **forward_kwargs):
|
733 |
-
kwargs = dict(self.forward_default_kwargs)
|
734 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
735 |
-
sample, _ = self.dummy_sample
|
736 |
-
residual = 0.1 * sample
|
737 |
-
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
|
738 |
-
|
739 |
-
for scheduler_class in self.scheduler_classes:
|
740 |
-
scheduler_config = self.get_scheduler_config()
|
741 |
-
scheduler = scheduler_class(**scheduler_config)
|
742 |
-
state = scheduler.create_state()
|
743 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
744 |
-
|
745 |
-
# copy over dummy past residuals (must be after setting timesteps)
|
746 |
-
scheduler.ets = dummy_past_residuals[:]
|
747 |
-
|
748 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
749 |
-
scheduler.save_config(tmpdirname)
|
750 |
-
new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname)
|
751 |
-
# copy over dummy past residuals
|
752 |
-
new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape)
|
753 |
-
|
754 |
-
# copy over dummy past residual (must be after setting timesteps)
|
755 |
-
new_state.replace(ets=dummy_past_residuals[:])
|
756 |
-
|
757 |
-
output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs)
|
758 |
-
new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs)
|
759 |
-
|
760 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
761 |
-
|
762 |
-
output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs)
|
763 |
-
new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs)
|
764 |
-
|
765 |
-
assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
|
766 |
-
|
767 |
-
def full_loop(self, **config):
|
768 |
-
scheduler_class = self.scheduler_classes[0]
|
769 |
-
scheduler_config = self.get_scheduler_config(**config)
|
770 |
-
scheduler = scheduler_class(**scheduler_config)
|
771 |
-
state = scheduler.create_state()
|
772 |
-
|
773 |
-
num_inference_steps = 10
|
774 |
-
model = self.dummy_model()
|
775 |
-
sample = self.dummy_sample_deter
|
776 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
777 |
-
|
778 |
-
for i, t in enumerate(state.prk_timesteps):
|
779 |
-
residual = model(sample, t)
|
780 |
-
sample, state = scheduler.step_prk(state, residual, t, sample)
|
781 |
-
|
782 |
-
for i, t in enumerate(state.plms_timesteps):
|
783 |
-
residual = model(sample, t)
|
784 |
-
sample, state = scheduler.step_plms(state, residual, t, sample)
|
785 |
-
|
786 |
-
return sample
|
787 |
-
|
788 |
-
def test_step_shape(self):
|
789 |
-
kwargs = dict(self.forward_default_kwargs)
|
790 |
-
|
791 |
-
num_inference_steps = kwargs.pop("num_inference_steps", None)
|
792 |
-
|
793 |
-
for scheduler_class in self.scheduler_classes:
|
794 |
-
scheduler_config = self.get_scheduler_config()
|
795 |
-
scheduler = scheduler_class(**scheduler_config)
|
796 |
-
state = scheduler.create_state()
|
797 |
-
|
798 |
-
sample, _ = self.dummy_sample
|
799 |
-
residual = 0.1 * sample
|
800 |
-
|
801 |
-
if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
|
802 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
803 |
-
elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
|
804 |
-
kwargs["num_inference_steps"] = num_inference_steps
|
805 |
-
|
806 |
-
# copy over dummy past residuals (must be done after set_timesteps)
|
807 |
-
dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05])
|
808 |
-
state = state.replace(ets=dummy_past_residuals[:])
|
809 |
-
|
810 |
-
output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs)
|
811 |
-
output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs)
|
812 |
-
|
813 |
-
self.assertEqual(output_0.shape, sample.shape)
|
814 |
-
self.assertEqual(output_0.shape, output_1.shape)
|
815 |
-
|
816 |
-
output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs)
|
817 |
-
output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs)
|
818 |
-
|
819 |
-
self.assertEqual(output_0.shape, sample.shape)
|
820 |
-
self.assertEqual(output_0.shape, output_1.shape)
|
821 |
-
|
822 |
-
def test_timesteps(self):
|
823 |
-
for timesteps in [100, 1000]:
|
824 |
-
self.check_over_configs(num_train_timesteps=timesteps)
|
825 |
-
|
826 |
-
def test_steps_offset(self):
|
827 |
-
for steps_offset in [0, 1]:
|
828 |
-
self.check_over_configs(steps_offset=steps_offset)
|
829 |
-
|
830 |
-
scheduler_class = self.scheduler_classes[0]
|
831 |
-
scheduler_config = self.get_scheduler_config(steps_offset=1)
|
832 |
-
scheduler = scheduler_class(**scheduler_config)
|
833 |
-
state = scheduler.create_state()
|
834 |
-
state = scheduler.set_timesteps(state, 10, shape=())
|
835 |
-
assert jnp.equal(
|
836 |
-
state.timesteps,
|
837 |
-
jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]),
|
838 |
-
).all()
|
839 |
-
|
840 |
-
def test_betas(self):
|
841 |
-
for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
|
842 |
-
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
|
843 |
-
|
844 |
-
def test_schedules(self):
|
845 |
-
for schedule in ["linear", "squaredcos_cap_v2"]:
|
846 |
-
self.check_over_configs(beta_schedule=schedule)
|
847 |
-
|
848 |
-
def test_time_indices(self):
|
849 |
-
for t in [1, 5, 10]:
|
850 |
-
self.check_over_forward(time_step=t)
|
851 |
-
|
852 |
-
def test_inference_steps(self):
|
853 |
-
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
|
854 |
-
self.check_over_forward(num_inference_steps=num_inference_steps)
|
855 |
-
|
856 |
-
def test_pow_of_3_inference_steps(self):
|
857 |
-
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
|
858 |
-
num_inference_steps = 27
|
859 |
-
|
860 |
-
for scheduler_class in self.scheduler_classes:
|
861 |
-
sample, _ = self.dummy_sample
|
862 |
-
residual = 0.1 * sample
|
863 |
-
|
864 |
-
scheduler_config = self.get_scheduler_config()
|
865 |
-
scheduler = scheduler_class(**scheduler_config)
|
866 |
-
state = scheduler.create_state()
|
867 |
-
|
868 |
-
state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape)
|
869 |
-
|
870 |
-
# before power of 3 fix, would error on first step, so we only need to do two
|
871 |
-
for i, t in enumerate(state.prk_timesteps[:2]):
|
872 |
-
sample, state = scheduler.step_prk(state, residual, t, sample)
|
873 |
-
|
874 |
-
def test_inference_plms_no_past_residuals(self):
|
875 |
-
with self.assertRaises(ValueError):
|
876 |
-
scheduler_class = self.scheduler_classes[0]
|
877 |
-
scheduler_config = self.get_scheduler_config()
|
878 |
-
scheduler = scheduler_class(**scheduler_config)
|
879 |
-
state = scheduler.create_state()
|
880 |
-
|
881 |
-
scheduler.step_plms(state, self.dummy_sample, 1, self.dummy_sample).prev_sample
|
882 |
-
|
883 |
-
def test_full_loop_no_noise(self):
|
884 |
-
sample = self.full_loop()
|
885 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
886 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
887 |
-
|
888 |
-
if jax_device == "tpu":
|
889 |
-
assert abs(result_sum - 198.1275) < 1e-2
|
890 |
-
assert abs(result_mean - 0.2580) < 1e-3
|
891 |
-
else:
|
892 |
-
assert abs(result_sum - 198.1318) < 1e-2
|
893 |
-
assert abs(result_mean - 0.2580) < 1e-3
|
894 |
-
|
895 |
-
def test_full_loop_with_set_alpha_to_one(self):
|
896 |
-
# We specify different beta, so that the first alpha is 0.99
|
897 |
-
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
|
898 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
899 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
900 |
-
|
901 |
-
if jax_device == "tpu":
|
902 |
-
assert abs(result_sum - 186.83226) < 1e-2
|
903 |
-
assert abs(result_mean - 0.24327) < 1e-3
|
904 |
-
else:
|
905 |
-
assert abs(result_sum - 186.9466) < 1e-2
|
906 |
-
assert abs(result_mean - 0.24342) < 1e-3
|
907 |
-
|
908 |
-
def test_full_loop_with_no_set_alpha_to_one(self):
|
909 |
-
# We specify different beta, so that the first alpha is 0.99
|
910 |
-
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
|
911 |
-
result_sum = jnp.sum(jnp.abs(sample))
|
912 |
-
result_mean = jnp.mean(jnp.abs(sample))
|
913 |
-
|
914 |
-
if jax_device == "tpu":
|
915 |
-
assert abs(result_sum - 186.83226) < 1e-2
|
916 |
-
assert abs(result_mean - 0.24327) < 1e-3
|
917 |
-
else:
|
918 |
-
assert abs(result_sum - 186.9482) < 1e-2
|
919 |
-
assert abs(result_mean - 0.2434) < 1e-3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
DELETED
@@ -1,237 +0,0 @@
|
|
1 |
-
import mmcv
|
2 |
-
import numpy as np
|
3 |
-
import torch
|
4 |
-
|
5 |
-
from ..builder import BBOX_CODERS
|
6 |
-
from .base_bbox_coder import BaseBBoxCoder
|
7 |
-
|
8 |
-
|
9 |
-
@BBOX_CODERS.register_module()
|
10 |
-
class DeltaXYWHBBoxCoder(BaseBBoxCoder):
|
11 |
-
"""Delta XYWH BBox coder.
|
12 |
-
|
13 |
-
Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
|
14 |
-
this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
|
15 |
-
decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
|
16 |
-
|
17 |
-
Args:
|
18 |
-
target_means (Sequence[float]): Denormalizing means of target for
|
19 |
-
delta coordinates
|
20 |
-
target_stds (Sequence[float]): Denormalizing standard deviation of
|
21 |
-
target for delta coordinates
|
22 |
-
clip_border (bool, optional): Whether clip the objects outside the
|
23 |
-
border of the image. Defaults to True.
|
24 |
-
"""
|
25 |
-
|
26 |
-
def __init__(self,
|
27 |
-
target_means=(0., 0., 0., 0.),
|
28 |
-
target_stds=(1., 1., 1., 1.),
|
29 |
-
clip_border=True):
|
30 |
-
super(BaseBBoxCoder, self).__init__()
|
31 |
-
self.means = target_means
|
32 |
-
self.stds = target_stds
|
33 |
-
self.clip_border = clip_border
|
34 |
-
|
35 |
-
def encode(self, bboxes, gt_bboxes):
|
36 |
-
"""Get box regression transformation deltas that can be used to
|
37 |
-
transform the ``bboxes`` into the ``gt_bboxes``.
|
38 |
-
|
39 |
-
Args:
|
40 |
-
bboxes (torch.Tensor): Source boxes, e.g., object proposals.
|
41 |
-
gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
|
42 |
-
ground-truth boxes.
|
43 |
-
|
44 |
-
Returns:
|
45 |
-
torch.Tensor: Box transformation deltas
|
46 |
-
"""
|
47 |
-
|
48 |
-
assert bboxes.size(0) == gt_bboxes.size(0)
|
49 |
-
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
|
50 |
-
encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
|
51 |
-
return encoded_bboxes
|
52 |
-
|
53 |
-
def decode(self,
|
54 |
-
bboxes,
|
55 |
-
pred_bboxes,
|
56 |
-
max_shape=None,
|
57 |
-
wh_ratio_clip=16 / 1000):
|
58 |
-
"""Apply transformation `pred_bboxes` to `boxes`.
|
59 |
-
|
60 |
-
Args:
|
61 |
-
bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
|
62 |
-
pred_bboxes (Tensor): Encoded offsets with respect to each roi.
|
63 |
-
Has shape (B, N, num_classes * 4) or (B, N, 4) or
|
64 |
-
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
|
65 |
-
when rois is a grid of anchors.Offset encoding follows [1]_.
|
66 |
-
max_shape (Sequence[int] or torch.Tensor or Sequence[
|
67 |
-
Sequence[int]],optional): Maximum bounds for boxes, specifies
|
68 |
-
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
|
69 |
-
the max_shape should be a Sequence[Sequence[int]]
|
70 |
-
and the length of max_shape should also be B.
|
71 |
-
wh_ratio_clip (float, optional): The allowed ratio between
|
72 |
-
width and height.
|
73 |
-
|
74 |
-
Returns:
|
75 |
-
torch.Tensor: Decoded boxes.
|
76 |
-
"""
|
77 |
-
|
78 |
-
assert pred_bboxes.size(0) == bboxes.size(0)
|
79 |
-
if pred_bboxes.ndim == 3:
|
80 |
-
assert pred_bboxes.size(1) == bboxes.size(1)
|
81 |
-
decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,
|
82 |
-
max_shape, wh_ratio_clip, self.clip_border)
|
83 |
-
|
84 |
-
return decoded_bboxes
|
85 |
-
|
86 |
-
|
87 |
-
@mmcv.jit(coderize=True)
|
88 |
-
def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
|
89 |
-
"""Compute deltas of proposals w.r.t. gt.
|
90 |
-
|
91 |
-
We usually compute the deltas of x, y, w, h of proposals w.r.t ground
|
92 |
-
truth bboxes to get regression target.
|
93 |
-
This is the inverse function of :func:`delta2bbox`.
|
94 |
-
|
95 |
-
Args:
|
96 |
-
proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
|
97 |
-
gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
|
98 |
-
means (Sequence[float]): Denormalizing means for delta coordinates
|
99 |
-
stds (Sequence[float]): Denormalizing standard deviation for delta
|
100 |
-
coordinates
|
101 |
-
|
102 |
-
Returns:
|
103 |
-
Tensor: deltas with shape (N, 4), where columns represent dx, dy,
|
104 |
-
dw, dh.
|
105 |
-
"""
|
106 |
-
assert proposals.size() == gt.size()
|
107 |
-
|
108 |
-
proposals = proposals.float()
|
109 |
-
gt = gt.float()
|
110 |
-
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
|
111 |
-
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
|
112 |
-
pw = proposals[..., 2] - proposals[..., 0]
|
113 |
-
ph = proposals[..., 3] - proposals[..., 1]
|
114 |
-
|
115 |
-
gx = (gt[..., 0] + gt[..., 2]) * 0.5
|
116 |
-
gy = (gt[..., 1] + gt[..., 3]) * 0.5
|
117 |
-
gw = gt[..., 2] - gt[..., 0]
|
118 |
-
gh = gt[..., 3] - gt[..., 1]
|
119 |
-
|
120 |
-
dx = (gx - px) / pw
|
121 |
-
dy = (gy - py) / ph
|
122 |
-
dw = torch.log(gw / pw)
|
123 |
-
dh = torch.log(gh / ph)
|
124 |
-
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
|
125 |
-
|
126 |
-
means = deltas.new_tensor(means).unsqueeze(0)
|
127 |
-
stds = deltas.new_tensor(stds).unsqueeze(0)
|
128 |
-
deltas = deltas.sub_(means).div_(stds)
|
129 |
-
|
130 |
-
return deltas
|
131 |
-
|
132 |
-
|
133 |
-
@mmcv.jit(coderize=True)
|
134 |
-
def delta2bbox(rois,
|
135 |
-
deltas,
|
136 |
-
means=(0., 0., 0., 0.),
|
137 |
-
stds=(1., 1., 1., 1.),
|
138 |
-
max_shape=None,
|
139 |
-
wh_ratio_clip=16 / 1000,
|
140 |
-
clip_border=True):
|
141 |
-
"""Apply deltas to shift/scale base boxes.
|
142 |
-
|
143 |
-
Typically the rois are anchor or proposed bounding boxes and the deltas are
|
144 |
-
network outputs used to shift/scale those boxes.
|
145 |
-
This is the inverse function of :func:`bbox2delta`.
|
146 |
-
|
147 |
-
Args:
|
148 |
-
rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
|
149 |
-
deltas (Tensor): Encoded offsets with respect to each roi.
|
150 |
-
Has shape (B, N, num_classes * 4) or (B, N, 4) or
|
151 |
-
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
|
152 |
-
when rois is a grid of anchors.Offset encoding follows [1]_.
|
153 |
-
means (Sequence[float]): Denormalizing means for delta coordinates
|
154 |
-
stds (Sequence[float]): Denormalizing standard deviation for delta
|
155 |
-
coordinates
|
156 |
-
max_shape (Sequence[int] or torch.Tensor or Sequence[
|
157 |
-
Sequence[int]],optional): Maximum bounds for boxes, specifies
|
158 |
-
(H, W, C) or (H, W). If rois shape is (B, N, 4), then
|
159 |
-
the max_shape should be a Sequence[Sequence[int]]
|
160 |
-
and the length of max_shape should also be B.
|
161 |
-
wh_ratio_clip (float): Maximum aspect ratio for boxes.
|
162 |
-
clip_border (bool, optional): Whether clip the objects outside the
|
163 |
-
border of the image. Defaults to True.
|
164 |
-
|
165 |
-
Returns:
|
166 |
-
Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
|
167 |
-
(N, num_classes * 4) or (N, 4), where 4 represent
|
168 |
-
tl_x, tl_y, br_x, br_y.
|
169 |
-
|
170 |
-
References:
|
171 |
-
.. [1] https://arxiv.org/abs/1311.2524
|
172 |
-
|
173 |
-
Example:
|
174 |
-
>>> rois = torch.Tensor([[ 0., 0., 1., 1.],
|
175 |
-
>>> [ 0., 0., 1., 1.],
|
176 |
-
>>> [ 0., 0., 1., 1.],
|
177 |
-
>>> [ 5., 5., 5., 5.]])
|
178 |
-
>>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
|
179 |
-
>>> [ 1., 1., 1., 1.],
|
180 |
-
>>> [ 0., 0., 2., -1.],
|
181 |
-
>>> [ 0.7, -1.9, -0.5, 0.3]])
|
182 |
-
>>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
|
183 |
-
tensor([[0.0000, 0.0000, 1.0000, 1.0000],
|
184 |
-
[0.1409, 0.1409, 2.8591, 2.8591],
|
185 |
-
[0.0000, 0.3161, 4.1945, 0.6839],
|
186 |
-
[5.0000, 5.0000, 5.0000, 5.0000]])
|
187 |
-
"""
|
188 |
-
means = deltas.new_tensor(means).view(1,
|
189 |
-
-1).repeat(1,
|
190 |
-
deltas.size(-1) // 4)
|
191 |
-
stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
|
192 |
-
denorm_deltas = deltas * stds + means
|
193 |
-
dx = denorm_deltas[..., 0::4]
|
194 |
-
dy = denorm_deltas[..., 1::4]
|
195 |
-
dw = denorm_deltas[..., 2::4]
|
196 |
-
dh = denorm_deltas[..., 3::4]
|
197 |
-
max_ratio = np.abs(np.log(wh_ratio_clip))
|
198 |
-
dw = dw.clamp(min=-max_ratio, max=max_ratio)
|
199 |
-
dh = dh.clamp(min=-max_ratio, max=max_ratio)
|
200 |
-
x1, y1 = rois[..., 0], rois[..., 1]
|
201 |
-
x2, y2 = rois[..., 2], rois[..., 3]
|
202 |
-
# Compute center of each roi
|
203 |
-
px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
|
204 |
-
py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
|
205 |
-
# Compute width/height of each roi
|
206 |
-
pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
|
207 |
-
ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
|
208 |
-
# Use exp(network energy) to enlarge/shrink each roi
|
209 |
-
gw = pw * dw.exp()
|
210 |
-
gh = ph * dh.exp()
|
211 |
-
# Use network energy to shift the center of each roi
|
212 |
-
gx = px + pw * dx
|
213 |
-
gy = py + ph * dy
|
214 |
-
# Convert center-xy/width/height to top-left, bottom-right
|
215 |
-
x1 = gx - gw * 0.5
|
216 |
-
y1 = gy - gh * 0.5
|
217 |
-
x2 = gx + gw * 0.5
|
218 |
-
y2 = gy + gh * 0.5
|
219 |
-
|
220 |
-
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
|
221 |
-
|
222 |
-
if clip_border and max_shape is not None:
|
223 |
-
if not isinstance(max_shape, torch.Tensor):
|
224 |
-
max_shape = x1.new_tensor(max_shape)
|
225 |
-
max_shape = max_shape[..., :2].type_as(x1)
|
226 |
-
if max_shape.ndim == 2:
|
227 |
-
assert bboxes.ndim == 3
|
228 |
-
assert max_shape.size(0) == bboxes.size(0)
|
229 |
-
|
230 |
-
min_xy = x1.new_tensor(0)
|
231 |
-
max_xy = torch.cat(
|
232 |
-
[max_shape] * (deltas.size(-1) // 2),
|
233 |
-
dim=-1).flip(-1).unsqueeze(-2)
|
234 |
-
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
|
235 |
-
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
|
236 |
-
|
237 |
-
return bboxes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://resnest101',
|
4 |
-
backbone=dict(
|
5 |
-
type='ResNeSt',
|
6 |
-
stem_channels=128,
|
7 |
-
radix=2,
|
8 |
-
reduction_factor=4,
|
9 |
-
avg_down_stride=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/progressbar.py
DELETED
@@ -1,208 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
import sys
|
3 |
-
from collections.abc import Iterable
|
4 |
-
from multiprocessing import Pool
|
5 |
-
from shutil import get_terminal_size
|
6 |
-
|
7 |
-
from .timer import Timer
|
8 |
-
|
9 |
-
|
10 |
-
class ProgressBar:
|
11 |
-
"""A progress bar which can print the progress."""
|
12 |
-
|
13 |
-
def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
|
14 |
-
self.task_num = task_num
|
15 |
-
self.bar_width = bar_width
|
16 |
-
self.completed = 0
|
17 |
-
self.file = file
|
18 |
-
if start:
|
19 |
-
self.start()
|
20 |
-
|
21 |
-
@property
|
22 |
-
def terminal_width(self):
|
23 |
-
width, _ = get_terminal_size()
|
24 |
-
return width
|
25 |
-
|
26 |
-
def start(self):
|
27 |
-
if self.task_num > 0:
|
28 |
-
self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, '
|
29 |
-
'elapsed: 0s, ETA:')
|
30 |
-
else:
|
31 |
-
self.file.write('completed: 0, elapsed: 0s')
|
32 |
-
self.file.flush()
|
33 |
-
self.timer = Timer()
|
34 |
-
|
35 |
-
def update(self, num_tasks=1):
|
36 |
-
assert num_tasks > 0
|
37 |
-
self.completed += num_tasks
|
38 |
-
elapsed = self.timer.since_start()
|
39 |
-
if elapsed > 0:
|
40 |
-
fps = self.completed / elapsed
|
41 |
-
else:
|
42 |
-
fps = float('inf')
|
43 |
-
if self.task_num > 0:
|
44 |
-
percentage = self.completed / float(self.task_num)
|
45 |
-
eta = int(elapsed * (1 - percentage) / percentage + 0.5)
|
46 |
-
msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \
|
47 |
-
f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \
|
48 |
-
f'ETA: {eta:5}s'
|
49 |
-
|
50 |
-
bar_width = min(self.bar_width,
|
51 |
-
int(self.terminal_width - len(msg)) + 2,
|
52 |
-
int(self.terminal_width * 0.6))
|
53 |
-
bar_width = max(2, bar_width)
|
54 |
-
mark_width = int(bar_width * percentage)
|
55 |
-
bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width)
|
56 |
-
self.file.write(msg.format(bar_chars))
|
57 |
-
else:
|
58 |
-
self.file.write(
|
59 |
-
f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,'
|
60 |
-
f' {fps:.1f} tasks/s')
|
61 |
-
self.file.flush()
|
62 |
-
|
63 |
-
|
64 |
-
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
|
65 |
-
"""Track the progress of tasks execution with a progress bar.
|
66 |
-
|
67 |
-
Tasks are done with a simple for-loop.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
func (callable): The function to be applied to each task.
|
71 |
-
tasks (list or tuple[Iterable, int]): A list of tasks or
|
72 |
-
(tasks, total num).
|
73 |
-
bar_width (int): Width of progress bar.
|
74 |
-
|
75 |
-
Returns:
|
76 |
-
list: The task results.
|
77 |
-
"""
|
78 |
-
if isinstance(tasks, tuple):
|
79 |
-
assert len(tasks) == 2
|
80 |
-
assert isinstance(tasks[0], Iterable)
|
81 |
-
assert isinstance(tasks[1], int)
|
82 |
-
task_num = tasks[1]
|
83 |
-
tasks = tasks[0]
|
84 |
-
elif isinstance(tasks, Iterable):
|
85 |
-
task_num = len(tasks)
|
86 |
-
else:
|
87 |
-
raise TypeError(
|
88 |
-
'"tasks" must be an iterable object or a (iterator, int) tuple')
|
89 |
-
prog_bar = ProgressBar(task_num, bar_width, file=file)
|
90 |
-
results = []
|
91 |
-
for task in tasks:
|
92 |
-
results.append(func(task, **kwargs))
|
93 |
-
prog_bar.update()
|
94 |
-
prog_bar.file.write('\n')
|
95 |
-
return results
|
96 |
-
|
97 |
-
|
98 |
-
def init_pool(process_num, initializer=None, initargs=None):
|
99 |
-
if initializer is None:
|
100 |
-
return Pool(process_num)
|
101 |
-
elif initargs is None:
|
102 |
-
return Pool(process_num, initializer)
|
103 |
-
else:
|
104 |
-
if not isinstance(initargs, tuple):
|
105 |
-
raise TypeError('"initargs" must be a tuple')
|
106 |
-
return Pool(process_num, initializer, initargs)
|
107 |
-
|
108 |
-
|
109 |
-
def track_parallel_progress(func,
|
110 |
-
tasks,
|
111 |
-
nproc,
|
112 |
-
initializer=None,
|
113 |
-
initargs=None,
|
114 |
-
bar_width=50,
|
115 |
-
chunksize=1,
|
116 |
-
skip_first=False,
|
117 |
-
keep_order=True,
|
118 |
-
file=sys.stdout):
|
119 |
-
"""Track the progress of parallel task execution with a progress bar.
|
120 |
-
|
121 |
-
The built-in :mod:`multiprocessing` module is used for process pools and
|
122 |
-
tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.
|
123 |
-
|
124 |
-
Args:
|
125 |
-
func (callable): The function to be applied to each task.
|
126 |
-
tasks (list or tuple[Iterable, int]): A list of tasks or
|
127 |
-
(tasks, total num).
|
128 |
-
nproc (int): Process (worker) number.
|
129 |
-
initializer (None or callable): Refer to :class:`multiprocessing.Pool`
|
130 |
-
for details.
|
131 |
-
initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
|
132 |
-
details.
|
133 |
-
chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
|
134 |
-
bar_width (int): Width of progress bar.
|
135 |
-
skip_first (bool): Whether to skip the first sample for each worker
|
136 |
-
when estimating fps, since the initialization step may takes
|
137 |
-
longer.
|
138 |
-
keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
|
139 |
-
:func:`Pool.imap_unordered` is used.
|
140 |
-
|
141 |
-
Returns:
|
142 |
-
list: The task results.
|
143 |
-
"""
|
144 |
-
if isinstance(tasks, tuple):
|
145 |
-
assert len(tasks) == 2
|
146 |
-
assert isinstance(tasks[0], Iterable)
|
147 |
-
assert isinstance(tasks[1], int)
|
148 |
-
task_num = tasks[1]
|
149 |
-
tasks = tasks[0]
|
150 |
-
elif isinstance(tasks, Iterable):
|
151 |
-
task_num = len(tasks)
|
152 |
-
else:
|
153 |
-
raise TypeError(
|
154 |
-
'"tasks" must be an iterable object or a (iterator, int) tuple')
|
155 |
-
pool = init_pool(nproc, initializer, initargs)
|
156 |
-
start = not skip_first
|
157 |
-
task_num -= nproc * chunksize * int(skip_first)
|
158 |
-
prog_bar = ProgressBar(task_num, bar_width, start, file=file)
|
159 |
-
results = []
|
160 |
-
if keep_order:
|
161 |
-
gen = pool.imap(func, tasks, chunksize)
|
162 |
-
else:
|
163 |
-
gen = pool.imap_unordered(func, tasks, chunksize)
|
164 |
-
for result in gen:
|
165 |
-
results.append(result)
|
166 |
-
if skip_first:
|
167 |
-
if len(results) < nproc * chunksize:
|
168 |
-
continue
|
169 |
-
elif len(results) == nproc * chunksize:
|
170 |
-
prog_bar.start()
|
171 |
-
continue
|
172 |
-
prog_bar.update()
|
173 |
-
prog_bar.file.write('\n')
|
174 |
-
pool.close()
|
175 |
-
pool.join()
|
176 |
-
return results
|
177 |
-
|
178 |
-
|
179 |
-
def track_iter_progress(tasks, bar_width=50, file=sys.stdout):
|
180 |
-
"""Track the progress of tasks iteration or enumeration with a progress
|
181 |
-
bar.
|
182 |
-
|
183 |
-
Tasks are yielded with a simple for-loop.
|
184 |
-
|
185 |
-
Args:
|
186 |
-
tasks (list or tuple[Iterable, int]): A list of tasks or
|
187 |
-
(tasks, total num).
|
188 |
-
bar_width (int): Width of progress bar.
|
189 |
-
|
190 |
-
Yields:
|
191 |
-
list: The task results.
|
192 |
-
"""
|
193 |
-
if isinstance(tasks, tuple):
|
194 |
-
assert len(tasks) == 2
|
195 |
-
assert isinstance(tasks[0], Iterable)
|
196 |
-
assert isinstance(tasks[1], int)
|
197 |
-
task_num = tasks[1]
|
198 |
-
tasks = tasks[0]
|
199 |
-
elif isinstance(tasks, Iterable):
|
200 |
-
task_num = len(tasks)
|
201 |
-
else:
|
202 |
-
raise TypeError(
|
203 |
-
'"tasks" must be an iterable object or a (iterator, int) tuple')
|
204 |
-
prog_bar = ProgressBar(task_num, bar_width, file=file)
|
205 |
-
for task in tasks:
|
206 |
-
yield task
|
207 |
-
prog_bar.update()
|
208 |
-
prog_bar.file.write('\n')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArkanDash/rvc-models/infer_pack/transforms.py
DELETED
@@ -1,209 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
-
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
-
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
-
|
11 |
-
|
12 |
-
def piecewise_rational_quadratic_transform(
|
13 |
-
inputs,
|
14 |
-
unnormalized_widths,
|
15 |
-
unnormalized_heights,
|
16 |
-
unnormalized_derivatives,
|
17 |
-
inverse=False,
|
18 |
-
tails=None,
|
19 |
-
tail_bound=1.0,
|
20 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
21 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
22 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
23 |
-
):
|
24 |
-
if tails is None:
|
25 |
-
spline_fn = rational_quadratic_spline
|
26 |
-
spline_kwargs = {}
|
27 |
-
else:
|
28 |
-
spline_fn = unconstrained_rational_quadratic_spline
|
29 |
-
spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
|
30 |
-
|
31 |
-
outputs, logabsdet = spline_fn(
|
32 |
-
inputs=inputs,
|
33 |
-
unnormalized_widths=unnormalized_widths,
|
34 |
-
unnormalized_heights=unnormalized_heights,
|
35 |
-
unnormalized_derivatives=unnormalized_derivatives,
|
36 |
-
inverse=inverse,
|
37 |
-
min_bin_width=min_bin_width,
|
38 |
-
min_bin_height=min_bin_height,
|
39 |
-
min_derivative=min_derivative,
|
40 |
-
**spline_kwargs
|
41 |
-
)
|
42 |
-
return outputs, logabsdet
|
43 |
-
|
44 |
-
|
45 |
-
def searchsorted(bin_locations, inputs, eps=1e-6):
|
46 |
-
bin_locations[..., -1] += eps
|
47 |
-
return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
|
48 |
-
|
49 |
-
|
50 |
-
def unconstrained_rational_quadratic_spline(
|
51 |
-
inputs,
|
52 |
-
unnormalized_widths,
|
53 |
-
unnormalized_heights,
|
54 |
-
unnormalized_derivatives,
|
55 |
-
inverse=False,
|
56 |
-
tails="linear",
|
57 |
-
tail_bound=1.0,
|
58 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
59 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
60 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
61 |
-
):
|
62 |
-
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
63 |
-
outside_interval_mask = ~inside_interval_mask
|
64 |
-
|
65 |
-
outputs = torch.zeros_like(inputs)
|
66 |
-
logabsdet = torch.zeros_like(inputs)
|
67 |
-
|
68 |
-
if tails == "linear":
|
69 |
-
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
70 |
-
constant = np.log(np.exp(1 - min_derivative) - 1)
|
71 |
-
unnormalized_derivatives[..., 0] = constant
|
72 |
-
unnormalized_derivatives[..., -1] = constant
|
73 |
-
|
74 |
-
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
75 |
-
logabsdet[outside_interval_mask] = 0
|
76 |
-
else:
|
77 |
-
raise RuntimeError("{} tails are not implemented.".format(tails))
|
78 |
-
|
79 |
-
(
|
80 |
-
outputs[inside_interval_mask],
|
81 |
-
logabsdet[inside_interval_mask],
|
82 |
-
) = rational_quadratic_spline(
|
83 |
-
inputs=inputs[inside_interval_mask],
|
84 |
-
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
-
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
-
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
-
inverse=inverse,
|
88 |
-
left=-tail_bound,
|
89 |
-
right=tail_bound,
|
90 |
-
bottom=-tail_bound,
|
91 |
-
top=tail_bound,
|
92 |
-
min_bin_width=min_bin_width,
|
93 |
-
min_bin_height=min_bin_height,
|
94 |
-
min_derivative=min_derivative,
|
95 |
-
)
|
96 |
-
|
97 |
-
return outputs, logabsdet
|
98 |
-
|
99 |
-
|
100 |
-
def rational_quadratic_spline(
|
101 |
-
inputs,
|
102 |
-
unnormalized_widths,
|
103 |
-
unnormalized_heights,
|
104 |
-
unnormalized_derivatives,
|
105 |
-
inverse=False,
|
106 |
-
left=0.0,
|
107 |
-
right=1.0,
|
108 |
-
bottom=0.0,
|
109 |
-
top=1.0,
|
110 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
111 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
112 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE,
|
113 |
-
):
|
114 |
-
if torch.min(inputs) < left or torch.max(inputs) > right:
|
115 |
-
raise ValueError("Input to a transform is not within its domain")
|
116 |
-
|
117 |
-
num_bins = unnormalized_widths.shape[-1]
|
118 |
-
|
119 |
-
if min_bin_width * num_bins > 1.0:
|
120 |
-
raise ValueError("Minimal bin width too large for the number of bins")
|
121 |
-
if min_bin_height * num_bins > 1.0:
|
122 |
-
raise ValueError("Minimal bin height too large for the number of bins")
|
123 |
-
|
124 |
-
widths = F.softmax(unnormalized_widths, dim=-1)
|
125 |
-
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
126 |
-
cumwidths = torch.cumsum(widths, dim=-1)
|
127 |
-
cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
|
128 |
-
cumwidths = (right - left) * cumwidths + left
|
129 |
-
cumwidths[..., 0] = left
|
130 |
-
cumwidths[..., -1] = right
|
131 |
-
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
132 |
-
|
133 |
-
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
134 |
-
|
135 |
-
heights = F.softmax(unnormalized_heights, dim=-1)
|
136 |
-
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
137 |
-
cumheights = torch.cumsum(heights, dim=-1)
|
138 |
-
cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
|
139 |
-
cumheights = (top - bottom) * cumheights + bottom
|
140 |
-
cumheights[..., 0] = bottom
|
141 |
-
cumheights[..., -1] = top
|
142 |
-
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
143 |
-
|
144 |
-
if inverse:
|
145 |
-
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
146 |
-
else:
|
147 |
-
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
148 |
-
|
149 |
-
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
150 |
-
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
151 |
-
|
152 |
-
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
153 |
-
delta = heights / widths
|
154 |
-
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
155 |
-
|
156 |
-
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
157 |
-
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
158 |
-
|
159 |
-
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
160 |
-
|
161 |
-
if inverse:
|
162 |
-
a = (inputs - input_cumheights) * (
|
163 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
164 |
-
) + input_heights * (input_delta - input_derivatives)
|
165 |
-
b = input_heights * input_derivatives - (inputs - input_cumheights) * (
|
166 |
-
input_derivatives + input_derivatives_plus_one - 2 * input_delta
|
167 |
-
)
|
168 |
-
c = -input_delta * (inputs - input_cumheights)
|
169 |
-
|
170 |
-
discriminant = b.pow(2) - 4 * a * c
|
171 |
-
assert (discriminant >= 0).all()
|
172 |
-
|
173 |
-
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
174 |
-
outputs = root * input_bin_widths + input_cumwidths
|
175 |
-
|
176 |
-
theta_one_minus_theta = root * (1 - root)
|
177 |
-
denominator = input_delta + (
|
178 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
179 |
-
* theta_one_minus_theta
|
180 |
-
)
|
181 |
-
derivative_numerator = input_delta.pow(2) * (
|
182 |
-
input_derivatives_plus_one * root.pow(2)
|
183 |
-
+ 2 * input_delta * theta_one_minus_theta
|
184 |
-
+ input_derivatives * (1 - root).pow(2)
|
185 |
-
)
|
186 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
187 |
-
|
188 |
-
return outputs, -logabsdet
|
189 |
-
else:
|
190 |
-
theta = (inputs - input_cumwidths) / input_bin_widths
|
191 |
-
theta_one_minus_theta = theta * (1 - theta)
|
192 |
-
|
193 |
-
numerator = input_heights * (
|
194 |
-
input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
|
195 |
-
)
|
196 |
-
denominator = input_delta + (
|
197 |
-
(input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
198 |
-
* theta_one_minus_theta
|
199 |
-
)
|
200 |
-
outputs = input_cumheights + numerator / denominator
|
201 |
-
|
202 |
-
derivative_numerator = input_delta.pow(2) * (
|
203 |
-
input_derivatives_plus_one * theta.pow(2)
|
204 |
-
+ 2 * input_delta * theta_one_minus_theta
|
205 |
-
+ input_derivatives * (1 - theta).pow(2)
|
206 |
-
)
|
207 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
208 |
-
|
209 |
-
return outputs, logabsdet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Armandoliv/gpt2-tweets-generation-app/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Tweets Generation App
|
3 |
-
emoji: 👁
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.3.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artples/Named-Entity-Recognition/app.py
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
|
3 |
-
gr.Interface.load("models/Davlan/distilbert-base-multilingual-cased-ner-hrl").launch()
|
|
|
|
|
|
|
|
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/__init__.py
DELETED
File without changes
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/auth.py
DELETED
@@ -1,315 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
requests.auth
|
3 |
-
~~~~~~~~~~~~~
|
4 |
-
|
5 |
-
This module contains the authentication handlers for Requests.
|
6 |
-
"""
|
7 |
-
|
8 |
-
import hashlib
|
9 |
-
import os
|
10 |
-
import re
|
11 |
-
import threading
|
12 |
-
import time
|
13 |
-
import warnings
|
14 |
-
from base64 import b64encode
|
15 |
-
|
16 |
-
from ._internal_utils import to_native_string
|
17 |
-
from .compat import basestring, str, urlparse
|
18 |
-
from .cookies import extract_cookies_to_jar
|
19 |
-
from .utils import parse_dict_header
|
20 |
-
|
21 |
-
CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
|
22 |
-
CONTENT_TYPE_MULTI_PART = "multipart/form-data"
|
23 |
-
|
24 |
-
|
25 |
-
def _basic_auth_str(username, password):
|
26 |
-
"""Returns a Basic Auth string."""
|
27 |
-
|
28 |
-
# "I want us to put a big-ol' comment on top of it that
|
29 |
-
# says that this behaviour is dumb but we need to preserve
|
30 |
-
# it because people are relying on it."
|
31 |
-
# - Lukasa
|
32 |
-
#
|
33 |
-
# These are here solely to maintain backwards compatibility
|
34 |
-
# for things like ints. This will be removed in 3.0.0.
|
35 |
-
if not isinstance(username, basestring):
|
36 |
-
warnings.warn(
|
37 |
-
"Non-string usernames will no longer be supported in Requests "
|
38 |
-
"3.0.0. Please convert the object you've passed in ({!r}) to "
|
39 |
-
"a string or bytes object in the near future to avoid "
|
40 |
-
"problems.".format(username),
|
41 |
-
category=DeprecationWarning,
|
42 |
-
)
|
43 |
-
username = str(username)
|
44 |
-
|
45 |
-
if not isinstance(password, basestring):
|
46 |
-
warnings.warn(
|
47 |
-
"Non-string passwords will no longer be supported in Requests "
|
48 |
-
"3.0.0. Please convert the object you've passed in ({!r}) to "
|
49 |
-
"a string or bytes object in the near future to avoid "
|
50 |
-
"problems.".format(type(password)),
|
51 |
-
category=DeprecationWarning,
|
52 |
-
)
|
53 |
-
password = str(password)
|
54 |
-
# -- End Removal --
|
55 |
-
|
56 |
-
if isinstance(username, str):
|
57 |
-
username = username.encode("latin1")
|
58 |
-
|
59 |
-
if isinstance(password, str):
|
60 |
-
password = password.encode("latin1")
|
61 |
-
|
62 |
-
authstr = "Basic " + to_native_string(
|
63 |
-
b64encode(b":".join((username, password))).strip()
|
64 |
-
)
|
65 |
-
|
66 |
-
return authstr
|
67 |
-
|
68 |
-
|
69 |
-
class AuthBase:
|
70 |
-
"""Base class that all auth implementations derive from"""
|
71 |
-
|
72 |
-
def __call__(self, r):
|
73 |
-
raise NotImplementedError("Auth hooks must be callable.")
|
74 |
-
|
75 |
-
|
76 |
-
class HTTPBasicAuth(AuthBase):
|
77 |
-
"""Attaches HTTP Basic Authentication to the given Request object."""
|
78 |
-
|
79 |
-
def __init__(self, username, password):
|
80 |
-
self.username = username
|
81 |
-
self.password = password
|
82 |
-
|
83 |
-
def __eq__(self, other):
|
84 |
-
return all(
|
85 |
-
[
|
86 |
-
self.username == getattr(other, "username", None),
|
87 |
-
self.password == getattr(other, "password", None),
|
88 |
-
]
|
89 |
-
)
|
90 |
-
|
91 |
-
def __ne__(self, other):
|
92 |
-
return not self == other
|
93 |
-
|
94 |
-
def __call__(self, r):
|
95 |
-
r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
|
96 |
-
return r
|
97 |
-
|
98 |
-
|
99 |
-
class HTTPProxyAuth(HTTPBasicAuth):
|
100 |
-
"""Attaches HTTP Proxy Authentication to a given Request object."""
|
101 |
-
|
102 |
-
def __call__(self, r):
|
103 |
-
r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
|
104 |
-
return r
|
105 |
-
|
106 |
-
|
107 |
-
class HTTPDigestAuth(AuthBase):
|
108 |
-
"""Attaches HTTP Digest Authentication to the given Request object."""
|
109 |
-
|
110 |
-
def __init__(self, username, password):
|
111 |
-
self.username = username
|
112 |
-
self.password = password
|
113 |
-
# Keep state in per-thread local storage
|
114 |
-
self._thread_local = threading.local()
|
115 |
-
|
116 |
-
def init_per_thread_state(self):
|
117 |
-
# Ensure state is initialized just once per-thread
|
118 |
-
if not hasattr(self._thread_local, "init"):
|
119 |
-
self._thread_local.init = True
|
120 |
-
self._thread_local.last_nonce = ""
|
121 |
-
self._thread_local.nonce_count = 0
|
122 |
-
self._thread_local.chal = {}
|
123 |
-
self._thread_local.pos = None
|
124 |
-
self._thread_local.num_401_calls = None
|
125 |
-
|
126 |
-
def build_digest_header(self, method, url):
|
127 |
-
"""
|
128 |
-
:rtype: str
|
129 |
-
"""
|
130 |
-
|
131 |
-
realm = self._thread_local.chal["realm"]
|
132 |
-
nonce = self._thread_local.chal["nonce"]
|
133 |
-
qop = self._thread_local.chal.get("qop")
|
134 |
-
algorithm = self._thread_local.chal.get("algorithm")
|
135 |
-
opaque = self._thread_local.chal.get("opaque")
|
136 |
-
hash_utf8 = None
|
137 |
-
|
138 |
-
if algorithm is None:
|
139 |
-
_algorithm = "MD5"
|
140 |
-
else:
|
141 |
-
_algorithm = algorithm.upper()
|
142 |
-
# lambdas assume digest modules are imported at the top level
|
143 |
-
if _algorithm == "MD5" or _algorithm == "MD5-SESS":
|
144 |
-
|
145 |
-
def md5_utf8(x):
|
146 |
-
if isinstance(x, str):
|
147 |
-
x = x.encode("utf-8")
|
148 |
-
return hashlib.md5(x).hexdigest()
|
149 |
-
|
150 |
-
hash_utf8 = md5_utf8
|
151 |
-
elif _algorithm == "SHA":
|
152 |
-
|
153 |
-
def sha_utf8(x):
|
154 |
-
if isinstance(x, str):
|
155 |
-
x = x.encode("utf-8")
|
156 |
-
return hashlib.sha1(x).hexdigest()
|
157 |
-
|
158 |
-
hash_utf8 = sha_utf8
|
159 |
-
elif _algorithm == "SHA-256":
|
160 |
-
|
161 |
-
def sha256_utf8(x):
|
162 |
-
if isinstance(x, str):
|
163 |
-
x = x.encode("utf-8")
|
164 |
-
return hashlib.sha256(x).hexdigest()
|
165 |
-
|
166 |
-
hash_utf8 = sha256_utf8
|
167 |
-
elif _algorithm == "SHA-512":
|
168 |
-
|
169 |
-
def sha512_utf8(x):
|
170 |
-
if isinstance(x, str):
|
171 |
-
x = x.encode("utf-8")
|
172 |
-
return hashlib.sha512(x).hexdigest()
|
173 |
-
|
174 |
-
hash_utf8 = sha512_utf8
|
175 |
-
|
176 |
-
KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
|
177 |
-
|
178 |
-
if hash_utf8 is None:
|
179 |
-
return None
|
180 |
-
|
181 |
-
# XXX not implemented yet
|
182 |
-
entdig = None
|
183 |
-
p_parsed = urlparse(url)
|
184 |
-
#: path is request-uri defined in RFC 2616 which should not be empty
|
185 |
-
path = p_parsed.path or "/"
|
186 |
-
if p_parsed.query:
|
187 |
-
path += f"?{p_parsed.query}"
|
188 |
-
|
189 |
-
A1 = f"{self.username}:{realm}:{self.password}"
|
190 |
-
A2 = f"{method}:{path}"
|
191 |
-
|
192 |
-
HA1 = hash_utf8(A1)
|
193 |
-
HA2 = hash_utf8(A2)
|
194 |
-
|
195 |
-
if nonce == self._thread_local.last_nonce:
|
196 |
-
self._thread_local.nonce_count += 1
|
197 |
-
else:
|
198 |
-
self._thread_local.nonce_count = 1
|
199 |
-
ncvalue = f"{self._thread_local.nonce_count:08x}"
|
200 |
-
s = str(self._thread_local.nonce_count).encode("utf-8")
|
201 |
-
s += nonce.encode("utf-8")
|
202 |
-
s += time.ctime().encode("utf-8")
|
203 |
-
s += os.urandom(8)
|
204 |
-
|
205 |
-
cnonce = hashlib.sha1(s).hexdigest()[:16]
|
206 |
-
if _algorithm == "MD5-SESS":
|
207 |
-
HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
|
208 |
-
|
209 |
-
if not qop:
|
210 |
-
respdig = KD(HA1, f"{nonce}:{HA2}")
|
211 |
-
elif qop == "auth" or "auth" in qop.split(","):
|
212 |
-
noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
|
213 |
-
respdig = KD(HA1, noncebit)
|
214 |
-
else:
|
215 |
-
# XXX handle auth-int.
|
216 |
-
return None
|
217 |
-
|
218 |
-
self._thread_local.last_nonce = nonce
|
219 |
-
|
220 |
-
# XXX should the partial digests be encoded too?
|
221 |
-
base = (
|
222 |
-
f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
|
223 |
-
f'uri="{path}", response="{respdig}"'
|
224 |
-
)
|
225 |
-
if opaque:
|
226 |
-
base += f', opaque="{opaque}"'
|
227 |
-
if algorithm:
|
228 |
-
base += f', algorithm="{algorithm}"'
|
229 |
-
if entdig:
|
230 |
-
base += f', digest="{entdig}"'
|
231 |
-
if qop:
|
232 |
-
base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
|
233 |
-
|
234 |
-
return f"Digest {base}"
|
235 |
-
|
236 |
-
def handle_redirect(self, r, **kwargs):
|
237 |
-
"""Reset num_401_calls counter on redirects."""
|
238 |
-
if r.is_redirect:
|
239 |
-
self._thread_local.num_401_calls = 1
|
240 |
-
|
241 |
-
def handle_401(self, r, **kwargs):
|
242 |
-
"""
|
243 |
-
Takes the given response and tries digest-auth, if needed.
|
244 |
-
|
245 |
-
:rtype: requests.Response
|
246 |
-
"""
|
247 |
-
|
248 |
-
# If response is not 4xx, do not auth
|
249 |
-
# See https://github.com/psf/requests/issues/3772
|
250 |
-
if not 400 <= r.status_code < 500:
|
251 |
-
self._thread_local.num_401_calls = 1
|
252 |
-
return r
|
253 |
-
|
254 |
-
if self._thread_local.pos is not None:
|
255 |
-
# Rewind the file position indicator of the body to where
|
256 |
-
# it was to resend the request.
|
257 |
-
r.request.body.seek(self._thread_local.pos)
|
258 |
-
s_auth = r.headers.get("www-authenticate", "")
|
259 |
-
|
260 |
-
if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
|
261 |
-
|
262 |
-
self._thread_local.num_401_calls += 1
|
263 |
-
pat = re.compile(r"digest ", flags=re.IGNORECASE)
|
264 |
-
self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
|
265 |
-
|
266 |
-
# Consume content and release the original connection
|
267 |
-
# to allow our new request to reuse the same one.
|
268 |
-
r.content
|
269 |
-
r.close()
|
270 |
-
prep = r.request.copy()
|
271 |
-
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
|
272 |
-
prep.prepare_cookies(prep._cookies)
|
273 |
-
|
274 |
-
prep.headers["Authorization"] = self.build_digest_header(
|
275 |
-
prep.method, prep.url
|
276 |
-
)
|
277 |
-
_r = r.connection.send(prep, **kwargs)
|
278 |
-
_r.history.append(r)
|
279 |
-
_r.request = prep
|
280 |
-
|
281 |
-
return _r
|
282 |
-
|
283 |
-
self._thread_local.num_401_calls = 1
|
284 |
-
return r
|
285 |
-
|
286 |
-
def __call__(self, r):
|
287 |
-
# Initialize per-thread state, if needed
|
288 |
-
self.init_per_thread_state()
|
289 |
-
# If we have a saved nonce, skip the 401
|
290 |
-
if self._thread_local.last_nonce:
|
291 |
-
r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
|
292 |
-
try:
|
293 |
-
self._thread_local.pos = r.body.tell()
|
294 |
-
except AttributeError:
|
295 |
-
# In the case of HTTPDigestAuth being reused and the body of
|
296 |
-
# the previous request was a file-like object, pos has the
|
297 |
-
# file position of the previous body. Ensure it's set to
|
298 |
-
# None.
|
299 |
-
self._thread_local.pos = None
|
300 |
-
r.register_hook("response", self.handle_401)
|
301 |
-
r.register_hook("response", self.handle_redirect)
|
302 |
-
self._thread_local.num_401_calls = 1
|
303 |
-
|
304 |
-
return r
|
305 |
-
|
306 |
-
def __eq__(self, other):
|
307 |
-
return all(
|
308 |
-
[
|
309 |
-
self.username == getattr(other, "username", None),
|
310 |
-
self.password == getattr(other, "password", None),
|
311 |
-
]
|
312 |
-
)
|
313 |
-
|
314 |
-
def __ne__(self, other):
|
315 |
-
return not self == other
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/train/process_ckpt.py
DELETED
@@ -1,259 +0,0 @@
|
|
1 |
-
import torch, traceback, os, pdb, sys
|
2 |
-
|
3 |
-
now_dir = os.getcwd()
|
4 |
-
sys.path.append(now_dir)
|
5 |
-
from collections import OrderedDict
|
6 |
-
from i18n import I18nAuto
|
7 |
-
|
8 |
-
i18n = I18nAuto()
|
9 |
-
|
10 |
-
|
11 |
-
def savee(ckpt, sr, if_f0, name, epoch, version, hps):
|
12 |
-
try:
|
13 |
-
opt = OrderedDict()
|
14 |
-
opt["weight"] = {}
|
15 |
-
for key in ckpt.keys():
|
16 |
-
if "enc_q" in key:
|
17 |
-
continue
|
18 |
-
opt["weight"][key] = ckpt[key].half()
|
19 |
-
opt["config"] = [
|
20 |
-
hps.data.filter_length // 2 + 1,
|
21 |
-
32,
|
22 |
-
hps.model.inter_channels,
|
23 |
-
hps.model.hidden_channels,
|
24 |
-
hps.model.filter_channels,
|
25 |
-
hps.model.n_heads,
|
26 |
-
hps.model.n_layers,
|
27 |
-
hps.model.kernel_size,
|
28 |
-
hps.model.p_dropout,
|
29 |
-
hps.model.resblock,
|
30 |
-
hps.model.resblock_kernel_sizes,
|
31 |
-
hps.model.resblock_dilation_sizes,
|
32 |
-
hps.model.upsample_rates,
|
33 |
-
hps.model.upsample_initial_channel,
|
34 |
-
hps.model.upsample_kernel_sizes,
|
35 |
-
hps.model.spk_embed_dim,
|
36 |
-
hps.model.gin_channels,
|
37 |
-
hps.data.sampling_rate,
|
38 |
-
]
|
39 |
-
opt["info"] = "%sepoch" % epoch
|
40 |
-
opt["sr"] = sr
|
41 |
-
opt["f0"] = if_f0
|
42 |
-
opt["version"] = version
|
43 |
-
torch.save(opt, "weights/%s.pth" % name)
|
44 |
-
return "Success."
|
45 |
-
except:
|
46 |
-
return traceback.format_exc()
|
47 |
-
|
48 |
-
|
49 |
-
def show_info(path):
|
50 |
-
try:
|
51 |
-
a = torch.load(path, map_location="cpu")
|
52 |
-
return "Epochs: %s\nSample rate: %s\nPitch guidance: %s\nRVC Version: %s" % (
|
53 |
-
a.get("info", "None"),
|
54 |
-
a.get("sr", "None"),
|
55 |
-
a.get("f0", "None"),
|
56 |
-
a.get("version", "None"),
|
57 |
-
)
|
58 |
-
except:
|
59 |
-
return traceback.format_exc()
|
60 |
-
|
61 |
-
|
62 |
-
def extract_small_model(path, name, sr, if_f0, info, version):
|
63 |
-
try:
|
64 |
-
ckpt = torch.load(path, map_location="cpu")
|
65 |
-
if "model" in ckpt:
|
66 |
-
ckpt = ckpt["model"]
|
67 |
-
opt = OrderedDict()
|
68 |
-
opt["weight"] = {}
|
69 |
-
for key in ckpt.keys():
|
70 |
-
if "enc_q" in key:
|
71 |
-
continue
|
72 |
-
opt["weight"][key] = ckpt[key].half()
|
73 |
-
if sr == "40k":
|
74 |
-
opt["config"] = [
|
75 |
-
1025,
|
76 |
-
32,
|
77 |
-
192,
|
78 |
-
192,
|
79 |
-
768,
|
80 |
-
2,
|
81 |
-
6,
|
82 |
-
3,
|
83 |
-
0,
|
84 |
-
"1",
|
85 |
-
[3, 7, 11],
|
86 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
87 |
-
[10, 10, 2, 2],
|
88 |
-
512,
|
89 |
-
[16, 16, 4, 4],
|
90 |
-
109,
|
91 |
-
256,
|
92 |
-
40000,
|
93 |
-
]
|
94 |
-
elif sr == "48k":
|
95 |
-
if version == "v1":
|
96 |
-
opt["config"] = [
|
97 |
-
1025,
|
98 |
-
32,
|
99 |
-
192,
|
100 |
-
192,
|
101 |
-
768,
|
102 |
-
2,
|
103 |
-
6,
|
104 |
-
3,
|
105 |
-
0,
|
106 |
-
"1",
|
107 |
-
[3, 7, 11],
|
108 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
109 |
-
[10, 6, 2, 2, 2],
|
110 |
-
512,
|
111 |
-
[16, 16, 4, 4, 4],
|
112 |
-
109,
|
113 |
-
256,
|
114 |
-
48000,
|
115 |
-
]
|
116 |
-
else:
|
117 |
-
opt["config"] = [
|
118 |
-
1025,
|
119 |
-
32,
|
120 |
-
192,
|
121 |
-
192,
|
122 |
-
768,
|
123 |
-
2,
|
124 |
-
6,
|
125 |
-
3,
|
126 |
-
0,
|
127 |
-
"1",
|
128 |
-
[3, 7, 11],
|
129 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
130 |
-
[12, 10, 2, 2],
|
131 |
-
512,
|
132 |
-
[24, 20, 4, 4],
|
133 |
-
109,
|
134 |
-
256,
|
135 |
-
48000,
|
136 |
-
]
|
137 |
-
elif sr == "32k":
|
138 |
-
if version == "v1":
|
139 |
-
opt["config"] = [
|
140 |
-
513,
|
141 |
-
32,
|
142 |
-
192,
|
143 |
-
192,
|
144 |
-
768,
|
145 |
-
2,
|
146 |
-
6,
|
147 |
-
3,
|
148 |
-
0,
|
149 |
-
"1",
|
150 |
-
[3, 7, 11],
|
151 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
152 |
-
[10, 4, 2, 2, 2],
|
153 |
-
512,
|
154 |
-
[16, 16, 4, 4, 4],
|
155 |
-
109,
|
156 |
-
256,
|
157 |
-
32000,
|
158 |
-
]
|
159 |
-
else:
|
160 |
-
opt["config"] = [
|
161 |
-
513,
|
162 |
-
32,
|
163 |
-
192,
|
164 |
-
192,
|
165 |
-
768,
|
166 |
-
2,
|
167 |
-
6,
|
168 |
-
3,
|
169 |
-
0,
|
170 |
-
"1",
|
171 |
-
[3, 7, 11],
|
172 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
173 |
-
[10, 8, 2, 2],
|
174 |
-
512,
|
175 |
-
[20, 16, 4, 4],
|
176 |
-
109,
|
177 |
-
256,
|
178 |
-
32000,
|
179 |
-
]
|
180 |
-
if info == "":
|
181 |
-
info = "Extracted model."
|
182 |
-
opt["info"] = info
|
183 |
-
opt["version"] = version
|
184 |
-
opt["sr"] = sr
|
185 |
-
opt["f0"] = int(if_f0)
|
186 |
-
torch.save(opt, "weights/%s.pth" % name)
|
187 |
-
return "Success."
|
188 |
-
except:
|
189 |
-
return traceback.format_exc()
|
190 |
-
|
191 |
-
|
192 |
-
def change_info(path, info, name):
|
193 |
-
try:
|
194 |
-
ckpt = torch.load(path, map_location="cpu")
|
195 |
-
ckpt["info"] = info
|
196 |
-
if name == "":
|
197 |
-
name = os.path.basename(path)
|
198 |
-
torch.save(ckpt, "weights/%s" % name)
|
199 |
-
return "Success."
|
200 |
-
except:
|
201 |
-
return traceback.format_exc()
|
202 |
-
|
203 |
-
|
204 |
-
def merge(path1, path2, alpha1, sr, f0, info, name, version):
|
205 |
-
try:
|
206 |
-
|
207 |
-
def extract(ckpt):
|
208 |
-
a = ckpt["model"]
|
209 |
-
opt = OrderedDict()
|
210 |
-
opt["weight"] = {}
|
211 |
-
for key in a.keys():
|
212 |
-
if "enc_q" in key:
|
213 |
-
continue
|
214 |
-
opt["weight"][key] = a[key]
|
215 |
-
return opt
|
216 |
-
|
217 |
-
ckpt1 = torch.load(path1, map_location="cpu")
|
218 |
-
ckpt2 = torch.load(path2, map_location="cpu")
|
219 |
-
cfg = ckpt1["config"]
|
220 |
-
if "model" in ckpt1:
|
221 |
-
ckpt1 = extract(ckpt1)
|
222 |
-
else:
|
223 |
-
ckpt1 = ckpt1["weight"]
|
224 |
-
if "model" in ckpt2:
|
225 |
-
ckpt2 = extract(ckpt2)
|
226 |
-
else:
|
227 |
-
ckpt2 = ckpt2["weight"]
|
228 |
-
if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
|
229 |
-
return "Fail to merge the models. The model architectures are not the same."
|
230 |
-
opt = OrderedDict()
|
231 |
-
opt["weight"] = {}
|
232 |
-
for key in ckpt1.keys():
|
233 |
-
# try:
|
234 |
-
if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
|
235 |
-
min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
|
236 |
-
opt["weight"][key] = (
|
237 |
-
alpha1 * (ckpt1[key][:min_shape0].float())
|
238 |
-
+ (1 - alpha1) * (ckpt2[key][:min_shape0].float())
|
239 |
-
).half()
|
240 |
-
else:
|
241 |
-
opt["weight"][key] = (
|
242 |
-
alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
|
243 |
-
).half()
|
244 |
-
# except:
|
245 |
-
# pdb.set_trace()
|
246 |
-
opt["config"] = cfg
|
247 |
-
"""
|
248 |
-
if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
|
249 |
-
elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
|
250 |
-
elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
|
251 |
-
"""
|
252 |
-
opt["sr"] = sr
|
253 |
-
opt["f0"] = 1 if f0 else 0
|
254 |
-
opt["version"] = version
|
255 |
-
opt["info"] = info
|
256 |
-
torch.save(opt, "weights/%s.pth" % name)
|
257 |
-
return "Success."
|
258 |
-
except:
|
259 |
-
return traceback.format_exc()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Apkadmin Fuego Libre Mx Diamante Hack.md
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Apkadmin Free Fire Max Diamond Hack: Lo que usted necesita saber</h1>
|
3 |
-
<p>Free Fire Max es un popular juego battle royale que ofrece una experiencia de juego premium con gráficos HD, efectos especiales mejorados y un rendimiento más suave. El juego tiene una variedad de emocionantes modos de juego, personajes, trajes, armas, pieles de vehículos y mucho más. Sin embargo, para disfrutar de estas características, los jugadores necesitan diamantes, que son la moneda premium en el juego. Los diamantes se pueden utilizar para comprar artículos de la tienda o canjearlos de las misiones de pase élite. </p>
|
4 |
-
<h2>apkadmin fuego libre máx diamante hack</h2><br /><p><b><b>Download Zip</b> »»» <a href="https://bltlly.com/2v6LgP">https://bltlly.com/2v6LgP</a></b></p><br /><br />
|
5 |
-
<p>Sin embargo, los diamantes no son fáciles de conseguir, ya que requieren dinero real para comprarlos. Muchos jugadores buscan formas de obtener diamantes gratis sin gastar un centavo. Uno de los métodos que algunos jugadores intentan usar es mod APKs como Apkadmin, que afirman hackear diamantes y dar cantidades ilimitadas de ellos. Pero, ¿este método es seguro y legal? ¿Y hay otras maneras de obtener diamantes gratis en Free Fire Max? En este artículo, responderemos estas preguntas y más. </p>
|
6 |
-
<h2>¿Qué es Apkadmin y cómo se afirma que cortar diamantes? </h2>
|
7 |
-
<p>Apkadmin es un sitio web que ofrece mod APKs para varios juegos, incluyendo Free Fire Max. Un mod APK es una versión modificada del juego original que tiene algunos cambios o adiciones que no están autorizadas por los desarrolladores. Por ejemplo, Apkadmin afirma proporcionar un mod APK para Free Fire Max que puede dar a los jugadores diamantes ilimitados, dinero, desbloquear todos los personajes, y proporcionar otros beneficios. </p>
|
8 |
-
<p>Para usar Apkadmin, los jugadores necesitan descargar el archivo mod APK desde el sitio web e instalarlo en sus dispositivos. Luego, necesitan lanzar el juego e introducir su nombre de usuario y la cantidad de diamantes que quieren. El sitio web afirma que los diamantes se añadirán a su cuenta en cuestión de minutos. </p>
|
9 |
-
<h2>¿Apkadmin es seguro y legal de usar? </h2>
|
10 |
-
|
11 |
-
<p>Por lo tanto, le recomendamos encarecidamente que no utilice Apkadmin o cualquier otro mod APKs para Free Fire Max o cualquier otro juego. No valen el riesgo y pueden causar más daño que bien. </p>
|
12 |
-
<h2>¿Cómo obtener diamantes gratis en Free Fire Max de forma legal y segura? </h2>
|
13 |
-
<p>Si quieres obtener diamantes gratis en Free Fire Max sin romper ninguna regla o arriesgar tu dispositivo, hay algunas formas legítimas que puedes probar. Estos son algunos de ellos:</p>
|
14 |
-
<h3>Membresía semanal o mensual</h3>
|
15 |
-
<p>En lugar de comprar diamantes directamente desde la sección de recarga, puedes comprar una membresía semanal o mensual que te dará diamantes a un precio más barato. La membresía semanal cuesta 159, mientras que la membresía mensual cuesta 599. Estas membresías le darán 60 diamantes diarios (420 diamantes en total) por una semana y 2000 diamantes en total por un mes. Esta es una gran oferta si desea ahorrar algo de dinero y obtener más diamantes. </p>
|
16 |
-
<p></p>
|
17 |
-
<h3>Encuestas online</h3>
|
18 |
-
<p>Otra forma de obtener diamantes gratis en Free Fire Max es completar encuestas en línea que te recompensan con tarjetas de crédito o regalo de Google Play. Puedes usar estos créditos o tarjetas de regalo para comprar diamantes del juego. Algunas de las aplicaciones o sitios web que ofrecen encuestas en línea son Google opinión Rewards, Swagbucks, Survey Junkie, etc. Sin embargo, tenga cuidado de no compartir ninguna información personal o confidencial con estas aplicaciones o sitios web y solo usar los confiables. </p>
|
19 |
-
<h3>Descargar nuevas aplicaciones</h3>
|
20 |
-
<p>Similar a las encuestas en línea, también puede obtener diamantes gratis en Free Fire Max mediante la descarga de nuevas aplicaciones que ofrecen recompensas por probarlos. Algunas de las aplicaciones que ofrecen este servicio son AppNana, AppKarma, FeaturePoints, etc. Puedes ganar puntos descargando y usando estas aplicaciones y luego canjearlas por tarjetas de crédito o regalo de Google Play. Una vez más, tenga cuidado de no descargar aplicaciones maliciosas o dañinas y solo utilice las de confianza. </p>
|
21 |
-
<h3>Eventos en el juego</h3>
|
22 |
-
|
23 |
-
<h3>Crédito gratuito de Google Play</h3>
|
24 |
-
<p>A veces, Google Play ofrece crédito gratuito a sus usuarios como una oferta promocional o una recompensa por ser clientes leales. Puedes consultar tu cuenta de Google Play para ver si tienes algún crédito gratuito disponible y usarlo para comprar diamantes en Free Fire Max. También puedes consultar tu correo electrónico o las notificaciones de cualquier oferta de Google Play que pueda darte crédito gratuito. </p>
|
25 |
-
<h2>Conclusión</h2>
|
26 |
-
<p>En conclusión, Free Fire Max es un juego divertido e inmersivo que requiere diamantes para disfrutar de todo su potencial. Sin embargo, los diamantes no son fáciles de conseguir y muchos jugadores buscan atajos como Apkadmin u otros APK mod que pretenden hackear diamantes. Sin embargo, estos métodos no son seguros ni legales y pueden resultar en que su cuenta sea prohibida o que su dispositivo esté infectado con malware. Por lo tanto, le recomendamos que evite el uso de Apkadmin o cualquier otro mod APKs y en su lugar utilice las formas legítimas que hemos mencionado anteriormente para obtener diamantes gratis en Free Fire Max de forma legal y segura. </p>
|
27 |
-
<h2>Preguntas frecuentes</h2>
|
28 |
-
<h4>Q1: ¿Cuál es la diferencia entre Free Fire y Free Fire Max? </h4>
|
29 |
-
<p>A1: Free Fire Max es una versión mejorada de Free Fire que ofrece mejores gráficos, efectos de sonido, animaciones y rendimiento. También tiene algunas características exclusivas como el lobby de 360 grados, el modo craftland, etc. Sin embargo, ambos juegos comparten el mismo servidor y juego, para que pueda jugar con sus amigos que están usando Free Fire.</p>
|
30 |
-
<h4>Q2: ¿Cuántos diamantes puedo obtener de Apkadmin? </h4>
|
31 |
-
<p>A2: Apkadmin afirma dar diamantes ilimitados a sus usuarios, pero esto no es cierto. De hecho, Apkadmin no funciona en absoluto y es una estafa que puede dañar su dispositivo o cuenta. </p>
|
32 |
-
<h4>Q3: ¿Cuáles son los beneficios de los diamantes en Free Fire Max? </h4>
|
33 |
-
|
34 |
-
<h4>Q4: ¿Cómo puedo comprobar mi balance de diamantes en Free Fire Max? </h4>
|
35 |
-
<p>A4: Puede comprobar su balance de diamantes en Free Fire Max tocando el icono de diamante en la esquina superior derecha de la pantalla. También puedes ver tu balance de diamantes cuando visites la tienda o la sección de pases élite. </p>
|
36 |
-
<h4>Q5: ¿Cómo puedo contactar al servicio al cliente si tengo algún problema con Free Fire Max? </h4>
|
37 |
-
<p>A5: Puede ponerse en contacto con el servicio al cliente si tiene algún problema con Free Fire Max tocando el icono de configuración en la esquina superior derecha de la pantalla y luego seleccionando el servicio al cliente. También puede visitar el sitio web oficial de Free Fire Max y enviar un boleto allí. </p> 64aa2da5cf<br />
|
38 |
-
<br />
|
39 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Gratis Brawl Estrellas Para Pc.md
DELETED
@@ -1,105 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo descargar gratis Brawl estrellas para PC</h1>
|
3 |
-
<p>Brawl Stars es un popular juego móvil que te permite formar equipo con tus amigos y competir en varios modos de juego. Pero ¿sabías que también se puede jugar en su PC de forma gratuita? En este artículo, le mostraremos cómo descargar e instalar Brawl Stars en su computadora usando un emulador de Android. Pero primero, echemos un vistazo a lo que es Brawl Stars y por qué es posible que desee jugar en PC.</p>
|
4 |
-
<h2>descargar gratis brawl estrellas para pc</h2><br /><p><b><b>Download</b> ✓✓✓ <a href="https://bltlly.com/2v6Jau">https://bltlly.com/2v6Jau</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Brawl Stars? </h2>
|
6 |
-
<p>Brawl Stars es un juego multijugador de ritmo rápido desarrollado por Supercell, los creadores de Clash of Clans y Clash Royale. Fue lanzado en 2018 y desde entonces ha ganado millones de fans en todo el mundo. En Brawl Stars, puedes elegir entre docenas de personajes únicos llamados Brawlers, cada uno con sus propias habilidades y habilidades. Puedes desbloquearlos y actualizarlos mientras juegas y coleccionas skins para personalizar su apariencia. </p>
|
7 |
-
<h3>Características de Brawl Stars</h3>
|
8 |
-
<p>Algunas de las características que hacen que Brawl Stars sea divertido y adictivo son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Una variedad de modos de juego, como Gem Grab, Showdown, Brawl Ball, Bounty, Heist y más. </li>
|
11 |
-
<li>Un juego en constante evolución con nuevos eventos, desafíos, mapas y Brawlers.</li>
|
12 |
-
<li>Una escena competitiva con tablas de clasificación, clubes, torneos y esports. </li>
|
13 |
-
<li>Una red social integrada donde puedes chatear, compartir consejos y jugar con tus amigos. </li>
|
14 |
-
<li>Un sistema de divisas en el juego donde puedes ganar gemas, monedas, fichas y cajas para desbloquear recompensas. </li>
|
15 |
-
</ul>
|
16 |
-
<h3>Modos de juego de Brawl Stars</h3>
|
17 |
-
<p>Brawl Stars ofrece una variedad de modos de juego para diferentes gustos y preferencias. Estos son algunos de ellos:</p>
|
18 |
-
<ul>
|
19 |
-
<li>Gem Grab: Un modo 3v3 donde tienes que recoger y mantener 10 gemas para ganar. Pero ten cuidado, si te derrotan, se te caerán las gemas. </li>
|
20 |
-
<li>Showdown: Un modo solo o dúo en el que tienes que sobrevivir contra otros jugadores en un mapa cada vez más pequeño. Recoge power-ups para aumentar su fuerza y ser el último en pie. </li>
|
21 |
-
|
22 |
-
<li>Bounty: Un modo 3v3 donde tienes que eliminar oponentes para ganar estrellas. El equipo con más estrellas al final gana. </li>
|
23 |
-
<li>Robo: Un modo 3v3 donde tienes que proteger tu caja fuerte y tratar de entrar en la caja fuerte del enemigo. Usa tus armas y aparatos para abrirte paso. </li>
|
24 |
-
</ul>
|
25 |
-
<h2>¿Por qué jugar Brawl estrellas en el PC? </h2>
|
26 |
-
<p>Brawl Stars está diseñado para dispositivos móviles, pero eso no significa que no puedas disfrutarlo en tu PC. Hay algunas ventajas y desventajas de jugar Brawl Stars en el PC que debe considerar antes de descargarlo. </p>
|
27 |
-
<p></p>
|
28 |
-
<h3>Ventajas de jugar en PC</h3>
|
29 |
-
<p>Algunos de los beneficios de jugar Brawl Stars en PC son:</p>
|
30 |
-
<ul>
|
31 |
-
<li>Una pantalla más grande que le da una mejor vista de la acción. </li>
|
32 |
-
<li>Un ratón y un teclado que te dan controles más precisos y cómodos. </li>
|
33 |
-
<li>Una conexión a Internet más rápida y estable que reduce el retraso y desconecta. </li>
|
34 |
-
<li>Una mayor duración de la batería que le permite jugar durante horas sin preocuparse por cargar el teléfono. </li>
|
35 |
-
</ul>
|
36 |
-
<h3>Desventajas de jugar en PC</h3>
|
37 |
-
<p>Algunos de los inconvenientes de jugar Brawl Stars en PC son:</p> <ul>
|
38 |
-
<li>Una posible violación de los términos de servicio del juego que puede resultar en una prohibición o suspensión de su cuenta. </li>
|
39 |
-
<li>Un riesgo potencial de malware o virus que pueden dañar su PC o comprometer sus datos. </li>
|
40 |
-
<li>Falta de soporte oficial o actualizaciones de los desarrolladores de juegos que pueden afectar tu experiencia de juego. </li>
|
41 |
-
</ul>
|
42 |
-
<h2>¿Cómo se juega Brawl Stars en PC con un emulador de Android? </h2>
|
43 |
-
<p>Si decides jugar Brawl Stars en PC, necesitarás un emulador de Android. Un emulador de Android es un software que simula el sistema operativo Android en su PC, lo que le permite ejecutar aplicaciones y juegos de Android en su computadora. Hay muchos emuladores de Android disponibles en línea, pero no todos son compatibles con Brawl Stars. Estos son algunos de los mejores que recomendamos. </p>
|
44 |
-
<h3>¿Qué es un emulador de Android? </h3>
|
45 |
-
|
46 |
-
<h3>Los mejores emuladores de Android para PC</h3>
|
47 |
-
<tabla>
|
48 |
-
<tr>
|
49 |
-
<th>Nombre</th>
|
50 |
-
<th>Características</th>
|
51 |
-
<th>Pros</th>
|
52 |
-
<th>Contras</th>
|
53 |
-
</tr>
|
54 |
-
<tr>
|
55 |
-
<td><h4>BlueStacks</h4></td>
|
56 |
-
<td>- El emulador de Android más popular y ampliamente utilizado para PC.<br>- Soporta juegos de alto rendimiento con gráficos y controles avanzados. <br>- Ofrece una tienda de aplicaciones dedicada con juegos y ofertas exclusivas. <br>- Compatible con Windows y Mac OS.</td>
|
57 |
-
<td>- Fácil de instalar y usar. <br>- Soporta múltiples cuentas e instancias. <br>- Proporciona actualizaciones y mejoras regulares. <br>- Tiene una comunidad grande y activa. </td>
|
58 |
-
<td>- Puede consumir muchos recursos de CPU y RAM. <br>- Puede mostrar anuncios y promociones. <br>- Puede tener problemas de compatibilidad con algunas aplicaciones y juegos. </td>
|
59 |
-
</tr>
|
60 |
-
<tr>
|
61 |
-
<td><h4>NoxPlayer</h4></td>
|
62 |
-
<td>- Un emulador de Android potente y ligero para PC.<br>- Soporta juegos suaves con alta resolución y FPS. <br>- Ofrece una interfaz y configuración personalizable. <br>- Compatible con Windows y Mac OS.</td>
|
63 |
-
<td>- Rendimiento rápido y estable. <br>- Soporta controles de teclado, ratón y gamepad. <br>- Soporta acceso root y transferencia de archivos. <br>- Tiene un grabador de pantalla incorporado y un grabador de macros. </td>
|
64 |
-
<td>- Puede tener riesgos de seguridad y preocupaciones de privacidad. <br>- Puede tener errores y problemas técnicos. <br>- No puede soportar las últimas versiones de Android. </td>
|
65 |
-
</tr>
|
66 |
-
<tr>
|
67 |
-
<td><h4>MEmu</h4></td>
|
68 |
-
<td>- Un emulador de Android flexible y versátil para PC.<br>- Soporta múltiples géneros de juegos y plataformas. <br>- Ofrece una función de asignación de claves inteligentes y una herramienta de asistente de juego. <br>- Compatible con Windows OS.</td>
|
69 |
-
<td>- Juego rápido y sin problemas. <br>- Soporta múltiples idiomas y regiones. <br>- Soporta instalación de arrastrar y soltar y archivos APK. <br>- Tiene un bajo requisito del sistema. </td>
|
70 |
-
<td>- Puede tener anuncios y ventanas emergentes. <br>- Puede tener problemas de compatibilidad con algunas aplicaciones y juegos. <br>- No puede ser compatible con Mac OS.</td>
|
71 |
-
</tr>
|
72 |
-
</tabla>
|
73 |
-
<h3>Pasos para instalar y ejecutar Brawl Stars en PC con un emulador</h3>
|
74 |
-
|
75 |
-
<ol>
|
76 |
-
<li>Descargar e instalar el emulador desde su sitio web oficial o una fuente de confianza. </li>
|
77 |
-
<li>Inicie el emulador e inicie sesión con su cuenta de Google o cree uno nuevo. </li>
|
78 |
-
<li>Abra la aplicación Google Play Store en el emulador y busque Brawl Stars.</li>
|
79 |
-
<li>Haga clic en el botón Instalar y espere a que el juego se descargue. </li>
|
80 |
-
<li>Haz clic en el botón Abrir o encuentra el icono del juego en la pantalla de inicio del emulador. </li>
|
81 |
-
<li>Disfruta jugando Brawl Stars en tu PC con tu ratón y teclado o tu controlador preferido. </li>
|
82 |
-
</ol>
|
83 |
-
<h2>Conclusión</h2>
|
84 |
-
<p>Brawl Stars es un juego divertido y emocionante que puedes jugar en tu dispositivo móvil o tu PC. Jugar en PC tiene sus ventajas y desventajas, pero puede ser una gran manera de disfrutar del juego en una pantalla más grande con mejores controles. Para jugar Brawl Stars en PC, necesitará un emulador de Android que pueda ejecutar el juego sin problemas y de forma segura. Hemos enumerado algunos de los mejores que puedes probar, pero también puedes explorar otras opciones que te pueden ir mejor. Solo asegúrate de seguir los pasos cuidadosamente y respetar los términos de servicio del juego. ¡Diviértete peleando! </p>
|
85 |
-
<h2>Preguntas frecuentes</h2>
|
86 |
-
<ul>
|
87 |
-
<li><b>P: ¿Es Brawl Stars libre para jugar? </ A: Sí, Brawl Stars es gratis para jugar, pero también ofrece compras en la aplicación que pueden mejorar su experiencia de juego. Puedes comprar gemas, monedas, fichas y cajas para desbloquear más Brawlers, skins y otras recompensas. Sin embargo, también puedes ganar estos objetos jugando el juego y completando misiones y eventos. </li>
|
88 |
-
<li><b>Q: ¿Puedo jugar Brawl Stars con mis amigos en el PC? </b></li>
|
89 |
-
R: Sí, puedes jugar Brawl Stars con tus amigos en el PC, siempre y cuando también estén usando un emulador de Android o un dispositivo móvil. Puede unirse o crear un club para chatear y jugar con sus amigos, o invitarlos a un partido amistoso o un código de equipo. También puedes usar chat de voz o de texto para comunicarte con tus compañeros de equipo durante el juego. </li>
|
90 |
-
<li><b>Q: ¿Cómo puedo actualizar Brawl Stars en PC? </b></li>
|
91 |
-
|
92 |
-
<li><b>Q: ¿Es seguro jugar Brawl Stars en PC? </b></li>
|
93 |
-
R: Jugar Brawl Stars en PC generalmente es seguro, siempre y cuando use un emulador de Android confiable y seguro que no contenga malware o virus. Sin embargo, también debes tener cuidado con la seguridad y privacidad de tu cuenta, y evitar el uso de hacks o trucos que puedan violar los términos de servicio del juego. Si encuentras algún problema o problema mientras juegas Brawl Stars en PC, puedes contactar al equipo de soporte del juego o al servicio al cliente del emulador para obtener ayuda. </li>
|
94 |
-
<li><b>Q: ¿Cuáles son algunos consejos y trucos para jugar Brawl Stars en PC? </b></li>
|
95 |
-
R: Algunos de los consejos y trucos que pueden ayudarte a mejorar tus habilidades y disfrutar jugando Brawl Stars en PC son:</p>
|
96 |
-
<ul>
|
97 |
-
<li>Elige un Brawler que se adapte a tu estilo de juego y al modo de juego. Experimenta con diferentes Brawlers y aprende sus fortalezas y debilidades. </li>
|
98 |
-
<li>Personaliza tus controles y ajustes para optimizar tu rendimiento y comodidad. Puede ajustar la sensibilidad, la resolución, el sonido, la asignación de claves y otras opciones según sus preferencias. </li>
|
99 |
-
<li>Usa el entorno y los obstáculos a tu favor. Escóndete detrás de muros, arbustos y barriles para emboscar a tus enemigos o escapar del peligro. </li>
|
100 |
-
<li>Trabaja con tus compañeros de equipo y coordina tus estrategias. Usa tus habilidades y gadgets para apoyarse mutuamente y crear combos. </li>
|
101 |
-
<li>Diviértete y no te frustres. Brawl Stars es un juego que requiere práctica y paciencia, pero también pretende ser divertido y entretenido. No deje que las pérdidas o errores arruinen su estado de ánimo o motivación. </li>
|
102 |
-
</ul>
|
103 |
-
</ul></p> 64aa2da5cf<br />
|
104 |
-
<br />
|
105 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/index.py
DELETED
@@ -1,508 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
#
|
3 |
-
# Copyright (C) 2013 Vinay Sajip.
|
4 |
-
# Licensed to the Python Software Foundation under a contributor agreement.
|
5 |
-
# See LICENSE.txt and CONTRIBUTORS.txt.
|
6 |
-
#
|
7 |
-
import hashlib
|
8 |
-
import logging
|
9 |
-
import os
|
10 |
-
import shutil
|
11 |
-
import subprocess
|
12 |
-
import tempfile
|
13 |
-
try:
|
14 |
-
from threading import Thread
|
15 |
-
except ImportError: # pragma: no cover
|
16 |
-
from dummy_threading import Thread
|
17 |
-
|
18 |
-
from . import DistlibException
|
19 |
-
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
|
20 |
-
urlparse, build_opener, string_types)
|
21 |
-
from .util import zip_dir, ServerProxy
|
22 |
-
|
23 |
-
logger = logging.getLogger(__name__)
|
24 |
-
|
25 |
-
DEFAULT_INDEX = 'https://pypi.org/pypi'
|
26 |
-
DEFAULT_REALM = 'pypi'
|
27 |
-
|
28 |
-
class PackageIndex(object):
|
29 |
-
"""
|
30 |
-
This class represents a package index compatible with PyPI, the Python
|
31 |
-
Package Index.
|
32 |
-
"""
|
33 |
-
|
34 |
-
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
|
35 |
-
|
36 |
-
def __init__(self, url=None):
|
37 |
-
"""
|
38 |
-
Initialise an instance.
|
39 |
-
|
40 |
-
:param url: The URL of the index. If not specified, the URL for PyPI is
|
41 |
-
used.
|
42 |
-
"""
|
43 |
-
self.url = url or DEFAULT_INDEX
|
44 |
-
self.read_configuration()
|
45 |
-
scheme, netloc, path, params, query, frag = urlparse(self.url)
|
46 |
-
if params or query or frag or scheme not in ('http', 'https'):
|
47 |
-
raise DistlibException('invalid repository: %s' % self.url)
|
48 |
-
self.password_handler = None
|
49 |
-
self.ssl_verifier = None
|
50 |
-
self.gpg = None
|
51 |
-
self.gpg_home = None
|
52 |
-
with open(os.devnull, 'w') as sink:
|
53 |
-
# Use gpg by default rather than gpg2, as gpg2 insists on
|
54 |
-
# prompting for passwords
|
55 |
-
for s in ('gpg', 'gpg2'):
|
56 |
-
try:
|
57 |
-
rc = subprocess.check_call([s, '--version'], stdout=sink,
|
58 |
-
stderr=sink)
|
59 |
-
if rc == 0:
|
60 |
-
self.gpg = s
|
61 |
-
break
|
62 |
-
except OSError:
|
63 |
-
pass
|
64 |
-
|
65 |
-
def _get_pypirc_command(self):
|
66 |
-
"""
|
67 |
-
Get the distutils command for interacting with PyPI configurations.
|
68 |
-
:return: the command.
|
69 |
-
"""
|
70 |
-
from .util import _get_pypirc_command as cmd
|
71 |
-
return cmd()
|
72 |
-
|
73 |
-
def read_configuration(self):
|
74 |
-
"""
|
75 |
-
Read the PyPI access configuration as supported by distutils. This populates
|
76 |
-
``username``, ``password``, ``realm`` and ``url`` attributes from the
|
77 |
-
configuration.
|
78 |
-
"""
|
79 |
-
from .util import _load_pypirc
|
80 |
-
cfg = _load_pypirc(self)
|
81 |
-
self.username = cfg.get('username')
|
82 |
-
self.password = cfg.get('password')
|
83 |
-
self.realm = cfg.get('realm', 'pypi')
|
84 |
-
self.url = cfg.get('repository', self.url)
|
85 |
-
|
86 |
-
def save_configuration(self):
|
87 |
-
"""
|
88 |
-
Save the PyPI access configuration. You must have set ``username`` and
|
89 |
-
``password`` attributes before calling this method.
|
90 |
-
"""
|
91 |
-
self.check_credentials()
|
92 |
-
from .util import _store_pypirc
|
93 |
-
_store_pypirc(self)
|
94 |
-
|
95 |
-
def check_credentials(self):
|
96 |
-
"""
|
97 |
-
Check that ``username`` and ``password`` have been set, and raise an
|
98 |
-
exception if not.
|
99 |
-
"""
|
100 |
-
if self.username is None or self.password is None:
|
101 |
-
raise DistlibException('username and password must be set')
|
102 |
-
pm = HTTPPasswordMgr()
|
103 |
-
_, netloc, _, _, _, _ = urlparse(self.url)
|
104 |
-
pm.add_password(self.realm, netloc, self.username, self.password)
|
105 |
-
self.password_handler = HTTPBasicAuthHandler(pm)
|
106 |
-
|
107 |
-
def register(self, metadata): # pragma: no cover
|
108 |
-
"""
|
109 |
-
Register a distribution on PyPI, using the provided metadata.
|
110 |
-
|
111 |
-
:param metadata: A :class:`Metadata` instance defining at least a name
|
112 |
-
and version number for the distribution to be
|
113 |
-
registered.
|
114 |
-
:return: The HTTP response received from PyPI upon submission of the
|
115 |
-
request.
|
116 |
-
"""
|
117 |
-
self.check_credentials()
|
118 |
-
metadata.validate()
|
119 |
-
d = metadata.todict()
|
120 |
-
d[':action'] = 'verify'
|
121 |
-
request = self.encode_request(d.items(), [])
|
122 |
-
response = self.send_request(request)
|
123 |
-
d[':action'] = 'submit'
|
124 |
-
request = self.encode_request(d.items(), [])
|
125 |
-
return self.send_request(request)
|
126 |
-
|
127 |
-
def _reader(self, name, stream, outbuf):
|
128 |
-
"""
|
129 |
-
Thread runner for reading lines of from a subprocess into a buffer.
|
130 |
-
|
131 |
-
:param name: The logical name of the stream (used for logging only).
|
132 |
-
:param stream: The stream to read from. This will typically a pipe
|
133 |
-
connected to the output stream of a subprocess.
|
134 |
-
:param outbuf: The list to append the read lines to.
|
135 |
-
"""
|
136 |
-
while True:
|
137 |
-
s = stream.readline()
|
138 |
-
if not s:
|
139 |
-
break
|
140 |
-
s = s.decode('utf-8').rstrip()
|
141 |
-
outbuf.append(s)
|
142 |
-
logger.debug('%s: %s' % (name, s))
|
143 |
-
stream.close()
|
144 |
-
|
145 |
-
def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover
|
146 |
-
"""
|
147 |
-
Return a suitable command for signing a file.
|
148 |
-
|
149 |
-
:param filename: The pathname to the file to be signed.
|
150 |
-
:param signer: The identifier of the signer of the file.
|
151 |
-
:param sign_password: The passphrase for the signer's
|
152 |
-
private key used for signing.
|
153 |
-
:param keystore: The path to a directory which contains the keys
|
154 |
-
used in verification. If not specified, the
|
155 |
-
instance's ``gpg_home`` attribute is used instead.
|
156 |
-
:return: The signing command as a list suitable to be
|
157 |
-
passed to :class:`subprocess.Popen`.
|
158 |
-
"""
|
159 |
-
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
|
160 |
-
if keystore is None:
|
161 |
-
keystore = self.gpg_home
|
162 |
-
if keystore:
|
163 |
-
cmd.extend(['--homedir', keystore])
|
164 |
-
if sign_password is not None:
|
165 |
-
cmd.extend(['--batch', '--passphrase-fd', '0'])
|
166 |
-
td = tempfile.mkdtemp()
|
167 |
-
sf = os.path.join(td, os.path.basename(filename) + '.asc')
|
168 |
-
cmd.extend(['--detach-sign', '--armor', '--local-user',
|
169 |
-
signer, '--output', sf, filename])
|
170 |
-
logger.debug('invoking: %s', ' '.join(cmd))
|
171 |
-
return cmd, sf
|
172 |
-
|
173 |
-
def run_command(self, cmd, input_data=None):
|
174 |
-
"""
|
175 |
-
Run a command in a child process , passing it any input data specified.
|
176 |
-
|
177 |
-
:param cmd: The command to run.
|
178 |
-
:param input_data: If specified, this must be a byte string containing
|
179 |
-
data to be sent to the child process.
|
180 |
-
:return: A tuple consisting of the subprocess' exit code, a list of
|
181 |
-
lines read from the subprocess' ``stdout``, and a list of
|
182 |
-
lines read from the subprocess' ``stderr``.
|
183 |
-
"""
|
184 |
-
kwargs = {
|
185 |
-
'stdout': subprocess.PIPE,
|
186 |
-
'stderr': subprocess.PIPE,
|
187 |
-
}
|
188 |
-
if input_data is not None:
|
189 |
-
kwargs['stdin'] = subprocess.PIPE
|
190 |
-
stdout = []
|
191 |
-
stderr = []
|
192 |
-
p = subprocess.Popen(cmd, **kwargs)
|
193 |
-
# We don't use communicate() here because we may need to
|
194 |
-
# get clever with interacting with the command
|
195 |
-
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
|
196 |
-
t1.start()
|
197 |
-
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
|
198 |
-
t2.start()
|
199 |
-
if input_data is not None:
|
200 |
-
p.stdin.write(input_data)
|
201 |
-
p.stdin.close()
|
202 |
-
|
203 |
-
p.wait()
|
204 |
-
t1.join()
|
205 |
-
t2.join()
|
206 |
-
return p.returncode, stdout, stderr
|
207 |
-
|
208 |
-
def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover
|
209 |
-
"""
|
210 |
-
Sign a file.
|
211 |
-
|
212 |
-
:param filename: The pathname to the file to be signed.
|
213 |
-
:param signer: The identifier of the signer of the file.
|
214 |
-
:param sign_password: The passphrase for the signer's
|
215 |
-
private key used for signing.
|
216 |
-
:param keystore: The path to a directory which contains the keys
|
217 |
-
used in signing. If not specified, the instance's
|
218 |
-
``gpg_home`` attribute is used instead.
|
219 |
-
:return: The absolute pathname of the file where the signature is
|
220 |
-
stored.
|
221 |
-
"""
|
222 |
-
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
|
223 |
-
keystore)
|
224 |
-
rc, stdout, stderr = self.run_command(cmd,
|
225 |
-
sign_password.encode('utf-8'))
|
226 |
-
if rc != 0:
|
227 |
-
raise DistlibException('sign command failed with error '
|
228 |
-
'code %s' % rc)
|
229 |
-
return sig_file
|
230 |
-
|
231 |
-
def upload_file(self, metadata, filename, signer=None, sign_password=None,
|
232 |
-
filetype='sdist', pyversion='source', keystore=None):
|
233 |
-
"""
|
234 |
-
Upload a release file to the index.
|
235 |
-
|
236 |
-
:param metadata: A :class:`Metadata` instance defining at least a name
|
237 |
-
and version number for the file to be uploaded.
|
238 |
-
:param filename: The pathname of the file to be uploaded.
|
239 |
-
:param signer: The identifier of the signer of the file.
|
240 |
-
:param sign_password: The passphrase for the signer's
|
241 |
-
private key used for signing.
|
242 |
-
:param filetype: The type of the file being uploaded. This is the
|
243 |
-
distutils command which produced that file, e.g.
|
244 |
-
``sdist`` or ``bdist_wheel``.
|
245 |
-
:param pyversion: The version of Python which the release relates
|
246 |
-
to. For code compatible with any Python, this would
|
247 |
-
be ``source``, otherwise it would be e.g. ``3.2``.
|
248 |
-
:param keystore: The path to a directory which contains the keys
|
249 |
-
used in signing. If not specified, the instance's
|
250 |
-
``gpg_home`` attribute is used instead.
|
251 |
-
:return: The HTTP response received from PyPI upon submission of the
|
252 |
-
request.
|
253 |
-
"""
|
254 |
-
self.check_credentials()
|
255 |
-
if not os.path.exists(filename):
|
256 |
-
raise DistlibException('not found: %s' % filename)
|
257 |
-
metadata.validate()
|
258 |
-
d = metadata.todict()
|
259 |
-
sig_file = None
|
260 |
-
if signer:
|
261 |
-
if not self.gpg:
|
262 |
-
logger.warning('no signing program available - not signed')
|
263 |
-
else:
|
264 |
-
sig_file = self.sign_file(filename, signer, sign_password,
|
265 |
-
keystore)
|
266 |
-
with open(filename, 'rb') as f:
|
267 |
-
file_data = f.read()
|
268 |
-
md5_digest = hashlib.md5(file_data).hexdigest()
|
269 |
-
sha256_digest = hashlib.sha256(file_data).hexdigest()
|
270 |
-
d.update({
|
271 |
-
':action': 'file_upload',
|
272 |
-
'protocol_version': '1',
|
273 |
-
'filetype': filetype,
|
274 |
-
'pyversion': pyversion,
|
275 |
-
'md5_digest': md5_digest,
|
276 |
-
'sha256_digest': sha256_digest,
|
277 |
-
})
|
278 |
-
files = [('content', os.path.basename(filename), file_data)]
|
279 |
-
if sig_file:
|
280 |
-
with open(sig_file, 'rb') as f:
|
281 |
-
sig_data = f.read()
|
282 |
-
files.append(('gpg_signature', os.path.basename(sig_file),
|
283 |
-
sig_data))
|
284 |
-
shutil.rmtree(os.path.dirname(sig_file))
|
285 |
-
request = self.encode_request(d.items(), files)
|
286 |
-
return self.send_request(request)
|
287 |
-
|
288 |
-
def upload_documentation(self, metadata, doc_dir): # pragma: no cover
|
289 |
-
"""
|
290 |
-
Upload documentation to the index.
|
291 |
-
|
292 |
-
:param metadata: A :class:`Metadata` instance defining at least a name
|
293 |
-
and version number for the documentation to be
|
294 |
-
uploaded.
|
295 |
-
:param doc_dir: The pathname of the directory which contains the
|
296 |
-
documentation. This should be the directory that
|
297 |
-
contains the ``index.html`` for the documentation.
|
298 |
-
:return: The HTTP response received from PyPI upon submission of the
|
299 |
-
request.
|
300 |
-
"""
|
301 |
-
self.check_credentials()
|
302 |
-
if not os.path.isdir(doc_dir):
|
303 |
-
raise DistlibException('not a directory: %r' % doc_dir)
|
304 |
-
fn = os.path.join(doc_dir, 'index.html')
|
305 |
-
if not os.path.exists(fn):
|
306 |
-
raise DistlibException('not found: %r' % fn)
|
307 |
-
metadata.validate()
|
308 |
-
name, version = metadata.name, metadata.version
|
309 |
-
zip_data = zip_dir(doc_dir).getvalue()
|
310 |
-
fields = [(':action', 'doc_upload'),
|
311 |
-
('name', name), ('version', version)]
|
312 |
-
files = [('content', name, zip_data)]
|
313 |
-
request = self.encode_request(fields, files)
|
314 |
-
return self.send_request(request)
|
315 |
-
|
316 |
-
def get_verify_command(self, signature_filename, data_filename,
|
317 |
-
keystore=None):
|
318 |
-
"""
|
319 |
-
Return a suitable command for verifying a file.
|
320 |
-
|
321 |
-
:param signature_filename: The pathname to the file containing the
|
322 |
-
signature.
|
323 |
-
:param data_filename: The pathname to the file containing the
|
324 |
-
signed data.
|
325 |
-
:param keystore: The path to a directory which contains the keys
|
326 |
-
used in verification. If not specified, the
|
327 |
-
instance's ``gpg_home`` attribute is used instead.
|
328 |
-
:return: The verifying command as a list suitable to be
|
329 |
-
passed to :class:`subprocess.Popen`.
|
330 |
-
"""
|
331 |
-
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
|
332 |
-
if keystore is None:
|
333 |
-
keystore = self.gpg_home
|
334 |
-
if keystore:
|
335 |
-
cmd.extend(['--homedir', keystore])
|
336 |
-
cmd.extend(['--verify', signature_filename, data_filename])
|
337 |
-
logger.debug('invoking: %s', ' '.join(cmd))
|
338 |
-
return cmd
|
339 |
-
|
340 |
-
def verify_signature(self, signature_filename, data_filename,
|
341 |
-
keystore=None):
|
342 |
-
"""
|
343 |
-
Verify a signature for a file.
|
344 |
-
|
345 |
-
:param signature_filename: The pathname to the file containing the
|
346 |
-
signature.
|
347 |
-
:param data_filename: The pathname to the file containing the
|
348 |
-
signed data.
|
349 |
-
:param keystore: The path to a directory which contains the keys
|
350 |
-
used in verification. If not specified, the
|
351 |
-
instance's ``gpg_home`` attribute is used instead.
|
352 |
-
:return: True if the signature was verified, else False.
|
353 |
-
"""
|
354 |
-
if not self.gpg:
|
355 |
-
raise DistlibException('verification unavailable because gpg '
|
356 |
-
'unavailable')
|
357 |
-
cmd = self.get_verify_command(signature_filename, data_filename,
|
358 |
-
keystore)
|
359 |
-
rc, stdout, stderr = self.run_command(cmd)
|
360 |
-
if rc not in (0, 1):
|
361 |
-
raise DistlibException('verify command failed with error '
|
362 |
-
'code %s' % rc)
|
363 |
-
return rc == 0
|
364 |
-
|
365 |
-
def download_file(self, url, destfile, digest=None, reporthook=None):
|
366 |
-
"""
|
367 |
-
This is a convenience method for downloading a file from an URL.
|
368 |
-
Normally, this will be a file from the index, though currently
|
369 |
-
no check is made for this (i.e. a file can be downloaded from
|
370 |
-
anywhere).
|
371 |
-
|
372 |
-
The method is just like the :func:`urlretrieve` function in the
|
373 |
-
standard library, except that it allows digest computation to be
|
374 |
-
done during download and checking that the downloaded data
|
375 |
-
matched any expected value.
|
376 |
-
|
377 |
-
:param url: The URL of the file to be downloaded (assumed to be
|
378 |
-
available via an HTTP GET request).
|
379 |
-
:param destfile: The pathname where the downloaded file is to be
|
380 |
-
saved.
|
381 |
-
:param digest: If specified, this must be a (hasher, value)
|
382 |
-
tuple, where hasher is the algorithm used (e.g.
|
383 |
-
``'md5'``) and ``value`` is the expected value.
|
384 |
-
:param reporthook: The same as for :func:`urlretrieve` in the
|
385 |
-
standard library.
|
386 |
-
"""
|
387 |
-
if digest is None:
|
388 |
-
digester = None
|
389 |
-
logger.debug('No digest specified')
|
390 |
-
else:
|
391 |
-
if isinstance(digest, (list, tuple)):
|
392 |
-
hasher, digest = digest
|
393 |
-
else:
|
394 |
-
hasher = 'md5'
|
395 |
-
digester = getattr(hashlib, hasher)()
|
396 |
-
logger.debug('Digest specified: %s' % digest)
|
397 |
-
# The following code is equivalent to urlretrieve.
|
398 |
-
# We need to do it this way so that we can compute the
|
399 |
-
# digest of the file as we go.
|
400 |
-
with open(destfile, 'wb') as dfp:
|
401 |
-
# addinfourl is not a context manager on 2.x
|
402 |
-
# so we have to use try/finally
|
403 |
-
sfp = self.send_request(Request(url))
|
404 |
-
try:
|
405 |
-
headers = sfp.info()
|
406 |
-
blocksize = 8192
|
407 |
-
size = -1
|
408 |
-
read = 0
|
409 |
-
blocknum = 0
|
410 |
-
if "content-length" in headers:
|
411 |
-
size = int(headers["Content-Length"])
|
412 |
-
if reporthook:
|
413 |
-
reporthook(blocknum, blocksize, size)
|
414 |
-
while True:
|
415 |
-
block = sfp.read(blocksize)
|
416 |
-
if not block:
|
417 |
-
break
|
418 |
-
read += len(block)
|
419 |
-
dfp.write(block)
|
420 |
-
if digester:
|
421 |
-
digester.update(block)
|
422 |
-
blocknum += 1
|
423 |
-
if reporthook:
|
424 |
-
reporthook(blocknum, blocksize, size)
|
425 |
-
finally:
|
426 |
-
sfp.close()
|
427 |
-
|
428 |
-
# check that we got the whole file, if we can
|
429 |
-
if size >= 0 and read < size:
|
430 |
-
raise DistlibException(
|
431 |
-
'retrieval incomplete: got only %d out of %d bytes'
|
432 |
-
% (read, size))
|
433 |
-
# if we have a digest, it must match.
|
434 |
-
if digester:
|
435 |
-
actual = digester.hexdigest()
|
436 |
-
if digest != actual:
|
437 |
-
raise DistlibException('%s digest mismatch for %s: expected '
|
438 |
-
'%s, got %s' % (hasher, destfile,
|
439 |
-
digest, actual))
|
440 |
-
logger.debug('Digest verified: %s', digest)
|
441 |
-
|
442 |
-
def send_request(self, req):
|
443 |
-
"""
|
444 |
-
Send a standard library :class:`Request` to PyPI and return its
|
445 |
-
response.
|
446 |
-
|
447 |
-
:param req: The request to send.
|
448 |
-
:return: The HTTP response from PyPI (a standard library HTTPResponse).
|
449 |
-
"""
|
450 |
-
handlers = []
|
451 |
-
if self.password_handler:
|
452 |
-
handlers.append(self.password_handler)
|
453 |
-
if self.ssl_verifier:
|
454 |
-
handlers.append(self.ssl_verifier)
|
455 |
-
opener = build_opener(*handlers)
|
456 |
-
return opener.open(req)
|
457 |
-
|
458 |
-
def encode_request(self, fields, files):
|
459 |
-
"""
|
460 |
-
Encode fields and files for posting to an HTTP server.
|
461 |
-
|
462 |
-
:param fields: The fields to send as a list of (fieldname, value)
|
463 |
-
tuples.
|
464 |
-
:param files: The files to send as a list of (fieldname, filename,
|
465 |
-
file_bytes) tuple.
|
466 |
-
"""
|
467 |
-
# Adapted from packaging, which in turn was adapted from
|
468 |
-
# http://code.activestate.com/recipes/146306
|
469 |
-
|
470 |
-
parts = []
|
471 |
-
boundary = self.boundary
|
472 |
-
for k, values in fields:
|
473 |
-
if not isinstance(values, (list, tuple)):
|
474 |
-
values = [values]
|
475 |
-
|
476 |
-
for v in values:
|
477 |
-
parts.extend((
|
478 |
-
b'--' + boundary,
|
479 |
-
('Content-Disposition: form-data; name="%s"' %
|
480 |
-
k).encode('utf-8'),
|
481 |
-
b'',
|
482 |
-
v.encode('utf-8')))
|
483 |
-
for key, filename, value in files:
|
484 |
-
parts.extend((
|
485 |
-
b'--' + boundary,
|
486 |
-
('Content-Disposition: form-data; name="%s"; filename="%s"' %
|
487 |
-
(key, filename)).encode('utf-8'),
|
488 |
-
b'',
|
489 |
-
value))
|
490 |
-
|
491 |
-
parts.extend((b'--' + boundary + b'--', b''))
|
492 |
-
|
493 |
-
body = b'\r\n'.join(parts)
|
494 |
-
ct = b'multipart/form-data; boundary=' + boundary
|
495 |
-
headers = {
|
496 |
-
'Content-type': ct,
|
497 |
-
'Content-length': str(len(body))
|
498 |
-
}
|
499 |
-
return Request(self.url, body, headers)
|
500 |
-
|
501 |
-
def search(self, terms, operator=None): # pragma: no cover
|
502 |
-
if isinstance(terms, string_types):
|
503 |
-
terms = {'name': terms}
|
504 |
-
rpc_proxy = ServerProxy(self.url, timeout=3.0)
|
505 |
-
try:
|
506 |
-
return rpc_proxy.search(terms, operator or 'and')
|
507 |
-
finally:
|
508 |
-
rpc_proxy('close')()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
DELETED
@@ -1,353 +0,0 @@
|
|
1 |
-
"""This is invoked in a subprocess to call the build backend hooks.
|
2 |
-
|
3 |
-
It expects:
|
4 |
-
- Command line args: hook_name, control_dir
|
5 |
-
- Environment variables:
|
6 |
-
PEP517_BUILD_BACKEND=entry.point:spec
|
7 |
-
PEP517_BACKEND_PATH=paths (separated with os.pathsep)
|
8 |
-
- control_dir/input.json:
|
9 |
-
- {"kwargs": {...}}
|
10 |
-
|
11 |
-
Results:
|
12 |
-
- control_dir/output.json
|
13 |
-
- {"return_val": ...}
|
14 |
-
"""
|
15 |
-
import json
|
16 |
-
import os
|
17 |
-
import os.path
|
18 |
-
import re
|
19 |
-
import shutil
|
20 |
-
import sys
|
21 |
-
import traceback
|
22 |
-
from glob import glob
|
23 |
-
from importlib import import_module
|
24 |
-
from os.path import join as pjoin
|
25 |
-
|
26 |
-
# This file is run as a script, and `import wrappers` is not zip-safe, so we
|
27 |
-
# include write_json() and read_json() from wrappers.py.
|
28 |
-
|
29 |
-
|
30 |
-
def write_json(obj, path, **kwargs):
|
31 |
-
with open(path, 'w', encoding='utf-8') as f:
|
32 |
-
json.dump(obj, f, **kwargs)
|
33 |
-
|
34 |
-
|
35 |
-
def read_json(path):
|
36 |
-
with open(path, encoding='utf-8') as f:
|
37 |
-
return json.load(f)
|
38 |
-
|
39 |
-
|
40 |
-
class BackendUnavailable(Exception):
|
41 |
-
"""Raised if we cannot import the backend"""
|
42 |
-
def __init__(self, traceback):
|
43 |
-
self.traceback = traceback
|
44 |
-
|
45 |
-
|
46 |
-
class BackendInvalid(Exception):
|
47 |
-
"""Raised if the backend is invalid"""
|
48 |
-
def __init__(self, message):
|
49 |
-
self.message = message
|
50 |
-
|
51 |
-
|
52 |
-
class HookMissing(Exception):
|
53 |
-
"""Raised if a hook is missing and we are not executing the fallback"""
|
54 |
-
def __init__(self, hook_name=None):
|
55 |
-
super().__init__(hook_name)
|
56 |
-
self.hook_name = hook_name
|
57 |
-
|
58 |
-
|
59 |
-
def contained_in(filename, directory):
|
60 |
-
"""Test if a file is located within the given directory."""
|
61 |
-
filename = os.path.normcase(os.path.abspath(filename))
|
62 |
-
directory = os.path.normcase(os.path.abspath(directory))
|
63 |
-
return os.path.commonprefix([filename, directory]) == directory
|
64 |
-
|
65 |
-
|
66 |
-
def _build_backend():
|
67 |
-
"""Find and load the build backend"""
|
68 |
-
# Add in-tree backend directories to the front of sys.path.
|
69 |
-
backend_path = os.environ.get('PEP517_BACKEND_PATH')
|
70 |
-
if backend_path:
|
71 |
-
extra_pathitems = backend_path.split(os.pathsep)
|
72 |
-
sys.path[:0] = extra_pathitems
|
73 |
-
|
74 |
-
ep = os.environ['PEP517_BUILD_BACKEND']
|
75 |
-
mod_path, _, obj_path = ep.partition(':')
|
76 |
-
try:
|
77 |
-
obj = import_module(mod_path)
|
78 |
-
except ImportError:
|
79 |
-
raise BackendUnavailable(traceback.format_exc())
|
80 |
-
|
81 |
-
if backend_path:
|
82 |
-
if not any(
|
83 |
-
contained_in(obj.__file__, path)
|
84 |
-
for path in extra_pathitems
|
85 |
-
):
|
86 |
-
raise BackendInvalid("Backend was not loaded from backend-path")
|
87 |
-
|
88 |
-
if obj_path:
|
89 |
-
for path_part in obj_path.split('.'):
|
90 |
-
obj = getattr(obj, path_part)
|
91 |
-
return obj
|
92 |
-
|
93 |
-
|
94 |
-
def _supported_features():
|
95 |
-
"""Return the list of options features supported by the backend.
|
96 |
-
|
97 |
-
Returns a list of strings.
|
98 |
-
The only possible value is 'build_editable'.
|
99 |
-
"""
|
100 |
-
backend = _build_backend()
|
101 |
-
features = []
|
102 |
-
if hasattr(backend, "build_editable"):
|
103 |
-
features.append("build_editable")
|
104 |
-
return features
|
105 |
-
|
106 |
-
|
107 |
-
def get_requires_for_build_wheel(config_settings):
|
108 |
-
"""Invoke the optional get_requires_for_build_wheel hook
|
109 |
-
|
110 |
-
Returns [] if the hook is not defined.
|
111 |
-
"""
|
112 |
-
backend = _build_backend()
|
113 |
-
try:
|
114 |
-
hook = backend.get_requires_for_build_wheel
|
115 |
-
except AttributeError:
|
116 |
-
return []
|
117 |
-
else:
|
118 |
-
return hook(config_settings)
|
119 |
-
|
120 |
-
|
121 |
-
def get_requires_for_build_editable(config_settings):
|
122 |
-
"""Invoke the optional get_requires_for_build_editable hook
|
123 |
-
|
124 |
-
Returns [] if the hook is not defined.
|
125 |
-
"""
|
126 |
-
backend = _build_backend()
|
127 |
-
try:
|
128 |
-
hook = backend.get_requires_for_build_editable
|
129 |
-
except AttributeError:
|
130 |
-
return []
|
131 |
-
else:
|
132 |
-
return hook(config_settings)
|
133 |
-
|
134 |
-
|
135 |
-
def prepare_metadata_for_build_wheel(
|
136 |
-
metadata_directory, config_settings, _allow_fallback):
|
137 |
-
"""Invoke optional prepare_metadata_for_build_wheel
|
138 |
-
|
139 |
-
Implements a fallback by building a wheel if the hook isn't defined,
|
140 |
-
unless _allow_fallback is False in which case HookMissing is raised.
|
141 |
-
"""
|
142 |
-
backend = _build_backend()
|
143 |
-
try:
|
144 |
-
hook = backend.prepare_metadata_for_build_wheel
|
145 |
-
except AttributeError:
|
146 |
-
if not _allow_fallback:
|
147 |
-
raise HookMissing()
|
148 |
-
else:
|
149 |
-
return hook(metadata_directory, config_settings)
|
150 |
-
# fallback to build_wheel outside the try block to avoid exception chaining
|
151 |
-
# which can be confusing to users and is not relevant
|
152 |
-
whl_basename = backend.build_wheel(metadata_directory, config_settings)
|
153 |
-
return _get_wheel_metadata_from_wheel(whl_basename, metadata_directory,
|
154 |
-
config_settings)
|
155 |
-
|
156 |
-
|
157 |
-
def prepare_metadata_for_build_editable(
|
158 |
-
metadata_directory, config_settings, _allow_fallback):
|
159 |
-
"""Invoke optional prepare_metadata_for_build_editable
|
160 |
-
|
161 |
-
Implements a fallback by building an editable wheel if the hook isn't
|
162 |
-
defined, unless _allow_fallback is False in which case HookMissing is
|
163 |
-
raised.
|
164 |
-
"""
|
165 |
-
backend = _build_backend()
|
166 |
-
try:
|
167 |
-
hook = backend.prepare_metadata_for_build_editable
|
168 |
-
except AttributeError:
|
169 |
-
if not _allow_fallback:
|
170 |
-
raise HookMissing()
|
171 |
-
try:
|
172 |
-
build_hook = backend.build_editable
|
173 |
-
except AttributeError:
|
174 |
-
raise HookMissing(hook_name='build_editable')
|
175 |
-
else:
|
176 |
-
whl_basename = build_hook(metadata_directory, config_settings)
|
177 |
-
return _get_wheel_metadata_from_wheel(whl_basename,
|
178 |
-
metadata_directory,
|
179 |
-
config_settings)
|
180 |
-
else:
|
181 |
-
return hook(metadata_directory, config_settings)
|
182 |
-
|
183 |
-
|
184 |
-
WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL'
|
185 |
-
|
186 |
-
|
187 |
-
def _dist_info_files(whl_zip):
|
188 |
-
"""Identify the .dist-info folder inside a wheel ZipFile."""
|
189 |
-
res = []
|
190 |
-
for path in whl_zip.namelist():
|
191 |
-
m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path)
|
192 |
-
if m:
|
193 |
-
res.append(path)
|
194 |
-
if res:
|
195 |
-
return res
|
196 |
-
raise Exception("No .dist-info folder found in wheel")
|
197 |
-
|
198 |
-
|
199 |
-
def _get_wheel_metadata_from_wheel(
|
200 |
-
whl_basename, metadata_directory, config_settings):
|
201 |
-
"""Extract the metadata from a wheel.
|
202 |
-
|
203 |
-
Fallback for when the build backend does not
|
204 |
-
define the 'get_wheel_metadata' hook.
|
205 |
-
"""
|
206 |
-
from zipfile import ZipFile
|
207 |
-
with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
|
208 |
-
pass # Touch marker file
|
209 |
-
|
210 |
-
whl_file = os.path.join(metadata_directory, whl_basename)
|
211 |
-
with ZipFile(whl_file) as zipf:
|
212 |
-
dist_info = _dist_info_files(zipf)
|
213 |
-
zipf.extractall(path=metadata_directory, members=dist_info)
|
214 |
-
return dist_info[0].split('/')[0]
|
215 |
-
|
216 |
-
|
217 |
-
def _find_already_built_wheel(metadata_directory):
|
218 |
-
"""Check for a wheel already built during the get_wheel_metadata hook.
|
219 |
-
"""
|
220 |
-
if not metadata_directory:
|
221 |
-
return None
|
222 |
-
metadata_parent = os.path.dirname(metadata_directory)
|
223 |
-
if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):
|
224 |
-
return None
|
225 |
-
|
226 |
-
whl_files = glob(os.path.join(metadata_parent, '*.whl'))
|
227 |
-
if not whl_files:
|
228 |
-
print('Found wheel built marker, but no .whl files')
|
229 |
-
return None
|
230 |
-
if len(whl_files) > 1:
|
231 |
-
print('Found multiple .whl files; unspecified behaviour. '
|
232 |
-
'Will call build_wheel.')
|
233 |
-
return None
|
234 |
-
|
235 |
-
# Exactly one .whl file
|
236 |
-
return whl_files[0]
|
237 |
-
|
238 |
-
|
239 |
-
def build_wheel(wheel_directory, config_settings, metadata_directory=None):
|
240 |
-
"""Invoke the mandatory build_wheel hook.
|
241 |
-
|
242 |
-
If a wheel was already built in the
|
243 |
-
prepare_metadata_for_build_wheel fallback, this
|
244 |
-
will copy it rather than rebuilding the wheel.
|
245 |
-
"""
|
246 |
-
prebuilt_whl = _find_already_built_wheel(metadata_directory)
|
247 |
-
if prebuilt_whl:
|
248 |
-
shutil.copy2(prebuilt_whl, wheel_directory)
|
249 |
-
return os.path.basename(prebuilt_whl)
|
250 |
-
|
251 |
-
return _build_backend().build_wheel(wheel_directory, config_settings,
|
252 |
-
metadata_directory)
|
253 |
-
|
254 |
-
|
255 |
-
def build_editable(wheel_directory, config_settings, metadata_directory=None):
|
256 |
-
"""Invoke the optional build_editable hook.
|
257 |
-
|
258 |
-
If a wheel was already built in the
|
259 |
-
prepare_metadata_for_build_editable fallback, this
|
260 |
-
will copy it rather than rebuilding the wheel.
|
261 |
-
"""
|
262 |
-
backend = _build_backend()
|
263 |
-
try:
|
264 |
-
hook = backend.build_editable
|
265 |
-
except AttributeError:
|
266 |
-
raise HookMissing()
|
267 |
-
else:
|
268 |
-
prebuilt_whl = _find_already_built_wheel(metadata_directory)
|
269 |
-
if prebuilt_whl:
|
270 |
-
shutil.copy2(prebuilt_whl, wheel_directory)
|
271 |
-
return os.path.basename(prebuilt_whl)
|
272 |
-
|
273 |
-
return hook(wheel_directory, config_settings, metadata_directory)
|
274 |
-
|
275 |
-
|
276 |
-
def get_requires_for_build_sdist(config_settings):
|
277 |
-
"""Invoke the optional get_requires_for_build_wheel hook
|
278 |
-
|
279 |
-
Returns [] if the hook is not defined.
|
280 |
-
"""
|
281 |
-
backend = _build_backend()
|
282 |
-
try:
|
283 |
-
hook = backend.get_requires_for_build_sdist
|
284 |
-
except AttributeError:
|
285 |
-
return []
|
286 |
-
else:
|
287 |
-
return hook(config_settings)
|
288 |
-
|
289 |
-
|
290 |
-
class _DummyException(Exception):
|
291 |
-
"""Nothing should ever raise this exception"""
|
292 |
-
|
293 |
-
|
294 |
-
class GotUnsupportedOperation(Exception):
|
295 |
-
"""For internal use when backend raises UnsupportedOperation"""
|
296 |
-
def __init__(self, traceback):
|
297 |
-
self.traceback = traceback
|
298 |
-
|
299 |
-
|
300 |
-
def build_sdist(sdist_directory, config_settings):
|
301 |
-
"""Invoke the mandatory build_sdist hook."""
|
302 |
-
backend = _build_backend()
|
303 |
-
try:
|
304 |
-
return backend.build_sdist(sdist_directory, config_settings)
|
305 |
-
except getattr(backend, 'UnsupportedOperation', _DummyException):
|
306 |
-
raise GotUnsupportedOperation(traceback.format_exc())
|
307 |
-
|
308 |
-
|
309 |
-
HOOK_NAMES = {
|
310 |
-
'get_requires_for_build_wheel',
|
311 |
-
'prepare_metadata_for_build_wheel',
|
312 |
-
'build_wheel',
|
313 |
-
'get_requires_for_build_editable',
|
314 |
-
'prepare_metadata_for_build_editable',
|
315 |
-
'build_editable',
|
316 |
-
'get_requires_for_build_sdist',
|
317 |
-
'build_sdist',
|
318 |
-
'_supported_features',
|
319 |
-
}
|
320 |
-
|
321 |
-
|
322 |
-
def main():
|
323 |
-
if len(sys.argv) < 3:
|
324 |
-
sys.exit("Needs args: hook_name, control_dir")
|
325 |
-
hook_name = sys.argv[1]
|
326 |
-
control_dir = sys.argv[2]
|
327 |
-
if hook_name not in HOOK_NAMES:
|
328 |
-
sys.exit("Unknown hook: %s" % hook_name)
|
329 |
-
hook = globals()[hook_name]
|
330 |
-
|
331 |
-
hook_input = read_json(pjoin(control_dir, 'input.json'))
|
332 |
-
|
333 |
-
json_out = {'unsupported': False, 'return_val': None}
|
334 |
-
try:
|
335 |
-
json_out['return_val'] = hook(**hook_input['kwargs'])
|
336 |
-
except BackendUnavailable as e:
|
337 |
-
json_out['no_backend'] = True
|
338 |
-
json_out['traceback'] = e.traceback
|
339 |
-
except BackendInvalid as e:
|
340 |
-
json_out['backend_invalid'] = True
|
341 |
-
json_out['backend_error'] = e.message
|
342 |
-
except GotUnsupportedOperation as e:
|
343 |
-
json_out['unsupported'] = True
|
344 |
-
json_out['traceback'] = e.traceback
|
345 |
-
except HookMissing as e:
|
346 |
-
json_out['hook_missing'] = True
|
347 |
-
json_out['missing_hook_name'] = e.hook_name or hook_name
|
348 |
-
|
349 |
-
write_json(json_out, pjoin(control_dir, 'output.json'), indent=2)
|
350 |
-
|
351 |
-
|
352 |
-
if __name__ == '__main__':
|
353 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlign/ROIAlign.h
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
#pragma once
|
3 |
-
#include <torch/types.h>
|
4 |
-
|
5 |
-
namespace detectron2 {
|
6 |
-
|
7 |
-
at::Tensor ROIAlign_forward_cpu(
|
8 |
-
const at::Tensor& input,
|
9 |
-
const at::Tensor& rois,
|
10 |
-
const float spatial_scale,
|
11 |
-
const int pooled_height,
|
12 |
-
const int pooled_width,
|
13 |
-
const int sampling_ratio,
|
14 |
-
bool aligned);
|
15 |
-
|
16 |
-
at::Tensor ROIAlign_backward_cpu(
|
17 |
-
const at::Tensor& grad,
|
18 |
-
const at::Tensor& rois,
|
19 |
-
const float spatial_scale,
|
20 |
-
const int pooled_height,
|
21 |
-
const int pooled_width,
|
22 |
-
const int batch_size,
|
23 |
-
const int channels,
|
24 |
-
const int height,
|
25 |
-
const int width,
|
26 |
-
const int sampling_ratio,
|
27 |
-
bool aligned);
|
28 |
-
|
29 |
-
#ifdef WITH_CUDA
|
30 |
-
at::Tensor ROIAlign_forward_cuda(
|
31 |
-
const at::Tensor& input,
|
32 |
-
const at::Tensor& rois,
|
33 |
-
const float spatial_scale,
|
34 |
-
const int pooled_height,
|
35 |
-
const int pooled_width,
|
36 |
-
const int sampling_ratio,
|
37 |
-
bool aligned);
|
38 |
-
|
39 |
-
at::Tensor ROIAlign_backward_cuda(
|
40 |
-
const at::Tensor& grad,
|
41 |
-
const at::Tensor& rois,
|
42 |
-
const float spatial_scale,
|
43 |
-
const int pooled_height,
|
44 |
-
const int pooled_width,
|
45 |
-
const int batch_size,
|
46 |
-
const int channels,
|
47 |
-
const int height,
|
48 |
-
const int width,
|
49 |
-
const int sampling_ratio,
|
50 |
-
bool aligned);
|
51 |
-
#endif
|
52 |
-
|
53 |
-
// Interface for Python
|
54 |
-
inline at::Tensor ROIAlign_forward(
|
55 |
-
const at::Tensor& input,
|
56 |
-
const at::Tensor& rois,
|
57 |
-
const float spatial_scale,
|
58 |
-
const int pooled_height,
|
59 |
-
const int pooled_width,
|
60 |
-
const int sampling_ratio,
|
61 |
-
bool aligned) {
|
62 |
-
if (input.type().is_cuda()) {
|
63 |
-
#ifdef WITH_CUDA
|
64 |
-
return ROIAlign_forward_cuda(
|
65 |
-
input,
|
66 |
-
rois,
|
67 |
-
spatial_scale,
|
68 |
-
pooled_height,
|
69 |
-
pooled_width,
|
70 |
-
sampling_ratio,
|
71 |
-
aligned);
|
72 |
-
#else
|
73 |
-
AT_ERROR("Not compiled with GPU support");
|
74 |
-
#endif
|
75 |
-
}
|
76 |
-
return ROIAlign_forward_cpu(
|
77 |
-
input,
|
78 |
-
rois,
|
79 |
-
spatial_scale,
|
80 |
-
pooled_height,
|
81 |
-
pooled_width,
|
82 |
-
sampling_ratio,
|
83 |
-
aligned);
|
84 |
-
}
|
85 |
-
|
86 |
-
inline at::Tensor ROIAlign_backward(
|
87 |
-
const at::Tensor& grad,
|
88 |
-
const at::Tensor& rois,
|
89 |
-
const float spatial_scale,
|
90 |
-
const int pooled_height,
|
91 |
-
const int pooled_width,
|
92 |
-
const int batch_size,
|
93 |
-
const int channels,
|
94 |
-
const int height,
|
95 |
-
const int width,
|
96 |
-
const int sampling_ratio,
|
97 |
-
bool aligned) {
|
98 |
-
if (grad.type().is_cuda()) {
|
99 |
-
#ifdef WITH_CUDA
|
100 |
-
return ROIAlign_backward_cuda(
|
101 |
-
grad,
|
102 |
-
rois,
|
103 |
-
spatial_scale,
|
104 |
-
pooled_height,
|
105 |
-
pooled_width,
|
106 |
-
batch_size,
|
107 |
-
channels,
|
108 |
-
height,
|
109 |
-
width,
|
110 |
-
sampling_ratio,
|
111 |
-
aligned);
|
112 |
-
#else
|
113 |
-
AT_ERROR("Not compiled with GPU support");
|
114 |
-
#endif
|
115 |
-
}
|
116 |
-
return ROIAlign_backward_cpu(
|
117 |
-
grad,
|
118 |
-
rois,
|
119 |
-
spatial_scale,
|
120 |
-
pooled_height,
|
121 |
-
pooled_width,
|
122 |
-
batch_size,
|
123 |
-
channels,
|
124 |
-
height,
|
125 |
-
width,
|
126 |
-
sampling_ratio,
|
127 |
-
aligned);
|
128 |
-
}
|
129 |
-
|
130 |
-
} // namespace detectron2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/scan.h
DELETED
@@ -1,122 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file scan.h
|
19 |
-
* \brief Sequential implementations of scan functions.
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
#include <thrust/system/detail/sequential/execution_policy.h>
|
26 |
-
#include <thrust/iterator/iterator_traits.h>
|
27 |
-
#include <thrust/detail/type_traits.h>
|
28 |
-
#include <thrust/detail/type_traits/function_traits.h>
|
29 |
-
#include <thrust/detail/type_traits/iterator/is_output_iterator.h>
|
30 |
-
#include <thrust/detail/function.h>
|
31 |
-
|
32 |
-
namespace thrust
|
33 |
-
{
|
34 |
-
namespace system
|
35 |
-
{
|
36 |
-
namespace detail
|
37 |
-
{
|
38 |
-
namespace sequential
|
39 |
-
{
|
40 |
-
|
41 |
-
|
42 |
-
__thrust_exec_check_disable__
|
43 |
-
template<typename DerivedPolicy,
|
44 |
-
typename InputIterator,
|
45 |
-
typename OutputIterator,
|
46 |
-
typename BinaryFunction>
|
47 |
-
__host__ __device__
|
48 |
-
OutputIterator inclusive_scan(sequential::execution_policy<DerivedPolicy> &,
|
49 |
-
InputIterator first,
|
50 |
-
InputIterator last,
|
51 |
-
OutputIterator result,
|
52 |
-
BinaryFunction binary_op)
|
53 |
-
{
|
54 |
-
using namespace thrust::detail;
|
55 |
-
|
56 |
-
// Use the input iterator's value type per https://wg21.link/P0571
|
57 |
-
using ValueType = typename thrust::iterator_value<InputIterator>::type;
|
58 |
-
|
59 |
-
// wrap binary_op
|
60 |
-
thrust::detail::wrapped_function<
|
61 |
-
BinaryFunction,
|
62 |
-
ValueType
|
63 |
-
> wrapped_binary_op(binary_op);
|
64 |
-
|
65 |
-
if(first != last)
|
66 |
-
{
|
67 |
-
ValueType sum = *first;
|
68 |
-
|
69 |
-
*result = *first;
|
70 |
-
|
71 |
-
for(++first, ++result; first != last; ++first, ++result)
|
72 |
-
*result = sum = wrapped_binary_op(sum,*first);
|
73 |
-
}
|
74 |
-
|
75 |
-
return result;
|
76 |
-
}
|
77 |
-
|
78 |
-
|
79 |
-
__thrust_exec_check_disable__
|
80 |
-
template<typename DerivedPolicy,
|
81 |
-
typename InputIterator,
|
82 |
-
typename OutputIterator,
|
83 |
-
typename InitialValueType,
|
84 |
-
typename BinaryFunction>
|
85 |
-
__host__ __device__
|
86 |
-
OutputIterator exclusive_scan(sequential::execution_policy<DerivedPolicy> &,
|
87 |
-
InputIterator first,
|
88 |
-
InputIterator last,
|
89 |
-
OutputIterator result,
|
90 |
-
InitialValueType init,
|
91 |
-
BinaryFunction binary_op)
|
92 |
-
{
|
93 |
-
using namespace thrust::detail;
|
94 |
-
|
95 |
-
// Use the initial value type per https://wg21.link/P0571
|
96 |
-
using ValueType = InitialValueType;
|
97 |
-
|
98 |
-
if(first != last)
|
99 |
-
{
|
100 |
-
ValueType tmp = *first; // temporary value allows in-situ scan
|
101 |
-
ValueType sum = init;
|
102 |
-
|
103 |
-
*result = sum;
|
104 |
-
sum = binary_op(sum, tmp);
|
105 |
-
|
106 |
-
for(++first, ++result; first != last; ++first, ++result)
|
107 |
-
{
|
108 |
-
tmp = *first;
|
109 |
-
*result = sum;
|
110 |
-
sum = binary_op(sum, tmp);
|
111 |
-
}
|
112 |
-
}
|
113 |
-
|
114 |
-
return result;
|
115 |
-
}
|
116 |
-
|
117 |
-
|
118 |
-
} // end namespace sequential
|
119 |
-
} // end namespace detail
|
120 |
-
} // end namespace system
|
121 |
-
} // end namespace thrust
|
122 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Candeloro/anime-remove-background/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Anime Remove Background
|
3 |
-
emoji: 🪄🖼️
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.1.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
duplicated_from: skytnt/anime-remove-background
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/meme-api/meme_generator/memes/hammer/__init__.py
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
from pathlib import Path
|
2 |
-
from typing import List
|
3 |
-
|
4 |
-
from PIL.Image import Image as IMG
|
5 |
-
from pil_utils import BuildImage
|
6 |
-
|
7 |
-
from meme_generator import add_meme
|
8 |
-
from meme_generator.utils import save_gif
|
9 |
-
|
10 |
-
img_dir = Path(__file__).parent / "images"
|
11 |
-
|
12 |
-
|
13 |
-
def hammer(images: List[BuildImage], texts, args):
|
14 |
-
img = images[0].convert("RGBA").square()
|
15 |
-
# fmt: off
|
16 |
-
locs = [
|
17 |
-
(62, 143, 158, 113), (52, 177, 173, 105), (42, 192, 192, 92), (46, 182, 184, 100),
|
18 |
-
(54, 169, 174, 110), (69, 128, 144, 135), (65, 130, 152, 124),
|
19 |
-
]
|
20 |
-
# fmt: on
|
21 |
-
frames: List[IMG] = []
|
22 |
-
for i in range(7):
|
23 |
-
frame = BuildImage.open(img_dir / f"{i}.png")
|
24 |
-
x, y, w, h = locs[i]
|
25 |
-
frame.paste(img.resize((w, h)), (x, y), below=True)
|
26 |
-
frames.append(frame.image)
|
27 |
-
return save_gif(frames, 0.07)
|
28 |
-
|
29 |
-
|
30 |
-
add_meme("hammer", hammer, min_images=1, max_images=1, keywords=["锤"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cpp4App/Cpp4App/CDM/detect_text/ocr.py
DELETED
@@ -1,43 +0,0 @@
|
|
1 |
-
import cv2
|
2 |
-
import os
|
3 |
-
import requests
|
4 |
-
import json
|
5 |
-
from base64 import b64encode
|
6 |
-
import time
|
7 |
-
|
8 |
-
|
9 |
-
def Google_OCR_makeImageData(imgpath):
|
10 |
-
with open(imgpath, 'rb') as f:
|
11 |
-
ctxt = b64encode(f.read()).decode()
|
12 |
-
img_req = {
|
13 |
-
'image': {
|
14 |
-
'content': ctxt
|
15 |
-
},
|
16 |
-
'features': [{
|
17 |
-
'type': 'DOCUMENT_TEXT_DETECTION',
|
18 |
-
# 'type': 'TEXT_DETECTION',
|
19 |
-
'maxResults': 1
|
20 |
-
}]
|
21 |
-
}
|
22 |
-
return json.dumps({"requests": img_req}).encode()
|
23 |
-
|
24 |
-
|
25 |
-
def ocr_detection_google(imgpath):
|
26 |
-
# start = time.clock()
|
27 |
-
url = 'https://vision.googleapis.com/v1/images:annotate'
|
28 |
-
# api_key = 'AIzaSyDUc4iOUASJQYkVwSomIArTKhE2C6bHK8U' # *** Replace with your own Key ***
|
29 |
-
api_key = os.environ.get('google_ocr')
|
30 |
-
|
31 |
-
imgdata = Google_OCR_makeImageData(imgpath)
|
32 |
-
response = requests.post(url,
|
33 |
-
data=imgdata,
|
34 |
-
params={'key': api_key},
|
35 |
-
headers={'Content_Type': 'application/json'})
|
36 |
-
# print('*** Text Detection Time Taken:%.3fs ***' % (time.clock() - start))
|
37 |
-
print("*** Please replace the Google OCR key at detect_text/ocr.py line 28 with your own (apply in https://cloud.google.com/vision) ***")
|
38 |
-
print('response.json(): ', response.json())
|
39 |
-
if response.json()['responses'] == [{}]:
|
40 |
-
# No Text
|
41 |
-
return None
|
42 |
-
else:
|
43 |
-
return response.json()['responses'][0]['textAnnotations'][1:]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/cd.py
DELETED
@@ -1,390 +0,0 @@
|
|
1 |
-
import importlib
|
2 |
-
from codecs import IncrementalDecoder
|
3 |
-
from collections import Counter
|
4 |
-
from functools import lru_cache
|
5 |
-
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
|
6 |
-
|
7 |
-
from .assets import FREQUENCIES
|
8 |
-
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
|
9 |
-
from .md import is_suspiciously_successive_range
|
10 |
-
from .models import CoherenceMatches
|
11 |
-
from .utils import (
|
12 |
-
is_accentuated,
|
13 |
-
is_latin,
|
14 |
-
is_multi_byte_encoding,
|
15 |
-
is_unicode_range_secondary,
|
16 |
-
unicode_range,
|
17 |
-
)
|
18 |
-
|
19 |
-
|
20 |
-
def encoding_unicode_range(iana_name: str) -> List[str]:
|
21 |
-
"""
|
22 |
-
Return associated unicode ranges in a single byte code page.
|
23 |
-
"""
|
24 |
-
if is_multi_byte_encoding(iana_name):
|
25 |
-
raise IOError("Function not supported on multi-byte code page")
|
26 |
-
|
27 |
-
decoder = importlib.import_module(
|
28 |
-
"encodings.{}".format(iana_name)
|
29 |
-
).IncrementalDecoder
|
30 |
-
|
31 |
-
p: IncrementalDecoder = decoder(errors="ignore")
|
32 |
-
seen_ranges: Dict[str, int] = {}
|
33 |
-
character_count: int = 0
|
34 |
-
|
35 |
-
for i in range(0x40, 0xFF):
|
36 |
-
chunk: str = p.decode(bytes([i]))
|
37 |
-
|
38 |
-
if chunk:
|
39 |
-
character_range: Optional[str] = unicode_range(chunk)
|
40 |
-
|
41 |
-
if character_range is None:
|
42 |
-
continue
|
43 |
-
|
44 |
-
if is_unicode_range_secondary(character_range) is False:
|
45 |
-
if character_range not in seen_ranges:
|
46 |
-
seen_ranges[character_range] = 0
|
47 |
-
seen_ranges[character_range] += 1
|
48 |
-
character_count += 1
|
49 |
-
|
50 |
-
return sorted(
|
51 |
-
[
|
52 |
-
character_range
|
53 |
-
for character_range in seen_ranges
|
54 |
-
if seen_ranges[character_range] / character_count >= 0.15
|
55 |
-
]
|
56 |
-
)
|
57 |
-
|
58 |
-
|
59 |
-
def unicode_range_languages(primary_range: str) -> List[str]:
|
60 |
-
"""
|
61 |
-
Return inferred languages used with a unicode range.
|
62 |
-
"""
|
63 |
-
languages: List[str] = []
|
64 |
-
|
65 |
-
for language, characters in FREQUENCIES.items():
|
66 |
-
for character in characters:
|
67 |
-
if unicode_range(character) == primary_range:
|
68 |
-
languages.append(language)
|
69 |
-
break
|
70 |
-
|
71 |
-
return languages
|
72 |
-
|
73 |
-
|
74 |
-
@lru_cache()
|
75 |
-
def encoding_languages(iana_name: str) -> List[str]:
|
76 |
-
"""
|
77 |
-
Single-byte encoding language association. Some code page are heavily linked to particular language(s).
|
78 |
-
This function does the correspondence.
|
79 |
-
"""
|
80 |
-
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
|
81 |
-
primary_range: Optional[str] = None
|
82 |
-
|
83 |
-
for specified_range in unicode_ranges:
|
84 |
-
if "Latin" not in specified_range:
|
85 |
-
primary_range = specified_range
|
86 |
-
break
|
87 |
-
|
88 |
-
if primary_range is None:
|
89 |
-
return ["Latin Based"]
|
90 |
-
|
91 |
-
return unicode_range_languages(primary_range)
|
92 |
-
|
93 |
-
|
94 |
-
@lru_cache()
|
95 |
-
def mb_encoding_languages(iana_name: str) -> List[str]:
|
96 |
-
"""
|
97 |
-
Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
|
98 |
-
This function does the correspondence.
|
99 |
-
"""
|
100 |
-
if (
|
101 |
-
iana_name.startswith("shift_")
|
102 |
-
or iana_name.startswith("iso2022_jp")
|
103 |
-
or iana_name.startswith("euc_j")
|
104 |
-
or iana_name == "cp932"
|
105 |
-
):
|
106 |
-
return ["Japanese"]
|
107 |
-
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
|
108 |
-
return ["Chinese"]
|
109 |
-
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
|
110 |
-
return ["Korean"]
|
111 |
-
|
112 |
-
return []
|
113 |
-
|
114 |
-
|
115 |
-
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
|
116 |
-
def get_target_features(language: str) -> Tuple[bool, bool]:
|
117 |
-
"""
|
118 |
-
Determine main aspects from a supported language if it contains accents and if is pure Latin.
|
119 |
-
"""
|
120 |
-
target_have_accents: bool = False
|
121 |
-
target_pure_latin: bool = True
|
122 |
-
|
123 |
-
for character in FREQUENCIES[language]:
|
124 |
-
if not target_have_accents and is_accentuated(character):
|
125 |
-
target_have_accents = True
|
126 |
-
if target_pure_latin and is_latin(character) is False:
|
127 |
-
target_pure_latin = False
|
128 |
-
|
129 |
-
return target_have_accents, target_pure_latin
|
130 |
-
|
131 |
-
|
132 |
-
def alphabet_languages(
|
133 |
-
characters: List[str], ignore_non_latin: bool = False
|
134 |
-
) -> List[str]:
|
135 |
-
"""
|
136 |
-
Return associated languages associated to given characters.
|
137 |
-
"""
|
138 |
-
languages: List[Tuple[str, float]] = []
|
139 |
-
|
140 |
-
source_have_accents = any(is_accentuated(character) for character in characters)
|
141 |
-
|
142 |
-
for language, language_characters in FREQUENCIES.items():
|
143 |
-
target_have_accents, target_pure_latin = get_target_features(language)
|
144 |
-
|
145 |
-
if ignore_non_latin and target_pure_latin is False:
|
146 |
-
continue
|
147 |
-
|
148 |
-
if target_have_accents is False and source_have_accents:
|
149 |
-
continue
|
150 |
-
|
151 |
-
character_count: int = len(language_characters)
|
152 |
-
|
153 |
-
character_match_count: int = len(
|
154 |
-
[c for c in language_characters if c in characters]
|
155 |
-
)
|
156 |
-
|
157 |
-
ratio: float = character_match_count / character_count
|
158 |
-
|
159 |
-
if ratio >= 0.2:
|
160 |
-
languages.append((language, ratio))
|
161 |
-
|
162 |
-
languages = sorted(languages, key=lambda x: x[1], reverse=True)
|
163 |
-
|
164 |
-
return [compatible_language[0] for compatible_language in languages]
|
165 |
-
|
166 |
-
|
167 |
-
def characters_popularity_compare(
|
168 |
-
language: str, ordered_characters: List[str]
|
169 |
-
) -> float:
|
170 |
-
"""
|
171 |
-
Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
|
172 |
-
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
|
173 |
-
Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
|
174 |
-
"""
|
175 |
-
if language not in FREQUENCIES:
|
176 |
-
raise ValueError("{} not available".format(language))
|
177 |
-
|
178 |
-
character_approved_count: int = 0
|
179 |
-
FREQUENCIES_language_set = set(FREQUENCIES[language])
|
180 |
-
|
181 |
-
ordered_characters_count: int = len(ordered_characters)
|
182 |
-
target_language_characters_count: int = len(FREQUENCIES[language])
|
183 |
-
|
184 |
-
large_alphabet: bool = target_language_characters_count > 26
|
185 |
-
|
186 |
-
for character, character_rank in zip(
|
187 |
-
ordered_characters, range(0, ordered_characters_count)
|
188 |
-
):
|
189 |
-
if character not in FREQUENCIES_language_set:
|
190 |
-
continue
|
191 |
-
|
192 |
-
character_rank_in_language: int = FREQUENCIES[language].index(character)
|
193 |
-
expected_projection_ratio: float = (
|
194 |
-
target_language_characters_count / ordered_characters_count
|
195 |
-
)
|
196 |
-
character_rank_projection: int = int(character_rank * expected_projection_ratio)
|
197 |
-
|
198 |
-
if (
|
199 |
-
large_alphabet is False
|
200 |
-
and abs(character_rank_projection - character_rank_in_language) > 4
|
201 |
-
):
|
202 |
-
continue
|
203 |
-
|
204 |
-
if (
|
205 |
-
large_alphabet is True
|
206 |
-
and abs(character_rank_projection - character_rank_in_language)
|
207 |
-
< target_language_characters_count / 3
|
208 |
-
):
|
209 |
-
character_approved_count += 1
|
210 |
-
continue
|
211 |
-
|
212 |
-
characters_before_source: List[str] = FREQUENCIES[language][
|
213 |
-
0:character_rank_in_language
|
214 |
-
]
|
215 |
-
characters_after_source: List[str] = FREQUENCIES[language][
|
216 |
-
character_rank_in_language:
|
217 |
-
]
|
218 |
-
characters_before: List[str] = ordered_characters[0:character_rank]
|
219 |
-
characters_after: List[str] = ordered_characters[character_rank:]
|
220 |
-
|
221 |
-
before_match_count: int = len(
|
222 |
-
set(characters_before) & set(characters_before_source)
|
223 |
-
)
|
224 |
-
|
225 |
-
after_match_count: int = len(
|
226 |
-
set(characters_after) & set(characters_after_source)
|
227 |
-
)
|
228 |
-
|
229 |
-
if len(characters_before_source) == 0 and before_match_count <= 4:
|
230 |
-
character_approved_count += 1
|
231 |
-
continue
|
232 |
-
|
233 |
-
if len(characters_after_source) == 0 and after_match_count <= 4:
|
234 |
-
character_approved_count += 1
|
235 |
-
continue
|
236 |
-
|
237 |
-
if (
|
238 |
-
before_match_count / len(characters_before_source) >= 0.4
|
239 |
-
or after_match_count / len(characters_after_source) >= 0.4
|
240 |
-
):
|
241 |
-
character_approved_count += 1
|
242 |
-
continue
|
243 |
-
|
244 |
-
return character_approved_count / len(ordered_characters)
|
245 |
-
|
246 |
-
|
247 |
-
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
|
248 |
-
"""
|
249 |
-
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
|
250 |
-
Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
|
251 |
-
One containing the latin letters and the other hebrew.
|
252 |
-
"""
|
253 |
-
layers: Dict[str, str] = {}
|
254 |
-
|
255 |
-
for character in decoded_sequence:
|
256 |
-
if character.isalpha() is False:
|
257 |
-
continue
|
258 |
-
|
259 |
-
character_range: Optional[str] = unicode_range(character)
|
260 |
-
|
261 |
-
if character_range is None:
|
262 |
-
continue
|
263 |
-
|
264 |
-
layer_target_range: Optional[str] = None
|
265 |
-
|
266 |
-
for discovered_range in layers:
|
267 |
-
if (
|
268 |
-
is_suspiciously_successive_range(discovered_range, character_range)
|
269 |
-
is False
|
270 |
-
):
|
271 |
-
layer_target_range = discovered_range
|
272 |
-
break
|
273 |
-
|
274 |
-
if layer_target_range is None:
|
275 |
-
layer_target_range = character_range
|
276 |
-
|
277 |
-
if layer_target_range not in layers:
|
278 |
-
layers[layer_target_range] = character.lower()
|
279 |
-
continue
|
280 |
-
|
281 |
-
layers[layer_target_range] += character.lower()
|
282 |
-
|
283 |
-
return list(layers.values())
|
284 |
-
|
285 |
-
|
286 |
-
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
|
287 |
-
"""
|
288 |
-
This function merge results previously given by the function coherence_ratio.
|
289 |
-
The return type is the same as coherence_ratio.
|
290 |
-
"""
|
291 |
-
per_language_ratios: Dict[str, List[float]] = {}
|
292 |
-
for result in results:
|
293 |
-
for sub_result in result:
|
294 |
-
language, ratio = sub_result
|
295 |
-
if language not in per_language_ratios:
|
296 |
-
per_language_ratios[language] = [ratio]
|
297 |
-
continue
|
298 |
-
per_language_ratios[language].append(ratio)
|
299 |
-
|
300 |
-
merge = [
|
301 |
-
(
|
302 |
-
language,
|
303 |
-
round(
|
304 |
-
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
|
305 |
-
4,
|
306 |
-
),
|
307 |
-
)
|
308 |
-
for language in per_language_ratios
|
309 |
-
]
|
310 |
-
|
311 |
-
return sorted(merge, key=lambda x: x[1], reverse=True)
|
312 |
-
|
313 |
-
|
314 |
-
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
|
315 |
-
"""
|
316 |
-
We shall NOT return "English—" in CoherenceMatches because it is an alternative
|
317 |
-
of "English". This function only keeps the best match and remove the em-dash in it.
|
318 |
-
"""
|
319 |
-
index_results: Dict[str, List[float]] = dict()
|
320 |
-
|
321 |
-
for result in results:
|
322 |
-
language, ratio = result
|
323 |
-
no_em_name: str = language.replace("—", "")
|
324 |
-
|
325 |
-
if no_em_name not in index_results:
|
326 |
-
index_results[no_em_name] = []
|
327 |
-
|
328 |
-
index_results[no_em_name].append(ratio)
|
329 |
-
|
330 |
-
if any(len(index_results[e]) > 1 for e in index_results):
|
331 |
-
filtered_results: CoherenceMatches = []
|
332 |
-
|
333 |
-
for language in index_results:
|
334 |
-
filtered_results.append((language, max(index_results[language])))
|
335 |
-
|
336 |
-
return filtered_results
|
337 |
-
|
338 |
-
return results
|
339 |
-
|
340 |
-
|
341 |
-
@lru_cache(maxsize=2048)
|
342 |
-
def coherence_ratio(
|
343 |
-
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
|
344 |
-
) -> CoherenceMatches:
|
345 |
-
"""
|
346 |
-
Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
|
347 |
-
A layer = Character extraction by alphabets/ranges.
|
348 |
-
"""
|
349 |
-
|
350 |
-
results: List[Tuple[str, float]] = []
|
351 |
-
ignore_non_latin: bool = False
|
352 |
-
|
353 |
-
sufficient_match_count: int = 0
|
354 |
-
|
355 |
-
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
|
356 |
-
if "Latin Based" in lg_inclusion_list:
|
357 |
-
ignore_non_latin = True
|
358 |
-
lg_inclusion_list.remove("Latin Based")
|
359 |
-
|
360 |
-
for layer in alpha_unicode_split(decoded_sequence):
|
361 |
-
sequence_frequencies: TypeCounter[str] = Counter(layer)
|
362 |
-
most_common = sequence_frequencies.most_common()
|
363 |
-
|
364 |
-
character_count: int = sum(o for c, o in most_common)
|
365 |
-
|
366 |
-
if character_count <= TOO_SMALL_SEQUENCE:
|
367 |
-
continue
|
368 |
-
|
369 |
-
popular_character_ordered: List[str] = [c for c, o in most_common]
|
370 |
-
|
371 |
-
for language in lg_inclusion_list or alphabet_languages(
|
372 |
-
popular_character_ordered, ignore_non_latin
|
373 |
-
):
|
374 |
-
ratio: float = characters_popularity_compare(
|
375 |
-
language, popular_character_ordered
|
376 |
-
)
|
377 |
-
|
378 |
-
if ratio < threshold:
|
379 |
-
continue
|
380 |
-
elif ratio >= 0.8:
|
381 |
-
sufficient_match_count += 1
|
382 |
-
|
383 |
-
results.append((language, round(ratio, 4)))
|
384 |
-
|
385 |
-
if sufficient_match_count >= 3:
|
386 |
-
break
|
387 |
-
|
388 |
-
return sorted(
|
389 |
-
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
|
390 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_a_n_k_r.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
from .otBase import BaseTTXConverter
|
2 |
-
|
3 |
-
|
4 |
-
class table__a_n_k_r(BaseTTXConverter):
|
5 |
-
"""
|
6 |
-
The anchor point table provides a way to define anchor points.
|
7 |
-
These are points within the coordinate space of a given glyph,
|
8 |
-
independent of the control points used to render the glyph.
|
9 |
-
Anchor points are used in conjunction with the 'kerx' table.
|
10 |
-
|
11 |
-
See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
|
12 |
-
"""
|
13 |
-
|
14 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-1cf9680f.js
DELETED
The diff for this file is too large to render.
See raw diff
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/__init__.py
DELETED
@@ -1,139 +0,0 @@
|
|
1 |
-
from ._api import request, stream
|
2 |
-
from ._async import (
|
3 |
-
AsyncConnectionInterface,
|
4 |
-
AsyncConnectionPool,
|
5 |
-
AsyncHTTP2Connection,
|
6 |
-
AsyncHTTP11Connection,
|
7 |
-
AsyncHTTPConnection,
|
8 |
-
AsyncHTTPProxy,
|
9 |
-
AsyncSOCKSProxy,
|
10 |
-
)
|
11 |
-
from ._backends.base import (
|
12 |
-
SOCKET_OPTION,
|
13 |
-
AsyncNetworkBackend,
|
14 |
-
AsyncNetworkStream,
|
15 |
-
NetworkBackend,
|
16 |
-
NetworkStream,
|
17 |
-
)
|
18 |
-
from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream
|
19 |
-
from ._backends.sync import SyncBackend
|
20 |
-
from ._exceptions import (
|
21 |
-
ConnectError,
|
22 |
-
ConnectionNotAvailable,
|
23 |
-
ConnectTimeout,
|
24 |
-
LocalProtocolError,
|
25 |
-
NetworkError,
|
26 |
-
PoolTimeout,
|
27 |
-
ProtocolError,
|
28 |
-
ProxyError,
|
29 |
-
ReadError,
|
30 |
-
ReadTimeout,
|
31 |
-
RemoteProtocolError,
|
32 |
-
TimeoutException,
|
33 |
-
UnsupportedProtocol,
|
34 |
-
WriteError,
|
35 |
-
WriteTimeout,
|
36 |
-
)
|
37 |
-
from ._models import URL, Origin, Request, Response
|
38 |
-
from ._ssl import default_ssl_context
|
39 |
-
from ._sync import (
|
40 |
-
ConnectionInterface,
|
41 |
-
ConnectionPool,
|
42 |
-
HTTP2Connection,
|
43 |
-
HTTP11Connection,
|
44 |
-
HTTPConnection,
|
45 |
-
HTTPProxy,
|
46 |
-
SOCKSProxy,
|
47 |
-
)
|
48 |
-
|
49 |
-
# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed.
|
50 |
-
try:
|
51 |
-
from ._backends.anyio import AnyIOBackend
|
52 |
-
except ImportError: # pragma: nocover
|
53 |
-
|
54 |
-
class AnyIOBackend: # type: ignore
|
55 |
-
def __init__(self, *args, **kwargs): # type: ignore
|
56 |
-
msg = (
|
57 |
-
"Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed."
|
58 |
-
)
|
59 |
-
raise RuntimeError(msg)
|
60 |
-
|
61 |
-
|
62 |
-
# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed.
|
63 |
-
try:
|
64 |
-
from ._backends.trio import TrioBackend
|
65 |
-
except ImportError: # pragma: nocover
|
66 |
-
|
67 |
-
class TrioBackend: # type: ignore
|
68 |
-
def __init__(self, *args, **kwargs): # type: ignore
|
69 |
-
msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed."
|
70 |
-
raise RuntimeError(msg)
|
71 |
-
|
72 |
-
|
73 |
-
__all__ = [
|
74 |
-
# top-level requests
|
75 |
-
"request",
|
76 |
-
"stream",
|
77 |
-
# models
|
78 |
-
"Origin",
|
79 |
-
"URL",
|
80 |
-
"Request",
|
81 |
-
"Response",
|
82 |
-
# async
|
83 |
-
"AsyncHTTPConnection",
|
84 |
-
"AsyncConnectionPool",
|
85 |
-
"AsyncHTTPProxy",
|
86 |
-
"AsyncHTTP11Connection",
|
87 |
-
"AsyncHTTP2Connection",
|
88 |
-
"AsyncConnectionInterface",
|
89 |
-
"AsyncSOCKSProxy",
|
90 |
-
# sync
|
91 |
-
"HTTPConnection",
|
92 |
-
"ConnectionPool",
|
93 |
-
"HTTPProxy",
|
94 |
-
"HTTP11Connection",
|
95 |
-
"HTTP2Connection",
|
96 |
-
"ConnectionInterface",
|
97 |
-
"SOCKSProxy",
|
98 |
-
# network backends, implementations
|
99 |
-
"SyncBackend",
|
100 |
-
"AnyIOBackend",
|
101 |
-
"TrioBackend",
|
102 |
-
# network backends, mock implementations
|
103 |
-
"AsyncMockBackend",
|
104 |
-
"AsyncMockStream",
|
105 |
-
"MockBackend",
|
106 |
-
"MockStream",
|
107 |
-
# network backends, interface
|
108 |
-
"AsyncNetworkStream",
|
109 |
-
"AsyncNetworkBackend",
|
110 |
-
"NetworkStream",
|
111 |
-
"NetworkBackend",
|
112 |
-
# util
|
113 |
-
"default_ssl_context",
|
114 |
-
"SOCKET_OPTION",
|
115 |
-
# exceptions
|
116 |
-
"ConnectionNotAvailable",
|
117 |
-
"ProxyError",
|
118 |
-
"ProtocolError",
|
119 |
-
"LocalProtocolError",
|
120 |
-
"RemoteProtocolError",
|
121 |
-
"UnsupportedProtocol",
|
122 |
-
"TimeoutException",
|
123 |
-
"PoolTimeout",
|
124 |
-
"ConnectTimeout",
|
125 |
-
"ReadTimeout",
|
126 |
-
"WriteTimeout",
|
127 |
-
"NetworkError",
|
128 |
-
"ConnectError",
|
129 |
-
"ReadError",
|
130 |
-
"WriteError",
|
131 |
-
]
|
132 |
-
|
133 |
-
__version__ = "0.17.3"
|
134 |
-
|
135 |
-
|
136 |
-
__locals = locals()
|
137 |
-
for __name in __all__:
|
138 |
-
if not __name.startswith("__"):
|
139 |
-
setattr(__locals[__name], "__module__", "httpcore") # noqa
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/_cli_utils.py
DELETED
@@ -1,63 +0,0 @@
|
|
1 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
"""Contains a utility for good-looking prints."""
|
15 |
-
import os
|
16 |
-
from typing import List, Union
|
17 |
-
|
18 |
-
|
19 |
-
class ANSI:
|
20 |
-
"""
|
21 |
-
Helper for en.wikipedia.org/wiki/ANSI_escape_code
|
22 |
-
"""
|
23 |
-
|
24 |
-
_bold = "\u001b[1m"
|
25 |
-
_gray = "\u001b[90m"
|
26 |
-
_red = "\u001b[31m"
|
27 |
-
_reset = "\u001b[0m"
|
28 |
-
|
29 |
-
@classmethod
|
30 |
-
def bold(cls, s: str) -> str:
|
31 |
-
return cls._format(s, cls._bold)
|
32 |
-
|
33 |
-
@classmethod
|
34 |
-
def gray(cls, s: str) -> str:
|
35 |
-
return cls._format(s, cls._gray)
|
36 |
-
|
37 |
-
@classmethod
|
38 |
-
def red(cls, s: str) -> str:
|
39 |
-
return cls._format(s, cls._bold + cls._red)
|
40 |
-
|
41 |
-
@classmethod
|
42 |
-
def _format(cls, s: str, code: str) -> str:
|
43 |
-
if os.environ.get("NO_COLOR"):
|
44 |
-
# See https://no-color.org/
|
45 |
-
return s
|
46 |
-
return f"{code}{s}{cls._reset}"
|
47 |
-
|
48 |
-
|
49 |
-
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
|
50 |
-
"""
|
51 |
-
Inspired by:
|
52 |
-
|
53 |
-
- stackoverflow.com/a/8356620/593036
|
54 |
-
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
|
55 |
-
"""
|
56 |
-
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
|
57 |
-
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
|
58 |
-
lines = []
|
59 |
-
lines.append(row_format.format(*headers))
|
60 |
-
lines.append(row_format.format(*["-" * w for w in col_widths]))
|
61 |
-
for row in rows:
|
62 |
-
lines.append(row_format.format(*row))
|
63 |
-
return "\n".join(lines)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|