Commit
·
4de1047
1
Parent(s):
ec31aaa
Update parquet files (step 6 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py +0 -70
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md +0 -106
- spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md +0 -104
- spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md +0 -20
- spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md +0 -13
- spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md +0 -19
- spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md +0 -86
- spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py +0 -259
- spaces/801artistry/RVC801/lib/infer_pack/models_dml.py +0 -1124
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py +0 -0
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py +0 -196
- spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py +0 -24
- spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md +0 -13
- spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css +0 -38
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js +0 -2
- spaces/Ajit025/Text_to_Image_conversion/text_to_image.py +0 -51
- spaces/AkitoP/umamusume_bert_vits2/data_utils.py +0 -406
- spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md +0 -12
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/谷歌检索小助手.py +0 -106
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex +0 -18
- spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py +0 -125
- spaces/Andres99/Tune-A-Video-Training-UI/uploader.py +0 -44
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md +0 -427
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py +0 -82
- spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py +0 -11
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py +0 -2
- spaces/AnimalEquality/chatbot/_proc/styles.css +0 -37
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py +0 -27
- spaces/Ank0X0/Image-Upscaling-Playground/app.py +0 -85
- spaces/Annelisseishere/Streamlit_GPT/README.md +0 -12
- spaces/Asahi402/anime-remove-background/README.md +0 -14
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py +0 -189
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py +0 -600
- spaces/Azurro/APT-1B-Base/README.md +0 -13
- spaces/BIOML-SVM/SVM/proteinbind_new.py +0 -283
- spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py +0 -118
- spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md +0 -121
- spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md +0 -71
- spaces/Benson/text-generation/Examples/Chess King Mod Apk.md +0 -91
- spaces/Benson/text-generation/Examples/Descargar 8 Bola Piscina Herramienta Apk.md +0 -57
- spaces/Benson/text-generation/Examples/Descargar Apk Mod Cazador Asesino 2.md +0 -56
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py +0 -0
spaces/101-5/gpt4free/g4f/.v1/gpt4free/italygpt2/__init__.py
DELETED
@@ -1,70 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
import requests
|
3 |
-
import hashlib
|
4 |
-
from fake_useragent import UserAgent
|
5 |
-
class Account:
|
6 |
-
@staticmethod
|
7 |
-
def create():
|
8 |
-
r=requests.get("https://italygpt.it/",headers=Account._header)
|
9 |
-
f=r.text
|
10 |
-
tid=re.search('<input type=\"hidden\" name=\"next_id\" id=\"next_id\" value=\"(\w+)\">',f).group(1)
|
11 |
-
if len(tid)==0:
|
12 |
-
raise RuntimeError("NetWorkError:failed to get id.")
|
13 |
-
else:
|
14 |
-
Account._tid=tid
|
15 |
-
Account._raw="[]"
|
16 |
-
return Account
|
17 |
-
def next(next_id:str)->str:
|
18 |
-
Account._tid=next_id
|
19 |
-
return Account._tid
|
20 |
-
def get()->str:
|
21 |
-
return Account._tid
|
22 |
-
_header={
|
23 |
-
"Host": "italygpt.it",
|
24 |
-
"Referer":"https://italygpt.it/",
|
25 |
-
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",#UserAgent().random,
|
26 |
-
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
|
27 |
-
"Accept-Language":"zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
|
28 |
-
"Upgrade-Insecure-Requests":"1",
|
29 |
-
"Sec-Fetch-Dest":"document",
|
30 |
-
"Sec-Fetch-Mode":"navigate",
|
31 |
-
"Sec-Fetch-Site":"none",
|
32 |
-
"Sec-Fetch-User":"?1",
|
33 |
-
"Connection":"keep-alive",
|
34 |
-
"Alt-Used":"italygpt.it",
|
35 |
-
"Pragma":"no-cache",
|
36 |
-
"Cache-Control":"no-cache",
|
37 |
-
"TE": "trailers"
|
38 |
-
}
|
39 |
-
def settraw(raws:str):
|
40 |
-
Account._raw=raws
|
41 |
-
return Account._raw
|
42 |
-
def gettraw():
|
43 |
-
return Account._raw
|
44 |
-
|
45 |
-
class Completion:
|
46 |
-
@staticmethod
|
47 |
-
def create(
|
48 |
-
account_data,
|
49 |
-
prompt: str,
|
50 |
-
message=False
|
51 |
-
):
|
52 |
-
param={
|
53 |
-
"prompt":prompt.replace(" ","+"),
|
54 |
-
"creative":"off",
|
55 |
-
"internet":"false",
|
56 |
-
"detailed":"off",
|
57 |
-
"current_id":"0",
|
58 |
-
"code":"",
|
59 |
-
"gpt4":"false",
|
60 |
-
"raw_messages":account_data.gettraw(),
|
61 |
-
"hash":hashlib.sha256(account_data.get().encode()).hexdigest()
|
62 |
-
}
|
63 |
-
if(message):
|
64 |
-
param["raw_messages"]=str(message)
|
65 |
-
r = requests.get("https://italygpt.it/question",headers=account_data._header,params=param,stream=True)
|
66 |
-
account_data.next(r.headers["Next_id"])
|
67 |
-
account_data.settraw(r.headers["Raw_messages"])
|
68 |
-
for chunk in r.iter_content(chunk_size=None):
|
69 |
-
r.raise_for_status()
|
70 |
-
yield chunk.decode()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dragonball Z Raging Blast 2 PC.rar What Makes This Game So Awesome and How to Get It.md
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>What is Realtek ATI HDMI Audio Device 2-70 Crack?</h1>
|
3 |
-
<p>If you want to enjoy high-quality sound from your PC's HDMI port, you need a reliable audio driver that can communicate with your hardware and software. One of the most popular audio drivers for HDMI devices is <strong>Realtek ATI HDMI Audio Device Driver</strong>, which supports all of Realtek HD Audio Codec.</p>
|
4 |
-
<p>However, downloading and installing the official version of this driver may not be enough for some users who want to unlock more features and performance. That's why some people look for a <strong>crack version</strong> of this driver, which is a modified or hacked version that bypasses the license verification and activation process.</p>
|
5 |
-
<h2>Realtek ATI HDMI Audio Device 2-70 Crack</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://byltly.com/2uKxWv">https://byltly.com/2uKxWv</a></b></p><br /><br />
|
6 |
-
<p>In this article, we will tell you everything you need to know about <strong>Realtek ATI HDMI Audio Device 2-70 Crack</strong>, which is one of the latest versions of this driver package. We will explain why you may need it, how to download and install it, how to fix common issues with it, and what are some alternatives to it.</p>
|
7 |
-
<h2>Why do you need Realtek ATI HDMI Audio Device 2-70 Crack?</h2>
|
8 |
-
<p>There are several reasons why you may want to use a crack version of Realtek ATI HDMI Audio Device Driver instead of the official one. Here are some of them:</p>
|
9 |
-
<ul>
|
10 |
-
<li><strong>It is free.</strong> The official version of this driver requires a license fee, which may not be affordable for some users. The crack version, on the other hand, is available for free on various websites and forums.</li>
|
11 |
-
<li><strong>It has more features.</strong> The official version of this driver may have some limitations or restrictions on certain functions or settings. The crack version, on the other hand, may have more options and customizations that can enhance your audio experience.</li>
|
12 |
-
<li><strong>It has better performance.</strong> The official version of this driver may have some bugs or glitches that can affect your audio quality or stability. The crack version, on the other hand, may have fixed or improved some issues that can make your audio smoother and clearer.</li>
|
13 |
-
</ul>
|
14 |
-
<p>Of course, using a crack version also comes with some risks and drawbacks, such as:</p>
|
15 |
-
<ul>
|
16 |
-
<li><strong>It is illegal.</strong> The crack version violates the terms and conditions of the original software developer, which can result in legal consequences or penalties. You may also be infringing on the intellectual property rights of the software owner.</li>
|
17 |
-
<li><strong>It is unsafe.</strong> The crack version may contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. You may also expose yourself to cyberattacks or identity theft.</li>
|
18 |
-
<li><strong>It is unreliable.</strong> The crack version may not work properly or at all with your hardware or software configuration. You may also encounter compatibility issues or conflicts with other drivers or programs. You may also lose access to updates or support from the original software developer.</li>
|
19 |
-
</ul>
|
20 |
-
<p>Therefore, before you decide to use a crack version of Realtek ATI HDMI Audio Device Driver, you should weigh the pros and cons carefully and be aware of the potential consequences.</p>
|
21 |
-
<h3>How to download and install Realtek ATI HDMI Audio Device 2-70 Crack?</h3>
|
22 |
-
<p>If you still want to try Realtek ATI HDMI Audio Device 2-70 Crack, here are the steps you need to follow:</p>
|
23 |
-
<ol>
|
24 |
-
<li><strong>Download the crack file.</strong> You can find various sources for downloading this file on the internet, such as torrent sites, file-sharing platforms, or online forums. However, be careful not to download any fake or malicious files that can harm your computer. You should also scan any file you download with an antivirus program before opening it.</li>
|
25 |
-
<li><strong>Extract the crack file.</strong> After downloading the file, you need to extract it using a program like WinRAR or 7-Zip. You should see a folder containing several files, such as setup.exe, readme.txt, crack.dll, etc.</li>
|
26 |
-
<li><strong>Run the setup file.</strong> Double-click on the setup.exe file to launch the installation wizard. Follow the instructions on the screen to install the driver package. You may need to restart your computer after the installation is complete.</li>
|
27 |
-
<li><strong>Copy and paste the crack file.</strong> Locate the crack.dll file in the folder you extracted earlier. Copy this file and paste it into the installation directory of Realtek ATI HDMI Audio Device Driver. This is usually located in C:\Program Files\Realtek\Audio\HDA\. You may need to overwrite or replace an existing file with the same name.</li>
|
28 |
-
<li><strong>Enjoy your cracked driver.</strong> You have successfully installed Realtek ATI HDMI Audio Device 2-70 Crack on your computer. You can now access more features and settings from your audio device manager or control panel.</li>
|
29 |
-
</ol>
|
30 |
-
<h4>How to fix common issues with Realtek ATI HDMI Audio Device 2-70 Crack?</h4>
|
31 |
-
<p>Sometimes, you may encounter some problems or errors when using Realtek ATI HDMI Audio Device 2-70 Crack. Here are some tips and tricks on how to troubleshoot them:</p>
|
32 |
-
<ul>
|
33 |
-
<li><strong>No sound output from HDMI device.</strong> This may happen if your HDMI device is not detected by your computer or if your audio settings are incorrect. To fix this issue, you can try these solutions: <ul>
|
34 |
-
<li>Check if your HDMI cable is properly connected between your PC and your monitor or TV.</li>
|
35 |
-
<li>Check if your HDMI device is turned on and set as the default playback device in your sound settings.</li>
|
36 |
-
<li>Check if your audio driver is up-to-date and compatible with your operating system and hardware configuration.</li>
|
37 |
-
<li>Check if there are any conflicts or interferences with other drivers or programs that may affect your audio output.</li>
|
38 |
-
</ul></li>
|
39 |
-
<li><strong>Poor sound quality from HDMI device.</strong> This may happen if your audio settings are not optimal for your HDMI device or if there are any background noises or distortions. To fix this issue, you can try these solutions: <ul>
|
40 |
-
<li>Adjust your volume level and balance in your sound settings or control panel.</li>
|
41 |
-
<li>Select an appropriate sound mode or profile for your HDMI device in your audio device manager or control panel.</li>
|
42 |
-
<li>Tweak your equalizer settings or use a third-party software to enhance your sound quality.</li>
|
43 |
-
<li>Avoid placing any objects or devices that may cause interference near your HDMI device or cable.</li>
|
44 |
-
</ul></li>
|
45 |
-
<li><strong>HDMI device not recognized by audio driver.</strong> This may happen if your audio driver is corrupted or incompatible with your HDMI device. To fix this issue, you can try these solutions: <ul>
|
46 |
-
<li>Uninstall and reinstall your audio driver using a clean installation method.</li>
|
47 |
-
<li>Update your audio driver to the latest version available from the official website or a trusted source.</li>
|
48 |
-
<li>Contact customer support from Realtek or ATI for assistance or guidance on how to resolve this issue.</li>
|
49 |
-
</ul></li>
|
50 |
-
</ul>
|
51 |
-
<h2>What are the alternatives to Realtek ATI HDMI Audio Device 2-70 Crack?</h2>
|
52 |
-
Realtek ATI HDMI Audio Device 2-70 Crack, you may want to consider some other alternatives for HDMI audio drivers. Here are some of them:</p>
|
53 |
-
<h3>AMD High Definition Audio Device Driver</h3>
|
54 |
-
<p>If you have an AMD graphics card or chipset, you may want to use the AMD High Definition Audio Device Driver, which is designed to work with AMD HDMI devices. This driver supports various audio formats and features, such as Dolby TrueHD, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the AMD website or use the AMD Radeon Software to update it automatically.</p>
|
55 |
-
<p>How to download Realtek ATI HDMI Audio Device 2-70 Crack for free<br />
|
56 |
-
Realtek ATI HDMI Audio Device 2-70 Crack full version download<br />
|
57 |
-
Realtek ATI HDMI Audio Device 2-70 Crack serial key generator<br />
|
58 |
-
Realtek ATI HDMI Audio Device 2-70 Crack activation code<br />
|
59 |
-
Realtek ATI HDMI Audio Device 2-70 Crack license key<br />
|
60 |
-
Realtek ATI HDMI Audio Device 2-70 Crack patch<br />
|
61 |
-
Realtek ATI HDMI Audio Device 2-70 Crack torrent<br />
|
62 |
-
Realtek ATI HDMI Audio Device 2-70 Crack rar file<br />
|
63 |
-
Realtek ATI HDMI Audio Device 2-70 Crack zip file<br />
|
64 |
-
Realtek ATI HDMI Audio Device 2-70 Crack iso file<br />
|
65 |
-
Realtek ATI HDMI Audio Device 2-70 Crack setup file<br />
|
66 |
-
Realtek ATI HDMI Audio Device 2-70 Crack installer<br />
|
67 |
-
Realtek ATI HDMI Audio Device 2-70 Crack offline installer<br />
|
68 |
-
Realtek ATI HDMI Audio Device 2-70 Crack portable version<br />
|
69 |
-
Realtek ATI HDMI Audio Device 2-70 Crack latest version<br />
|
70 |
-
Realtek ATI HDMI Audio Device 2-70 Crack updated version<br />
|
71 |
-
Realtek ATI HDMI Audio Device 2-70 Crack review<br />
|
72 |
-
Realtek ATI HDMI Audio Device 2-70 Crack features<br />
|
73 |
-
Realtek ATI HDMI Audio Device 2-70 Crack benefits<br />
|
74 |
-
Realtek ATI HDMI Audio Device 2-70 Crack pros and cons<br />
|
75 |
-
Realtek ATI HDMI Audio Device 2-70 Crack comparison<br />
|
76 |
-
Realtek ATI HDMI Audio Device 2-70 Crack alternatives<br />
|
77 |
-
Realtek ATI HDMI Audio Device 2-70 Crack competitors<br />
|
78 |
-
Realtek ATI HDMI Audio Device 2-70 Crack compatibility<br />
|
79 |
-
Realtek ATI HDMI Audio Device 2-70 Crack system requirements<br />
|
80 |
-
Realtek ATI HDMI Audio Device 2-70 Crack troubleshooting<br />
|
81 |
-
Realtek ATI HDMI Audio Device 2-70 Crack error codes<br />
|
82 |
-
Realtek ATI HDMI Audio Device 2-70 Crack fix<br />
|
83 |
-
Realtek ATI HDMI Audio Device 2-70 Crack support<br />
|
84 |
-
Realtek ATI HDMI Audio Device 2-70 Crack customer service<br />
|
85 |
-
Realtek ATI HDMI Audio Device 2-70 Crack manual<br />
|
86 |
-
Realtek ATI HDMI Audio Device 2-70 Crack guide<br />
|
87 |
-
Realtek ATI HDMI Audio Device 2-70 Crack tutorial<br />
|
88 |
-
Realtek ATI HDMI Audio Device 2.7.0.1 Driver Download for Windows </p>
|
89 |
-
<h3>NVIDIA High Definition Audio Driver</h3>
|
90 |
-
<p>If you have an NVIDIA graphics card or chipset, you may want to use the NVIDIA High Definition Audio Driver, which is designed to work with NVIDIA HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the NVIDIA website or use the NVIDIA GeForce Experience to update it automatically.</p>
|
91 |
-
<h3>Intel High Definition Audio Driver</h3>
|
92 |
-
<p>If you have an Intel processor or chipset, you may want to use the Intel High Definition Audio Driver, which is designed to work with Intel HDMI devices. This driver supports various audio formats and features, such as Dolby Digital Plus, DTS-HD Master Audio, 7.1 surround sound, and more. You can download this driver from the Intel website or use the Intel Driver & Support Assistant to update it automatically.</p>
|
93 |
-
<h2>Conclusion</h2>
|
94 |
-
<p>In conclusion, Realtek ATI HDMI Audio Device 2-70 Crack is a crack version of a popular audio driver for HDMI devices that can offer more features and performance than the official version. However, it also comes with some risks and drawbacks that you should be aware of before using it. If you are looking for other options for HDMI audio drivers, you can try some of the alternatives we mentioned above.</p>
|
95 |
-
<p>We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
|
96 |
-
<h3>FAQs</h3>
|
97 |
-
<ul>
|
98 |
-
<li><strong>What is HDMI?</strong> HDMI stands for High-Definition Multimedia Interface, which is a standard for transmitting digital audio and video signals between devices, such as computers, monitors, TVs, speakers, etc.</li>
|
99 |
-
<li><strong>What is an audio driver?</strong> An audio driver is a software program that allows your computer to communicate with your audio device and enable its functions and features.</li>
|
100 |
-
<li><strong>What is a crack version?</strong> A crack version is a modified or hacked version of a software program that bypasses the license verification and activation process and allows you to use it for free or with more features.</li>
|
101 |
-
<li><strong>Is Realtek ATI HDMI Audio Device 2-70 Crack safe to use?</strong> No, it is not safe to use because it is illegal, unsafe, and unreliable. It may contain viruses or malware that can harm your computer or steal your personal information. It may also not work properly or at all with your hardware or software configuration. It may also expose you to legal consequences or penalties.</li>
|
102 |
-
<li><strong>How can I update my audio driver?</strong> You can update your audio driver by downloading the latest version from the official website or a trusted source and following the installation instructions. You can also use a software tool that can scan your computer and update your drivers automatically.</li>
|
103 |
-
</ul>
|
104 |
-
</p> 0a6ba089eb<br />
|
105 |
-
<br />
|
106 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/!EXCLUSIVE! Download Buku Ppdgj Iii Pdf Files.md
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Buku PPDGJ III PDF Files: A Complete Guide</h1>
|
3 |
-
|
4 |
-
<p>If you are looking for a reliable and comprehensive source of information on mental disorders, you may want to download buku ppdgj iii pdf files. Buku PPDGJ III is the Indonesian version of the Diagnostic and Statistical Manual of Mental Disorders (DSM), which is the most widely used classification system for mental disorders in the world. Buku PPDGJ III was published in 1993 by the World Health Organization (WHO) and the Indonesian Psychiatric Association (IPA), and it is based on the International Classification of Diseases (ICD-10).</p>
|
5 |
-
<h2>download buku ppdgj iii pdf files</h2><br /><p><b><b>DOWNLOAD</b> ✔✔✔ <a href="https://imgfil.com/2uxXyy">https://imgfil.com/2uxXyy</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<p>In this article, we will explain what buku ppdgj iii pdf files are, why they are useful, how to access and use them, and what benefits they offer for mental health professionals and students. We will also provide some tips on how to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference.</p>
|
8 |
-
|
9 |
-
<h2>What are Buku PPDGJ III PDF Files?</h2>
|
10 |
-
|
11 |
-
<p>Buku PPDGJ III PDF files are digital copies of the book PPDGJ III, which stands for Pedoman Penggolongan dan Diagnosis Gangguan Jiwa di Indonesia III (Guidelines for Classification and Diagnosis of Mental Disorders in Indonesia III). This book contains the official criteria and guidelines for diagnosing and classifying mental disorders in Indonesia, according to the international standards of WHO and IPA.</p>
|
12 |
-
|
13 |
-
<p>Buku PPDGJ III PDF files are available online for free download from various sources, such as Scribd, Doku, and Documents and E-books. You can also find them by searching for "download buku ppdgj iii pdf files" on Google or other search engines. The PDF files are usually around 9 MB in size and have about 170 pages.</p>
|
14 |
-
|
15 |
-
<h2>Why are Buku PPDGJ III PDF Files Useful?</h2>
|
16 |
-
|
17 |
-
<p>Buku PPDGJ III PDF files are useful for several reasons. First, they provide a comprehensive and updated overview of the current knowledge and practice of psychiatry in Indonesia. They cover a wide range of mental disorders, such as mood disorders, anxiety disorders, personality disorders, psychotic disorders, substance-related disorders, and more. They also include diagnostic criteria, clinical features, differential diagnosis, etiology, course, prognosis, treatment, and prevention of each disorder.</p>
|
18 |
-
|
19 |
-
<p>Second, they help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. By using buku ppdgj iii pdf files as a reference, mental health professionals can ensure that they are following the same criteria and guidelines as their colleagues and peers. This can improve the quality and consistency of mental health services and research in Indonesia.</p>
|
20 |
-
|
21 |
-
<p>Third, they facilitate communication and collaboration among mental health professionals across different settings and regions. By using buku ppdgj iii pdf files as a common language, mental health professionals can easily share information and opinions about their cases and clients. They can also compare and contrast their findings and outcomes with other professionals who use the same system.</p>
|
22 |
-
<p></p>
|
23 |
-
|
24 |
-
<h2>How to Access and Use Buku PPDGJ III PDF Files?</h2>
|
25 |
-
|
26 |
-
<p>To access and use buku ppdgj iii pdf files, you need to have a computer or a mobile device with an internet connection and a PDF reader software. You can download buku ppdgj iii pdf files from any of the sources mentioned above or from other websites that offer them. You can also scan or photocopy the printed version of the book if you have access to it.</p>
|
27 |
-
|
28 |
-
<p>To use buku ppdgj iii pdf files effectively, you need to have some basic knowledge of psychiatry and mental disorders. You also need to be familiar with the structure and format of the book. The book is divided into four parts: Part I: Introduction; Part II: General Principles of Diagnosis; Part III: Specific Disorders; Part IV: Appendices.</p>
|
29 |
-
|
30 |
-
<p>Part I: Introduction provides some background information on the history, development, purpose, scope, limitations, and revisions of PPDGJ III. It also explains the basic concepts and terms used in the book.</p>
|
31 |
-
|
32 |
-
<p>Part II: General Principles of Diagnosis outlines the general rules and guidelines for diagnosing mental disorders using PPDGJ III. It covers topics such as diagnostic criteria, diagnostic categories, diagnostic axes, multiaxial assessment, differential diagnosis, comorbidity, reliability, validity, cultural factors, ethical issues, and legal implications.</p>
|
33 |
-
|
34 |
-
<p>Part III: Specific Disorders describes each specific disorder in detail. It follows a uniform format that includes: name of disorder; code number; diagnostic criteria; clinical features; differential diagnosis; etiology; course; prognosis; treatment; prevention; notes.</p>
|
35 |
-
|
36 |
-
<p>Part IV: Appendices contains some supplementary materials that support the main text of the book. It includes: glossary of terms; list of abbreviations; list of references; index.</p>
|
37 |
-
|
38 |
-
<h2>What Benefits do Buku PPDGJ III PDF Files Offer for Mental Health Professionals?</h2>
|
39 |
-
|
40 |
-
<p>Buku PPDGJ III PDF files offer many benefits for mental health professionals who work or study in Indonesia or who have an interest in Indonesian psychiatry. Some of these benefits are:</p>
|
41 |
-
|
42 |
-
<ul>
|
43 |
-
<li>They provide a comprehensive and updated source of information on mental disorders that is relevant to the Indonesian context.</li>
|
44 |
-
<li>They help to improve the quality and consistency of diagnosis and classification of mental disorders in Indonesia.</li>
|
45 |
-
<li>They facilitate communication and collaboration among mental health professionals across different settings and regions.</li>
|
46 |
-
<li>They enhance professional development and learning by providing opportunities for self-assessment, feedback, reflection, research, publication, teaching, training, supervision, consultation, etc.</li>
|
47 |
-
<li>They increase public awareness and understanding of mental disorders by providing accurate and reliable information that can be used for education, advocacy, prevention, intervention, etc.</li>
|
48 |
-
</ul>
|
49 |
-
|
50 |
-
<h2>How to Write an Effective and SEO-Optimized Article Using Buku PPDGJ III PDF Files as a Reference?</h2>
|
51 |
-
|
52 |
-
<p>If you want to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference</p>
|
53 |
-
|
54 |
-
|
55 |
-
- Add more details or examples to each point or paragraph
|
56 |
-
- Add more subheadings or sections to cover more aspects of the topic
|
57 |
-
- Add more images or videos to illustrate the content
|
58 |
-
- Add more quotes or testimonials from experts or users
|
59 |
-
- Add more statistics or facts to support the claims
|
60 |
-
|
61 |
-
I hope this helps you write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. If you have any questions or feedback, please let me know.?
|
62 |
-
<h3>Download Links for Buku PPDGJ III PDF Files</h3>
|
63 |
-
|
64 |
-
<ul>
|
65 |
-
<li><a href="https://id.scribd.com/document/409231491/Download-Buku-Ppdgj-III-PDF-File">Buku PPDGJ III PDF | PDF - Scribd</a></li>
|
66 |
-
<li><a href="https://idoc.pub/documents/ppdgj-3pdf-9n0k71op5k4v">Ppdgj 3.pdf [9n0k71op5k4v] - Documents and E-books</a></li>
|
67 |
-
<li><a href="https://idoc.pub/download/ppdgj-3pdf-9n0k71op5k4v">Download PDF - Ppdgj 3.pdf [9n0k71op5k4v] - Documents and E-books</a></li>
|
68 |
-
</ul>
|
69 |
-
|
70 |
-
<h3>Resources and Information on Buku PPDGJ III PDF Files and Mental Health</h3>
|
71 |
-
|
72 |
-
<ul>
|
73 |
-
<li><a href="https://www.who.int/mental_health/en/">Mental health - World Health Organization</a></li>
|
74 |
-
<li><a href="https://www.pdpi.org/">Perhimpunan Dokter Spesialis Kedokteran Jiwa Indonesia (PDPI) - Indonesian Psychiatric Association</a></li>
|
75 |
-
</ul>
|
76 |
-
<h3>How to Use Buku PPDGJ III PDF Files for Diagnosis and Classification of Mental Disorders</h3>
|
77 |
-
|
78 |
-
<p>One of the main purposes of buku ppdgj iii pdf files is to help mental health professionals diagnose and classify mental disorders in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for diagnosis and classification of mental disorders.</p>
|
79 |
-
|
80 |
-
<ol>
|
81 |
-
<li>Conduct a thorough assessment of the patient's symptoms, history, and context. You can use various methods and tools, such as interviews, observations, tests, scales, questionnaires, etc. You can also consult with other professionals or family members if needed.</li>
|
82 |
-
<li>Compare the patient's symptoms and features with the diagnostic criteria and clinical features of each disorder in buku ppdgj iii pdf files. You can use the index or the table of contents to find the relevant disorder or category. You can also use the notes section to find additional information or clarifications.</li>
|
83 |
-
<li>Select the most appropriate diagnosis or diagnoses for the patient based on the best fit and evidence. You can use the differential diagnosis section to rule out other possible disorders or conditions. You can also use the multiaxial assessment system to assign a diagnosis on each of the five axes: Axis I: Clinical Disorders; Axis II: Personality Disorders and Mental Retardation; Axis III: General Medical Conditions; Axis IV: Psychosocial and Environmental Problems; Axis V: Global Assessment of Functioning.</li>
|
84 |
-
<li>Document and communicate your diagnosis or diagnoses clearly and accurately. You can use the code number and the name of each disorder as they appear in buku ppdgj iii pdf files. You can also use the etiology, course, prognosis, treatment, and prevention sections to provide more information or recommendations for the patient.</li>
|
85 |
-
</ol>
|
86 |
-
|
87 |
-
<h3>How to Use Buku PPDGJ III PDF Files for Learning and Teaching Psychiatry</h3>
|
88 |
-
|
89 |
-
<p>Another purpose of buku ppdgj iii pdf files is to help mental health professionals and students learn and teach psychiatry in Indonesia. To use buku ppdgj iii pdf files for this purpose, you need to follow some steps and guidelines. Here are some tips to help you use buku ppdgj iii pdf files effectively for learning and teaching psychiatry.</p>
|
90 |
-
|
91 |
-
<ol>
|
92 |
-
<li>Read and study buku ppdgj iii pdf files regularly and thoroughly. You can use the introduction and the general principles of diagnosis sections to learn the basic concepts and terms of psychiatry. You can also use the specific disorders sections to learn the details and features of each disorder.</li>
|
93 |
-
<li>Practice and apply buku ppdgj iii pdf files in real or simulated situations. You can use case studies, role plays, quizzes, exams, assignments, projects, etc. to test your knowledge and skills in diagnosing and classifying mental disorders using buku ppdgj iii pdf files. You can also use feedback, reflection, supervision, consultation, etc. to improve your performance and competence.</li>
|
94 |
-
<li>Share and discuss buku ppdgj iii pdf files with other professionals or students. You can use seminars, workshops, conferences, journals, blogs, forums, etc. to exchange information and opinions about buku ppdgj iii pdf files and psychiatry in general. You can also use research, publication, teaching, training, etc. to contribute to the development and dissemination of buku ppdgj iii pdf files and psychiatry in Indonesia.</li>
|
95 |
-
</ol>
|
96 |
-
<h2>Conclusion</h2>
|
97 |
-
|
98 |
-
<p>Buku PPDGJ III PDF files are valuable resources for mental health professionals and students who work or study in Indonesia or who have an interest in Indonesian psychiatry. They provide a comprehensive and updated source of information on mental disorders that is relevant to the Indonesian context. They also help to standardize and harmonize the diagnosis and classification of mental disorders in Indonesia. Furthermore, they facilitate communication and collaboration among mental health professionals across different settings and regions. They also enhance professional development and learning by providing opportunities for self-assessment, feedback, reflection, research, publication, teaching, training, supervision, consultation, etc. They also increase public awareness and understanding of mental disorders by providing accurate and reliable information that can be used for education, advocacy, prevention, intervention, etc.</p>
|
99 |
-
|
100 |
-
<p>If you want to download buku ppdgj iii pdf files or learn more about them, you can use the links and resources provided in this article. You can also use the tips and guidelines provided in this article to write an effective and SEO-optimized article using buku ppdgj iii pdf files as a reference. This will help you increase your website's visibility and traffic, as well as your credibility and authority in your field.</p>
|
101 |
-
|
102 |
-
<p>We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to contact us. Thank you for reading.</p> 3cee63e6c2<br />
|
103 |
-
<br />
|
104 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/(2011) Crack.PhotoElf.4.1.12 11 !!LINK!!.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>(2011) Crack.PhotoElf.4.1.12 11</h2><br /><p><b><b>Download File</b> 🗸🗸🗸 <a href="https://imgfil.com/2uxYnd">https://imgfil.com/2uxYnd</a></b></p><br /><br />
|
2 |
-
|
3 |
-
3cee63e6c2<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Application X-msdownload How To Open ((INSTALL)).md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
<h2>application x-msdownload how to open</h2><br /><p><b><b>Download</b> 🆓 <a href="https://imgfil.com/2uxXnL">https://imgfil.com/2uxXnL</a></b></p><br /><br />
|
2 |
-
|
3 |
-
. KDE office; Microsoft Office Modeling; open office; Other Adobe applications. app/vnd.ms-cab-compressed . application/x-apple-diskimage. Download Adobe Acrobat Reader DC for Windows in Russian without registration and SMS from the link below.
|
4 |
-
Acrobat Reader for Windows 10 in Russian via a direct link from the official website without registration and SMS.
|
5 |
-
Download Adobe Acrobat Reader DC for free for Windows 7 in Russian without registration and SMS using the direct link below.
|
6 |
-
Adobe Reader DC 2019. 8a78ff9644<br />
|
7 |
-
<br />
|
8 |
-
<br />
|
9 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Archicad 16 ((FREE)) Crack Download Mega.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>archicad 16 crack download mega</h2><br /><p><b><b>Download Zip</b> ✪ <a href="https://imgfil.com/2uxX4v">https://imgfil.com/2uxX4v</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
0:00 / 5:25•Watch the full video. Live. •. Scroll for details. Install full Archicad 16. 16,834 views16K views. September 7, 2015 . •. In the ArchiCAD Video Lessons section, you can watch video tutorials on working with ArchiCAD 16, which covers the basic working methods, such as creating and editing objects, creating walls, creating windows and doors, designing a roof, facade elements, creating and editing interior walls and partitions. The lesson uses an example project in which it is necessary to build a frame house. •. Scroll for details. Video tutorials Archicad 16. 2 771 views2 thousand views. 8a78ff9644<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Casio Fx 880p Emulator.md
DELETED
@@ -1,20 +0,0 @@
|
|
1 |
-
<h2>Casio Fx 880p Emulator</h2><br /><p><b><b>DOWNLOAD</b> ○○○ <a href="https://imgfil.com/2uxYzF">https://imgfil.com/2uxYzF</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
casio fx 880p emulator
|
4 |
-
|
5 |
-
Length 00:10:42 - Size 9.42 MB
|
6 |
-
|
7 |
-
Convert Youtube video to mp3. Download Mp3. Download has been launched, thanks for supporting us. Download has been launched, thank you. Downloaded. Video is under conversion, please wait... Video is under conversion... Converting... Sorry, this video is protected, thank you for your understanding. Sorry, this video is protected. Protected video.
|
8 |
-
|
9 |
-
casio fx 880p emulator free convert to mp3
|
10 |
-
|
11 |
-
Length 00:10:24 - Size 9.04 MB
|
12 |
-
|
13 |
-
casio fx 880p emulator master cart
|
14 |
-
|
15 |
-
Length 00:25:47 - Size 23.33 MB
|
16 |
-
|
17 |
-
Convert Youtube video to mp3Download Mp3Download has been launched, thanks for supporting us.Download has been launched, thank you.DownloadedVideo is under conversion, please wait...Video is under conversion...Converting...Sorry, this video is protected, thank you for your understanding.Sorry, this video is protected.Prot 4fefd39f24<br />
|
18 |
-
<br />
|
19 |
-
<br />
|
20 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Coffee Crisis Download HOT For Pc [torrent Full].md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Coffee Crisis Download For Pc [torrent Full]</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://imgfil.com/2uxZLq">https://imgfil.com/2uxZLq</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Coffee Crisis is an arcade-style beat 'em up full of caffeinated carnage! ... The AI codenamed DUDE lies deep in the computer core of a long forgotten laboratory. 4d29de3e1b<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Emicsoft Vob Converter 4.1.20 REGISTRATION CODE.rar.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
<h2>emicsoft vob converter 4.1.20 REGISTRATION CODE.rar</h2><br /><p><b><b>Download File</b> ✫ <a href="https://imgfil.com/2uxZua">https://imgfil.com/2uxZua</a></b></p><br /><br />
|
2 |
-
|
3 |
-
16 April 2021 — Online unit converter, October 21, 2021 01:45 AM . .html]emicsoft vob convertor 4.1.20 REGISTRATION CODE.rar[/url] royarborbert .. Converters: Unit converter — Unit converter (Unit converter) ( download ) — Unit converter. .
|
4 |
-
Unit converter.
|
5 |
-
On this page you can download the unit converter.
|
6 |
-
This program allows you to convert values ​​from different systems.
|
7 |
-
Unit Converter - Download Unit Converter for free.
|
8 |
-
Unit Converter - A program for converting values ​​from one dimensional system to another.
|
9 |
-
Unit converter .
|
10 |
-
Download Unit converter . (for Windows), Unit Converter (For MAC OS) . 8a78ff9644<br />
|
11 |
-
<br />
|
12 |
-
<br />
|
13 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Focusrite Serial Number Prefixes What They Mean and How to Use Them.md
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>This link is for the <strong>Clarett</strong> interfaces with a <strong>Thunderbolt</strong> connection and the <strong>Clarett</strong> <strong>OctoPre</strong>.<br /><br />The serial numbers for the <strong>Clarett</strong> range begin with a U.</p>
|
3 |
-
<h2>Focusrite Serial Number</h2><br /><p><b><b>DOWNLOAD</b> ····· <a href="https://imgfil.com/2uxYaB">https://imgfil.com/2uxYaB</a></b></p><br /><br />
|
4 |
-
<p>The names of the different compressor and EQ patches only provide clues as to what they emulate, but see the 'Behind The Mask' box for a list of the real-life models that were researched in their creation. These range from expensive and rare outboard to popular consoles and workhorse rack gear, and the selection on offer is certainly impressive, covering many of the biggest and best-respected names in recording hardware from both the UK and the US, including a number of Focusrite's own products.</p>
|
5 |
-
<p>The Liquid Mix Manager software allows you to set the unit's sample rate, and also to specify the maximum number of channels of processing you wish to use, in case you want to conserve Firewire bandwidth.Operating the Liquid Mix is gratifyingly straightforward, and though some users might wish for photorealistic emulations of every plug-in control surface, there's a lot to be said for having a consistent control style. This being said, I personally find it less helpful when the normal layout is changed to reflect some of the oddities of the original, such as putting the HF controls on the left or having frequency controls that work backwards. While I'm making small gripes, I couldn't get the Snapshot facility to work in Logic, though the normal Save and Load menu does essentially the same thing, and does work. The compressor and EQ settings are always loaded together, though, and I feel it would make more sense to also have them available separately.</p>
|
6 |
-
<p>Sonically, I'm not able to vouch for the degree of authenticity of all the emulations, but there are some very nice-sounding EQs and compressors available covering a wide range of distinctive characters and styles. I particularly liked some of the more subtle passive EQs that seem to sweeten a mix or track with very little adjustment, and of course there are those kick-ass optical compressors that contrast nicely with the more workmanlike VCA versions. For the less experienced user, deciding which to use may present a challenge, but at the same time Liquid Mix offers a wonderful educational opportunity for any aspiring engineer to get familiar with the essential character of a number of classic compressors and equalisers that they may otherwise never come across.</p>
|
7 |
-
<p>We have had some reports of registration problems, but have been unable to reproduce the problem. Please send your username (anthonylavoie) along with your iLok account, plug-in serial number and key to [email protected] and we'll make sure we get you registered.</p>
|
8 |
-
<p></p>
|
9 |
-
<p>Free assistance is available for the first 60 days on new purchases, excluding internal hardware installations or networking support. Your invoice reference number will be required for free assistance.</p>
|
10 |
-
<p><br><strong>Registration</strong>: 7/10<br>First thing was to register it. It didn't accept my information the first 2 times online when I filled out the Bundle serial number part, but the third time it took to the information and registered it as a product I own. Slightly frustrating but I stuck with it and it worked eventually. it's a confusing series of jumps and kept forcing me to re-sign in then complained I was already signed in and that my Bundle ID wasn't recognized when it was exactly right. I typed it in the first 2 times and the 3rd successful time was just a copy/paste of the second attempt.</p>
|
11 |
-
<p>The second major advantage of Thunderbolt for audio purposes is the lower levels of latency that are achievable using this protocol. Thunderbolt connects straight through to the PCIe layer, as opposed to USB which must go through a number of stages first (each stage adding additional latency).</p>
|
12 |
-
<p>What Generation of Scarlett Solo you got?<br>The serial number of your Scarlett will be found on the underside, either on a sticker or etched on the casing. The prefix of your serial number will denote which generation Scarlett you own:</p>
|
13 |
-
<p>U. Zanghieri presented a scheme for carrying AES3 signals on the "spare" pairs (4-5 and 7-8) of Ethernet 100Base-TX in a system where an audio source and a number of destination devices (such as powered loudspeakers) are connected in a ring, so that the system can survive loss of any one of the links. The Ethernet connection is used purely for control, as the latency through each unit is much higher than on the AES3 connection. A Project Initiation Request will be submitted in due course.</p>
|
14 |
-
<p>Apple has informed its official retail stores, AppleCare employees, and authorized resellers that a small number of third-generation Apple TV units have WiFi issues. These issues surround not being able to locate a WiFi network, unable to join a network, and dropped or intermittent connections.</p>
|
15 |
-
<p>Apple has determined that a very small number of Apple TV (3rd generation) products might experience one of these Wi-Fi related connectivity issues: Cannot locate network, Unable to join network, Dropped or intermittent connection.</p>
|
16 |
-
<p>Apple, which works with suppliers to test new designs all the time, has been testing various TV prototypes for a number of years, according to people familiar with the efforts. The company generally tests and develops products internally before doing so with outside suppliers.</p>
|
17 |
-
<p>In Spain, the lack of podcasting data, and existing conflicting numbers, are holding the medium back, says a well-researched article published in TELOS Magazine. It quotes podcaster Francisco Izuzquiza:</p> aaccfb2cb3<br />
|
18 |
-
<br />
|
19 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Enjoy Epic Stickman Fights with Supreme Duelist APK 2022 Download Now.md
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Supreme Duelist Stickman APK 2022: A Fun and Crazy Stickman Game</h1>
|
3 |
-
<p>If you are looking for a fun and crazy stickman game to play on your Android device, you should download Supreme Duelist Stickman APK 2022. This is a popular stickman game that lets you fight against other stickmen in various modes and maps. You can also customize your character with different weapons and outfits, and enjoy realistic ragdoll physics. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.</p>
|
4 |
-
<h2>download supreme duelist stickman apk 2022</h2><br /><p><b><b>DOWNLOAD</b> ····· <a href="https://jinyurl.com/2uNKEJ">https://jinyurl.com/2uNKEJ</a></b></p><br /><br />
|
5 |
-
<h2>Features of Supreme Duelist Stickman APK 2022</h2>
|
6 |
-
<p>Supreme Duelist Stickman APK 2022 has many features that make it one of the best stickman games on the market. Here are some of them:</p>
|
7 |
-
<ul>
|
8 |
-
<li><strong>Mini game mode:</strong> You can play football with your friends on the same device or against the CPU. This is a fun way to test your skills and have some laughs.</li>
|
9 |
-
<li><strong>Boss Fight Tournament:</strong> You can challenge yourself against powerful enemies in this mode. You will face different bosses with different abilities and weapons. You will need to use your strategy and reflexes to defeat them.</li>
|
10 |
-
<li><strong>Ragdoll physics:</strong> You will love the realistic and hilarious animations of the stickmen in this game. You can see them fly, bounce, fall, and twist in various ways. You can also use the ragdoll button to make them flop around.</li>
|
11 |
-
<li <li><strong>Customizable characters:</strong> You can choose your favorite stickman and weapon from a variety of options. You can also change the color and size of your stickman, and unlock more items with coins. You can create your own unique stickman and show it off to your opponents.</li>
|
12 |
-
<li><strong>Various modes and maps:</strong> You can explore different scenarios and gameplay styles in this game. You can play in normal mode, survival mode, or duel mode. You can also choose from different maps, such as desert, forest, city, space, and more. Each map has its own features and challenges.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>How to Download Supreme Duelist Stickman APK 2022</h2>
|
15 |
-
<p>Downloading Supreme Duelist Stickman APK 2022 is very easy and fast. Just follow these simple steps:</p>
|
16 |
-
<ol>
|
17 |
-
<li>Go to the official Google Play Store link or click <a href="">here</a>.</li>
|
18 |
-
<li>Tap on the Install button and wait for the download to finish.</li>
|
19 |
-
<li>Open the app and enjoy the game.</li>
|
20 |
-
</ol>
|
21 |
-
<h2>Tips and Tricks for Supreme Duelist Stickman APK 2022</h2>
|
22 |
-
<p>If you want to master Supreme Duelist Stickman APK 2022, you should know some tips and tricks that will help you improve your performance and have more fun. Here are some of them:</p>
|
23 |
-
<ul>
|
24 |
-
<li><strong>Use the joystick to move and jump, and the buttons to attack and defend:</strong> The controls of this game are very simple and intuitive. You can use the joystick on the left side of the screen to move your stickman around and jump over obstacles. You can use the buttons on the right side of the screen to attack with your weapon or defend yourself from enemy attacks.</li>
|
25 |
-
<li><strong>Try different weapons and find the one that suits your style:</strong> There are many weapons to choose from in this game, such as swords, axes, hammers, guns, bows, and more. Each weapon has its own advantages and disadvantages, such as range, speed, damage, and accuracy. You should try different weapons and find the one that matches your style and preference.</li>
|
26 |
-
<li><strong>Use the environment to your advantage, such as traps, spikes, and explosives:</strong> The maps in this game are not just backgrounds, they are also part of the gameplay. You can use the environment to your advantage, such as traps, spikes, explosives, and other objects. You can use them to damage or kill your enemies, or to escape from dangerous situations.</li>
|
27 |
-
<li><strong>Watch ads to get free coins and unlock more items:</strong> If you want to get more coins and unlock more items in this game, you can watch ads to get free rewards. You can watch ads after each match or from the main menu. You can use the coins to buy new weapons, outfits, colors, and sizes for your stickman.</li>
|
28 |
-
<li><strong>Practice in single-player mode before challenging your friends or online players:</strong> If you want to improve your skills and confidence in this game, you should practice in single-player mode before challenging your friends or online players. You can play against the CPU in different difficulty levels, or play in mini game mode or boss fight tournament mode. This will help you get familiar with the game mechanics and controls.</li>
|
29 |
-
</ul>
|
30 |
-
<h2>Conclusion</h2>
|
31 |
-
<p>Supreme Duelist Stickman APK 2022 is a fun and crazy stickman game that you should download and play on your Android device. It has many features that make it one of the best stickman games on the market, such as mini game mode, boss fight tournament mode, ragdoll physics, customizable characters, various modes and maps, and more. It is also easy to download and install, as long as you use the official Google Play Store link. Whether you want to play solo or with your friends, Supreme Duelist Stickman APK 2022 will keep you entertained for hours.</p>
|
32 |
-
<p>download supreme duelist stickman apk 2022 latest version<br />
|
33 |
-
download supreme duelist stickman apk 2022 mod<br />
|
34 |
-
download supreme duelist stickman apk 2022 free<br />
|
35 |
-
download supreme duelist stickman apk 2022 offline<br />
|
36 |
-
download supreme duelist stickman apk 2022 unlimited money<br />
|
37 |
-
download supreme duelist stickman apk 2022 for android<br />
|
38 |
-
download supreme duelist stickman apk 2022 for pc<br />
|
39 |
-
download supreme duelist stickman apk 2022 hack<br />
|
40 |
-
download supreme duelist stickman apk 2022 update<br />
|
41 |
-
download supreme duelist stickman apk 2022 full<br />
|
42 |
-
download supreme duelist stickman apk 2022 unlocked<br />
|
43 |
-
download supreme duelist stickman apk 2022 no ads<br />
|
44 |
-
download supreme duelist stickman apk 2022 game<br />
|
45 |
-
download supreme duelist stickman apk 2022 online<br />
|
46 |
-
download supreme duelist stickman apk 2022 pro<br />
|
47 |
-
download supreme duelist stickman apk 2022 premium<br />
|
48 |
-
download supreme duelist stickman apk 2022 cracked<br />
|
49 |
-
download supreme duelist stickman apk 2022 mega mod<br />
|
50 |
-
download supreme duelist stickman apk 2022 new<br />
|
51 |
-
download supreme duelist stickman apk 2022 original<br />
|
52 |
-
download supreme duelist stickman apk 2022 review<br />
|
53 |
-
download supreme duelist stickman apk 2022 best<br />
|
54 |
-
download supreme duelist stickman apk 2022 cheat<br />
|
55 |
-
download supreme duelist stickman apk 2022 fun<br />
|
56 |
-
download supreme duelist stickman apk 2022 guide<br />
|
57 |
-
download supreme duelist stickman apk 2022 tips<br />
|
58 |
-
download supreme duelist stickman apk 2022 tricks<br />
|
59 |
-
download supreme duelist stickman apk 2022 tutorial<br />
|
60 |
-
download supreme duelist stickman apk 2022 walkthrough<br />
|
61 |
-
download supreme duelist stickman apk 2022 gameplay<br />
|
62 |
-
download supreme duelist stickman apk 2022 features<br />
|
63 |
-
download supreme duelist stickman apk 2022 editor mode<br />
|
64 |
-
download supreme duelist stickman apk 2022 boss fight tournament mode<br />
|
65 |
-
download supreme duelist stickman apk 2022 football mini game mode<br />
|
66 |
-
download supreme duelist stickman apk 2022 skins unlocker<br />
|
67 |
-
download supreme duelist stickman apk 2022 realistic ragdoll physics<br />
|
68 |
-
download supreme duelist stickman apk 2022 energy shield option<br />
|
69 |
-
download supreme duelist stickman apk 2022 gravity option<br />
|
70 |
-
download supreme duelist stickman apk 2022 instant ko option<br />
|
71 |
-
download supreme duelist stickman apk 2022 multiplayer mode</p>
|
72 |
-
<h3>FAQs</h3>
|
73 |
-
<ul>
|
74 |
-
<li><strong>Q1: Is Supreme Duelist Stickman APK 2022 free?</strong></li>
|
75 |
-
<li>A1: Yes, it is free to download and play, but it contains ads and in-app purchases.</li>
|
76 |
-
<li><strong>Q2: Is Supreme Duelist Stickman APK 2022 safe?</strong></li>
|
77 |
-
<li>A2: Yes, it is safe to download and install, as long as you use the official Google Play Store link.</li>
|
78 |
-
<li><strong>Q3: Is Supreme Duelist Stickman APK 2022 compatible with my device?</strong></li>
|
79 |
-
<li>A3: It requires Android 4.4 or higher, and at least 40 MB of free storage space.</li> <li><strong>Q4: How can I contact the developer of Supreme Duelist Stickman APK 2022?</strong></li>
|
80 |
-
<li>A4: You can email them at [email protected] or follow them on Facebook or Instagram.</li>
|
81 |
-
<li><strong>Q5: How can I rate and review Supreme Duelist Stickman APK 2022?</strong></li>
|
82 |
-
<li>A5: You can rate and review it on the Google Play Store page, or share your feedback on social media.</li>
|
83 |
-
</ul>
|
84 |
-
<p>I hope you enjoyed this article and found it helpful. If you have any questions or comments, please feel free to leave them below. Thank you for reading and have a great day!</p> 197e85843d<br />
|
85 |
-
<br />
|
86 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/2ndelement/voicevox/voicevox_engine/synthesis_engine/synthesis_engine_base.py
DELETED
@@ -1,259 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
from abc import ABCMeta, abstractmethod
|
3 |
-
from typing import List, Optional
|
4 |
-
|
5 |
-
import numpy as np
|
6 |
-
|
7 |
-
from .. import full_context_label
|
8 |
-
from ..full_context_label import extract_full_context_label
|
9 |
-
from ..model import AccentPhrase, AudioQuery, Mora
|
10 |
-
from ..mora_list import openjtalk_mora2text
|
11 |
-
|
12 |
-
|
13 |
-
def mora_to_text(mora: str) -> str:
    """Convert an OpenJTalk mora phoneme string to its kana text.

    Devoiced vowels arrive upper-cased in the phoneme labels (e.g. "sU");
    they are lowered here before the table lookup (per the original note:
    devoiced vowels to lowercase).

    Parameters
    ----------
    mora : str
        Mora phoneme string (e.g. "ka", "sU").

    Returns
    -------
    str
        Kana text for the mora, or the input unchanged when no mapping
        exists in ``openjtalk_mora2text``.
    """
    # Devoiced vowel: lower-case the final character so it matches the
    # lookup table's keys. An empty string matches neither branch.
    if mora.endswith(("A", "I", "U", "E", "O")):
        mora = mora[:-1] + mora[-1].lower()
    # Single lookup with fallback instead of `in` test + second lookup.
    return openjtalk_mora2text.get(mora, mora)
|
21 |
-
|
22 |
-
|
23 |
-
def adjust_interrogative_accent_phrases(
    accent_phrases: List[AccentPhrase],
) -> List[AccentPhrase]:
    """Rebuild accent phrases with question-style intonation applied.

    When enable_interrogative_upspeak is active and a phrase is marked
    interrogative, a short rising mora is appended to its end (see
    ``adjust_interrogative_moras``) so the phrase sounds like a question.

    NOTE: to be moved to a more appropriate place during refactoring.
    """
    adjusted: List[AccentPhrase] = []
    for phrase in accent_phrases:
        adjusted.append(
            AccentPhrase(
                moras=adjust_interrogative_moras(phrase),
                accent=phrase.accent,
                pause_mora=phrase.pause_mora,
                is_interrogative=phrase.is_interrogative,
            )
        )
    return adjusted
|
40 |
-
|
41 |
-
|
42 |
-
def adjust_interrogative_moras(accent_phrase: AccentPhrase) -> List[Mora]:
    """Return the phrase's moras, appending a rising mora for questions.

    A deep copy of the mora list is returned so the caller's phrase is
    never mutated. The extra question mora is added only when the phrase
    is interrogative, non-empty, and its final mora is voiced
    (pitch != 0).
    """
    moras = copy.deepcopy(accent_phrase.moras)
    # Guard clauses replace the original combined condition.
    if not accent_phrase.is_interrogative:
        return moras
    if not moras or moras[-1].pitch == 0:
        return moras
    moras.append(make_interrogative_mora(moras[-1]))
    return moras
|
50 |
-
|
51 |
-
|
52 |
-
def make_interrogative_mora(last_mora: Mora) -> Mora:
    """Build the extra rising mora appended to a question phrase.

    The new mora repeats the vowel of ``last_mora`` with a fixed short
    length and a pitch slightly above the previous mora, capped so it
    cannot rise without bound.
    """
    FIXED_VOWEL_LENGTH = 0.15  # length assigned to the added mora — presumably seconds; confirm
    PITCH_STEP = 0.3  # how far above the previous mora the pitch rises
    PITCH_CEILING = 6.5  # upper bound on the raised pitch
    raised_pitch = min(last_mora.pitch + PITCH_STEP, PITCH_CEILING)
    return Mora(
        text=openjtalk_mora2text[last_mora.vowel],
        consonant=None,
        consonant_length=None,
        vowel=last_mora.vowel,
        vowel_length=FIXED_VOWEL_LENGTH,
        pitch=raised_pitch,
    )
|
64 |
-
|
65 |
-
|
66 |
-
def full_context_label_moras_to_moras(
    full_context_moras: List[full_context_label.Mora],
) -> List[Mora]:
    """Convert full-context-label moras into engine ``Mora`` models.

    Lengths and pitch are initialised to zero here and are filled in
    later via ``replace_mora_data``. ``consonant_length`` is 0 only when
    a consonant is present; otherwise it stays None.
    """
    converted: List[Mora] = []
    for fc_mora in full_context_moras:
        has_consonant = fc_mora.consonant is not None
        phoneme_text = "".join(p.phoneme for p in fc_mora.phonemes)
        converted.append(
            Mora(
                text=mora_to_text(phoneme_text),
                consonant=fc_mora.consonant.phoneme if has_consonant else None,
                consonant_length=0 if has_consonant else None,
                vowel=fc_mora.vowel.phoneme,
                vowel_length=0,
                pitch=0,
            )
        )
    return converted
|
80 |
-
|
81 |
-
|
82 |
-
class SynthesisEngineBase(metaclass=ABCMeta):
|
83 |
-
# FIXME: jsonではなくModelを返すようにする
|
84 |
-
@property
|
85 |
-
@abstractmethod
|
86 |
-
def speakers(self) -> str:
|
87 |
-
raise NotImplementedError
|
88 |
-
|
89 |
-
@property
|
90 |
-
@abstractmethod
|
91 |
-
def supported_devices(self) -> Optional[str]:
|
92 |
-
raise NotImplementedError
|
93 |
-
|
94 |
-
def initialize_speaker_synthesis( # noqa: B027
|
95 |
-
self, speaker_id: int, skip_reinit: bool
|
96 |
-
):
|
97 |
-
|
98 |
-
"""
|
99 |
-
指定した話者での音声合成を初期化する。何度も実行可能。
|
100 |
-
未実装の場合は何もしない
|
101 |
-
Parameters
|
102 |
-
----------
|
103 |
-
speaker_id : int
|
104 |
-
話者ID
|
105 |
-
skip_reinit : bool
|
106 |
-
True の場合, 既に初期化済みの話者の再初期化をスキップします
|
107 |
-
"""
|
108 |
-
pass
|
109 |
-
|
110 |
-
def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool:
|
111 |
-
"""
|
112 |
-
指定した話者での音声合成が初期化されているかどうかを返す
|
113 |
-
Parameters
|
114 |
-
----------
|
115 |
-
speaker_id : int
|
116 |
-
話者ID
|
117 |
-
Returns
|
118 |
-
-------
|
119 |
-
bool
|
120 |
-
初期化されているかどうか
|
121 |
-
"""
|
122 |
-
return True
|
123 |
-
|
124 |
-
@abstractmethod
|
125 |
-
def replace_phoneme_length(
|
126 |
-
self, accent_phrases: List[AccentPhrase], speaker_id: int
|
127 |
-
) -> List[AccentPhrase]:
|
128 |
-
"""
|
129 |
-
accent_phrasesの母音・子音の長さを設定する
|
130 |
-
Parameters
|
131 |
-
----------
|
132 |
-
accent_phrases : List[AccentPhrase]
|
133 |
-
アクセント句モデルのリスト
|
134 |
-
speaker_id : int
|
135 |
-
話者ID
|
136 |
-
Returns
|
137 |
-
-------
|
138 |
-
accent_phrases : List[AccentPhrase]
|
139 |
-
母音・子音の長さが設定されたアクセント句モデルのリスト
|
140 |
-
"""
|
141 |
-
raise NotImplementedError()
|
142 |
-
|
143 |
-
@abstractmethod
|
144 |
-
def replace_mora_pitch(
|
145 |
-
self, accent_phrases: List[AccentPhrase], speaker_id: int
|
146 |
-
) -> List[AccentPhrase]:
|
147 |
-
"""
|
148 |
-
accent_phrasesの音高(ピッチ)を設定する
|
149 |
-
Parameters
|
150 |
-
----------
|
151 |
-
accent_phrases : List[AccentPhrase]
|
152 |
-
アクセント句モデルのリスト
|
153 |
-
speaker_id : int
|
154 |
-
話者ID
|
155 |
-
Returns
|
156 |
-
-------
|
157 |
-
accent_phrases : List[AccentPhrase]
|
158 |
-
音高(ピッチ)が設定されたアクセント句モデルのリスト
|
159 |
-
"""
|
160 |
-
raise NotImplementedError()
|
161 |
-
|
162 |
-
def replace_mora_data(
|
163 |
-
self,
|
164 |
-
accent_phrases: List[AccentPhrase],
|
165 |
-
speaker_id: int,
|
166 |
-
) -> List[AccentPhrase]:
|
167 |
-
return self.replace_mora_pitch(
|
168 |
-
accent_phrases=self.replace_phoneme_length(
|
169 |
-
accent_phrases=accent_phrases,
|
170 |
-
speaker_id=speaker_id,
|
171 |
-
),
|
172 |
-
speaker_id=speaker_id,
|
173 |
-
)
|
174 |
-
|
175 |
-
def create_accent_phrases(self, text: str, speaker_id: int) -> List[AccentPhrase]:
|
176 |
-
if len(text.strip()) == 0:
|
177 |
-
return []
|
178 |
-
|
179 |
-
utterance = extract_full_context_label(text)
|
180 |
-
if len(utterance.breath_groups) == 0:
|
181 |
-
return []
|
182 |
-
|
183 |
-
accent_phrases = self.replace_mora_data(
|
184 |
-
accent_phrases=[
|
185 |
-
AccentPhrase(
|
186 |
-
moras=full_context_label_moras_to_moras(accent_phrase.moras),
|
187 |
-
accent=accent_phrase.accent,
|
188 |
-
pause_mora=(
|
189 |
-
Mora(
|
190 |
-
text="、",
|
191 |
-
consonant=None,
|
192 |
-
consonant_length=None,
|
193 |
-
vowel="pau",
|
194 |
-
vowel_length=0,
|
195 |
-
pitch=0,
|
196 |
-
)
|
197 |
-
if (
|
198 |
-
i_accent_phrase == len(breath_group.accent_phrases) - 1
|
199 |
-
and i_breath_group != len(utterance.breath_groups) - 1
|
200 |
-
)
|
201 |
-
else None
|
202 |
-
),
|
203 |
-
is_interrogative=accent_phrase.is_interrogative,
|
204 |
-
)
|
205 |
-
for i_breath_group, breath_group in enumerate(utterance.breath_groups)
|
206 |
-
for i_accent_phrase, accent_phrase in enumerate(
|
207 |
-
breath_group.accent_phrases
|
208 |
-
)
|
209 |
-
],
|
210 |
-
speaker_id=speaker_id,
|
211 |
-
)
|
212 |
-
return accent_phrases
|
213 |
-
|
214 |
-
def synthesis(
|
215 |
-
self,
|
216 |
-
query: AudioQuery,
|
217 |
-
speaker_id: int,
|
218 |
-
enable_interrogative_upspeak: bool = True,
|
219 |
-
) -> np.ndarray:
|
220 |
-
"""
|
221 |
-
音声合成クエリ内の疑問文指定されたMoraを変形した後、
|
222 |
-
継承先における実装`_synthesis_impl`を使い音声合成を行う
|
223 |
-
Parameters
|
224 |
-
----------
|
225 |
-
query : AudioQuery
|
226 |
-
音声合成クエリ
|
227 |
-
speaker_id : int
|
228 |
-
話者ID
|
229 |
-
enable_interrogative_upspeak : bool
|
230 |
-
疑問系のテキストの語尾を自動調整する機能を有効にするか
|
231 |
-
Returns
|
232 |
-
-------
|
233 |
-
wave : numpy.ndarray
|
234 |
-
音声合成結果
|
235 |
-
"""
|
236 |
-
# モーフィング時などに同一参照のqueryで複数回呼ばれる可能性があるので、元の引数のqueryに破壊的変更を行わない
|
237 |
-
query = copy.deepcopy(query)
|
238 |
-
if enable_interrogative_upspeak:
|
239 |
-
query.accent_phrases = adjust_interrogative_accent_phrases(
|
240 |
-
query.accent_phrases
|
241 |
-
)
|
242 |
-
return self._synthesis_impl(query, speaker_id)
|
243 |
-
|
244 |
-
@abstractmethod
|
245 |
-
def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
|
246 |
-
"""
|
247 |
-
音声合成クエリから音声合成に必要な情報を構成し、実際に音声合成を行う
|
248 |
-
Parameters
|
249 |
-
----------
|
250 |
-
query : AudioQuery
|
251 |
-
音声合成クエリ
|
252 |
-
speaker_id : int
|
253 |
-
話者ID
|
254 |
-
Returns
|
255 |
-
-------
|
256 |
-
wave : numpy.ndarray
|
257 |
-
音声合成結果
|
258 |
-
"""
|
259 |
-
raise NotImplementedError()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/801artistry/RVC801/lib/infer_pack/models_dml.py
DELETED
@@ -1,1124 +0,0 @@
|
|
1 |
-
import math, pdb, os
|
2 |
-
from time import time as ttime
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
from lib.infer_pack import modules
|
7 |
-
from lib.infer_pack import attentions
|
8 |
-
from lib.infer_pack import commons
|
9 |
-
from lib.infer_pack.commons import init_weights, get_padding
|
10 |
-
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
|
11 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
12 |
-
from lib.infer_pack.commons import init_weights
|
13 |
-
import numpy as np
|
14 |
-
from lib.infer_pack import commons
|
15 |
-
|
16 |
-
|
17 |
-
class TextEncoder256(nn.Module):
|
18 |
-
def __init__(
|
19 |
-
self,
|
20 |
-
out_channels,
|
21 |
-
hidden_channels,
|
22 |
-
filter_channels,
|
23 |
-
n_heads,
|
24 |
-
n_layers,
|
25 |
-
kernel_size,
|
26 |
-
p_dropout,
|
27 |
-
f0=True,
|
28 |
-
):
|
29 |
-
super().__init__()
|
30 |
-
self.out_channels = out_channels
|
31 |
-
self.hidden_channels = hidden_channels
|
32 |
-
self.filter_channels = filter_channels
|
33 |
-
self.n_heads = n_heads
|
34 |
-
self.n_layers = n_layers
|
35 |
-
self.kernel_size = kernel_size
|
36 |
-
self.p_dropout = p_dropout
|
37 |
-
self.emb_phone = nn.Linear(256, hidden_channels)
|
38 |
-
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
39 |
-
if f0 == True:
|
40 |
-
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
41 |
-
self.encoder = attentions.Encoder(
|
42 |
-
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
43 |
-
)
|
44 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
45 |
-
|
46 |
-
def forward(self, phone, pitch, lengths):
|
47 |
-
if pitch == None:
|
48 |
-
x = self.emb_phone(phone)
|
49 |
-
else:
|
50 |
-
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
51 |
-
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
52 |
-
x = self.lrelu(x)
|
53 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
54 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
55 |
-
x.dtype
|
56 |
-
)
|
57 |
-
x = self.encoder(x * x_mask, x_mask)
|
58 |
-
stats = self.proj(x) * x_mask
|
59 |
-
|
60 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
61 |
-
return m, logs, x_mask
|
62 |
-
|
63 |
-
|
64 |
-
class TextEncoder768(nn.Module):
|
65 |
-
def __init__(
|
66 |
-
self,
|
67 |
-
out_channels,
|
68 |
-
hidden_channels,
|
69 |
-
filter_channels,
|
70 |
-
n_heads,
|
71 |
-
n_layers,
|
72 |
-
kernel_size,
|
73 |
-
p_dropout,
|
74 |
-
f0=True,
|
75 |
-
):
|
76 |
-
super().__init__()
|
77 |
-
self.out_channels = out_channels
|
78 |
-
self.hidden_channels = hidden_channels
|
79 |
-
self.filter_channels = filter_channels
|
80 |
-
self.n_heads = n_heads
|
81 |
-
self.n_layers = n_layers
|
82 |
-
self.kernel_size = kernel_size
|
83 |
-
self.p_dropout = p_dropout
|
84 |
-
self.emb_phone = nn.Linear(768, hidden_channels)
|
85 |
-
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
|
86 |
-
if f0 == True:
|
87 |
-
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
|
88 |
-
self.encoder = attentions.Encoder(
|
89 |
-
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
|
90 |
-
)
|
91 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
92 |
-
|
93 |
-
def forward(self, phone, pitch, lengths):
|
94 |
-
if pitch == None:
|
95 |
-
x = self.emb_phone(phone)
|
96 |
-
else:
|
97 |
-
x = self.emb_phone(phone) + self.emb_pitch(pitch)
|
98 |
-
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
|
99 |
-
x = self.lrelu(x)
|
100 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
101 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
|
102 |
-
x.dtype
|
103 |
-
)
|
104 |
-
x = self.encoder(x * x_mask, x_mask)
|
105 |
-
stats = self.proj(x) * x_mask
|
106 |
-
|
107 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
108 |
-
return m, logs, x_mask
|
109 |
-
|
110 |
-
|
111 |
-
class ResidualCouplingBlock(nn.Module):
|
112 |
-
def __init__(
|
113 |
-
self,
|
114 |
-
channels,
|
115 |
-
hidden_channels,
|
116 |
-
kernel_size,
|
117 |
-
dilation_rate,
|
118 |
-
n_layers,
|
119 |
-
n_flows=4,
|
120 |
-
gin_channels=0,
|
121 |
-
):
|
122 |
-
super().__init__()
|
123 |
-
self.channels = channels
|
124 |
-
self.hidden_channels = hidden_channels
|
125 |
-
self.kernel_size = kernel_size
|
126 |
-
self.dilation_rate = dilation_rate
|
127 |
-
self.n_layers = n_layers
|
128 |
-
self.n_flows = n_flows
|
129 |
-
self.gin_channels = gin_channels
|
130 |
-
|
131 |
-
self.flows = nn.ModuleList()
|
132 |
-
for i in range(n_flows):
|
133 |
-
self.flows.append(
|
134 |
-
modules.ResidualCouplingLayer(
|
135 |
-
channels,
|
136 |
-
hidden_channels,
|
137 |
-
kernel_size,
|
138 |
-
dilation_rate,
|
139 |
-
n_layers,
|
140 |
-
gin_channels=gin_channels,
|
141 |
-
mean_only=True,
|
142 |
-
)
|
143 |
-
)
|
144 |
-
self.flows.append(modules.Flip())
|
145 |
-
|
146 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
147 |
-
if not reverse:
|
148 |
-
for flow in self.flows:
|
149 |
-
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
150 |
-
else:
|
151 |
-
for flow in reversed(self.flows):
|
152 |
-
x = flow(x, x_mask, g=g, reverse=reverse)
|
153 |
-
return x
|
154 |
-
|
155 |
-
def remove_weight_norm(self):
|
156 |
-
for i in range(self.n_flows):
|
157 |
-
self.flows[i * 2].remove_weight_norm()
|
158 |
-
|
159 |
-
|
160 |
-
class PosteriorEncoder(nn.Module):
|
161 |
-
def __init__(
|
162 |
-
self,
|
163 |
-
in_channels,
|
164 |
-
out_channels,
|
165 |
-
hidden_channels,
|
166 |
-
kernel_size,
|
167 |
-
dilation_rate,
|
168 |
-
n_layers,
|
169 |
-
gin_channels=0,
|
170 |
-
):
|
171 |
-
super().__init__()
|
172 |
-
self.in_channels = in_channels
|
173 |
-
self.out_channels = out_channels
|
174 |
-
self.hidden_channels = hidden_channels
|
175 |
-
self.kernel_size = kernel_size
|
176 |
-
self.dilation_rate = dilation_rate
|
177 |
-
self.n_layers = n_layers
|
178 |
-
self.gin_channels = gin_channels
|
179 |
-
|
180 |
-
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
181 |
-
self.enc = modules.WN(
|
182 |
-
hidden_channels,
|
183 |
-
kernel_size,
|
184 |
-
dilation_rate,
|
185 |
-
n_layers,
|
186 |
-
gin_channels=gin_channels,
|
187 |
-
)
|
188 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
189 |
-
|
190 |
-
def forward(self, x, x_lengths, g=None):
|
191 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
|
192 |
-
x.dtype
|
193 |
-
)
|
194 |
-
x = self.pre(x) * x_mask
|
195 |
-
x = self.enc(x, x_mask, g=g)
|
196 |
-
stats = self.proj(x) * x_mask
|
197 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
198 |
-
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
199 |
-
return z, m, logs, x_mask
|
200 |
-
|
201 |
-
def remove_weight_norm(self):
|
202 |
-
self.enc.remove_weight_norm()
|
203 |
-
|
204 |
-
|
205 |
-
class Generator(torch.nn.Module):
|
206 |
-
def __init__(
|
207 |
-
self,
|
208 |
-
initial_channel,
|
209 |
-
resblock,
|
210 |
-
resblock_kernel_sizes,
|
211 |
-
resblock_dilation_sizes,
|
212 |
-
upsample_rates,
|
213 |
-
upsample_initial_channel,
|
214 |
-
upsample_kernel_sizes,
|
215 |
-
gin_channels=0,
|
216 |
-
):
|
217 |
-
super(Generator, self).__init__()
|
218 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
219 |
-
self.num_upsamples = len(upsample_rates)
|
220 |
-
self.conv_pre = Conv1d(
|
221 |
-
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
222 |
-
)
|
223 |
-
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
|
224 |
-
|
225 |
-
self.ups = nn.ModuleList()
|
226 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
227 |
-
self.ups.append(
|
228 |
-
weight_norm(
|
229 |
-
ConvTranspose1d(
|
230 |
-
upsample_initial_channel // (2**i),
|
231 |
-
upsample_initial_channel // (2 ** (i + 1)),
|
232 |
-
k,
|
233 |
-
u,
|
234 |
-
padding=(k - u) // 2,
|
235 |
-
)
|
236 |
-
)
|
237 |
-
)
|
238 |
-
|
239 |
-
self.resblocks = nn.ModuleList()
|
240 |
-
for i in range(len(self.ups)):
|
241 |
-
ch = upsample_initial_channel // (2 ** (i + 1))
|
242 |
-
for j, (k, d) in enumerate(
|
243 |
-
zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
244 |
-
):
|
245 |
-
self.resblocks.append(resblock(ch, k, d))
|
246 |
-
|
247 |
-
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
248 |
-
self.ups.apply(init_weights)
|
249 |
-
|
250 |
-
if gin_channels != 0:
|
251 |
-
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
252 |
-
|
253 |
-
def forward(self, x, g=None):
|
254 |
-
x = self.conv_pre(x)
|
255 |
-
if g is not None:
|
256 |
-
x = x + self.cond(g)
|
257 |
-
|
258 |
-
for i in range(self.num_upsamples):
|
259 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
260 |
-
x = self.ups[i](x)
|
261 |
-
xs = None
|
262 |
-
for j in range(self.num_kernels):
|
263 |
-
if xs is None:
|
264 |
-
xs = self.resblocks[i * self.num_kernels + j](x)
|
265 |
-
else:
|
266 |
-
xs += self.resblocks[i * self.num_kernels + j](x)
|
267 |
-
x = xs / self.num_kernels
|
268 |
-
x = F.leaky_relu(x)
|
269 |
-
x = self.conv_post(x)
|
270 |
-
x = torch.tanh(x)
|
271 |
-
|
272 |
-
return x
|
273 |
-
|
274 |
-
def remove_weight_norm(self):
|
275 |
-
for l in self.ups:
|
276 |
-
remove_weight_norm(l)
|
277 |
-
for l in self.resblocks:
|
278 |
-
l.remove_weight_norm()
|
279 |
-
|
280 |
-
|
281 |
-
class SineGen(torch.nn.Module):
|
282 |
-
"""Definition of sine generator
|
283 |
-
SineGen(samp_rate, harmonic_num = 0,
|
284 |
-
sine_amp = 0.1, noise_std = 0.003,
|
285 |
-
voiced_threshold = 0,
|
286 |
-
flag_for_pulse=False)
|
287 |
-
samp_rate: sampling rate in Hz
|
288 |
-
harmonic_num: number of harmonic overtones (default 0)
|
289 |
-
sine_amp: amplitude of sine-wavefrom (default 0.1)
|
290 |
-
noise_std: std of Gaussian noise (default 0.003)
|
291 |
-
voiced_thoreshold: F0 threshold for U/V classification (default 0)
|
292 |
-
flag_for_pulse: this SinGen is used inside PulseGen (default False)
|
293 |
-
Note: when flag_for_pulse is True, the first time step of a voiced
|
294 |
-
segment is always sin(np.pi) or cos(0)
|
295 |
-
"""
|
296 |
-
|
297 |
-
def __init__(
|
298 |
-
self,
|
299 |
-
samp_rate,
|
300 |
-
harmonic_num=0,
|
301 |
-
sine_amp=0.1,
|
302 |
-
noise_std=0.003,
|
303 |
-
voiced_threshold=0,
|
304 |
-
flag_for_pulse=False,
|
305 |
-
):
|
306 |
-
super(SineGen, self).__init__()
|
307 |
-
self.sine_amp = sine_amp
|
308 |
-
self.noise_std = noise_std
|
309 |
-
self.harmonic_num = harmonic_num
|
310 |
-
self.dim = self.harmonic_num + 1
|
311 |
-
self.sampling_rate = samp_rate
|
312 |
-
self.voiced_threshold = voiced_threshold
|
313 |
-
|
314 |
-
def _f02uv(self, f0):
|
315 |
-
# generate uv signal
|
316 |
-
uv = torch.ones_like(f0)
|
317 |
-
uv = uv * (f0 > self.voiced_threshold)
|
318 |
-
return uv.float()
|
319 |
-
|
320 |
-
def forward(self, f0, upp):
|
321 |
-
"""sine_tensor, uv = forward(f0)
|
322 |
-
input F0: tensor(batchsize=1, length, dim=1)
|
323 |
-
f0 for unvoiced steps should be 0
|
324 |
-
output sine_tensor: tensor(batchsize=1, length, dim)
|
325 |
-
output uv: tensor(batchsize=1, length, 1)
|
326 |
-
"""
|
327 |
-
with torch.no_grad():
|
328 |
-
f0 = f0[:, None].transpose(1, 2)
|
329 |
-
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
|
330 |
-
# fundamental component
|
331 |
-
f0_buf[:, :, 0] = f0[:, :, 0]
|
332 |
-
for idx in np.arange(self.harmonic_num):
|
333 |
-
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
|
334 |
-
idx + 2
|
335 |
-
) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
|
336 |
-
rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
|
337 |
-
rand_ini = torch.rand(
|
338 |
-
f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
|
339 |
-
)
|
340 |
-
rand_ini[:, 0] = 0
|
341 |
-
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
342 |
-
tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
|
343 |
-
tmp_over_one *= upp
|
344 |
-
tmp_over_one = F.interpolate(
|
345 |
-
tmp_over_one.transpose(2, 1),
|
346 |
-
scale_factor=upp,
|
347 |
-
mode="linear",
|
348 |
-
align_corners=True,
|
349 |
-
).transpose(2, 1)
|
350 |
-
rad_values = F.interpolate(
|
351 |
-
rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
|
352 |
-
).transpose(
|
353 |
-
2, 1
|
354 |
-
) #######
|
355 |
-
tmp_over_one %= 1
|
356 |
-
tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
|
357 |
-
cumsum_shift = torch.zeros_like(rad_values)
|
358 |
-
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
359 |
-
sine_waves = torch.sin(
|
360 |
-
torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
|
361 |
-
)
|
362 |
-
sine_waves = sine_waves * self.sine_amp
|
363 |
-
uv = self._f02uv(f0)
|
364 |
-
uv = F.interpolate(
|
365 |
-
uv.transpose(2, 1), scale_factor=upp, mode="nearest"
|
366 |
-
).transpose(2, 1)
|
367 |
-
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
|
368 |
-
noise = noise_amp * torch.randn_like(sine_waves)
|
369 |
-
sine_waves = sine_waves * uv + noise
|
370 |
-
return sine_waves, uv, noise
|
371 |
-
|
372 |
-
|
373 |
-
class SourceModuleHnNSF(torch.nn.Module):
|
374 |
-
"""SourceModule for hn-nsf
|
375 |
-
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
|
376 |
-
add_noise_std=0.003, voiced_threshod=0)
|
377 |
-
sampling_rate: sampling_rate in Hz
|
378 |
-
harmonic_num: number of harmonic above F0 (default: 0)
|
379 |
-
sine_amp: amplitude of sine source signal (default: 0.1)
|
380 |
-
add_noise_std: std of additive Gaussian noise (default: 0.003)
|
381 |
-
note that amplitude of noise in unvoiced is decided
|
382 |
-
by sine_amp
|
383 |
-
voiced_threshold: threhold to set U/V given F0 (default: 0)
|
384 |
-
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
|
385 |
-
F0_sampled (batchsize, length, 1)
|
386 |
-
Sine_source (batchsize, length, 1)
|
387 |
-
noise_source (batchsize, length 1)
|
388 |
-
uv (batchsize, length, 1)
|
389 |
-
"""
|
390 |
-
|
391 |
-
def __init__(
|
392 |
-
self,
|
393 |
-
sampling_rate,
|
394 |
-
harmonic_num=0,
|
395 |
-
sine_amp=0.1,
|
396 |
-
add_noise_std=0.003,
|
397 |
-
voiced_threshod=0,
|
398 |
-
is_half=True,
|
399 |
-
):
|
400 |
-
super(SourceModuleHnNSF, self).__init__()
|
401 |
-
|
402 |
-
self.sine_amp = sine_amp
|
403 |
-
self.noise_std = add_noise_std
|
404 |
-
self.is_half = is_half
|
405 |
-
# to produce sine waveforms
|
406 |
-
self.l_sin_gen = SineGen(
|
407 |
-
sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
|
408 |
-
)
|
409 |
-
|
410 |
-
# to merge source harmonics into a single excitation
|
411 |
-
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
412 |
-
self.l_tanh = torch.nn.Tanh()
|
413 |
-
|
414 |
-
def forward(self, x, upp=None):
|
415 |
-
sine_wavs, uv, _ = self.l_sin_gen(x, upp)
|
416 |
-
if self.is_half:
|
417 |
-
sine_wavs = sine_wavs.half()
|
418 |
-
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
|
419 |
-
return sine_merge, None, None # noise, uv
|
420 |
-
|
421 |
-
|
422 |
-
class GeneratorNSF(torch.nn.Module):
|
423 |
-
def __init__(
|
424 |
-
self,
|
425 |
-
initial_channel,
|
426 |
-
resblock,
|
427 |
-
resblock_kernel_sizes,
|
428 |
-
resblock_dilation_sizes,
|
429 |
-
upsample_rates,
|
430 |
-
upsample_initial_channel,
|
431 |
-
upsample_kernel_sizes,
|
432 |
-
gin_channels,
|
433 |
-
sr,
|
434 |
-
is_half=False,
|
435 |
-
):
|
436 |
-
super(GeneratorNSF, self).__init__()
|
437 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
438 |
-
self.num_upsamples = len(upsample_rates)
|
439 |
-
|
440 |
-
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
|
441 |
-
self.m_source = SourceModuleHnNSF(
|
442 |
-
sampling_rate=sr, harmonic_num=0, is_half=is_half
|
443 |
-
)
|
444 |
-
self.noise_convs = nn.ModuleList()
|
445 |
-
self.conv_pre = Conv1d(
|
446 |
-
initial_channel, upsample_initial_channel, 7, 1, padding=3
|
447 |
-
)
|
448 |
-
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
|
449 |
-
|
450 |
-
self.ups = nn.ModuleList()
|
451 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
452 |
-
c_cur = upsample_initial_channel // (2 ** (i + 1))
|
453 |
-
self.ups.append(
|
454 |
-
weight_norm(
|
455 |
-
ConvTranspose1d(
|
456 |
-
upsample_initial_channel // (2**i),
|
457 |
-
upsample_initial_channel // (2 ** (i + 1)),
|
458 |
-
k,
|
459 |
-
u,
|
460 |
-
padding=(k - u) // 2,
|
461 |
-
)
|
462 |
-
)
|
463 |
-
)
|
464 |
-
if i + 1 < len(upsample_rates):
|
465 |
-
stride_f0 = np.prod(upsample_rates[i + 1 :])
|
466 |
-
self.noise_convs.append(
|
467 |
-
Conv1d(
|
468 |
-
1,
|
469 |
-
c_cur,
|
470 |
-
kernel_size=stride_f0 * 2,
|
471 |
-
stride=stride_f0,
|
472 |
-
padding=stride_f0 // 2,
|
473 |
-
)
|
474 |
-
)
|
475 |
-
else:
|
476 |
-
self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
|
477 |
-
|
478 |
-
self.resblocks = nn.ModuleList()
|
479 |
-
for i in range(len(self.ups)):
|
480 |
-
ch = upsample_initial_channel // (2 ** (i + 1))
|
481 |
-
for j, (k, d) in enumerate(
|
482 |
-
zip(resblock_kernel_sizes, resblock_dilation_sizes)
|
483 |
-
):
|
484 |
-
self.resblocks.append(resblock(ch, k, d))
|
485 |
-
|
486 |
-
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
487 |
-
self.ups.apply(init_weights)
|
488 |
-
|
489 |
-
if gin_channels != 0:
|
490 |
-
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
491 |
-
|
492 |
-
self.upp = np.prod(upsample_rates)
|
493 |
-
|
494 |
-
def forward(self, x, f0, g=None):
|
495 |
-
har_source, noi_source, uv = self.m_source(f0, self.upp)
|
496 |
-
har_source = har_source.transpose(1, 2)
|
497 |
-
x = self.conv_pre(x)
|
498 |
-
if g is not None:
|
499 |
-
x = x + self.cond(g)
|
500 |
-
|
501 |
-
for i in range(self.num_upsamples):
|
502 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
503 |
-
x = self.ups[i](x)
|
504 |
-
x_source = self.noise_convs[i](har_source)
|
505 |
-
x = x + x_source
|
506 |
-
xs = None
|
507 |
-
for j in range(self.num_kernels):
|
508 |
-
if xs is None:
|
509 |
-
xs = self.resblocks[i * self.num_kernels + j](x)
|
510 |
-
else:
|
511 |
-
xs += self.resblocks[i * self.num_kernels + j](x)
|
512 |
-
x = xs / self.num_kernels
|
513 |
-
x = F.leaky_relu(x)
|
514 |
-
x = self.conv_post(x)
|
515 |
-
x = torch.tanh(x)
|
516 |
-
return x
|
517 |
-
|
518 |
-
def remove_weight_norm(self):
|
519 |
-
for l in self.ups:
|
520 |
-
remove_weight_norm(l)
|
521 |
-
for l in self.resblocks:
|
522 |
-
l.remove_weight_norm()
|
523 |
-
|
524 |
-
|
525 |
-
sr2sr = {
|
526 |
-
"32k": 32000,
|
527 |
-
"40k": 40000,
|
528 |
-
"48k": 48000,
|
529 |
-
}
|
530 |
-
|
531 |
-
|
532 |
-
class SynthesizerTrnMs256NSFsid(nn.Module):
|
533 |
-
def __init__(
|
534 |
-
self,
|
535 |
-
spec_channels,
|
536 |
-
segment_size,
|
537 |
-
inter_channels,
|
538 |
-
hidden_channels,
|
539 |
-
filter_channels,
|
540 |
-
n_heads,
|
541 |
-
n_layers,
|
542 |
-
kernel_size,
|
543 |
-
p_dropout,
|
544 |
-
resblock,
|
545 |
-
resblock_kernel_sizes,
|
546 |
-
resblock_dilation_sizes,
|
547 |
-
upsample_rates,
|
548 |
-
upsample_initial_channel,
|
549 |
-
upsample_kernel_sizes,
|
550 |
-
spk_embed_dim,
|
551 |
-
gin_channels,
|
552 |
-
sr,
|
553 |
-
**kwargs
|
554 |
-
):
|
555 |
-
super().__init__()
|
556 |
-
if type(sr) == type("strr"):
|
557 |
-
sr = sr2sr[sr]
|
558 |
-
self.spec_channels = spec_channels
|
559 |
-
self.inter_channels = inter_channels
|
560 |
-
self.hidden_channels = hidden_channels
|
561 |
-
self.filter_channels = filter_channels
|
562 |
-
self.n_heads = n_heads
|
563 |
-
self.n_layers = n_layers
|
564 |
-
self.kernel_size = kernel_size
|
565 |
-
self.p_dropout = p_dropout
|
566 |
-
self.resblock = resblock
|
567 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
568 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
569 |
-
self.upsample_rates = upsample_rates
|
570 |
-
self.upsample_initial_channel = upsample_initial_channel
|
571 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
572 |
-
self.segment_size = segment_size
|
573 |
-
self.gin_channels = gin_channels
|
574 |
-
# self.hop_length = hop_length#
|
575 |
-
self.spk_embed_dim = spk_embed_dim
|
576 |
-
self.enc_p = TextEncoder256(
|
577 |
-
inter_channels,
|
578 |
-
hidden_channels,
|
579 |
-
filter_channels,
|
580 |
-
n_heads,
|
581 |
-
n_layers,
|
582 |
-
kernel_size,
|
583 |
-
p_dropout,
|
584 |
-
)
|
585 |
-
self.dec = GeneratorNSF(
|
586 |
-
inter_channels,
|
587 |
-
resblock,
|
588 |
-
resblock_kernel_sizes,
|
589 |
-
resblock_dilation_sizes,
|
590 |
-
upsample_rates,
|
591 |
-
upsample_initial_channel,
|
592 |
-
upsample_kernel_sizes,
|
593 |
-
gin_channels=gin_channels,
|
594 |
-
sr=sr,
|
595 |
-
is_half=kwargs["is_half"],
|
596 |
-
)
|
597 |
-
self.enc_q = PosteriorEncoder(
|
598 |
-
spec_channels,
|
599 |
-
inter_channels,
|
600 |
-
hidden_channels,
|
601 |
-
5,
|
602 |
-
1,
|
603 |
-
16,
|
604 |
-
gin_channels=gin_channels,
|
605 |
-
)
|
606 |
-
self.flow = ResidualCouplingBlock(
|
607 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
608 |
-
)
|
609 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
610 |
-
print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
|
611 |
-
|
612 |
-
def remove_weight_norm(self):
|
613 |
-
self.dec.remove_weight_norm()
|
614 |
-
self.flow.remove_weight_norm()
|
615 |
-
self.enc_q.remove_weight_norm()
|
616 |
-
|
617 |
-
def forward(
|
618 |
-
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
|
619 |
-
): # 这里ds是id,[bs,1]
|
620 |
-
# print(1,pitch.shape)#[bs,t]
|
621 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
622 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
623 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
624 |
-
z_p = self.flow(z, y_mask, g=g)
|
625 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
626 |
-
z, y_lengths, self.segment_size
|
627 |
-
)
|
628 |
-
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
|
629 |
-
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
|
630 |
-
# print(-2,pitchf.shape,z_slice.shape)
|
631 |
-
o = self.dec(z_slice, pitchf, g=g)
|
632 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
633 |
-
|
634 |
-
def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
|
635 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
636 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
637 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
638 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
639 |
-
o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
|
640 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
641 |
-
|
642 |
-
|
643 |
-
class SynthesizerTrnMs768NSFsid(nn.Module):
|
644 |
-
def __init__(
|
645 |
-
self,
|
646 |
-
spec_channels,
|
647 |
-
segment_size,
|
648 |
-
inter_channels,
|
649 |
-
hidden_channels,
|
650 |
-
filter_channels,
|
651 |
-
n_heads,
|
652 |
-
n_layers,
|
653 |
-
kernel_size,
|
654 |
-
p_dropout,
|
655 |
-
resblock,
|
656 |
-
resblock_kernel_sizes,
|
657 |
-
resblock_dilation_sizes,
|
658 |
-
upsample_rates,
|
659 |
-
upsample_initial_channel,
|
660 |
-
upsample_kernel_sizes,
|
661 |
-
spk_embed_dim,
|
662 |
-
gin_channels,
|
663 |
-
sr,
|
664 |
-
**kwargs
|
665 |
-
):
|
666 |
-
super().__init__()
|
667 |
-
if type(sr) == type("strr"):
|
668 |
-
sr = sr2sr[sr]
|
669 |
-
self.spec_channels = spec_channels
|
670 |
-
self.inter_channels = inter_channels
|
671 |
-
self.hidden_channels = hidden_channels
|
672 |
-
self.filter_channels = filter_channels
|
673 |
-
self.n_heads = n_heads
|
674 |
-
self.n_layers = n_layers
|
675 |
-
self.kernel_size = kernel_size
|
676 |
-
self.p_dropout = p_dropout
|
677 |
-
self.resblock = resblock
|
678 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
679 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
680 |
-
self.upsample_rates = upsample_rates
|
681 |
-
self.upsample_initial_channel = upsample_initial_channel
|
682 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
683 |
-
self.segment_size = segment_size
|
684 |
-
self.gin_channels = gin_channels
|
685 |
-
# self.hop_length = hop_length#
|
686 |
-
self.spk_embed_dim = spk_embed_dim
|
687 |
-
self.enc_p = TextEncoder768(
|
688 |
-
inter_channels,
|
689 |
-
hidden_channels,
|
690 |
-
filter_channels,
|
691 |
-
n_heads,
|
692 |
-
n_layers,
|
693 |
-
kernel_size,
|
694 |
-
p_dropout,
|
695 |
-
)
|
696 |
-
self.dec = GeneratorNSF(
|
697 |
-
inter_channels,
|
698 |
-
resblock,
|
699 |
-
resblock_kernel_sizes,
|
700 |
-
resblock_dilation_sizes,
|
701 |
-
upsample_rates,
|
702 |
-
upsample_initial_channel,
|
703 |
-
upsample_kernel_sizes,
|
704 |
-
gin_channels=gin_channels,
|
705 |
-
sr=sr,
|
706 |
-
is_half=kwargs["is_half"],
|
707 |
-
)
|
708 |
-
self.enc_q = PosteriorEncoder(
|
709 |
-
spec_channels,
|
710 |
-
inter_channels,
|
711 |
-
hidden_channels,
|
712 |
-
5,
|
713 |
-
1,
|
714 |
-
16,
|
715 |
-
gin_channels=gin_channels,
|
716 |
-
)
|
717 |
-
self.flow = ResidualCouplingBlock(
|
718 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
719 |
-
)
|
720 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
721 |
-
print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
|
722 |
-
|
723 |
-
def remove_weight_norm(self):
|
724 |
-
self.dec.remove_weight_norm()
|
725 |
-
self.flow.remove_weight_norm()
|
726 |
-
self.enc_q.remove_weight_norm()
|
727 |
-
|
728 |
-
def forward(
|
729 |
-
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
|
730 |
-
): # 这里ds是id,[bs,1]
|
731 |
-
# print(1,pitch.shape)#[bs,t]
|
732 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
733 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
734 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
735 |
-
z_p = self.flow(z, y_mask, g=g)
|
736 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
737 |
-
z, y_lengths, self.segment_size
|
738 |
-
)
|
739 |
-
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
|
740 |
-
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
|
741 |
-
# print(-2,pitchf.shape,z_slice.shape)
|
742 |
-
o = self.dec(z_slice, pitchf, g=g)
|
743 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
744 |
-
|
745 |
-
def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
|
746 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
747 |
-
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
|
748 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
749 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
750 |
-
o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
|
751 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
752 |
-
|
753 |
-
|
754 |
-
class SynthesizerTrnMs256NSFsid_nono(nn.Module):
|
755 |
-
def __init__(
|
756 |
-
self,
|
757 |
-
spec_channels,
|
758 |
-
segment_size,
|
759 |
-
inter_channels,
|
760 |
-
hidden_channels,
|
761 |
-
filter_channels,
|
762 |
-
n_heads,
|
763 |
-
n_layers,
|
764 |
-
kernel_size,
|
765 |
-
p_dropout,
|
766 |
-
resblock,
|
767 |
-
resblock_kernel_sizes,
|
768 |
-
resblock_dilation_sizes,
|
769 |
-
upsample_rates,
|
770 |
-
upsample_initial_channel,
|
771 |
-
upsample_kernel_sizes,
|
772 |
-
spk_embed_dim,
|
773 |
-
gin_channels,
|
774 |
-
sr=None,
|
775 |
-
**kwargs
|
776 |
-
):
|
777 |
-
super().__init__()
|
778 |
-
self.spec_channels = spec_channels
|
779 |
-
self.inter_channels = inter_channels
|
780 |
-
self.hidden_channels = hidden_channels
|
781 |
-
self.filter_channels = filter_channels
|
782 |
-
self.n_heads = n_heads
|
783 |
-
self.n_layers = n_layers
|
784 |
-
self.kernel_size = kernel_size
|
785 |
-
self.p_dropout = p_dropout
|
786 |
-
self.resblock = resblock
|
787 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
788 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
789 |
-
self.upsample_rates = upsample_rates
|
790 |
-
self.upsample_initial_channel = upsample_initial_channel
|
791 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
792 |
-
self.segment_size = segment_size
|
793 |
-
self.gin_channels = gin_channels
|
794 |
-
# self.hop_length = hop_length#
|
795 |
-
self.spk_embed_dim = spk_embed_dim
|
796 |
-
self.enc_p = TextEncoder256(
|
797 |
-
inter_channels,
|
798 |
-
hidden_channels,
|
799 |
-
filter_channels,
|
800 |
-
n_heads,
|
801 |
-
n_layers,
|
802 |
-
kernel_size,
|
803 |
-
p_dropout,
|
804 |
-
f0=False,
|
805 |
-
)
|
806 |
-
self.dec = Generator(
|
807 |
-
inter_channels,
|
808 |
-
resblock,
|
809 |
-
resblock_kernel_sizes,
|
810 |
-
resblock_dilation_sizes,
|
811 |
-
upsample_rates,
|
812 |
-
upsample_initial_channel,
|
813 |
-
upsample_kernel_sizes,
|
814 |
-
gin_channels=gin_channels,
|
815 |
-
)
|
816 |
-
self.enc_q = PosteriorEncoder(
|
817 |
-
spec_channels,
|
818 |
-
inter_channels,
|
819 |
-
hidden_channels,
|
820 |
-
5,
|
821 |
-
1,
|
822 |
-
16,
|
823 |
-
gin_channels=gin_channels,
|
824 |
-
)
|
825 |
-
self.flow = ResidualCouplingBlock(
|
826 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
827 |
-
)
|
828 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
829 |
-
print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
|
830 |
-
|
831 |
-
def remove_weight_norm(self):
|
832 |
-
self.dec.remove_weight_norm()
|
833 |
-
self.flow.remove_weight_norm()
|
834 |
-
self.enc_q.remove_weight_norm()
|
835 |
-
|
836 |
-
def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
|
837 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
838 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
839 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
840 |
-
z_p = self.flow(z, y_mask, g=g)
|
841 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
842 |
-
z, y_lengths, self.segment_size
|
843 |
-
)
|
844 |
-
o = self.dec(z_slice, g=g)
|
845 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
846 |
-
|
847 |
-
def infer(self, phone, phone_lengths, sid, max_len=None):
|
848 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
849 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
850 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
851 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
852 |
-
o = self.dec((z * x_mask)[:, :, :max_len], g=g)
|
853 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
854 |
-
|
855 |
-
|
856 |
-
class SynthesizerTrnMs768NSFsid_nono(nn.Module):
|
857 |
-
def __init__(
|
858 |
-
self,
|
859 |
-
spec_channels,
|
860 |
-
segment_size,
|
861 |
-
inter_channels,
|
862 |
-
hidden_channels,
|
863 |
-
filter_channels,
|
864 |
-
n_heads,
|
865 |
-
n_layers,
|
866 |
-
kernel_size,
|
867 |
-
p_dropout,
|
868 |
-
resblock,
|
869 |
-
resblock_kernel_sizes,
|
870 |
-
resblock_dilation_sizes,
|
871 |
-
upsample_rates,
|
872 |
-
upsample_initial_channel,
|
873 |
-
upsample_kernel_sizes,
|
874 |
-
spk_embed_dim,
|
875 |
-
gin_channels,
|
876 |
-
sr=None,
|
877 |
-
**kwargs
|
878 |
-
):
|
879 |
-
super().__init__()
|
880 |
-
self.spec_channels = spec_channels
|
881 |
-
self.inter_channels = inter_channels
|
882 |
-
self.hidden_channels = hidden_channels
|
883 |
-
self.filter_channels = filter_channels
|
884 |
-
self.n_heads = n_heads
|
885 |
-
self.n_layers = n_layers
|
886 |
-
self.kernel_size = kernel_size
|
887 |
-
self.p_dropout = p_dropout
|
888 |
-
self.resblock = resblock
|
889 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
890 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
891 |
-
self.upsample_rates = upsample_rates
|
892 |
-
self.upsample_initial_channel = upsample_initial_channel
|
893 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
894 |
-
self.segment_size = segment_size
|
895 |
-
self.gin_channels = gin_channels
|
896 |
-
# self.hop_length = hop_length#
|
897 |
-
self.spk_embed_dim = spk_embed_dim
|
898 |
-
self.enc_p = TextEncoder768(
|
899 |
-
inter_channels,
|
900 |
-
hidden_channels,
|
901 |
-
filter_channels,
|
902 |
-
n_heads,
|
903 |
-
n_layers,
|
904 |
-
kernel_size,
|
905 |
-
p_dropout,
|
906 |
-
f0=False,
|
907 |
-
)
|
908 |
-
self.dec = Generator(
|
909 |
-
inter_channels,
|
910 |
-
resblock,
|
911 |
-
resblock_kernel_sizes,
|
912 |
-
resblock_dilation_sizes,
|
913 |
-
upsample_rates,
|
914 |
-
upsample_initial_channel,
|
915 |
-
upsample_kernel_sizes,
|
916 |
-
gin_channels=gin_channels,
|
917 |
-
)
|
918 |
-
self.enc_q = PosteriorEncoder(
|
919 |
-
spec_channels,
|
920 |
-
inter_channels,
|
921 |
-
hidden_channels,
|
922 |
-
5,
|
923 |
-
1,
|
924 |
-
16,
|
925 |
-
gin_channels=gin_channels,
|
926 |
-
)
|
927 |
-
self.flow = ResidualCouplingBlock(
|
928 |
-
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
|
929 |
-
)
|
930 |
-
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
|
931 |
-
print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
|
932 |
-
|
933 |
-
def remove_weight_norm(self):
|
934 |
-
self.dec.remove_weight_norm()
|
935 |
-
self.flow.remove_weight_norm()
|
936 |
-
self.enc_q.remove_weight_norm()
|
937 |
-
|
938 |
-
def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
|
939 |
-
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
|
940 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
941 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
|
942 |
-
z_p = self.flow(z, y_mask, g=g)
|
943 |
-
z_slice, ids_slice = commons.rand_slice_segments(
|
944 |
-
z, y_lengths, self.segment_size
|
945 |
-
)
|
946 |
-
o = self.dec(z_slice, g=g)
|
947 |
-
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
|
948 |
-
|
949 |
-
def infer(self, phone, phone_lengths, sid, max_len=None):
|
950 |
-
g = self.emb_g(sid).unsqueeze(-1)
|
951 |
-
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
|
952 |
-
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
|
953 |
-
z = self.flow(z_p, x_mask, g=g, reverse=True)
|
954 |
-
o = self.dec((z * x_mask)[:, :, :max_len], g=g)
|
955 |
-
return o, x_mask, (z, z_p, m_p, logs_p)
|
956 |
-
|
957 |
-
|
958 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
|
959 |
-
def __init__(self, use_spectral_norm=False):
|
960 |
-
super(MultiPeriodDiscriminator, self).__init__()
|
961 |
-
periods = [2, 3, 5, 7, 11, 17]
|
962 |
-
# periods = [3, 5, 7, 11, 17, 23, 37]
|
963 |
-
|
964 |
-
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
965 |
-
discs = discs + [
|
966 |
-
DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
|
967 |
-
]
|
968 |
-
self.discriminators = nn.ModuleList(discs)
|
969 |
-
|
970 |
-
def forward(self, y, y_hat):
|
971 |
-
y_d_rs = [] #
|
972 |
-
y_d_gs = []
|
973 |
-
fmap_rs = []
|
974 |
-
fmap_gs = []
|
975 |
-
for i, d in enumerate(self.discriminators):
|
976 |
-
y_d_r, fmap_r = d(y)
|
977 |
-
y_d_g, fmap_g = d(y_hat)
|
978 |
-
# for j in range(len(fmap_r)):
|
979 |
-
# print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
|
980 |
-
y_d_rs.append(y_d_r)
|
981 |
-
y_d_gs.append(y_d_g)
|
982 |
-
fmap_rs.append(fmap_r)
|
983 |
-
fmap_gs.append(fmap_g)
|
984 |
-
|
985 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
986 |
-
|
987 |
-
|
988 |
-
class MultiPeriodDiscriminatorV2(torch.nn.Module):
|
989 |
-
def __init__(self, use_spectral_norm=False):
|
990 |
-
super(MultiPeriodDiscriminatorV2, self).__init__()
|
991 |
-
# periods = [2, 3, 5, 7, 11, 17]
|
992 |
-
periods = [2, 3, 5, 7, 11, 17, 23, 37]
|
993 |
-
|
994 |
-
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
995 |
-
discs = discs + [
|
996 |
-
DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
|
997 |
-
]
|
998 |
-
self.discriminators = nn.ModuleList(discs)
|
999 |
-
|
1000 |
-
def forward(self, y, y_hat):
|
1001 |
-
y_d_rs = [] #
|
1002 |
-
y_d_gs = []
|
1003 |
-
fmap_rs = []
|
1004 |
-
fmap_gs = []
|
1005 |
-
for i, d in enumerate(self.discriminators):
|
1006 |
-
y_d_r, fmap_r = d(y)
|
1007 |
-
y_d_g, fmap_g = d(y_hat)
|
1008 |
-
# for j in range(len(fmap_r)):
|
1009 |
-
# print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
|
1010 |
-
y_d_rs.append(y_d_r)
|
1011 |
-
y_d_gs.append(y_d_g)
|
1012 |
-
fmap_rs.append(fmap_r)
|
1013 |
-
fmap_gs.append(fmap_g)
|
1014 |
-
|
1015 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
1016 |
-
|
1017 |
-
|
1018 |
-
class DiscriminatorS(torch.nn.Module):
|
1019 |
-
def __init__(self, use_spectral_norm=False):
|
1020 |
-
super(DiscriminatorS, self).__init__()
|
1021 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
1022 |
-
self.convs = nn.ModuleList(
|
1023 |
-
[
|
1024 |
-
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
|
1025 |
-
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
1026 |
-
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
1027 |
-
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
1028 |
-
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
1029 |
-
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
|
1030 |
-
]
|
1031 |
-
)
|
1032 |
-
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
|
1033 |
-
|
1034 |
-
def forward(self, x):
|
1035 |
-
fmap = []
|
1036 |
-
|
1037 |
-
for l in self.convs:
|
1038 |
-
x = l(x)
|
1039 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
1040 |
-
fmap.append(x)
|
1041 |
-
x = self.conv_post(x)
|
1042 |
-
fmap.append(x)
|
1043 |
-
x = torch.flatten(x, 1, -1)
|
1044 |
-
|
1045 |
-
return x, fmap
|
1046 |
-
|
1047 |
-
|
1048 |
-
class DiscriminatorP(torch.nn.Module):
|
1049 |
-
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
|
1050 |
-
super(DiscriminatorP, self).__init__()
|
1051 |
-
self.period = period
|
1052 |
-
self.use_spectral_norm = use_spectral_norm
|
1053 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
1054 |
-
self.convs = nn.ModuleList(
|
1055 |
-
[
|
1056 |
-
norm_f(
|
1057 |
-
Conv2d(
|
1058 |
-
1,
|
1059 |
-
32,
|
1060 |
-
(kernel_size, 1),
|
1061 |
-
(stride, 1),
|
1062 |
-
padding=(get_padding(kernel_size, 1), 0),
|
1063 |
-
)
|
1064 |
-
),
|
1065 |
-
norm_f(
|
1066 |
-
Conv2d(
|
1067 |
-
32,
|
1068 |
-
128,
|
1069 |
-
(kernel_size, 1),
|
1070 |
-
(stride, 1),
|
1071 |
-
padding=(get_padding(kernel_size, 1), 0),
|
1072 |
-
)
|
1073 |
-
),
|
1074 |
-
norm_f(
|
1075 |
-
Conv2d(
|
1076 |
-
128,
|
1077 |
-
512,
|
1078 |
-
(kernel_size, 1),
|
1079 |
-
(stride, 1),
|
1080 |
-
padding=(get_padding(kernel_size, 1), 0),
|
1081 |
-
)
|
1082 |
-
),
|
1083 |
-
norm_f(
|
1084 |
-
Conv2d(
|
1085 |
-
512,
|
1086 |
-
1024,
|
1087 |
-
(kernel_size, 1),
|
1088 |
-
(stride, 1),
|
1089 |
-
padding=(get_padding(kernel_size, 1), 0),
|
1090 |
-
)
|
1091 |
-
),
|
1092 |
-
norm_f(
|
1093 |
-
Conv2d(
|
1094 |
-
1024,
|
1095 |
-
1024,
|
1096 |
-
(kernel_size, 1),
|
1097 |
-
1,
|
1098 |
-
padding=(get_padding(kernel_size, 1), 0),
|
1099 |
-
)
|
1100 |
-
),
|
1101 |
-
]
|
1102 |
-
)
|
1103 |
-
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
1104 |
-
|
1105 |
-
def forward(self, x):
|
1106 |
-
fmap = []
|
1107 |
-
|
1108 |
-
# 1d to 2d
|
1109 |
-
b, c, t = x.shape
|
1110 |
-
if t % self.period != 0: # pad first
|
1111 |
-
n_pad = self.period - (t % self.period)
|
1112 |
-
x = F.pad(x, (0, n_pad), "reflect")
|
1113 |
-
t = t + n_pad
|
1114 |
-
x = x.view(b, c, t // self.period, self.period)
|
1115 |
-
|
1116 |
-
for l in self.convs:
|
1117 |
-
x = l(x)
|
1118 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
1119 |
-
fmap.append(x)
|
1120 |
-
x = self.conv_post(x)
|
1121 |
-
fmap.append(x)
|
1122 |
-
x = torch.flatten(x, 1, -1)
|
1123 |
-
|
1124 |
-
return x, fmap
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/__init__.py
DELETED
File without changes
|
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/discriminator/multi_window_disc.py
DELETED
@@ -1,196 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
|
5 |
-
|
6 |
-
class Discriminator2DFactory(nn.Module):
|
7 |
-
def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128,
|
8 |
-
norm_type='bn', reduction='sum'):
|
9 |
-
super(Discriminator2DFactory, self).__init__()
|
10 |
-
padding = (kernel[0] // 2, kernel[1] // 2)
|
11 |
-
|
12 |
-
def discriminator_block(in_filters, out_filters, first=False):
|
13 |
-
"""
|
14 |
-
Input: (B, in, 2H, 2W)
|
15 |
-
Output:(B, out, H, W)
|
16 |
-
"""
|
17 |
-
conv = nn.Conv2d(in_filters, out_filters, kernel, (2, 2), padding)
|
18 |
-
if norm_type == 'sn':
|
19 |
-
conv = nn.utils.spectral_norm(conv)
|
20 |
-
block = [
|
21 |
-
conv, # padding = kernel//2
|
22 |
-
nn.LeakyReLU(0.2, inplace=True),
|
23 |
-
nn.Dropout2d(0.25)
|
24 |
-
]
|
25 |
-
if norm_type == 'bn' and not first:
|
26 |
-
block.append(nn.BatchNorm2d(out_filters, 0.8))
|
27 |
-
if norm_type == 'in' and not first:
|
28 |
-
block.append(nn.InstanceNorm2d(out_filters, affine=True))
|
29 |
-
block = nn.Sequential(*block)
|
30 |
-
return block
|
31 |
-
|
32 |
-
self.model = nn.ModuleList([
|
33 |
-
discriminator_block(c_in, hidden_size, first=True),
|
34 |
-
discriminator_block(hidden_size, hidden_size),
|
35 |
-
discriminator_block(hidden_size, hidden_size),
|
36 |
-
])
|
37 |
-
|
38 |
-
self.reduction = reduction
|
39 |
-
ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3)
|
40 |
-
if reduction != 'none':
|
41 |
-
# The height and width of downsampled image
|
42 |
-
self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1)
|
43 |
-
else:
|
44 |
-
self.adv_layer = nn.Linear(hidden_size * ds_size[1], 1)
|
45 |
-
|
46 |
-
def forward(self, x):
|
47 |
-
"""
|
48 |
-
|
49 |
-
:param x: [B, C, T, n_bins]
|
50 |
-
:return: validity: [B, 1], h: List of hiddens
|
51 |
-
"""
|
52 |
-
h = []
|
53 |
-
for l in self.model:
|
54 |
-
x = l(x)
|
55 |
-
h.append(x)
|
56 |
-
if self.reduction != 'none':
|
57 |
-
x = x.view(x.shape[0], -1)
|
58 |
-
validity = self.adv_layer(x) # [B, 1]
|
59 |
-
else:
|
60 |
-
B, _, T_, _ = x.shape
|
61 |
-
x = x.transpose(1, 2).reshape(B, T_, -1)
|
62 |
-
validity = self.adv_layer(x)[:, :, 0] # [B, T]
|
63 |
-
return validity, h
|
64 |
-
|
65 |
-
|
66 |
-
class MultiWindowDiscriminator(nn.Module):
|
67 |
-
def __init__(self, time_lengths, cond_size=0, freq_length=80, kernel=(3, 3),
|
68 |
-
c_in=1, hidden_size=128, norm_type='bn', reduction='sum'):
|
69 |
-
super(MultiWindowDiscriminator, self).__init__()
|
70 |
-
self.win_lengths = time_lengths
|
71 |
-
self.reduction = reduction
|
72 |
-
|
73 |
-
self.conv_layers = nn.ModuleList()
|
74 |
-
if cond_size > 0:
|
75 |
-
self.cond_proj_layers = nn.ModuleList()
|
76 |
-
self.mel_proj_layers = nn.ModuleList()
|
77 |
-
for time_length in time_lengths:
|
78 |
-
conv_layer = [
|
79 |
-
Discriminator2DFactory(
|
80 |
-
time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size,
|
81 |
-
norm_type=norm_type, reduction=reduction)
|
82 |
-
]
|
83 |
-
self.conv_layers += conv_layer
|
84 |
-
if cond_size > 0:
|
85 |
-
self.cond_proj_layers.append(nn.Linear(cond_size, freq_length))
|
86 |
-
self.mel_proj_layers.append(nn.Linear(freq_length, freq_length))
|
87 |
-
|
88 |
-
def forward(self, x, x_len, cond=None, start_frames_wins=None):
|
89 |
-
'''
|
90 |
-
Args:
|
91 |
-
x (tensor): input mel, (B, c_in, T, n_bins).
|
92 |
-
x_length (tensor): len of per mel. (B,).
|
93 |
-
|
94 |
-
Returns:
|
95 |
-
tensor : (B).
|
96 |
-
'''
|
97 |
-
validity = []
|
98 |
-
if start_frames_wins is None:
|
99 |
-
start_frames_wins = [None] * len(self.conv_layers)
|
100 |
-
h = []
|
101 |
-
for i, start_frames in zip(range(len(self.conv_layers)), start_frames_wins):
|
102 |
-
x_clip, c_clip, start_frames = self.clip(
|
103 |
-
x, cond, x_len, self.win_lengths[i], start_frames) # (B, win_length, C)
|
104 |
-
start_frames_wins[i] = start_frames
|
105 |
-
if x_clip is None:
|
106 |
-
continue
|
107 |
-
if cond is not None:
|
108 |
-
x_clip = self.mel_proj_layers[i](x_clip) # (B, 1, win_length, C)
|
109 |
-
c_clip = self.cond_proj_layers[i](c_clip)[:, None] # (B, 1, win_length, C)
|
110 |
-
x_clip = x_clip + c_clip
|
111 |
-
x_clip, h_ = self.conv_layers[i](x_clip)
|
112 |
-
h += h_
|
113 |
-
validity.append(x_clip)
|
114 |
-
if len(validity) != len(self.conv_layers):
|
115 |
-
return None, start_frames_wins, h
|
116 |
-
if self.reduction == 'sum':
|
117 |
-
validity = sum(validity) # [B]
|
118 |
-
elif self.reduction == 'stack':
|
119 |
-
validity = torch.stack(validity, -1) # [B, W_L]
|
120 |
-
elif self.reduction == 'none':
|
121 |
-
validity = torch.cat(validity, -1) # [B, W_sum]
|
122 |
-
return validity, start_frames_wins, h
|
123 |
-
|
124 |
-
def clip(self, x, cond, x_len, win_length, start_frames=None):
|
125 |
-
'''Ramdom clip x to win_length.
|
126 |
-
Args:
|
127 |
-
x (tensor) : (B, c_in, T, n_bins).
|
128 |
-
cond (tensor) : (B, T, H).
|
129 |
-
x_len (tensor) : (B,).
|
130 |
-
win_length (int): target clip length
|
131 |
-
|
132 |
-
Returns:
|
133 |
-
(tensor) : (B, c_in, win_length, n_bins).
|
134 |
-
|
135 |
-
'''
|
136 |
-
T_start = 0
|
137 |
-
T_end = x_len.max() - win_length
|
138 |
-
if T_end < 0:
|
139 |
-
return None, None, start_frames
|
140 |
-
T_end = T_end.item()
|
141 |
-
if start_frames is None:
|
142 |
-
start_frame = np.random.randint(low=T_start, high=T_end + 1)
|
143 |
-
start_frames = [start_frame] * x.size(0)
|
144 |
-
else:
|
145 |
-
start_frame = start_frames[0]
|
146 |
-
x_batch = x[:, :, start_frame: start_frame + win_length]
|
147 |
-
c_batch = cond[:, start_frame: start_frame + win_length] if cond is not None else None
|
148 |
-
return x_batch, c_batch, start_frames
|
149 |
-
|
150 |
-
|
151 |
-
class Discriminator(nn.Module):
|
152 |
-
def __init__(self, time_lengths=[32, 64, 128], freq_length=80, cond_size=0, kernel=(3, 3), c_in=1,
|
153 |
-
hidden_size=128, norm_type='bn', reduction='sum', uncond_disc=True):
|
154 |
-
super(Discriminator, self).__init__()
|
155 |
-
self.time_lengths = time_lengths
|
156 |
-
self.cond_size = cond_size
|
157 |
-
self.reduction = reduction
|
158 |
-
self.uncond_disc = uncond_disc
|
159 |
-
if uncond_disc:
|
160 |
-
self.discriminator = MultiWindowDiscriminator(
|
161 |
-
freq_length=freq_length,
|
162 |
-
time_lengths=time_lengths,
|
163 |
-
kernel=kernel,
|
164 |
-
c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
|
165 |
-
reduction=reduction
|
166 |
-
)
|
167 |
-
if cond_size > 0:
|
168 |
-
self.cond_disc = MultiWindowDiscriminator(
|
169 |
-
freq_length=freq_length,
|
170 |
-
time_lengths=time_lengths,
|
171 |
-
cond_size=cond_size,
|
172 |
-
kernel=kernel,
|
173 |
-
c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
|
174 |
-
reduction=reduction
|
175 |
-
)
|
176 |
-
|
177 |
-
def forward(self, x, cond=None, start_frames_wins=None):
|
178 |
-
"""
|
179 |
-
|
180 |
-
:param x: [B, T, 80]
|
181 |
-
:param cond: [B, T, cond_size]
|
182 |
-
:param return_y_only:
|
183 |
-
:return:
|
184 |
-
"""
|
185 |
-
if len(x.shape) == 3:
|
186 |
-
x = x[:, None, :, :]
|
187 |
-
x_len = x.sum([1, -1]).ne(0).int().sum([-1])
|
188 |
-
ret = {'y_c': None, 'y': None}
|
189 |
-
if self.uncond_disc:
|
190 |
-
ret['y'], start_frames_wins, ret['h'] = self.discriminator(
|
191 |
-
x, x_len, start_frames_wins=start_frames_wins)
|
192 |
-
if self.cond_size > 0 and cond is not None:
|
193 |
-
ret['y_c'], start_frames_wins, ret['h_c'] = self.cond_disc(
|
194 |
-
x, x_len, cond, start_frames_wins=start_frames_wins)
|
195 |
-
ret['start_frames_wins'] = start_frames_wins
|
196 |
-
return ret
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIZ2H/Gradio331-3D-Models-AI-1/app.py
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
import time
|
2 |
-
import gradio as gr
|
3 |
-
import os
|
4 |
-
|
5 |
-
def load_mesh(mesh_file_name):
|
6 |
-
return mesh_file_name, mesh_file_name
|
7 |
-
|
8 |
-
demo = gr.Interface(
|
9 |
-
fn=load_mesh,
|
10 |
-
inputs=gr.Model3D(),
|
11 |
-
outputs=[
|
12 |
-
gr.Model3D(
|
13 |
-
clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"),
|
14 |
-
gr.File(label="Download 3D Model")
|
15 |
-
],
|
16 |
-
examples=[
|
17 |
-
[os.path.join(os.path.dirname(__file__), "files/Duck.glb")],
|
18 |
-
[os.path.join(os.path.dirname(__file__), "files/rubber_duck.glb")],
|
19 |
-
[os.path.join(os.path.dirname(__file__), "files/GroundVehicle.glb")]
|
20 |
-
],
|
21 |
-
)
|
22 |
-
|
23 |
-
if __name__ == "__main__":
|
24 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abdulkader/Abdulkader-T5-MedRepAnalyzer/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Abdulkader T5 MedRepAnalyzer
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.13.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc-by-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/client/css/stop-generating.css
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
.stop-generating {
|
2 |
-
position: absolute;
|
3 |
-
bottom: 128px;
|
4 |
-
left: 50%;
|
5 |
-
transform: translateX(-50%);
|
6 |
-
z-index: 1000000;
|
7 |
-
}
|
8 |
-
|
9 |
-
.stop-generating button {
|
10 |
-
backdrop-filter: blur(20px);
|
11 |
-
-webkit-backdrop-filter: blur(20px);
|
12 |
-
background-color: var(--blur-bg);
|
13 |
-
color: var(--colour-3);
|
14 |
-
cursor: pointer;
|
15 |
-
animation: show_popup 0.4s;
|
16 |
-
}
|
17 |
-
|
18 |
-
@keyframes show_popup {
|
19 |
-
from {
|
20 |
-
opacity: 0;
|
21 |
-
transform: translateY(10px);
|
22 |
-
}
|
23 |
-
}
|
24 |
-
|
25 |
-
@keyframes hide_popup {
|
26 |
-
to {
|
27 |
-
opacity: 0;
|
28 |
-
transform: translateY(10px);
|
29 |
-
}
|
30 |
-
}
|
31 |
-
|
32 |
-
.stop-generating-hiding button {
|
33 |
-
animation: hide_popup 0.4s;
|
34 |
-
}
|
35 |
-
|
36 |
-
.stop-generating-hidden button {
|
37 |
-
display: none;
|
38 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import LZString from './string/lzstring/LZString.js';
|
2 |
-
export default LZString;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/dynamictext/DynamicText.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import DynamicText from '../../../plugins/dynamictext.js';
|
2 |
-
export default DynamicText;
|
|
|
|
|
|
spaces/Ajit025/Text_to_Image_conversion/text_to_image.py
DELETED
@@ -1,51 +0,0 @@
|
|
1 |
-
from transformers.tools.base import Tool, get_default_device
|
2 |
-
from transformers.utils import is_accelerate_available
|
3 |
-
import torch
|
4 |
-
|
5 |
-
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
|
6 |
-
|
7 |
-
|
8 |
-
TEXT_TO_IMAGE_DESCRIPTION = (
|
9 |
-
"This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which "
|
10 |
-
"contains the image description and outputs an image."
|
11 |
-
)
|
12 |
-
|
13 |
-
|
14 |
-
class TextToImageTool(Tool):
|
15 |
-
default_checkpoint = "runwayml/stable-diffusion-v1-5"
|
16 |
-
description = TEXT_TO_IMAGE_DESCRIPTION
|
17 |
-
inputs = ['text']
|
18 |
-
outputs = ['image']
|
19 |
-
|
20 |
-
def __init__(self, device=None, **hub_kwargs) -> None:
|
21 |
-
if not is_accelerate_available():
|
22 |
-
raise ImportError("Accelerate should be installed in order to use tools.")
|
23 |
-
|
24 |
-
super().__init__()
|
25 |
-
|
26 |
-
self.device = device
|
27 |
-
self.pipeline = None
|
28 |
-
self.hub_kwargs = hub_kwargs
|
29 |
-
|
30 |
-
def setup(self):
|
31 |
-
if self.device is None:
|
32 |
-
self.device = get_default_device()
|
33 |
-
|
34 |
-
self.pipeline = DiffusionPipeline.from_pretrained(self.default_checkpoint)
|
35 |
-
self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(self.pipeline.scheduler.config)
|
36 |
-
self.pipeline.to(self.device)
|
37 |
-
|
38 |
-
if self.device.type == "cuda":
|
39 |
-
self.pipeline.to(torch_dtype=torch.float16)
|
40 |
-
|
41 |
-
self.is_initialized = True
|
42 |
-
|
43 |
-
def __call__(self, prompt):
|
44 |
-
if not self.is_initialized:
|
45 |
-
self.setup()
|
46 |
-
|
47 |
-
negative_prompt = "low quality, bad quality, deformed, low resolution"
|
48 |
-
added_prompt = " , highest quality, highly realistic, very high resolution"
|
49 |
-
|
50 |
-
return self.pipeline(prompt + added_prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
|
51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AkitoP/umamusume_bert_vits2/data_utils.py
DELETED
@@ -1,406 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import random
|
3 |
-
import torch
|
4 |
-
import torch.utils.data
|
5 |
-
from tqdm import tqdm
|
6 |
-
from loguru import logger
|
7 |
-
import commons
|
8 |
-
from mel_processing import spectrogram_torch, mel_spectrogram_torch
|
9 |
-
from utils import load_wav_to_torch, load_filepaths_and_text
|
10 |
-
from text import cleaned_text_to_sequence, get_bert
|
11 |
-
|
12 |
-
"""Multi speaker version"""
|
13 |
-
|
14 |
-
|
15 |
-
class TextAudioSpeakerLoader(torch.utils.data.Dataset):
|
16 |
-
"""
|
17 |
-
1) loads audio, speaker_id, text pairs
|
18 |
-
2) normalizes text and converts them to sequences of integers
|
19 |
-
3) computes spectrograms from audio files.
|
20 |
-
"""
|
21 |
-
|
22 |
-
def __init__(self, audiopaths_sid_text, hparams):
|
23 |
-
self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
|
24 |
-
self.max_wav_value = hparams.max_wav_value
|
25 |
-
self.sampling_rate = hparams.sampling_rate
|
26 |
-
self.filter_length = hparams.filter_length
|
27 |
-
self.hop_length = hparams.hop_length
|
28 |
-
self.win_length = hparams.win_length
|
29 |
-
self.sampling_rate = hparams.sampling_rate
|
30 |
-
self.spk_map = hparams.spk2id
|
31 |
-
self.hparams = hparams
|
32 |
-
|
33 |
-
self.use_mel_spec_posterior = getattr(
|
34 |
-
hparams, "use_mel_posterior_encoder", False
|
35 |
-
)
|
36 |
-
if self.use_mel_spec_posterior:
|
37 |
-
self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
|
38 |
-
|
39 |
-
self.cleaned_text = getattr(hparams, "cleaned_text", False)
|
40 |
-
|
41 |
-
self.add_blank = hparams.add_blank
|
42 |
-
self.min_text_len = getattr(hparams, "min_text_len", 1)
|
43 |
-
self.max_text_len = getattr(hparams, "max_text_len", 300)
|
44 |
-
|
45 |
-
random.seed(1234)
|
46 |
-
random.shuffle(self.audiopaths_sid_text)
|
47 |
-
self._filter()
|
48 |
-
|
49 |
-
def _filter(self):
|
50 |
-
"""
|
51 |
-
Filter text & store spec lengths
|
52 |
-
"""
|
53 |
-
# Store spectrogram lengths for Bucketing
|
54 |
-
# wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
|
55 |
-
# spec_length = wav_length // hop_length
|
56 |
-
|
57 |
-
audiopaths_sid_text_new = []
|
58 |
-
lengths = []
|
59 |
-
skipped = 0
|
60 |
-
logger.info("Init dataset...")
|
61 |
-
for _id, spk, language, text, phones, tone, word2ph in tqdm(
|
62 |
-
self.audiopaths_sid_text
|
63 |
-
):
|
64 |
-
audiopath = f"{_id}"
|
65 |
-
if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
|
66 |
-
phones = phones.split(" ")
|
67 |
-
tone = [int(i) for i in tone.split(" ")]
|
68 |
-
word2ph = [int(i) for i in word2ph.split(" ")]
|
69 |
-
audiopaths_sid_text_new.append(
|
70 |
-
[audiopath, spk, language, text, phones, tone, word2ph]
|
71 |
-
)
|
72 |
-
lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
|
73 |
-
else:
|
74 |
-
skipped += 1
|
75 |
-
logger.info(
|
76 |
-
"skipped: "
|
77 |
-
+ str(skipped)
|
78 |
-
+ ", total: "
|
79 |
-
+ str(len(self.audiopaths_sid_text))
|
80 |
-
)
|
81 |
-
self.audiopaths_sid_text = audiopaths_sid_text_new
|
82 |
-
self.lengths = lengths
|
83 |
-
|
84 |
-
def get_audio_text_speaker_pair(self, audiopath_sid_text):
|
85 |
-
# separate filename, speaker_id and text
|
86 |
-
audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
|
87 |
-
|
88 |
-
bert, ja_bert, phones, tone, language = self.get_text(
|
89 |
-
text, word2ph, phones, tone, language, audiopath
|
90 |
-
)
|
91 |
-
|
92 |
-
spec, wav = self.get_audio(audiopath)
|
93 |
-
sid = torch.LongTensor([int(self.spk_map[sid])])
|
94 |
-
return (phones, spec, wav, sid, tone, language, bert, ja_bert)
|
95 |
-
|
96 |
-
def get_audio(self, filename):
|
97 |
-
audio, sampling_rate = load_wav_to_torch(filename)
|
98 |
-
if sampling_rate != self.sampling_rate:
|
99 |
-
raise ValueError(
|
100 |
-
"{} {} SR doesn't match target {} SR".format(
|
101 |
-
filename, sampling_rate, self.sampling_rate
|
102 |
-
)
|
103 |
-
)
|
104 |
-
audio_norm = audio / self.max_wav_value
|
105 |
-
audio_norm = audio_norm.unsqueeze(0)
|
106 |
-
spec_filename = filename.replace(".wav", ".spec.pt")
|
107 |
-
if self.use_mel_spec_posterior:
|
108 |
-
spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
|
109 |
-
try:
|
110 |
-
spec = torch.load(spec_filename)
|
111 |
-
except:
|
112 |
-
if self.use_mel_spec_posterior:
|
113 |
-
spec = mel_spectrogram_torch(
|
114 |
-
audio_norm,
|
115 |
-
self.filter_length,
|
116 |
-
self.n_mel_channels,
|
117 |
-
self.sampling_rate,
|
118 |
-
self.hop_length,
|
119 |
-
self.win_length,
|
120 |
-
self.hparams.mel_fmin,
|
121 |
-
self.hparams.mel_fmax,
|
122 |
-
center=False,
|
123 |
-
)
|
124 |
-
else:
|
125 |
-
spec = spectrogram_torch(
|
126 |
-
audio_norm,
|
127 |
-
self.filter_length,
|
128 |
-
self.sampling_rate,
|
129 |
-
self.hop_length,
|
130 |
-
self.win_length,
|
131 |
-
center=False,
|
132 |
-
)
|
133 |
-
spec = torch.squeeze(spec, 0)
|
134 |
-
torch.save(spec, spec_filename)
|
135 |
-
return spec, audio_norm
|
136 |
-
|
137 |
-
def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
|
138 |
-
phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
|
139 |
-
if self.add_blank:
|
140 |
-
phone = commons.intersperse(phone, 0)
|
141 |
-
tone = commons.intersperse(tone, 0)
|
142 |
-
language = commons.intersperse(language, 0)
|
143 |
-
for i in range(len(word2ph)):
|
144 |
-
word2ph[i] = word2ph[i] * 2
|
145 |
-
word2ph[0] += 1
|
146 |
-
bert_path = wav_path.replace(".wav", ".bert.pt")
|
147 |
-
try:
|
148 |
-
bert = torch.load(bert_path)
|
149 |
-
assert bert.shape[-1] == len(phone)
|
150 |
-
except:
|
151 |
-
bert = get_bert(text, word2ph, language_str)
|
152 |
-
torch.save(bert, bert_path)
|
153 |
-
assert bert.shape[-1] == len(phone), phone
|
154 |
-
|
155 |
-
if language_str == "ZH":
|
156 |
-
bert = bert
|
157 |
-
ja_bert = torch.zeros(768, len(phone))
|
158 |
-
elif language_str == "JP":
|
159 |
-
ja_bert = bert
|
160 |
-
bert = torch.zeros(1024, len(phone))
|
161 |
-
else:
|
162 |
-
bert = torch.zeros(1024, len(phone))
|
163 |
-
ja_bert = torch.zeros(768, len(phone))
|
164 |
-
assert bert.shape[-1] == len(phone), (
|
165 |
-
bert.shape,
|
166 |
-
len(phone),
|
167 |
-
sum(word2ph),
|
168 |
-
p1,
|
169 |
-
p2,
|
170 |
-
t1,
|
171 |
-
t2,
|
172 |
-
pold,
|
173 |
-
pold2,
|
174 |
-
word2ph,
|
175 |
-
text,
|
176 |
-
w2pho,
|
177 |
-
)
|
178 |
-
phone = torch.LongTensor(phone)
|
179 |
-
tone = torch.LongTensor(tone)
|
180 |
-
language = torch.LongTensor(language)
|
181 |
-
return bert, ja_bert, phone, tone, language
|
182 |
-
|
183 |
-
def get_sid(self, sid):
|
184 |
-
sid = torch.LongTensor([int(sid)])
|
185 |
-
return sid
|
186 |
-
|
187 |
-
def __getitem__(self, index):
|
188 |
-
return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
|
189 |
-
|
190 |
-
def __len__(self):
|
191 |
-
return len(self.audiopaths_sid_text)
|
192 |
-
|
193 |
-
|
194 |
-
class TextAudioSpeakerCollate:
|
195 |
-
"""Zero-pads model inputs and targets"""
|
196 |
-
|
197 |
-
def __init__(self, return_ids=False):
|
198 |
-
self.return_ids = return_ids
|
199 |
-
|
200 |
-
def __call__(self, batch):
|
201 |
-
"""Collate's training batch from normalized text, audio and speaker identities
|
202 |
-
PARAMS
|
203 |
-
------
|
204 |
-
batch: [text_normalized, spec_normalized, wav_normalized, sid]
|
205 |
-
"""
|
206 |
-
# Right zero-pad all one-hot text sequences to max input length
|
207 |
-
_, ids_sorted_decreasing = torch.sort(
|
208 |
-
torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True
|
209 |
-
)
|
210 |
-
|
211 |
-
max_text_len = max([len(x[0]) for x in batch])
|
212 |
-
max_spec_len = max([x[1].size(1) for x in batch])
|
213 |
-
max_wav_len = max([x[2].size(1) for x in batch])
|
214 |
-
|
215 |
-
text_lengths = torch.LongTensor(len(batch))
|
216 |
-
spec_lengths = torch.LongTensor(len(batch))
|
217 |
-
wav_lengths = torch.LongTensor(len(batch))
|
218 |
-
sid = torch.LongTensor(len(batch))
|
219 |
-
|
220 |
-
text_padded = torch.LongTensor(len(batch), max_text_len)
|
221 |
-
tone_padded = torch.LongTensor(len(batch), max_text_len)
|
222 |
-
language_padded = torch.LongTensor(len(batch), max_text_len)
|
223 |
-
bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
|
224 |
-
ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)
|
225 |
-
|
226 |
-
spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
|
227 |
-
wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
|
228 |
-
text_padded.zero_()
|
229 |
-
tone_padded.zero_()
|
230 |
-
language_padded.zero_()
|
231 |
-
spec_padded.zero_()
|
232 |
-
wav_padded.zero_()
|
233 |
-
bert_padded.zero_()
|
234 |
-
ja_bert_padded.zero_()
|
235 |
-
for i in range(len(ids_sorted_decreasing)):
|
236 |
-
row = batch[ids_sorted_decreasing[i]]
|
237 |
-
|
238 |
-
text = row[0]
|
239 |
-
text_padded[i, : text.size(0)] = text
|
240 |
-
text_lengths[i] = text.size(0)
|
241 |
-
|
242 |
-
spec = row[1]
|
243 |
-
spec_padded[i, :, : spec.size(1)] = spec
|
244 |
-
spec_lengths[i] = spec.size(1)
|
245 |
-
|
246 |
-
wav = row[2]
|
247 |
-
wav_padded[i, :, : wav.size(1)] = wav
|
248 |
-
wav_lengths[i] = wav.size(1)
|
249 |
-
|
250 |
-
sid[i] = row[3]
|
251 |
-
|
252 |
-
tone = row[4]
|
253 |
-
tone_padded[i, : tone.size(0)] = tone
|
254 |
-
|
255 |
-
language = row[5]
|
256 |
-
language_padded[i, : language.size(0)] = language
|
257 |
-
|
258 |
-
bert = row[6]
|
259 |
-
bert_padded[i, :, : bert.size(1)] = bert
|
260 |
-
|
261 |
-
ja_bert = row[7]
|
262 |
-
ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert
|
263 |
-
|
264 |
-
return (
|
265 |
-
text_padded,
|
266 |
-
text_lengths,
|
267 |
-
spec_padded,
|
268 |
-
spec_lengths,
|
269 |
-
wav_padded,
|
270 |
-
wav_lengths,
|
271 |
-
sid,
|
272 |
-
tone_padded,
|
273 |
-
language_padded,
|
274 |
-
bert_padded,
|
275 |
-
ja_bert_padded,
|
276 |
-
)
|
277 |
-
|
278 |
-
|
279 |
-
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
|
280 |
-
"""
|
281 |
-
Maintain similar input lengths in a batch.
|
282 |
-
Length groups are specified by boundaries.
|
283 |
-
Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.
|
284 |
-
|
285 |
-
It removes samples which are not included in the boundaries.
|
286 |
-
Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
|
287 |
-
"""
|
288 |
-
|
289 |
-
def __init__(
|
290 |
-
self,
|
291 |
-
dataset,
|
292 |
-
batch_size,
|
293 |
-
boundaries,
|
294 |
-
num_replicas=None,
|
295 |
-
rank=None,
|
296 |
-
shuffle=True,
|
297 |
-
):
|
298 |
-
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
|
299 |
-
self.lengths = dataset.lengths
|
300 |
-
self.batch_size = batch_size
|
301 |
-
self.boundaries = boundaries
|
302 |
-
|
303 |
-
self.buckets, self.num_samples_per_bucket = self._create_buckets()
|
304 |
-
self.total_size = sum(self.num_samples_per_bucket)
|
305 |
-
self.num_samples = self.total_size // self.num_replicas
|
306 |
-
|
307 |
-
def _create_buckets(self):
|
308 |
-
buckets = [[] for _ in range(len(self.boundaries) - 1)]
|
309 |
-
for i in range(len(self.lengths)):
|
310 |
-
length = self.lengths[i]
|
311 |
-
idx_bucket = self._bisect(length)
|
312 |
-
if idx_bucket != -1:
|
313 |
-
buckets[idx_bucket].append(i)
|
314 |
-
|
315 |
-
try:
|
316 |
-
for i in range(len(buckets) - 1, 0, -1):
|
317 |
-
if len(buckets[i]) == 0:
|
318 |
-
buckets.pop(i)
|
319 |
-
self.boundaries.pop(i + 1)
|
320 |
-
assert all(len(bucket) > 0 for bucket in buckets)
|
321 |
-
# When one bucket is not traversed
|
322 |
-
except Exception as e:
|
323 |
-
print("Bucket warning ", e)
|
324 |
-
for i in range(len(buckets) - 1, -1, -1):
|
325 |
-
if len(buckets[i]) == 0:
|
326 |
-
buckets.pop(i)
|
327 |
-
self.boundaries.pop(i + 1)
|
328 |
-
|
329 |
-
num_samples_per_bucket = []
|
330 |
-
for i in range(len(buckets)):
|
331 |
-
len_bucket = len(buckets[i])
|
332 |
-
total_batch_size = self.num_replicas * self.batch_size
|
333 |
-
rem = (
|
334 |
-
total_batch_size - (len_bucket % total_batch_size)
|
335 |
-
) % total_batch_size
|
336 |
-
num_samples_per_bucket.append(len_bucket + rem)
|
337 |
-
return buckets, num_samples_per_bucket
|
338 |
-
|
339 |
-
def __iter__(self):
|
340 |
-
# deterministically shuffle based on epoch
|
341 |
-
g = torch.Generator()
|
342 |
-
g.manual_seed(self.epoch)
|
343 |
-
|
344 |
-
indices = []
|
345 |
-
if self.shuffle:
|
346 |
-
for bucket in self.buckets:
|
347 |
-
indices.append(torch.randperm(len(bucket), generator=g).tolist())
|
348 |
-
else:
|
349 |
-
for bucket in self.buckets:
|
350 |
-
indices.append(list(range(len(bucket))))
|
351 |
-
|
352 |
-
batches = []
|
353 |
-
for i in range(len(self.buckets)):
|
354 |
-
bucket = self.buckets[i]
|
355 |
-
len_bucket = len(bucket)
|
356 |
-
if len_bucket == 0:
|
357 |
-
continue
|
358 |
-
ids_bucket = indices[i]
|
359 |
-
num_samples_bucket = self.num_samples_per_bucket[i]
|
360 |
-
|
361 |
-
# add extra samples to make it evenly divisible
|
362 |
-
rem = num_samples_bucket - len_bucket
|
363 |
-
ids_bucket = (
|
364 |
-
ids_bucket
|
365 |
-
+ ids_bucket * (rem // len_bucket)
|
366 |
-
+ ids_bucket[: (rem % len_bucket)]
|
367 |
-
)
|
368 |
-
|
369 |
-
# subsample
|
370 |
-
ids_bucket = ids_bucket[self.rank :: self.num_replicas]
|
371 |
-
|
372 |
-
# batching
|
373 |
-
for j in range(len(ids_bucket) // self.batch_size):
|
374 |
-
batch = [
|
375 |
-
bucket[idx]
|
376 |
-
for idx in ids_bucket[
|
377 |
-
j * self.batch_size : (j + 1) * self.batch_size
|
378 |
-
]
|
379 |
-
]
|
380 |
-
batches.append(batch)
|
381 |
-
|
382 |
-
if self.shuffle:
|
383 |
-
batch_ids = torch.randperm(len(batches), generator=g).tolist()
|
384 |
-
batches = [batches[i] for i in batch_ids]
|
385 |
-
self.batches = batches
|
386 |
-
|
387 |
-
assert len(self.batches) * self.batch_size == self.num_samples
|
388 |
-
return iter(self.batches)
|
389 |
-
|
390 |
-
def _bisect(self, x, lo=0, hi=None):
|
391 |
-
if hi is None:
|
392 |
-
hi = len(self.boundaries) - 1
|
393 |
-
|
394 |
-
if hi > lo:
|
395 |
-
mid = (hi + lo) // 2
|
396 |
-
if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
|
397 |
-
return mid
|
398 |
-
elif x <= self.boundaries[mid]:
|
399 |
-
return self._bisect(x, lo, mid)
|
400 |
-
else:
|
401 |
-
return self._bisect(x, mid + 1, hi)
|
402 |
-
else:
|
403 |
-
return -1
|
404 |
-
|
405 |
-
def __len__(self):
|
406 |
-
return self.num_samples // self.batch_size
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Implicit And Explicit Aspects Extraction In Restaurant Reviews Domain
|
3 |
-
emoji: 🌖
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: red
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.21.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/谷歌检索小助手.py
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
2 |
-
from toolbox import CatchException, report_execption, write_results_to_file
|
3 |
-
from toolbox import update_ui
|
4 |
-
|
5 |
-
def get_meta_information(url, chatbot, history):
|
6 |
-
import requests
|
7 |
-
import arxiv
|
8 |
-
import difflib
|
9 |
-
from bs4 import BeautifulSoup
|
10 |
-
from toolbox import get_conf
|
11 |
-
proxies, = get_conf('proxies')
|
12 |
-
headers = {
|
13 |
-
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
|
14 |
-
}
|
15 |
-
# 发送 GET 请求
|
16 |
-
response = requests.get(url, proxies=proxies, headers=headers)
|
17 |
-
|
18 |
-
# 解析网页内容
|
19 |
-
soup = BeautifulSoup(response.text, "html.parser")
|
20 |
-
|
21 |
-
def string_similar(s1, s2):
|
22 |
-
return difflib.SequenceMatcher(None, s1, s2).quick_ratio()
|
23 |
-
|
24 |
-
profile = []
|
25 |
-
# 获取所有文章的标题和作者
|
26 |
-
for result in soup.select(".gs_ri"):
|
27 |
-
title = result.a.text.replace('\n', ' ').replace(' ', ' ')
|
28 |
-
author = result.select_one(".gs_a").text
|
29 |
-
try:
|
30 |
-
citation = result.select_one(".gs_fl > a[href*='cites']").text # 引用次数是链接中的文本,直接取出来
|
31 |
-
except:
|
32 |
-
citation = 'cited by 0'
|
33 |
-
abstract = result.select_one(".gs_rs").text.strip() # 摘要在 .gs_rs 中的文本,需要清除首尾空格
|
34 |
-
search = arxiv.Search(
|
35 |
-
query = title,
|
36 |
-
max_results = 1,
|
37 |
-
sort_by = arxiv.SortCriterion.Relevance,
|
38 |
-
)
|
39 |
-
paper = next(search.results())
|
40 |
-
if string_similar(title, paper.title) > 0.90: # same paper
|
41 |
-
abstract = paper.summary.replace('\n', ' ')
|
42 |
-
is_paper_in_arxiv = True
|
43 |
-
else: # different paper
|
44 |
-
abstract = abstract
|
45 |
-
is_paper_in_arxiv = False
|
46 |
-
paper = next(search.results())
|
47 |
-
print(title)
|
48 |
-
print(author)
|
49 |
-
print(citation)
|
50 |
-
profile.append({
|
51 |
-
'title':title,
|
52 |
-
'author':author,
|
53 |
-
'citation':citation,
|
54 |
-
'abstract':abstract,
|
55 |
-
'is_paper_in_arxiv':is_paper_in_arxiv,
|
56 |
-
})
|
57 |
-
|
58 |
-
chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
|
59 |
-
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
60 |
-
return profile
|
61 |
-
|
62 |
-
@CatchException
|
63 |
-
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
64 |
-
# 基本信息:功能、贡献者
|
65 |
-
chatbot.append([
|
66 |
-
"函数插件功能?",
|
67 |
-
"分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
|
68 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
69 |
-
|
70 |
-
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
71 |
-
try:
|
72 |
-
import arxiv
|
73 |
-
from bs4 import BeautifulSoup
|
74 |
-
except:
|
75 |
-
report_execption(chatbot, history,
|
76 |
-
a = f"解析项目: {txt}",
|
77 |
-
b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
|
78 |
-
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
79 |
-
return
|
80 |
-
|
81 |
-
# 清空历史,以免输入溢出
|
82 |
-
history = []
|
83 |
-
|
84 |
-
meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
|
85 |
-
|
86 |
-
if len(meta_paper_info_list[:10]) > 0:
|
87 |
-
i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
|
88 |
-
"1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
|
89 |
-
f"以下是信息源:{str(meta_paper_info_list[:10])}"
|
90 |
-
|
91 |
-
inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
|
92 |
-
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
93 |
-
inputs=i_say, inputs_show_user=inputs_show_user,
|
94 |
-
llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
|
95 |
-
sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
|
96 |
-
)
|
97 |
-
|
98 |
-
history.extend([ "第一批", gpt_say ])
|
99 |
-
meta_paper_info_list = meta_paper_info_list[10:]
|
100 |
-
|
101 |
-
chatbot.append(["状态?", "已经全部完成"])
|
102 |
-
msg = '正常'
|
103 |
-
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
104 |
-
res = write_results_to_file(history)
|
105 |
-
chatbot.append(("完成了吗?", res));
|
106 |
-
yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/latex/attention/introduction.tex
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
|
2 |
-
|
3 |
-
Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
|
4 |
-
%\marginpar{not sure if the memory constraints are understandable here}
|
5 |
-
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
|
6 |
-
|
7 |
-
%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
|
8 |
-
|
9 |
-
Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
|
10 |
-
|
11 |
-
%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
|
12 |
-
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
|
13 |
-
|
14 |
-
In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
|
15 |
-
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
|
16 |
-
|
17 |
-
% Just a standard paragraph with citations, rewrite.
|
18 |
-
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! 
Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/generate.py
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
|
4 |
-
# This work is made available under the Nvidia Source Code License-NC.
|
5 |
-
# To view a copy of this license, visit
|
6 |
-
# https://nvlabs.github.io/stylegan2/license.html
|
7 |
-
|
8 |
-
|
9 |
-
## this script is for generating images from pre-trained network based on StyleGAN1 (TensorFlow) and StyleGAN2-ada (PyTorch) ##
|
10 |
-
|
11 |
-
import os
|
12 |
-
import click
|
13 |
-
import dnnlib
|
14 |
-
import numpy as np
|
15 |
-
import PIL.Image
|
16 |
-
import legacy
|
17 |
-
from typing import List, Optional
|
18 |
-
|
19 |
-
"""
|
20 |
-
Generate images using pretrained network pickle.
|
21 |
-
Examples:
|
22 |
-
|
23 |
-
\b
|
24 |
-
# Generate human full-body images without truncation
|
25 |
-
python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=1 --seeds=1,3,5,7 \\
|
26 |
-
--network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
|
27 |
-
|
28 |
-
\b
|
29 |
-
# Generate human full-body images with truncation
|
30 |
-
python generate.py --outdir=outputs/generate/stylegan_human_v2_1024 --trunc=0.8 --seeds=0-100\\
|
31 |
-
--network=pretrained_models/stylegan_human_v2_1024.pkl --version 2
|
32 |
-
|
33 |
-
# \b
|
34 |
-
# Generate human full-body images using stylegan V1
|
35 |
-
# python generate.py --outdir=outputs/generate/stylegan_human_v1_1024 \\
|
36 |
-
# --network=pretrained_models/stylegan_human_v1_1024.pkl --version 1
|
37 |
-
"""
|
38 |
-
|
39 |
-
|
40 |
-
@click.command()
|
41 |
-
@click.pass_context
|
42 |
-
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
|
43 |
-
@click.option('--seeds', type=legacy.num_range, help='List of random seeds')
|
44 |
-
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
|
45 |
-
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
|
46 |
-
@click.option('--outdir', help='Where to save the output images', default='outputs/generate/', type=str, required=True, metavar='DIR')
|
47 |
-
@click.option('--version', help="stylegan version, 1, 2 or 3", type=int, default=2)
|
48 |
-
def generate_images(
|
49 |
-
ctx: click.Context,
|
50 |
-
network_pkl: str,
|
51 |
-
seeds: Optional[List[int]],
|
52 |
-
truncation_psi: float,
|
53 |
-
noise_mode: str,
|
54 |
-
outdir: str,
|
55 |
-
version: int
|
56 |
-
):
|
57 |
-
|
58 |
-
print('Loading networks from "%s"...' % network_pkl)
|
59 |
-
if version == 1:
|
60 |
-
import dnnlib.tflib as tflib
|
61 |
-
tflib.init_tf()
|
62 |
-
G, D, Gs = legacy.load_pkl(network_pkl)
|
63 |
-
|
64 |
-
else:
|
65 |
-
import torch
|
66 |
-
device = torch.device('cuda')
|
67 |
-
with dnnlib.util.open_url(network_pkl) as f:
|
68 |
-
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
|
69 |
-
os.makedirs(outdir, exist_ok=True)
|
70 |
-
|
71 |
-
if seeds is None:
|
72 |
-
ctx.fail('--seeds option is required.')
|
73 |
-
|
74 |
-
# Generate images.
|
75 |
-
target_z = np.array([])
|
76 |
-
target_w = np.array([])
|
77 |
-
latent_out = outdir.replace('/images/', '')
|
78 |
-
for seed_idx, seed in enumerate(seeds):
|
79 |
-
if seed % 5000 == 0:
|
80 |
-
print('Generating image for seed %d (%d/%d) ...' %
|
81 |
-
(seed, seed_idx, len(seeds)))
|
82 |
-
|
83 |
-
if version == 1: # stylegan v1
|
84 |
-
z = np.random.RandomState(seed).randn(1, Gs.input_shape[1])
|
85 |
-
# Generate image.
|
86 |
-
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
|
87 |
-
if noise_mode == 'const':
|
88 |
-
randomize_noise = False
|
89 |
-
else:
|
90 |
-
randomize_noise = True
|
91 |
-
images = Gs.run(z, None, truncation_psi=truncation_psi,
|
92 |
-
randomize_noise=randomize_noise, output_transform=fmt)
|
93 |
-
PIL.Image.fromarray(images[0], 'RGB').save(
|
94 |
-
f'{outdir}/seed{seed:04d}.png')
|
95 |
-
|
96 |
-
else: # stylegan v2/v3
|
97 |
-
label = torch.zeros([1, G.c_dim], device=device)
|
98 |
-
z = torch.from_numpy(np.random.RandomState(
|
99 |
-
seed).randn(1, G.z_dim)).to(device)
|
100 |
-
if target_z.size == 0:
|
101 |
-
target_z = z.cpu()
|
102 |
-
else:
|
103 |
-
target_z = np.append(target_z, z.cpu(), axis=0)
|
104 |
-
|
105 |
-
w = G.mapping(z, label, truncation_psi=truncation_psi)
|
106 |
-
img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
|
107 |
-
if target_w.size == 0:
|
108 |
-
target_w = w.cpu()
|
109 |
-
else:
|
110 |
-
target_w = np.append(target_w, w.cpu(), axis=0)
|
111 |
-
|
112 |
-
img = (img.permute(0, 2, 3, 1) * 127.5 +
|
113 |
-
128).clamp(0, 255).to(torch.uint8)
|
114 |
-
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(
|
115 |
-
f'{outdir}/seed{seed:04d}.png')
|
116 |
-
# print(target_z)
|
117 |
-
# print(target_z.shape,target_w.shape)
|
118 |
-
|
119 |
-
|
120 |
-
# ----------------------------------------------------------------------------
|
121 |
-
|
122 |
-
if __name__ == "__main__":
|
123 |
-
generate_images()
|
124 |
-
|
125 |
-
# ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andres99/Tune-A-Video-Training-UI/uploader.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
from huggingface_hub import HfApi
|
4 |
-
|
5 |
-
|
6 |
-
class Uploader:
|
7 |
-
def __init__(self, hf_token: str | None):
|
8 |
-
self.hf_token = hf_token
|
9 |
-
|
10 |
-
def upload(self,
|
11 |
-
folder_path: str,
|
12 |
-
repo_name: str,
|
13 |
-
organization: str = '',
|
14 |
-
repo_type: str = 'model',
|
15 |
-
private: bool = True,
|
16 |
-
delete_existing_repo: bool = False,
|
17 |
-
input_token: str | None = None) -> str:
|
18 |
-
|
19 |
-
api = HfApi(token=self.hf_token if self.hf_token else input_token)
|
20 |
-
|
21 |
-
if not folder_path:
|
22 |
-
raise ValueError
|
23 |
-
if not repo_name:
|
24 |
-
raise ValueError
|
25 |
-
if not organization:
|
26 |
-
organization = api.whoami()['name']
|
27 |
-
|
28 |
-
repo_id = f'{organization}/{repo_name}'
|
29 |
-
if delete_existing_repo:
|
30 |
-
try:
|
31 |
-
api.delete_repo(repo_id, repo_type=repo_type)
|
32 |
-
except Exception:
|
33 |
-
pass
|
34 |
-
try:
|
35 |
-
api.create_repo(repo_id, repo_type=repo_type, private=private)
|
36 |
-
api.upload_folder(repo_id=repo_id,
|
37 |
-
folder_path=folder_path,
|
38 |
-
path_in_repo='.',
|
39 |
-
repo_type=repo_type)
|
40 |
-
url = f'https://huggingface.co/{repo_id}'
|
41 |
-
message = f'Your model was successfully uploaded to <a href="{url}" target="_blank">{url}</a>.'
|
42 |
-
except Exception as e:
|
43 |
-
message = str(e)
|
44 |
-
return message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_xl.md
DELETED
@@ -1,427 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Stable diffusion XL
|
14 |
-
|
15 |
-
Stable Diffusion XL was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://arxiv.org/abs/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, Robin Rombach
|
16 |
-
|
17 |
-
The abstract of the paper is the following:
|
18 |
-
|
19 |
-
*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators.*
|
20 |
-
|
21 |
-
## Tips
|
22 |
-
|
23 |
-
- Stable Diffusion XL works especially well with images between 768 and 1024.
|
24 |
-
- Stable Diffusion XL can pass a different prompt for each of the text encoders it was trained on as shown below. We can even pass different parts of the same prompt to the text encoders.
|
25 |
-
- Stable Diffusion XL output image can be improved by making use of a refiner as shown below.
|
26 |
-
|
27 |
-
### Available checkpoints:
|
28 |
-
|
29 |
-
- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`]
|
30 |
-
- *Image-to-Image / Refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`]
|
31 |
-
|
32 |
-
## Usage Example
|
33 |
-
|
34 |
-
Before using SDXL make sure to have `transformers`, `accelerate`, `safetensors` and `invisible_watermark` installed.
|
35 |
-
You can install the libraries as follows:
|
36 |
-
|
37 |
-
```
|
38 |
-
pip install transformers
|
39 |
-
pip install accelerate
|
40 |
-
pip install safetensors
|
41 |
-
```
|
42 |
-
|
43 |
-
### Watermarker
|
44 |
-
|
45 |
-
We recommend to add an invisible watermark to images generating by Stable Diffusion XL, this can help with identifying if an image is machine-synthesised for downstream applications. To do so, please install
|
46 |
-
the [invisible-watermark library](https://pypi.org/project/invisible-watermark/) via:
|
47 |
-
|
48 |
-
```
|
49 |
-
pip install invisible-watermark>=0.2.0
|
50 |
-
```
|
51 |
-
|
52 |
-
If the `invisible-watermark` library is installed the watermarker will be used **by default**.
|
53 |
-
|
54 |
-
If you have other provisions for generating or deploying images safely, you can disable the watermarker as follows:
|
55 |
-
|
56 |
-
```py
|
57 |
-
pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False)
|
58 |
-
```
|
59 |
-
|
60 |
-
### Text-to-Image
|
61 |
-
|
62 |
-
You can use SDXL as follows for *text-to-image*:
|
63 |
-
|
64 |
-
```py
|
65 |
-
from diffusers import StableDiffusionXLPipeline
|
66 |
-
import torch
|
67 |
-
|
68 |
-
pipe = StableDiffusionXLPipeline.from_pretrained(
|
69 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
70 |
-
)
|
71 |
-
pipe.to("cuda")
|
72 |
-
|
73 |
-
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
|
74 |
-
image = pipe(prompt=prompt).images[0]
|
75 |
-
```
|
76 |
-
|
77 |
-
### Image-to-image
|
78 |
-
|
79 |
-
You can use SDXL as follows for *image-to-image*:
|
80 |
-
|
81 |
-
```py
|
82 |
-
import torch
|
83 |
-
from diffusers import StableDiffusionXLImg2ImgPipeline
|
84 |
-
from diffusers.utils import load_image
|
85 |
-
|
86 |
-
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
|
87 |
-
"stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
88 |
-
)
|
89 |
-
pipe = pipe.to("cuda")
|
90 |
-
url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
|
91 |
-
|
92 |
-
init_image = load_image(url).convert("RGB")
|
93 |
-
prompt = "a photo of an astronaut riding a horse on mars"
|
94 |
-
image = pipe(prompt, image=init_image).images[0]
|
95 |
-
```
|
96 |
-
|
97 |
-
### Inpainting
|
98 |
-
|
99 |
-
You can use SDXL as follows for *inpainting*
|
100 |
-
|
101 |
-
```py
|
102 |
-
import torch
|
103 |
-
from diffusers import StableDiffusionXLInpaintPipeline
|
104 |
-
from diffusers.utils import load_image
|
105 |
-
|
106 |
-
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
|
107 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
108 |
-
)
|
109 |
-
pipe.to("cuda")
|
110 |
-
|
111 |
-
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
112 |
-
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
113 |
-
|
114 |
-
init_image = load_image(img_url).convert("RGB")
|
115 |
-
mask_image = load_image(mask_url).convert("RGB")
|
116 |
-
|
117 |
-
prompt = "A majestic tiger sitting on a bench"
|
118 |
-
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0]
|
119 |
-
```
|
120 |
-
|
121 |
-
### Refining the image output
|
122 |
-
|
123 |
-
In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0),
|
124 |
-
StableDiffusion-XL also includes a [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)
|
125 |
-
that is specialized in denoising low-noise stage images to generate images of improved high-frequency quality.
|
126 |
-
This refiner checkpoint can be used as a "second-step" pipeline after having run the base checkpoint to improve
|
127 |
-
image quality.
|
128 |
-
|
129 |
-
When using the refiner, one can easily
|
130 |
-
- 1.) employ the base model and refiner as an *Ensemble of Expert Denoisers* as first proposed in [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/) or
|
131 |
-
- 2.) simply run the refiner in [SDEdit](https://arxiv.org/abs/2108.01073) fashion after the base model.
|
132 |
-
|
133 |
-
**Note**: The idea of using SD-XL base & refiner as an ensemble of experts was first brought forward by
|
134 |
-
a couple community contributors which also helped shape the following `diffusers` implementation, namely:
|
135 |
-
- [SytanSD](https://github.com/SytanSD)
|
136 |
-
- [bghira](https://github.com/bghira)
|
137 |
-
- [Birch-san](https://github.com/Birch-san)
|
138 |
-
- [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter)
|
139 |
-
|
140 |
-
#### 1.) Ensemble of Expert Denoisers
|
141 |
-
|
142 |
-
When using the base and refiner model as an ensemble of expert of denoisers, the base model should serve as the
|
143 |
-
expert for the high-noise diffusion stage and the refiner serves as the expert for the low-noise diffusion stage.
|
144 |
-
|
145 |
-
The advantage of 1.) over 2.) is that it requires less overall denoising steps and therefore should be significantly
|
146 |
-
faster. The drawback is that one cannot really inspect the output of the base model; it will still be heavily denoised.
|
147 |
-
|
148 |
-
To use the base model and refiner as an ensemble of expert denoisers, make sure to define the span
|
149 |
-
of timesteps which should be run through the high-noise denoising stage (*i.e.* the base model) and the low-noise
|
150 |
-
denoising stage (*i.e.* the refiner model) respectively. We can set the intervals using the [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) of the base model
|
151 |
-
and [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start) of the refiner model.
|
152 |
-
|
153 |
-
For both `denoising_end` and `denoising_start` a float value between 0 and 1 should be passed.
|
154 |
-
When passed, the end and start of denoising will be defined by proportions of discrete timesteps as
|
155 |
-
defined by the model schedule.
|
156 |
-
Note that this will override `strength` if it is also declared, since the number of denoising steps
|
157 |
-
is determined by the discrete timesteps the model was trained on and the declared fractional cutoff.
|
158 |
-
|
159 |
-
Let's look at an example.
|
160 |
-
First, we import the two pipelines. Since the text encoders and variational autoencoder are the same
|
161 |
-
you don't have to load those again for the refiner.
|
162 |
-
|
163 |
-
```py
|
164 |
-
from diffusers import DiffusionPipeline
|
165 |
-
import torch
|
166 |
-
|
167 |
-
base = DiffusionPipeline.from_pretrained(
|
168 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
169 |
-
)
|
170 |
-
base.to("cuda")
|
171 |
-
|
172 |
-
refiner = DiffusionPipeline.from_pretrained(
|
173 |
-
"stabilityai/stable-diffusion-xl-refiner-1.0",
|
174 |
-
text_encoder_2=base.text_encoder_2,
|
175 |
-
vae=base.vae,
|
176 |
-
torch_dtype=torch.float16,
|
177 |
-
use_safetensors=True,
|
178 |
-
variant="fp16",
|
179 |
-
)
|
180 |
-
refiner.to("cuda")
|
181 |
-
```
|
182 |
-
|
183 |
-
Now we define the number of inference steps and the point at which the model shall be run through the
|
184 |
-
high-noise denoising stage (*i.e.* the base model).
|
185 |
-
|
186 |
-
```py
|
187 |
-
n_steps = 40
|
188 |
-
high_noise_frac = 0.8
|
189 |
-
```
|
190 |
-
|
191 |
-
Stable Diffusion XL base is trained on timesteps 0-999 and Stable Diffusion XL refiner is finetuned
|
192 |
-
from the base model on low noise timesteps 0-199 inclusive, so we use the base model for the first
|
193 |
-
800 timesteps (high noise) and the refiner for the last 200 timesteps (low noise). Hence, `high_noise_frac`
|
194 |
-
is set to 0.8, so that all steps 200-999 (the first 80% of denoising timesteps) are performed by the
|
195 |
-
base model and steps 0-199 (the last 20% of denoising timesteps) are performed by the refiner model.
|
196 |
-
|
197 |
-
Remember, the denoising process starts at **high value** (high noise) timesteps and ends at
|
198 |
-
**low value** (low noise) timesteps.
|
199 |
-
|
200 |
-
Let's run the two pipelines now. Make sure to set `denoising_end` and
|
201 |
-
`denoising_start` to the same values and keep `num_inference_steps` constant. Also remember that
|
202 |
-
the output of the base model should be in latent space:
|
203 |
-
|
204 |
-
```py
|
205 |
-
prompt = "A majestic lion jumping from a big stone at night"
|
206 |
-
|
207 |
-
image = base(
|
208 |
-
prompt=prompt,
|
209 |
-
num_inference_steps=n_steps,
|
210 |
-
denoising_end=high_noise_frac,
|
211 |
-
output_type="latent",
|
212 |
-
).images
|
213 |
-
image = refiner(
|
214 |
-
prompt=prompt,
|
215 |
-
num_inference_steps=n_steps,
|
216 |
-
denoising_start=high_noise_frac,
|
217 |
-
image=image,
|
218 |
-
).images[0]
|
219 |
-
```
|
220 |
-
|
221 |
-
Let's have a look at the images
|
222 |
-
|
223 |
-
| Original Image | Ensemble of Denoisers Experts |
|
224 |
-
|---|---|
|
225 |
-
|  | 
|
226 |
-
|
227 |
-
If we would have just run the base model on the same 40 steps, the image would have been arguably less detailed (e.g. the lion eyes and nose):
|
228 |
-
|
229 |
-
<Tip>
|
230 |
-
|
231 |
-
The ensemble-of-experts method works well on all available schedulers!
|
232 |
-
|
233 |
-
</Tip>
|
234 |
-
|
235 |
-
#### 2.) Refining the image output from fully denoised base image
|
236 |
-
|
237 |
-
In standard [`StableDiffusionImg2ImgPipeline`]-fashion, the fully-denoised image generated of the base model
|
238 |
-
can be further improved using the [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0).
|
239 |
-
|
240 |
-
For this, you simply run the refiner as a normal image-to-image pipeline after the "base" text-to-image
|
241 |
-
pipeline. You can leave the outputs of the base model in latent space.
|
242 |
-
|
243 |
-
```py
|
244 |
-
from diffusers import DiffusionPipeline
|
245 |
-
import torch
|
246 |
-
|
247 |
-
pipe = DiffusionPipeline.from_pretrained(
|
248 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
249 |
-
)
|
250 |
-
pipe.to("cuda")
|
251 |
-
|
252 |
-
refiner = DiffusionPipeline.from_pretrained(
|
253 |
-
"stabilityai/stable-diffusion-xl-refiner-1.0",
|
254 |
-
text_encoder_2=pipe.text_encoder_2,
|
255 |
-
vae=pipe.vae,
|
256 |
-
torch_dtype=torch.float16,
|
257 |
-
use_safetensors=True,
|
258 |
-
variant="fp16",
|
259 |
-
)
|
260 |
-
refiner.to("cuda")
|
261 |
-
|
262 |
-
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
|
263 |
-
|
264 |
-
image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0]
|
265 |
-
image = refiner(prompt=prompt, image=image[None, :]).images[0]
|
266 |
-
```
|
267 |
-
|
268 |
-
| Original Image | Refined Image |
|
269 |
-
|---|---|
|
270 |
-
|  |  |
|
271 |
-
|
272 |
-
<Tip>
|
273 |
-
|
274 |
-
The refiner can also very well be used in an in-painting setting. To do so just make
|
275 |
-
sure you use the [`StableDiffusionXLInpaintPipeline`] classes as shown below
|
276 |
-
|
277 |
-
</Tip>
|
278 |
-
|
279 |
-
To use the refiner for inpainting in the Ensemble of Expert Denoisers setting you can do the following:
|
280 |
-
|
281 |
-
```py
|
282 |
-
from diffusers import StableDiffusionXLInpaintPipeline
|
283 |
-
from diffusers.utils import load_image
|
284 |
-
|
285 |
-
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
|
286 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
287 |
-
)
|
288 |
-
pipe.to("cuda")
|
289 |
-
|
290 |
-
refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
|
291 |
-
"stabilityai/stable-diffusion-xl-refiner-1.0",
|
292 |
-
text_encoder_2=pipe.text_encoder_2,
|
293 |
-
vae=pipe.vae,
|
294 |
-
torch_dtype=torch.float16,
|
295 |
-
use_safetensors=True,
|
296 |
-
variant="fp16",
|
297 |
-
)
|
298 |
-
refiner.to("cuda")
|
299 |
-
|
300 |
-
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
|
301 |
-
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
|
302 |
-
|
303 |
-
init_image = load_image(img_url).convert("RGB")
|
304 |
-
mask_image = load_image(mask_url).convert("RGB")
|
305 |
-
|
306 |
-
prompt = "A majestic tiger sitting on a bench"
|
307 |
-
num_inference_steps = 75
|
308 |
-
high_noise_frac = 0.7
|
309 |
-
|
310 |
-
image = pipe(
|
311 |
-
prompt=prompt,
|
312 |
-
image=init_image,
|
313 |
-
mask_image=mask_image,
|
314 |
-
num_inference_steps=num_inference_steps,
|
315 |
-
denoising_start=high_noise_frac,
|
316 |
-
output_type="latent",
|
317 |
-
).images
|
318 |
-
image = refiner(
|
319 |
-
prompt=prompt,
|
320 |
-
image=image,
|
321 |
-
mask_image=mask_image,
|
322 |
-
num_inference_steps=num_inference_steps,
|
323 |
-
denoising_start=high_noise_frac,
|
324 |
-
).images[0]
|
325 |
-
```
|
326 |
-
|
327 |
-
To use the refiner for inpainting in the standard SDE-style setting, simply remove `denoising_end` and `denoising_start` and choose a smaller
|
328 |
-
number of inference steps for the refiner.
|
329 |
-
|
330 |
-
### Loading single file checkpoints / original file format
|
331 |
-
|
332 |
-
By making use of [`~diffusers.loaders.FromSingleFileMixin.from_single_file`] you can also load the
|
333 |
-
original file format into `diffusers`:
|
334 |
-
|
335 |
-
```py
|
336 |
-
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
|
337 |
-
import torch
|
338 |
-
|
339 |
-
pipe = StableDiffusionXLPipeline.from_single_file(
|
340 |
-
"./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
341 |
-
)
|
342 |
-
pipe.to("cuda")
|
343 |
-
|
344 |
-
refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
|
345 |
-
"./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
|
346 |
-
)
|
347 |
-
refiner.to("cuda")
|
348 |
-
```
|
349 |
-
|
350 |
-
### Memory optimization via model offloading
|
351 |
-
|
352 |
-
If you are seeing out-of-memory errors, we recommend making use of [`StableDiffusionXLPipeline.enable_model_cpu_offload`].
|
353 |
-
|
354 |
-
```diff
|
355 |
-
- pipe.to("cuda")
|
356 |
-
+ pipe.enable_model_cpu_offload()
|
357 |
-
```
|
358 |
-
|
359 |
-
and
|
360 |
-
|
361 |
-
```diff
|
362 |
-
- refiner.to("cuda")
|
363 |
-
+ refiner.enable_model_cpu_offload()
|
364 |
-
```
|
365 |
-
|
366 |
-
### Speed-up inference with `torch.compile`
|
367 |
-
|
368 |
-
You can speed up inference by making use of `torch.compile`. This should give you **ca.** 20% speed-up.
|
369 |
-
|
370 |
-
```diff
|
371 |
-
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
372 |
-
+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
|
373 |
-
```
|
374 |
-
|
375 |
-
### Running with `torch < 2.0`
|
376 |
-
|
377 |
-
**Note** that if you want to run Stable Diffusion XL with `torch` < 2.0, please make sure to enable xformers
|
378 |
-
attention:
|
379 |
-
|
380 |
-
```
|
381 |
-
pip install xformers
|
382 |
-
```
|
383 |
-
|
384 |
-
```diff
|
385 |
-
+pipe.enable_xformers_memory_efficient_attention()
|
386 |
-
+refiner.enable_xformers_memory_efficient_attention()
|
387 |
-
```
|
388 |
-
|
389 |
-
## StableDiffusionXLPipeline
|
390 |
-
|
391 |
-
[[autodoc]] StableDiffusionXLPipeline
|
392 |
-
- all
|
393 |
-
- __call__
|
394 |
-
|
395 |
-
## StableDiffusionXLImg2ImgPipeline
|
396 |
-
|
397 |
-
[[autodoc]] StableDiffusionXLImg2ImgPipeline
|
398 |
-
- all
|
399 |
-
- __call__
|
400 |
-
|
401 |
-
## StableDiffusionXLInpaintPipeline
|
402 |
-
|
403 |
-
[[autodoc]] StableDiffusionXLInpaintPipeline
|
404 |
-
- all
|
405 |
-
- __call__
|
406 |
-
|
407 |
-
### Passing different prompts to each text-encoder
|
408 |
-
|
409 |
-
Stable Diffusion XL was trained on two text encoders. The default behavior is to pass the same prompt to each. But it is possible to pass a different prompt for each text-encoder, as [some users](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201) noted that it can boost quality.
|
410 |
-
To do so, you can pass `prompt_2` and `negative_prompt_2` in addition to `prompt` and `negative_prompt`. By doing that, you will pass the original prompts and negative prompts (as in `prompt` and `negative_prompt`) to `text_encoder` (in official SDXL 0.9/1.0 that is [OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14)),
|
411 |
-
and `prompt_2` and `negative_prompt_2` to `text_encoder_2` (in official SDXL 0.9/1.0 that is [OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
|
412 |
-
|
413 |
-
```py
|
414 |
-
from diffusers import StableDiffusionXLPipeline
|
415 |
-
import torch
|
416 |
-
|
417 |
-
pipe = StableDiffusionXLPipeline.from_pretrained(
|
418 |
-
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
|
419 |
-
)
|
420 |
-
pipe.to("cuda")
|
421 |
-
|
422 |
-
# prompt will be passed to OAI CLIP-ViT/L-14
|
423 |
-
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
|
424 |
-
# prompt_2 will be passed to OpenCLIP-ViT/bigG-14
|
425 |
-
prompt_2 = "monet painting"
|
426 |
-
image = pipe(prompt=prompt, prompt_2=prompt_2).images[0]
|
427 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import gc
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
from diffusers import FlaxStableDiffusionInpaintPipeline
|
20 |
-
from diffusers.utils import is_flax_available, load_image, slow
|
21 |
-
from diffusers.utils.testing_utils import require_flax
|
22 |
-
|
23 |
-
|
24 |
-
if is_flax_available():
|
25 |
-
import jax
|
26 |
-
import jax.numpy as jnp
|
27 |
-
from flax.jax_utils import replicate
|
28 |
-
from flax.training.common_utils import shard
|
29 |
-
|
30 |
-
|
31 |
-
@slow
|
32 |
-
@require_flax
|
33 |
-
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
|
34 |
-
def tearDown(self):
|
35 |
-
# clean up the VRAM after each test
|
36 |
-
super().tearDown()
|
37 |
-
gc.collect()
|
38 |
-
|
39 |
-
def test_stable_diffusion_inpaint_pipeline(self):
|
40 |
-
init_image = load_image(
|
41 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
|
42 |
-
"/sd2-inpaint/init_image.png"
|
43 |
-
)
|
44 |
-
mask_image = load_image(
|
45 |
-
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
|
46 |
-
)
|
47 |
-
|
48 |
-
model_id = "xvjiarui/stable-diffusion-2-inpainting"
|
49 |
-
pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
|
50 |
-
|
51 |
-
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
|
52 |
-
|
53 |
-
prng_seed = jax.random.PRNGKey(0)
|
54 |
-
num_inference_steps = 50
|
55 |
-
|
56 |
-
num_samples = jax.device_count()
|
57 |
-
prompt = num_samples * [prompt]
|
58 |
-
init_image = num_samples * [init_image]
|
59 |
-
mask_image = num_samples * [mask_image]
|
60 |
-
prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
|
61 |
-
|
62 |
-
# shard inputs and rng
|
63 |
-
params = replicate(params)
|
64 |
-
prng_seed = jax.random.split(prng_seed, jax.device_count())
|
65 |
-
prompt_ids = shard(prompt_ids)
|
66 |
-
processed_masked_images = shard(processed_masked_images)
|
67 |
-
processed_masks = shard(processed_masks)
|
68 |
-
|
69 |
-
output = pipeline(
|
70 |
-
prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
|
71 |
-
)
|
72 |
-
|
73 |
-
images = output.images.reshape(num_samples, 512, 512, 3)
|
74 |
-
|
75 |
-
image_slice = images[0, 253:256, 253:256, -1]
|
76 |
-
|
77 |
-
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
|
78 |
-
expected_slice = jnp.array(
|
79 |
-
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
|
80 |
-
)
|
81 |
-
print(f"output_slice: {output_slice}")
|
82 |
-
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/__init__.py
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
|
2 |
-
YOLOAnchorGenerator)
|
3 |
-
from .builder import ANCHOR_GENERATORS, build_anchor_generator
|
4 |
-
from .point_generator import PointGenerator
|
5 |
-
from .utils import anchor_inside_flags, calc_region, images_to_levels
|
6 |
-
|
7 |
-
__all__ = [
|
8 |
-
'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
|
9 |
-
'PointGenerator', 'images_to_levels', 'calc_region',
|
10 |
-
'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator'
|
11 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py',
|
3 |
-
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
|
4 |
-
]
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x512_80k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3_r50-d8_512x512_80k_ade20k.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101b-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
|
|
1 |
-
_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='torchvision://resnet101',
|
4 |
-
backbone=dict(type='ResNet', depth=101))
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './psanet_r50-d8_512x512_20k_voc12aug.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/AnimalEquality/chatbot/_proc/styles.css
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
.cell {
|
2 |
-
margin-bottom: 1rem;
|
3 |
-
}
|
4 |
-
|
5 |
-
.cell > .sourceCode {
|
6 |
-
margin-bottom: 0;
|
7 |
-
}
|
8 |
-
|
9 |
-
.cell-output > pre {
|
10 |
-
margin-bottom: 0;
|
11 |
-
}
|
12 |
-
|
13 |
-
.cell-output > pre, .cell-output > .sourceCode > pre, .cell-output-stdout > pre {
|
14 |
-
margin-left: 0.8rem;
|
15 |
-
margin-top: 0;
|
16 |
-
background: none;
|
17 |
-
border-left: 2px solid lightsalmon;
|
18 |
-
border-top-left-radius: 0;
|
19 |
-
border-top-right-radius: 0;
|
20 |
-
}
|
21 |
-
|
22 |
-
.cell-output > .sourceCode {
|
23 |
-
border: none;
|
24 |
-
}
|
25 |
-
|
26 |
-
.cell-output > .sourceCode {
|
27 |
-
background: none;
|
28 |
-
margin-top: 0;
|
29 |
-
}
|
30 |
-
|
31 |
-
div.description {
|
32 |
-
padding-left: 2px;
|
33 |
-
padding-top: 5px;
|
34 |
-
font-style: italic;
|
35 |
-
font-size: 135%;
|
36 |
-
opacity: 70%;
|
37 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/pipelines/llava/pipelines.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
from typing import Optional
|
2 |
-
|
3 |
-
from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
|
4 |
-
|
5 |
-
available_pipelines = ['llava-7b', 'llava-13b']
|
6 |
-
|
7 |
-
|
8 |
-
def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
|
9 |
-
if name == 'llava-7b':
|
10 |
-
from .llava import LLaVA_v0_7B_Pipeline
|
11 |
-
return LLaVA_v0_7B_Pipeline(params)
|
12 |
-
if name == 'llava-13b':
|
13 |
-
from .llava import LLaVA_v0_13B_Pipeline
|
14 |
-
return LLaVA_v0_13B_Pipeline(params)
|
15 |
-
return None
|
16 |
-
|
17 |
-
|
18 |
-
def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
|
19 |
-
if 'llava' not in model_name.lower():
|
20 |
-
return None
|
21 |
-
if '7b' in model_name.lower():
|
22 |
-
from .llava import LLaVA_v0_7B_Pipeline
|
23 |
-
return LLaVA_v0_7B_Pipeline(params)
|
24 |
-
if '13b' in model_name.lower():
|
25 |
-
from .llava import LLaVA_v0_13B_Pipeline
|
26 |
-
return LLaVA_v0_13B_Pipeline(params)
|
27 |
-
return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ank0X0/Image-Upscaling-Playground/app.py
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import cv2
|
3 |
-
import onnxruntime
|
4 |
-
import gradio as gr
|
5 |
-
|
6 |
-
|
7 |
-
def pre_process(img: np.array) -> np.array:
|
8 |
-
# H, W, C -> C, H, W
|
9 |
-
img = np.transpose(img[:, :, 0:3], (2, 0, 1))
|
10 |
-
# C, H, W -> 1, C, H, W
|
11 |
-
img = np.expand_dims(img, axis=0).astype(np.float32)
|
12 |
-
return img
|
13 |
-
|
14 |
-
|
15 |
-
def post_process(img: np.array) -> np.array:
|
16 |
-
# 1, C, H, W -> C, H, W
|
17 |
-
img = np.squeeze(img)
|
18 |
-
# C, H, W -> H, W, C
|
19 |
-
img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)
|
20 |
-
return img
|
21 |
-
|
22 |
-
|
23 |
-
def inference(model_path: str, img_array: np.array) -> np.array:
|
24 |
-
options = onnxruntime.SessionOptions()
|
25 |
-
options.intra_op_num_threads = 1
|
26 |
-
options.inter_op_num_threads = 1
|
27 |
-
ort_session = onnxruntime.InferenceSession(model_path, options)
|
28 |
-
ort_inputs = {ort_session.get_inputs()[0].name: img_array}
|
29 |
-
ort_outs = ort_session.run(None, ort_inputs)
|
30 |
-
|
31 |
-
return ort_outs[0]
|
32 |
-
|
33 |
-
|
34 |
-
def convert_pil_to_cv2(image):
|
35 |
-
# pil_image = image.convert("RGB")
|
36 |
-
open_cv_image = np.array(image)
|
37 |
-
# RGB to BGR
|
38 |
-
open_cv_image = open_cv_image[:, :, ::-1].copy()
|
39 |
-
return open_cv_image
|
40 |
-
|
41 |
-
|
42 |
-
def upscale(image, model):
|
43 |
-
model_path = f"models/{model}.ort"
|
44 |
-
img = convert_pil_to_cv2(image)
|
45 |
-
if img.ndim == 2:
|
46 |
-
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
47 |
-
|
48 |
-
if img.shape[2] == 4:
|
49 |
-
alpha = img[:, :, 3] # GRAY
|
50 |
-
alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) # BGR
|
51 |
-
alpha_output = post_process(inference(model_path, pre_process(alpha))) # BGR
|
52 |
-
alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY) # GRAY
|
53 |
-
|
54 |
-
img = img[:, :, 0:3] # BGR
|
55 |
-
image_output = post_process(inference(model_path, pre_process(img))) # BGR
|
56 |
-
image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA) # BGRA
|
57 |
-
image_output[:, :, 3] = alpha_output
|
58 |
-
|
59 |
-
elif img.shape[2] == 3:
|
60 |
-
image_output = post_process(inference(model_path, pre_process(img))) # BGR
|
61 |
-
|
62 |
-
return image_output
|
63 |
-
|
64 |
-
|
65 |
-
css = ".output-image, .input-image, .image-preview {height: 480px !important} "
|
66 |
-
model_choices = ["modelx2", "modelx2 25 JXL", "modelx4", "minecraft_modelx4"]
|
67 |
-
|
68 |
-
gr.Interface(
|
69 |
-
fn=upscale,
|
70 |
-
inputs=[
|
71 |
-
gr.inputs.Image(type="pil", label="Input Image"),
|
72 |
-
gr.inputs.Radio(
|
73 |
-
model_choices,
|
74 |
-
type="value",
|
75 |
-
default=None,
|
76 |
-
label="Choose Upscaler",
|
77 |
-
optional=False,
|
78 |
-
),
|
79 |
-
],
|
80 |
-
outputs="image",
|
81 |
-
title="Image Upscaling 🦆",
|
82 |
-
description="Model: [Anchor-based Plain Net for Mobile Image Super-Resolution](https://arxiv.org/abs/2105.09750). Repository: [SR Mobile PyTorch](https://github.com/w11wo/sr_mobile_pytorch)",
|
83 |
-
allow_flagging="never",
|
84 |
-
css=css,
|
85 |
-
).launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Annelisseishere/Streamlit_GPT/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Streamlit GPT
|
3 |
-
emoji: 🏢
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: blue
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.21.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Asahi402/anime-remove-background/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Anime Remove Background
|
3 |
-
emoji: 🪄🖼️
|
4 |
-
colorFrom: indigo
|
5 |
-
colorTo: pink
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.1.4
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
duplicated_from: skytnt/anime-remove-background
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/show.py
DELETED
@@ -1,189 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
from optparse import Values
|
3 |
-
from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional
|
4 |
-
|
5 |
-
from pip._vendor.packaging.utils import canonicalize_name
|
6 |
-
|
7 |
-
from pip._internal.cli.base_command import Command
|
8 |
-
from pip._internal.cli.status_codes import ERROR, SUCCESS
|
9 |
-
from pip._internal.metadata import BaseDistribution, get_default_environment
|
10 |
-
from pip._internal.utils.misc import write_output
|
11 |
-
|
12 |
-
logger = logging.getLogger(__name__)
|
13 |
-
|
14 |
-
|
15 |
-
class ShowCommand(Command):
|
16 |
-
"""
|
17 |
-
Show information about one or more installed packages.
|
18 |
-
|
19 |
-
The output is in RFC-compliant mail header format.
|
20 |
-
"""
|
21 |
-
|
22 |
-
usage = """
|
23 |
-
%prog [options] <package> ..."""
|
24 |
-
ignore_require_venv = True
|
25 |
-
|
26 |
-
def add_options(self) -> None:
|
27 |
-
self.cmd_opts.add_option(
|
28 |
-
"-f",
|
29 |
-
"--files",
|
30 |
-
dest="files",
|
31 |
-
action="store_true",
|
32 |
-
default=False,
|
33 |
-
help="Show the full list of installed files for each package.",
|
34 |
-
)
|
35 |
-
|
36 |
-
self.parser.insert_option_group(0, self.cmd_opts)
|
37 |
-
|
38 |
-
def run(self, options: Values, args: List[str]) -> int:
|
39 |
-
if not args:
|
40 |
-
logger.warning("ERROR: Please provide a package name or names.")
|
41 |
-
return ERROR
|
42 |
-
query = args
|
43 |
-
|
44 |
-
results = search_packages_info(query)
|
45 |
-
if not print_results(
|
46 |
-
results, list_files=options.files, verbose=options.verbose
|
47 |
-
):
|
48 |
-
return ERROR
|
49 |
-
return SUCCESS
|
50 |
-
|
51 |
-
|
52 |
-
class _PackageInfo(NamedTuple):
|
53 |
-
name: str
|
54 |
-
version: str
|
55 |
-
location: str
|
56 |
-
editable_project_location: Optional[str]
|
57 |
-
requires: List[str]
|
58 |
-
required_by: List[str]
|
59 |
-
installer: str
|
60 |
-
metadata_version: str
|
61 |
-
classifiers: List[str]
|
62 |
-
summary: str
|
63 |
-
homepage: str
|
64 |
-
project_urls: List[str]
|
65 |
-
author: str
|
66 |
-
author_email: str
|
67 |
-
license: str
|
68 |
-
entry_points: List[str]
|
69 |
-
files: Optional[List[str]]
|
70 |
-
|
71 |
-
|
72 |
-
def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:
|
73 |
-
"""
|
74 |
-
Gather details from installed distributions. Print distribution name,
|
75 |
-
version, location, and installed files. Installed files requires a
|
76 |
-
pip generated 'installed-files.txt' in the distributions '.egg-info'
|
77 |
-
directory.
|
78 |
-
"""
|
79 |
-
env = get_default_environment()
|
80 |
-
|
81 |
-
installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
|
82 |
-
query_names = [canonicalize_name(name) for name in query]
|
83 |
-
missing = sorted(
|
84 |
-
[name for name, pkg in zip(query, query_names) if pkg not in installed]
|
85 |
-
)
|
86 |
-
if missing:
|
87 |
-
logger.warning("Package(s) not found: %s", ", ".join(missing))
|
88 |
-
|
89 |
-
def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
|
90 |
-
return (
|
91 |
-
dist.metadata["Name"] or "UNKNOWN"
|
92 |
-
for dist in installed.values()
|
93 |
-
if current_dist.canonical_name
|
94 |
-
in {canonicalize_name(d.name) for d in dist.iter_dependencies()}
|
95 |
-
)
|
96 |
-
|
97 |
-
for query_name in query_names:
|
98 |
-
try:
|
99 |
-
dist = installed[query_name]
|
100 |
-
except KeyError:
|
101 |
-
continue
|
102 |
-
|
103 |
-
requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower)
|
104 |
-
required_by = sorted(_get_requiring_packages(dist), key=str.lower)
|
105 |
-
|
106 |
-
try:
|
107 |
-
entry_points_text = dist.read_text("entry_points.txt")
|
108 |
-
entry_points = entry_points_text.splitlines(keepends=False)
|
109 |
-
except FileNotFoundError:
|
110 |
-
entry_points = []
|
111 |
-
|
112 |
-
files_iter = dist.iter_declared_entries()
|
113 |
-
if files_iter is None:
|
114 |
-
files: Optional[List[str]] = None
|
115 |
-
else:
|
116 |
-
files = sorted(files_iter)
|
117 |
-
|
118 |
-
metadata = dist.metadata
|
119 |
-
|
120 |
-
yield _PackageInfo(
|
121 |
-
name=dist.raw_name,
|
122 |
-
version=str(dist.version),
|
123 |
-
location=dist.location or "",
|
124 |
-
editable_project_location=dist.editable_project_location,
|
125 |
-
requires=requires,
|
126 |
-
required_by=required_by,
|
127 |
-
installer=dist.installer,
|
128 |
-
metadata_version=dist.metadata_version or "",
|
129 |
-
classifiers=metadata.get_all("Classifier", []),
|
130 |
-
summary=metadata.get("Summary", ""),
|
131 |
-
homepage=metadata.get("Home-page", ""),
|
132 |
-
project_urls=metadata.get_all("Project-URL", []),
|
133 |
-
author=metadata.get("Author", ""),
|
134 |
-
author_email=metadata.get("Author-email", ""),
|
135 |
-
license=metadata.get("License", ""),
|
136 |
-
entry_points=entry_points,
|
137 |
-
files=files,
|
138 |
-
)
|
139 |
-
|
140 |
-
|
141 |
-
def print_results(
|
142 |
-
distributions: Iterable[_PackageInfo],
|
143 |
-
list_files: bool,
|
144 |
-
verbose: bool,
|
145 |
-
) -> bool:
|
146 |
-
"""
|
147 |
-
Print the information from installed distributions found.
|
148 |
-
"""
|
149 |
-
results_printed = False
|
150 |
-
for i, dist in enumerate(distributions):
|
151 |
-
results_printed = True
|
152 |
-
if i > 0:
|
153 |
-
write_output("---")
|
154 |
-
|
155 |
-
write_output("Name: %s", dist.name)
|
156 |
-
write_output("Version: %s", dist.version)
|
157 |
-
write_output("Summary: %s", dist.summary)
|
158 |
-
write_output("Home-page: %s", dist.homepage)
|
159 |
-
write_output("Author: %s", dist.author)
|
160 |
-
write_output("Author-email: %s", dist.author_email)
|
161 |
-
write_output("License: %s", dist.license)
|
162 |
-
write_output("Location: %s", dist.location)
|
163 |
-
if dist.editable_project_location is not None:
|
164 |
-
write_output(
|
165 |
-
"Editable project location: %s", dist.editable_project_location
|
166 |
-
)
|
167 |
-
write_output("Requires: %s", ", ".join(dist.requires))
|
168 |
-
write_output("Required-by: %s", ", ".join(dist.required_by))
|
169 |
-
|
170 |
-
if verbose:
|
171 |
-
write_output("Metadata-Version: %s", dist.metadata_version)
|
172 |
-
write_output("Installer: %s", dist.installer)
|
173 |
-
write_output("Classifiers:")
|
174 |
-
for classifier in dist.classifiers:
|
175 |
-
write_output(" %s", classifier)
|
176 |
-
write_output("Entry-points:")
|
177 |
-
for entry in dist.entry_points:
|
178 |
-
write_output(" %s", entry.strip())
|
179 |
-
write_output("Project-URLs:")
|
180 |
-
for project_url in dist.project_urls:
|
181 |
-
write_output(" %s", project_url)
|
182 |
-
if list_files:
|
183 |
-
write_output("Files:")
|
184 |
-
if dist.files is None:
|
185 |
-
write_output("Cannot locate RECORD or installed-files.txt")
|
186 |
-
else:
|
187 |
-
for line in dist.files:
|
188 |
-
write_output(" %s", line.strip())
|
189 |
-
return results_printed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/discovery.py
DELETED
@@ -1,600 +0,0 @@
|
|
1 |
-
"""Automatic discovery of Python modules and packages (for inclusion in the
|
2 |
-
distribution) and other config values.
|
3 |
-
|
4 |
-
For the purposes of this module, the following nomenclature is used:
|
5 |
-
|
6 |
-
- "src-layout": a directory representing a Python project that contains a "src"
|
7 |
-
folder. Everything under the "src" folder is meant to be included in the
|
8 |
-
distribution when packaging the project. Example::
|
9 |
-
|
10 |
-
.
|
11 |
-
├── tox.ini
|
12 |
-
├── pyproject.toml
|
13 |
-
└── src/
|
14 |
-
└── mypkg/
|
15 |
-
├── __init__.py
|
16 |
-
├── mymodule.py
|
17 |
-
└── my_data_file.txt
|
18 |
-
|
19 |
-
- "flat-layout": a Python project that does not use "src-layout" but instead
|
20 |
-
have a directory under the project root for each package::
|
21 |
-
|
22 |
-
.
|
23 |
-
├── tox.ini
|
24 |
-
├── pyproject.toml
|
25 |
-
└── mypkg/
|
26 |
-
├── __init__.py
|
27 |
-
├── mymodule.py
|
28 |
-
└── my_data_file.txt
|
29 |
-
|
30 |
-
- "single-module": a project that contains a single Python script direct under
|
31 |
-
the project root (no directory used)::
|
32 |
-
|
33 |
-
.
|
34 |
-
├── tox.ini
|
35 |
-
├── pyproject.toml
|
36 |
-
└── mymodule.py
|
37 |
-
|
38 |
-
"""
|
39 |
-
|
40 |
-
import itertools
|
41 |
-
import os
|
42 |
-
from fnmatch import fnmatchcase
|
43 |
-
from glob import glob
|
44 |
-
from pathlib import Path
|
45 |
-
from typing import (
|
46 |
-
TYPE_CHECKING,
|
47 |
-
Callable,
|
48 |
-
Dict,
|
49 |
-
Iterable,
|
50 |
-
Iterator,
|
51 |
-
List,
|
52 |
-
Mapping,
|
53 |
-
Optional,
|
54 |
-
Tuple,
|
55 |
-
Union
|
56 |
-
)
|
57 |
-
|
58 |
-
import _distutils_hack.override # noqa: F401
|
59 |
-
|
60 |
-
from distutils import log
|
61 |
-
from distutils.util import convert_path
|
62 |
-
|
63 |
-
_Path = Union[str, os.PathLike]
|
64 |
-
_Filter = Callable[[str], bool]
|
65 |
-
StrIter = Iterator[str]
|
66 |
-
|
67 |
-
chain_iter = itertools.chain.from_iterable
|
68 |
-
|
69 |
-
if TYPE_CHECKING:
|
70 |
-
from setuptools import Distribution # noqa
|
71 |
-
|
72 |
-
|
73 |
-
def _valid_name(path: _Path) -> bool:
|
74 |
-
# Ignore invalid names that cannot be imported directly
|
75 |
-
return os.path.basename(path).isidentifier()
|
76 |
-
|
77 |
-
|
78 |
-
class _Finder:
|
79 |
-
"""Base class that exposes functionality for module/package finders"""
|
80 |
-
|
81 |
-
ALWAYS_EXCLUDE: Tuple[str, ...] = ()
|
82 |
-
DEFAULT_EXCLUDE: Tuple[str, ...] = ()
|
83 |
-
|
84 |
-
@classmethod
|
85 |
-
def find(
|
86 |
-
cls,
|
87 |
-
where: _Path = '.',
|
88 |
-
exclude: Iterable[str] = (),
|
89 |
-
include: Iterable[str] = ('*',)
|
90 |
-
) -> List[str]:
|
91 |
-
"""Return a list of all Python items (packages or modules, depending on
|
92 |
-
the finder implementation) found within directory 'where'.
|
93 |
-
|
94 |
-
'where' is the root directory which will be searched.
|
95 |
-
It should be supplied as a "cross-platform" (i.e. URL-style) path;
|
96 |
-
it will be converted to the appropriate local path syntax.
|
97 |
-
|
98 |
-
'exclude' is a sequence of names to exclude; '*' can be used
|
99 |
-
as a wildcard in the names.
|
100 |
-
When finding packages, 'foo.*' will exclude all subpackages of 'foo'
|
101 |
-
(but not 'foo' itself).
|
102 |
-
|
103 |
-
'include' is a sequence of names to include.
|
104 |
-
If it's specified, only the named items will be included.
|
105 |
-
If it's not specified, all found items will be included.
|
106 |
-
'include' can contain shell style wildcard patterns just like
|
107 |
-
'exclude'.
|
108 |
-
"""
|
109 |
-
|
110 |
-
exclude = exclude or cls.DEFAULT_EXCLUDE
|
111 |
-
return list(
|
112 |
-
cls._find_iter(
|
113 |
-
convert_path(str(where)),
|
114 |
-
cls._build_filter(*cls.ALWAYS_EXCLUDE, *exclude),
|
115 |
-
cls._build_filter(*include),
|
116 |
-
)
|
117 |
-
)
|
118 |
-
|
119 |
-
@classmethod
|
120 |
-
def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
|
121 |
-
raise NotImplementedError
|
122 |
-
|
123 |
-
@staticmethod
|
124 |
-
def _build_filter(*patterns: str) -> _Filter:
|
125 |
-
"""
|
126 |
-
Given a list of patterns, return a callable that will be true only if
|
127 |
-
the input matches at least one of the patterns.
|
128 |
-
"""
|
129 |
-
return lambda name: any(fnmatchcase(name, pat) for pat in patterns)
|
130 |
-
|
131 |
-
|
132 |
-
class PackageFinder(_Finder):
|
133 |
-
"""
|
134 |
-
Generate a list of all Python packages found within a directory
|
135 |
-
"""
|
136 |
-
|
137 |
-
ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__")
|
138 |
-
|
139 |
-
@classmethod
|
140 |
-
def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
|
141 |
-
"""
|
142 |
-
All the packages found in 'where' that pass the 'include' filter, but
|
143 |
-
not the 'exclude' filter.
|
144 |
-
"""
|
145 |
-
for root, dirs, files in os.walk(str(where), followlinks=True):
|
146 |
-
# Copy dirs to iterate over it, then empty dirs.
|
147 |
-
all_dirs = dirs[:]
|
148 |
-
dirs[:] = []
|
149 |
-
|
150 |
-
for dir in all_dirs:
|
151 |
-
full_path = os.path.join(root, dir)
|
152 |
-
rel_path = os.path.relpath(full_path, where)
|
153 |
-
package = rel_path.replace(os.path.sep, '.')
|
154 |
-
|
155 |
-
# Skip directory trees that are not valid packages
|
156 |
-
if '.' in dir or not cls._looks_like_package(full_path, package):
|
157 |
-
continue
|
158 |
-
|
159 |
-
# Should this package be included?
|
160 |
-
if include(package) and not exclude(package):
|
161 |
-
yield package
|
162 |
-
|
163 |
-
# Keep searching subdirectories, as there may be more packages
|
164 |
-
# down there, even if the parent was excluded.
|
165 |
-
dirs.append(dir)
|
166 |
-
|
167 |
-
@staticmethod
|
168 |
-
def _looks_like_package(path: _Path, _package_name: str) -> bool:
|
169 |
-
"""Does a directory look like a package?"""
|
170 |
-
return os.path.isfile(os.path.join(path, '__init__.py'))
|
171 |
-
|
172 |
-
|
173 |
-
class PEP420PackageFinder(PackageFinder):
|
174 |
-
@staticmethod
|
175 |
-
def _looks_like_package(_path: _Path, _package_name: str) -> bool:
|
176 |
-
return True
|
177 |
-
|
178 |
-
|
179 |
-
class ModuleFinder(_Finder):
|
180 |
-
"""Find isolated Python modules.
|
181 |
-
This function will **not** recurse subdirectories.
|
182 |
-
"""
|
183 |
-
|
184 |
-
@classmethod
|
185 |
-
def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
|
186 |
-
for file in glob(os.path.join(where, "*.py")):
|
187 |
-
module, _ext = os.path.splitext(os.path.basename(file))
|
188 |
-
|
189 |
-
if not cls._looks_like_module(module):
|
190 |
-
continue
|
191 |
-
|
192 |
-
if include(module) and not exclude(module):
|
193 |
-
yield module
|
194 |
-
|
195 |
-
_looks_like_module = staticmethod(_valid_name)
|
196 |
-
|
197 |
-
|
198 |
-
# We have to be extra careful in the case of flat layout to not include files
|
199 |
-
# and directories not meant for distribution (e.g. tool-related)
|
200 |
-
|
201 |
-
|
202 |
-
class FlatLayoutPackageFinder(PEP420PackageFinder):
|
203 |
-
_EXCLUDE = (
|
204 |
-
"ci",
|
205 |
-
"bin",
|
206 |
-
"doc",
|
207 |
-
"docs",
|
208 |
-
"documentation",
|
209 |
-
"manpages",
|
210 |
-
"news",
|
211 |
-
"changelog",
|
212 |
-
"test",
|
213 |
-
"tests",
|
214 |
-
"unit_test",
|
215 |
-
"unit_tests",
|
216 |
-
"example",
|
217 |
-
"examples",
|
218 |
-
"scripts",
|
219 |
-
"tools",
|
220 |
-
"util",
|
221 |
-
"utils",
|
222 |
-
"python",
|
223 |
-
"build",
|
224 |
-
"dist",
|
225 |
-
"venv",
|
226 |
-
"env",
|
227 |
-
"requirements",
|
228 |
-
# ---- Task runners / Build tools ----
|
229 |
-
"tasks", # invoke
|
230 |
-
"fabfile", # fabric
|
231 |
-
"site_scons", # SCons
|
232 |
-
# ---- Other tools ----
|
233 |
-
"benchmark",
|
234 |
-
"benchmarks",
|
235 |
-
"exercise",
|
236 |
-
"exercises",
|
237 |
-
# ---- Hidden directories/Private packages ----
|
238 |
-
"[._]*",
|
239 |
-
)
|
240 |
-
|
241 |
-
DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE))
|
242 |
-
"""Reserved package names"""
|
243 |
-
|
244 |
-
@staticmethod
|
245 |
-
def _looks_like_package(_path: _Path, package_name: str) -> bool:
|
246 |
-
names = package_name.split('.')
|
247 |
-
# Consider PEP 561
|
248 |
-
root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs")
|
249 |
-
return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])
|
250 |
-
|
251 |
-
|
252 |
-
class FlatLayoutModuleFinder(ModuleFinder):
|
253 |
-
DEFAULT_EXCLUDE = (
|
254 |
-
"setup",
|
255 |
-
"conftest",
|
256 |
-
"test",
|
257 |
-
"tests",
|
258 |
-
"example",
|
259 |
-
"examples",
|
260 |
-
"build",
|
261 |
-
# ---- Task runners ----
|
262 |
-
"toxfile",
|
263 |
-
"noxfile",
|
264 |
-
"pavement",
|
265 |
-
"dodo",
|
266 |
-
"tasks",
|
267 |
-
"fabfile",
|
268 |
-
# ---- Other tools ----
|
269 |
-
"[Ss][Cc]onstruct", # SCons
|
270 |
-
"conanfile", # Connan: C/C++ build tool
|
271 |
-
"manage", # Django
|
272 |
-
"benchmark",
|
273 |
-
"benchmarks",
|
274 |
-
"exercise",
|
275 |
-
"exercises",
|
276 |
-
# ---- Hidden files/Private modules ----
|
277 |
-
"[._]*",
|
278 |
-
)
|
279 |
-
"""Reserved top-level module names"""
|
280 |
-
|
281 |
-
|
282 |
-
def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:
|
283 |
-
nested = PEP420PackageFinder.find(pkg_dir)
|
284 |
-
return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
|
285 |
-
|
286 |
-
|
287 |
-
class ConfigDiscovery:
|
288 |
-
"""Fill-in metadata and options that can be automatically derived
|
289 |
-
(from other metadata/options, the file system or conventions)
|
290 |
-
"""
|
291 |
-
|
292 |
-
def __init__(self, distribution: "Distribution"):
|
293 |
-
self.dist = distribution
|
294 |
-
self._called = False
|
295 |
-
self._disabled = False
|
296 |
-
self._skip_ext_modules = False
|
297 |
-
|
298 |
-
def _disable(self):
|
299 |
-
"""Internal API to disable automatic discovery"""
|
300 |
-
self._disabled = True
|
301 |
-
|
302 |
-
def _ignore_ext_modules(self):
|
303 |
-
"""Internal API to disregard ext_modules.
|
304 |
-
|
305 |
-
Normally auto-discovery would not be triggered if ``ext_modules`` are set
|
306 |
-
(this is done for backward compatibility with existing packages relying on
|
307 |
-
``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function
|
308 |
-
to ignore given ``ext_modules`` and proceed with the auto-discovery if
|
309 |
-
``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml
|
310 |
-
metadata).
|
311 |
-
"""
|
312 |
-
self._skip_ext_modules = True
|
313 |
-
|
314 |
-
@property
|
315 |
-
def _root_dir(self) -> _Path:
|
316 |
-
# The best is to wait until `src_root` is set in dist, before using _root_dir.
|
317 |
-
return self.dist.src_root or os.curdir
|
318 |
-
|
319 |
-
@property
|
320 |
-
def _package_dir(self) -> Dict[str, str]:
|
321 |
-
if self.dist.package_dir is None:
|
322 |
-
return {}
|
323 |
-
return self.dist.package_dir
|
324 |
-
|
325 |
-
def __call__(self, force=False, name=True, ignore_ext_modules=False):
|
326 |
-
"""Automatically discover missing configuration fields
|
327 |
-
and modifies the given ``distribution`` object in-place.
|
328 |
-
|
329 |
-
Note that by default this will only have an effect the first time the
|
330 |
-
``ConfigDiscovery`` object is called.
|
331 |
-
|
332 |
-
To repeatedly invoke automatic discovery (e.g. when the project
|
333 |
-
directory changes), please use ``force=True`` (or create a new
|
334 |
-
``ConfigDiscovery`` instance).
|
335 |
-
"""
|
336 |
-
if force is False and (self._called or self._disabled):
|
337 |
-
# Avoid overhead of multiple calls
|
338 |
-
return
|
339 |
-
|
340 |
-
self._analyse_package_layout(ignore_ext_modules)
|
341 |
-
if name:
|
342 |
-
self.analyse_name() # depends on ``packages`` and ``py_modules``
|
343 |
-
|
344 |
-
self._called = True
|
345 |
-
|
346 |
-
def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:
|
347 |
-
"""``True`` if the user has specified some form of package/module listing"""
|
348 |
-
ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules
|
349 |
-
ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)
|
350 |
-
return (
|
351 |
-
self.dist.packages is not None
|
352 |
-
or self.dist.py_modules is not None
|
353 |
-
or ext_modules
|
354 |
-
or hasattr(self.dist, "configuration") and self.dist.configuration
|
355 |
-
# ^ Some projects use numpy.distutils.misc_util.Configuration
|
356 |
-
)
|
357 |
-
|
358 |
-
def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:
|
359 |
-
if self._explicitly_specified(ignore_ext_modules):
|
360 |
-
# For backward compatibility, just try to find modules/packages
|
361 |
-
# when nothing is given
|
362 |
-
return True
|
363 |
-
|
364 |
-
log.debug(
|
365 |
-
"No `packages` or `py_modules` configuration, performing "
|
366 |
-
"automatic discovery."
|
367 |
-
)
|
368 |
-
|
369 |
-
return (
|
370 |
-
self._analyse_explicit_layout()
|
371 |
-
or self._analyse_src_layout()
|
372 |
-
# flat-layout is the trickiest for discovery so it should be last
|
373 |
-
or self._analyse_flat_layout()
|
374 |
-
)
|
375 |
-
|
376 |
-
def _analyse_explicit_layout(self) -> bool:
|
377 |
-
"""The user can explicitly give a package layout via ``package_dir``"""
|
378 |
-
package_dir = self._package_dir.copy() # don't modify directly
|
379 |
-
package_dir.pop("", None) # This falls under the "src-layout" umbrella
|
380 |
-
root_dir = self._root_dir
|
381 |
-
|
382 |
-
if not package_dir:
|
383 |
-
return False
|
384 |
-
|
385 |
-
log.debug(f"`explicit-layout` detected -- analysing {package_dir}")
|
386 |
-
pkgs = chain_iter(
|
387 |
-
_find_packages_within(pkg, os.path.join(root_dir, parent_dir))
|
388 |
-
for pkg, parent_dir in package_dir.items()
|
389 |
-
)
|
390 |
-
self.dist.packages = list(pkgs)
|
391 |
-
log.debug(f"discovered packages -- {self.dist.packages}")
|
392 |
-
return True
|
393 |
-
|
394 |
-
def _analyse_src_layout(self) -> bool:
|
395 |
-
"""Try to find all packages or modules under the ``src`` directory
|
396 |
-
(or anything pointed by ``package_dir[""]``).
|
397 |
-
|
398 |
-
The "src-layout" is relatively safe for automatic discovery.
|
399 |
-
We assume that everything within is meant to be included in the
|
400 |
-
distribution.
|
401 |
-
|
402 |
-
If ``package_dir[""]`` is not given, but the ``src`` directory exists,
|
403 |
-
this function will set ``package_dir[""] = "src"``.
|
404 |
-
"""
|
405 |
-
package_dir = self._package_dir
|
406 |
-
src_dir = os.path.join(self._root_dir, package_dir.get("", "src"))
|
407 |
-
if not os.path.isdir(src_dir):
|
408 |
-
return False
|
409 |
-
|
410 |
-
log.debug(f"`src-layout` detected -- analysing {src_dir}")
|
411 |
-
package_dir.setdefault("", os.path.basename(src_dir))
|
412 |
-
self.dist.package_dir = package_dir # persist eventual modifications
|
413 |
-
self.dist.packages = PEP420PackageFinder.find(src_dir)
|
414 |
-
self.dist.py_modules = ModuleFinder.find(src_dir)
|
415 |
-
log.debug(f"discovered packages -- {self.dist.packages}")
|
416 |
-
log.debug(f"discovered py_modules -- {self.dist.py_modules}")
|
417 |
-
return True
|
418 |
-
|
419 |
-
def _analyse_flat_layout(self) -> bool:
|
420 |
-
"""Try to find all packages and modules under the project root.
|
421 |
-
|
422 |
-
Since the ``flat-layout`` is more dangerous in terms of accidentally including
|
423 |
-
extra files/directories, this function is more conservative and will raise an
|
424 |
-
error if multiple packages or modules are found.
|
425 |
-
|
426 |
-
This assumes that multi-package dists are uncommon and refuse to support that
|
427 |
-
use case in order to be able to prevent unintended errors.
|
428 |
-
"""
|
429 |
-
log.debug(f"`flat-layout` detected -- analysing {self._root_dir}")
|
430 |
-
return self._analyse_flat_packages() or self._analyse_flat_modules()
|
431 |
-
|
432 |
-
def _analyse_flat_packages(self) -> bool:
|
433 |
-
self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)
|
434 |
-
top_level = remove_nested_packages(remove_stubs(self.dist.packages))
|
435 |
-
log.debug(f"discovered packages -- {self.dist.packages}")
|
436 |
-
self._ensure_no_accidental_inclusion(top_level, "packages")
|
437 |
-
return bool(top_level)
|
438 |
-
|
439 |
-
def _analyse_flat_modules(self) -> bool:
|
440 |
-
self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)
|
441 |
-
log.debug(f"discovered py_modules -- {self.dist.py_modules}")
|
442 |
-
self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules")
|
443 |
-
return bool(self.dist.py_modules)
|
444 |
-
|
445 |
-
def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):
|
446 |
-
if len(detected) > 1:
|
447 |
-
from inspect import cleandoc
|
448 |
-
|
449 |
-
from setuptools.errors import PackageDiscoveryError
|
450 |
-
|
451 |
-
msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}.
|
452 |
-
|
453 |
-
To avoid accidental inclusion of unwanted files or directories,
|
454 |
-
setuptools will not proceed with this build.
|
455 |
-
|
456 |
-
If you are trying to create a single distribution with multiple {kind}
|
457 |
-
on purpose, you should not rely on automatic discovery.
|
458 |
-
Instead, consider the following options:
|
459 |
-
|
460 |
-
1. set up custom discovery (`find` directive with `include` or `exclude`)
|
461 |
-
2. use a `src-layout`
|
462 |
-
3. explicitly set `py_modules` or `packages` with a list of names
|
463 |
-
|
464 |
-
To find more information, look for "package discovery" on setuptools docs.
|
465 |
-
"""
|
466 |
-
raise PackageDiscoveryError(cleandoc(msg))
|
467 |
-
|
468 |
-
def analyse_name(self):
|
469 |
-
"""The packages/modules are the essential contribution of the author.
|
470 |
-
Therefore the name of the distribution can be derived from them.
|
471 |
-
"""
|
472 |
-
if self.dist.metadata.name or self.dist.name:
|
473 |
-
# get_name() is not reliable (can return "UNKNOWN")
|
474 |
-
return None
|
475 |
-
|
476 |
-
log.debug("No `name` configuration, performing automatic discovery")
|
477 |
-
|
478 |
-
name = (
|
479 |
-
self._find_name_single_package_or_module()
|
480 |
-
or self._find_name_from_packages()
|
481 |
-
)
|
482 |
-
if name:
|
483 |
-
self.dist.metadata.name = name
|
484 |
-
|
485 |
-
def _find_name_single_package_or_module(self) -> Optional[str]:
|
486 |
-
"""Exactly one module or package"""
|
487 |
-
for field in ('packages', 'py_modules'):
|
488 |
-
items = getattr(self.dist, field, None) or []
|
489 |
-
if items and len(items) == 1:
|
490 |
-
log.debug(f"Single module/package detected, name: {items[0]}")
|
491 |
-
return items[0]
|
492 |
-
|
493 |
-
return None
|
494 |
-
|
495 |
-
def _find_name_from_packages(self) -> Optional[str]:
|
496 |
-
"""Try to find the root package that is not a PEP 420 namespace"""
|
497 |
-
if not self.dist.packages:
|
498 |
-
return None
|
499 |
-
|
500 |
-
packages = remove_stubs(sorted(self.dist.packages, key=len))
|
501 |
-
package_dir = self.dist.package_dir or {}
|
502 |
-
|
503 |
-
parent_pkg = find_parent_package(packages, package_dir, self._root_dir)
|
504 |
-
if parent_pkg:
|
505 |
-
log.debug(f"Common parent package detected, name: {parent_pkg}")
|
506 |
-
return parent_pkg
|
507 |
-
|
508 |
-
log.warn("No parent package detected, impossible to derive `name`")
|
509 |
-
return None
|
510 |
-
|
511 |
-
|
512 |
-
def remove_nested_packages(packages: List[str]) -> List[str]:
|
513 |
-
"""Remove nested packages from a list of packages.
|
514 |
-
|
515 |
-
>>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"])
|
516 |
-
['a']
|
517 |
-
>>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"])
|
518 |
-
['a', 'b', 'c.d', 'g.h']
|
519 |
-
"""
|
520 |
-
pkgs = sorted(packages, key=len)
|
521 |
-
top_level = pkgs[:]
|
522 |
-
size = len(pkgs)
|
523 |
-
for i, name in enumerate(reversed(pkgs)):
|
524 |
-
if any(name.startswith(f"{other}.") for other in top_level):
|
525 |
-
top_level.pop(size - i - 1)
|
526 |
-
|
527 |
-
return top_level
|
528 |
-
|
529 |
-
|
530 |
-
def remove_stubs(packages: List[str]) -> List[str]:
|
531 |
-
"""Remove type stubs (:pep:`561`) from a list of packages.
|
532 |
-
|
533 |
-
>>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"])
|
534 |
-
['a', 'a.b', 'b']
|
535 |
-
"""
|
536 |
-
return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")]
|
537 |
-
|
538 |
-
|
539 |
-
def find_parent_package(
|
540 |
-
packages: List[str], package_dir: Mapping[str, str], root_dir: _Path
|
541 |
-
) -> Optional[str]:
|
542 |
-
"""Find the parent package that is not a namespace."""
|
543 |
-
packages = sorted(packages, key=len)
|
544 |
-
common_ancestors = []
|
545 |
-
for i, name in enumerate(packages):
|
546 |
-
if not all(n.startswith(f"{name}.") for n in packages[i+1:]):
|
547 |
-
# Since packages are sorted by length, this condition is able
|
548 |
-
# to find a list of all common ancestors.
|
549 |
-
# When there is divergence (e.g. multiple root packages)
|
550 |
-
# the list will be empty
|
551 |
-
break
|
552 |
-
common_ancestors.append(name)
|
553 |
-
|
554 |
-
for name in common_ancestors:
|
555 |
-
pkg_path = find_package_path(name, package_dir, root_dir)
|
556 |
-
init = os.path.join(pkg_path, "__init__.py")
|
557 |
-
if os.path.isfile(init):
|
558 |
-
return name
|
559 |
-
|
560 |
-
return None
|
561 |
-
|
562 |
-
|
563 |
-
def find_package_path(
|
564 |
-
name: str, package_dir: Mapping[str, str], root_dir: _Path
|
565 |
-
) -> str:
|
566 |
-
"""Given a package name, return the path where it should be found on
|
567 |
-
disk, considering the ``package_dir`` option.
|
568 |
-
|
569 |
-
>>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".")
|
570 |
-
>>> path.replace(os.sep, "/")
|
571 |
-
'./root/is/nested/my/pkg'
|
572 |
-
|
573 |
-
>>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".")
|
574 |
-
>>> path.replace(os.sep, "/")
|
575 |
-
'./root/is/nested/pkg'
|
576 |
-
|
577 |
-
>>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".")
|
578 |
-
>>> path.replace(os.sep, "/")
|
579 |
-
'./root/is/nested'
|
580 |
-
|
581 |
-
>>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".")
|
582 |
-
>>> path.replace(os.sep, "/")
|
583 |
-
'./other/pkg'
|
584 |
-
"""
|
585 |
-
parts = name.split(".")
|
586 |
-
for i in range(len(parts), 0, -1):
|
587 |
-
# Look backwards, the most specific package_dir first
|
588 |
-
partial_name = ".".join(parts[:i])
|
589 |
-
if partial_name in package_dir:
|
590 |
-
parent = package_dir[partial_name]
|
591 |
-
return os.path.join(root_dir, parent, *parts[i:])
|
592 |
-
|
593 |
-
parent = package_dir.get("") or ""
|
594 |
-
return os.path.join(root_dir, *parent.split("/"), *parts)
|
595 |
-
|
596 |
-
|
597 |
-
def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:
|
598 |
-
parent_pkgs = remove_nested_packages(packages)
|
599 |
-
prefix = Path(package_path).parts
|
600 |
-
return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Azurro/APT-1B-Base/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: APT-1B-Base
|
3 |
-
emoji: 💻
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.34.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc-by-nc-4.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BIOML-SVM/SVM/proteinbind_new.py
DELETED
@@ -1,283 +0,0 @@
|
|
1 |
-
from types import SimpleNamespace
|
2 |
-
|
3 |
-
import pandas as pd
|
4 |
-
import torch
|
5 |
-
import torch.nn as nn
|
6 |
-
from torch.utils.data import Dataset
|
7 |
-
|
8 |
-
|
9 |
-
ModalityType = SimpleNamespace(
|
10 |
-
AA="aa",
|
11 |
-
DNA="dna",
|
12 |
-
PDB="pdb",
|
13 |
-
GO="go",
|
14 |
-
MSA="msa",
|
15 |
-
TEXT="text",
|
16 |
-
)
|
17 |
-
|
18 |
-
|
19 |
-
class Normalize(nn.Module):
|
20 |
-
def __init__(self, dim: int) -> None:
|
21 |
-
super().__init__()
|
22 |
-
self.dim = dim
|
23 |
-
|
24 |
-
def forward(self, x):
|
25 |
-
return torch.nn.functional.normalize(x, dim=self.dim, p=2)
|
26 |
-
|
27 |
-
|
28 |
-
class EmbeddingDataset(Dataset):
|
29 |
-
"""
|
30 |
-
The main class for turning any modality to a torch Dataset that can be passed to
|
31 |
-
a torch dataloader. Any modality that doesn't fit into the __getitem__
|
32 |
-
method can subclass this and modify the __getitem__ method.
|
33 |
-
"""
|
34 |
-
def __init__(self, sequence_file_path, embeddings_file_path, modality):
|
35 |
-
self.sequence = pd.read_csv(sequence_file_path)
|
36 |
-
self.embedding = torch.load(embeddings_file_path)
|
37 |
-
self.modality = modality
|
38 |
-
|
39 |
-
def __len__(self):
|
40 |
-
return len(self.sequence)
|
41 |
-
|
42 |
-
def __getitem__(self, idx):
|
43 |
-
sequence = self.sequence.iloc[idx, 0]
|
44 |
-
embedding = self.embedding[idx]
|
45 |
-
return {"aa": sequence, self.modality: embedding}
|
46 |
-
|
47 |
-
|
48 |
-
class DualEmbeddingDataset(Dataset):
|
49 |
-
"""
|
50 |
-
The main class for turning any modality to a torch Dataset that can be passed to
|
51 |
-
a torch dataloader. Any modality that doesn't fit into the __getitem__
|
52 |
-
method can subclass this and modify the __getitem__ method.
|
53 |
-
"""
|
54 |
-
def __init__(self, sequence_embeddings_file_path, embeddings_file_path, modality):
|
55 |
-
self.sequence_embedding = torch.load(sequence_embeddings_file_path)
|
56 |
-
self.embedding = torch.load(embeddings_file_path)
|
57 |
-
self.modality = modality
|
58 |
-
|
59 |
-
def __len__(self):
|
60 |
-
return len(self.sequence_embedding)
|
61 |
-
|
62 |
-
def __getitem__(self, idx):
|
63 |
-
sequence_embedding = self.sequence_embedding[idx]
|
64 |
-
embedding = self.embedding[idx]
|
65 |
-
return {"aa": sequence_embedding, self.modality: embedding}
|
66 |
-
|
67 |
-
|
68 |
-
class ProteinBindModel(nn.Module):
|
69 |
-
|
70 |
-
def __init__(
|
71 |
-
self,
|
72 |
-
aa_embed_dim,
|
73 |
-
dna_embed_dim,
|
74 |
-
pdb_embed_dim,
|
75 |
-
go_embed_dim,
|
76 |
-
msa_embed_dim,
|
77 |
-
text_embed_dim,
|
78 |
-
in_embed_dim,
|
79 |
-
out_embed_dim
|
80 |
-
):
|
81 |
-
super().__init__()
|
82 |
-
self.modality_trunks = self._create_modality_trunk(
|
83 |
-
aa_embed_dim,
|
84 |
-
dna_embed_dim,
|
85 |
-
pdb_embed_dim,
|
86 |
-
go_embed_dim,
|
87 |
-
msa_embed_dim,
|
88 |
-
text_embed_dim,
|
89 |
-
out_embed_dim
|
90 |
-
)
|
91 |
-
self.modality_heads = self._create_modality_head(
|
92 |
-
in_embed_dim,
|
93 |
-
out_embed_dim,
|
94 |
-
)
|
95 |
-
self.modality_postprocessors = self._create_modality_postprocessors(
|
96 |
-
out_embed_dim
|
97 |
-
)
|
98 |
-
|
99 |
-
def _create_modality_trunk(
|
100 |
-
self,
|
101 |
-
aa_embed_dim,
|
102 |
-
dna_embed_dim,
|
103 |
-
pdb_embed_dim,
|
104 |
-
go_embed_dim,
|
105 |
-
msa_embed_dim,
|
106 |
-
text_embed_dim,
|
107 |
-
in_embed_dim
|
108 |
-
):
|
109 |
-
"""
|
110 |
-
The current layers are just a proof of concept
|
111 |
-
and are subject to the opinion of others.
|
112 |
-
:param aa_embed_dim:
|
113 |
-
:param dna_embed_dim:
|
114 |
-
:param pdb_embed_dim:
|
115 |
-
:param go_embed_dim:
|
116 |
-
:param msa_embed_dim:
|
117 |
-
:param text_embed_dim:
|
118 |
-
:param in_embed_dim:
|
119 |
-
:return:
|
120 |
-
"""
|
121 |
-
modality_trunks = {}
|
122 |
-
|
123 |
-
modality_trunks[ModalityType.AA] = nn.Sequential(
|
124 |
-
nn.Linear(aa_embed_dim, 512),
|
125 |
-
nn.ReLU(),
|
126 |
-
nn.Linear(512, 512),
|
127 |
-
nn.ReLU(),
|
128 |
-
nn.Linear(512, in_embed_dim),
|
129 |
-
)
|
130 |
-
|
131 |
-
modality_trunks[ModalityType.DNA] = nn.Sequential(
|
132 |
-
nn.Linear(dna_embed_dim, 512),
|
133 |
-
nn.ReLU(),
|
134 |
-
nn.Linear(512, 512),
|
135 |
-
nn.ReLU(),
|
136 |
-
nn.Linear(512, in_embed_dim),
|
137 |
-
)
|
138 |
-
|
139 |
-
modality_trunks[ModalityType.PDB] = nn.Sequential(
|
140 |
-
nn.Linear(pdb_embed_dim, 512),
|
141 |
-
nn.ReLU(),
|
142 |
-
nn.Linear(512, 512),
|
143 |
-
nn.ReLU(),
|
144 |
-
nn.Linear(512, in_embed_dim),
|
145 |
-
)
|
146 |
-
|
147 |
-
modality_trunks[ModalityType.GO] = nn.Sequential(
|
148 |
-
nn.Linear(go_embed_dim, 512),
|
149 |
-
nn.ReLU(),
|
150 |
-
nn.Linear(512, 512),
|
151 |
-
nn.ReLU(),
|
152 |
-
nn.Linear(512, in_embed_dim),
|
153 |
-
)
|
154 |
-
|
155 |
-
modality_trunks[ModalityType.MSA] = nn.Sequential(
|
156 |
-
nn.Linear(msa_embed_dim, 512),
|
157 |
-
nn.ReLU(),
|
158 |
-
nn.Linear(512, 512),
|
159 |
-
nn.ReLU(),
|
160 |
-
nn.Linear(512, in_embed_dim),
|
161 |
-
)
|
162 |
-
|
163 |
-
modality_trunks[ModalityType.TEXT] = nn.Sequential(
|
164 |
-
nn.Linear(text_embed_dim, 512),
|
165 |
-
nn.ReLU(),
|
166 |
-
nn.Linear(512, 512),
|
167 |
-
nn.ReLU(),
|
168 |
-
nn.Linear(512, in_embed_dim),
|
169 |
-
)
|
170 |
-
|
171 |
-
return nn.ModuleDict(modality_trunks)
|
172 |
-
|
173 |
-
def _create_modality_head(
|
174 |
-
self,
|
175 |
-
in_embed_dim,
|
176 |
-
out_embed_dim
|
177 |
-
):
|
178 |
-
modality_heads = {}
|
179 |
-
|
180 |
-
modality_heads[ModalityType.AA] = nn.Sequential(
|
181 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
182 |
-
nn.Dropout(p=0.5),
|
183 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
184 |
-
)
|
185 |
-
|
186 |
-
modality_heads[ModalityType.DNA] = nn.Sequential(
|
187 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
188 |
-
nn.Dropout(p=0.5),
|
189 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
190 |
-
)
|
191 |
-
|
192 |
-
modality_heads[ModalityType.PDB] = nn.Sequential(
|
193 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
194 |
-
nn.Dropout(p=0.5),
|
195 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
196 |
-
)
|
197 |
-
|
198 |
-
modality_heads[ModalityType.GO] = nn.Sequential(
|
199 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
200 |
-
nn.Dropout(p=0.5),
|
201 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
202 |
-
)
|
203 |
-
|
204 |
-
modality_heads[ModalityType.MSA] = nn.Sequential(
|
205 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
206 |
-
nn.Dropout(p=0.5),
|
207 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
208 |
-
)
|
209 |
-
|
210 |
-
modality_heads[ModalityType.TEXT] = nn.Sequential(
|
211 |
-
nn.LayerNorm(normalized_shape=in_embed_dim, eps=1e-6),
|
212 |
-
nn.Dropout(p=0.5),
|
213 |
-
nn.Linear(in_embed_dim, out_embed_dim, bias=False),
|
214 |
-
)
|
215 |
-
return nn.ModuleDict(modality_heads)
|
216 |
-
|
217 |
-
def _create_modality_postprocessors(self, out_embed_dim):
|
218 |
-
modality_postprocessors = {}
|
219 |
-
modality_postprocessors[ModalityType.AA] = Normalize(dim=-1)
|
220 |
-
modality_postprocessors[ModalityType.DNA] = Normalize(dim=-1)
|
221 |
-
modality_postprocessors[ModalityType.PDB] = Normalize(dim=-1)
|
222 |
-
modality_postprocessors[ModalityType.TEXT] = Normalize(dim=-1)
|
223 |
-
modality_postprocessors[ModalityType.GO] = Normalize(dim=-1)
|
224 |
-
modality_postprocessors[ModalityType.MSA] = Normalize(dim=-1)
|
225 |
-
|
226 |
-
return nn.ModuleDict(modality_postprocessors)
|
227 |
-
|
228 |
-
def forward(self, inputs):
|
229 |
-
"""
|
230 |
-
input = {k_1: [v],k_n: [v]}
|
231 |
-
for key in input
|
232 |
-
get trunk for key
|
233 |
-
forward pass of value in trunk
|
234 |
-
get projection head of key
|
235 |
-
forward pass of value in projection head
|
236 |
-
append output in output dict
|
237 |
-
return { k_1, [o], k_n: [o]}
|
238 |
-
"""
|
239 |
-
|
240 |
-
outputs = {}
|
241 |
-
|
242 |
-
for modality_key, modality_value in inputs.items():
|
243 |
-
|
244 |
-
modality_value = self.modality_trunks[modality_key](
|
245 |
-
modality_value
|
246 |
-
)
|
247 |
-
|
248 |
-
modality_value = self.modality_heads[modality_key](
|
249 |
-
modality_value
|
250 |
-
)
|
251 |
-
|
252 |
-
modality_value = self.modality_postprocessors[modality_key](
|
253 |
-
modality_value
|
254 |
-
)
|
255 |
-
outputs[modality_key] = modality_value
|
256 |
-
|
257 |
-
return outputs
|
258 |
-
|
259 |
-
|
260 |
-
def create_proteinbind(pretrained=False):
|
261 |
-
"""
|
262 |
-
The embedding dimensions here are dummy
|
263 |
-
:param pretrained:
|
264 |
-
:return:
|
265 |
-
"""
|
266 |
-
model = ProteinBindModel(
|
267 |
-
aa_embed_dim=480,
|
268 |
-
dna_embed_dim=1280,
|
269 |
-
pdb_embed_dim=128,
|
270 |
-
go_embed_dim=600,
|
271 |
-
msa_embed_dim=768,
|
272 |
-
text_embed_dim=768,
|
273 |
-
in_embed_dim=1024,
|
274 |
-
out_embed_dim=1024
|
275 |
-
)
|
276 |
-
|
277 |
-
if pretrained:
|
278 |
-
# get path from config
|
279 |
-
PATH = 'best_model.pth'
|
280 |
-
|
281 |
-
model.load_state_dict(torch.load(PATH))
|
282 |
-
|
283 |
-
return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/layers.py
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn.functional as F
|
3 |
-
from torch import nn
|
4 |
-
|
5 |
-
from . import spec_utils
|
6 |
-
|
7 |
-
|
8 |
-
class Conv2DBNActiv(nn.Module):
|
9 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
10 |
-
super(Conv2DBNActiv, self).__init__()
|
11 |
-
self.conv = nn.Sequential(
|
12 |
-
nn.Conv2d(
|
13 |
-
nin,
|
14 |
-
nout,
|
15 |
-
kernel_size=ksize,
|
16 |
-
stride=stride,
|
17 |
-
padding=pad,
|
18 |
-
dilation=dilation,
|
19 |
-
bias=False,
|
20 |
-
),
|
21 |
-
nn.BatchNorm2d(nout),
|
22 |
-
activ(),
|
23 |
-
)
|
24 |
-
|
25 |
-
def __call__(self, x):
|
26 |
-
return self.conv(x)
|
27 |
-
|
28 |
-
|
29 |
-
class SeperableConv2DBNActiv(nn.Module):
|
30 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
|
31 |
-
super(SeperableConv2DBNActiv, self).__init__()
|
32 |
-
self.conv = nn.Sequential(
|
33 |
-
nn.Conv2d(
|
34 |
-
nin,
|
35 |
-
nin,
|
36 |
-
kernel_size=ksize,
|
37 |
-
stride=stride,
|
38 |
-
padding=pad,
|
39 |
-
dilation=dilation,
|
40 |
-
groups=nin,
|
41 |
-
bias=False,
|
42 |
-
),
|
43 |
-
nn.Conv2d(nin, nout, kernel_size=1, bias=False),
|
44 |
-
nn.BatchNorm2d(nout),
|
45 |
-
activ(),
|
46 |
-
)
|
47 |
-
|
48 |
-
def __call__(self, x):
|
49 |
-
return self.conv(x)
|
50 |
-
|
51 |
-
|
52 |
-
class Encoder(nn.Module):
|
53 |
-
def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
|
54 |
-
super(Encoder, self).__init__()
|
55 |
-
self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
56 |
-
self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
|
57 |
-
|
58 |
-
def __call__(self, x):
|
59 |
-
skip = self.conv1(x)
|
60 |
-
h = self.conv2(skip)
|
61 |
-
|
62 |
-
return h, skip
|
63 |
-
|
64 |
-
|
65 |
-
class Decoder(nn.Module):
|
66 |
-
def __init__(
|
67 |
-
self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
|
68 |
-
):
|
69 |
-
super(Decoder, self).__init__()
|
70 |
-
self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
|
71 |
-
self.dropout = nn.Dropout2d(0.1) if dropout else None
|
72 |
-
|
73 |
-
def __call__(self, x, skip=None):
|
74 |
-
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
|
75 |
-
if skip is not None:
|
76 |
-
skip = spec_utils.crop_center(skip, x)
|
77 |
-
x = torch.cat([x, skip], dim=1)
|
78 |
-
h = self.conv(x)
|
79 |
-
|
80 |
-
if self.dropout is not None:
|
81 |
-
h = self.dropout(h)
|
82 |
-
|
83 |
-
return h
|
84 |
-
|
85 |
-
|
86 |
-
class ASPPModule(nn.Module):
|
87 |
-
def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
|
88 |
-
super(ASPPModule, self).__init__()
|
89 |
-
self.conv1 = nn.Sequential(
|
90 |
-
nn.AdaptiveAvgPool2d((1, None)),
|
91 |
-
Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
|
92 |
-
)
|
93 |
-
self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
|
94 |
-
self.conv3 = SeperableConv2DBNActiv(
|
95 |
-
nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
|
96 |
-
)
|
97 |
-
self.conv4 = SeperableConv2DBNActiv(
|
98 |
-
nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
|
99 |
-
)
|
100 |
-
self.conv5 = SeperableConv2DBNActiv(
|
101 |
-
nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
|
102 |
-
)
|
103 |
-
self.bottleneck = nn.Sequential(
|
104 |
-
Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
|
105 |
-
)
|
106 |
-
|
107 |
-
def forward(self, x):
|
108 |
-
_, _, h, w = x.size()
|
109 |
-
feat1 = F.interpolate(
|
110 |
-
self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
|
111 |
-
)
|
112 |
-
feat2 = self.conv2(x)
|
113 |
-
feat3 = self.conv3(x)
|
114 |
-
feat4 = self.conv4(x)
|
115 |
-
feat5 = self.conv5(x)
|
116 |
-
out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
|
117 |
-
bottle = self.bottleneck(out)
|
118 |
-
return bottle
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Alquimia Clsico 2 Mod Apk.md
DELETED
@@ -1,121 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Proyecto deriva 2.0 Mod APK 43: Todo lo que necesita saber</h1>
|
3 |
-
<p>Si eres un fan de los juegos de carreras, especialmente los juegos de deriva, es posible que hayas oído hablar de Project Drift 2.0, un juego de simulación de deriva realista y desafiante para dispositivos Android. En este artículo, le diremos todo lo que necesita saber sobre Project Drift 2.0 Mod APK 43, una versión modificada del juego que le da dinero ilimitado y acceso a todos los coches y pistas en el juego. También compartiremos algunos consejos y trucos sobre cómo jugar el juego y dominar el arte de la deriva. </p>
|
4 |
-
<h2>alquimia clásico 2 mod apk</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://bltlly.com/2v6Jf8">https://bltlly.com/2v6Jf8</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Project Drift 2.0? </h2>
|
6 |
-
<p>Project Drift 2.0 es una secuela del popular juego Project Drift, desarrollado por Bycodec Games, un estudio de juegos indie turco. El juego está diseñado para proporcionar una experiencia de deriva realista e inmersiva, con gráficos impresionantes, manejo de automóviles basado en la física y varios modos de juego y desafíos. </p>
|
7 |
-
<h3>Características de la deriva del proyecto 2.0</h3>
|
8 |
-
<p>Algunas de las características de Project Drift 2.0 son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Más de 50 coches diferentes para elegir, cada uno con sus propias características y opciones de personalización. </li>
|
11 |
-
<li>Más de 20 pistas diferentes para seguir, cada una con su propio diseño y nivel de dificultad. </li>
|
12 |
-
<li>Cuatro modos de juego: Carrera, Free Ride, Time Attack y Multijugador.</li>
|
13 |
-
<li>Modo de carrera: Completa varias misiones y desafíos para ganar dinero y reputación. </li>
|
14 |
-
<li>Modo Free Ride: Explora las pistas y practica tus habilidades de deriva sin ninguna presión o límite de tiempo. </li>
|
15 |
-
<li>Modo de ataque de tiempo: Carrera contra el reloj y tratar de batir sus propios u otros jugadores' registros. </li>
|
16 |
-
<li>Modo multijugador: Compite con otros jugadores en línea en batallas a la deriva en tiempo real. </li>
|
17 |
-
<li>Tablas de clasificación y logros: Seguimiento de su progreso y comparar su rendimiento con otros jugadores de todo el mundo. </li>
|
18 |
-
</ul>
|
19 |
-
<h3>Cómo descargar e instalar el proyecto Drift 2.0 Mod APK 43</h3>
|
20 |
-
|
21 |
-
<p>Para descargar e instalar Project Drift 2.0 Mod APK 43, siga estos pasos:</p>
|
22 |
-
<ol>
|
23 |
-
<li>Ir a [HappyMod]( 1 ), un sitio web que proporciona descargas apk mod seguro y confiable para varios juegos y aplicaciones. </li>
|
24 |
-
<li>Buscar "Proyecto de deriva 2.0 Mod APK" en la barra de búsqueda. </li>
|
25 |
-
<li>Seleccione la versión que dice "Proyecto de deriva 2.0 Mod Apk [dinero ilimitado]". Asegúrese de que coincide con el número de versión "43". </li>
|
26 |
-
<li>Haga clic en el botón "Descargar" y espere a que el archivo se descargue en su dispositivo. </li>
|
27 |
-
<li> Una vez que el archivo se descarga, localizarlo en el administrador de archivos y toque en él para instalarlo. </li>
|
28 |
-
<li>Si ves una ventana emergente que dice "Instalar bloqueado", ve a la configuración de tu dispositivo y habilita "Fuentes desconocidas" en las opciones de seguridad. </li>
|
29 |
-
<li>Después de instalar el apk mod, lanzar el juego y disfrutar de dinero ilimitado y el acceso a todos los coches y pistas en el juego. </li>
|
30 |
-
</ol>
|
31 |
-
<h2>¿Por qué usar Project Drift 2.0 Mod APK 43? </h2>
|
32 |
-
<p>Es posible que se pregunte por qué debe utilizar Project Drift 2.0 Mod APK 43 en lugar de la versión original del juego. Bueno, hay algunos beneficios y riesgos de usar el apk mod que usted debe ser consciente de antes de decidir usarlo. </p>
|
33 |
-
<h3>Beneficios de la deriva del proyecto 2.0 Mod APK 43</h3>
|
34 |
-
<p>Algunos de los beneficios de usar Project Drift 2.0 Mod APK 43 son:</p>
|
35 |
-
<ul>
|
36 |
-
<li> Usted puede obtener dinero ilimitado en el juego, que se puede utilizar para comprar y actualizar cualquier coche que desee. </li>
|
37 |
-
<li> Puede desbloquear todos los coches y pistas en el juego, lo que le da más variedad y opciones para elegir. </li>
|
38 |
-
<li>Puedes disfrutar del juego sin anuncios ni interrupciones, lo que puede mejorar tu experiencia de juego y rendimiento. </li>
|
39 |
-
<li>Puedes divertirte más y desafiarte con los diferentes modos y niveles de juego, sin preocuparte por quedarte sin dinero o recursos. </li>
|
40 |
-
</ul>
|
41 |
-
<h3>Riesgos de la deriva del proyecto 2.0 Mod APK 43</h3>
|
42 |
-
<p>Algunos de los riesgos de usar Project Drift 2.0 Mod APK 43 son:</p>
|
43 |
-
<p></p>
|
44 |
-
<ul>
|
45 |
-
|
46 |
-
<li>Es posible que pierda su progreso o datos en el juego, como el apk mod podría no sincronizar con su cuenta de Google Play o almacenamiento en la nube. </li>
|
47 |
-
<li>Usted puede ser prohibido o penalizado por los desarrolladores de juegos o moderadores, como el uso de un apk mod se considera hacer trampa y violar los términos y condiciones del juego. </li>
|
48 |
-
<li>Es posible que exponga su dispositivo o información personal a malware o virus, ya que algunos archivos apk mod pueden contener código o enlaces dañinos o maliciosos. </li>
|
49 |
-
</ul>
|
50 |
-
<h2>Consejos y trucos para jugar Project Drift 2.0</h2>
|
51 |
-
<p>Si decide utilizar Project Drift 2.0 Mod APK 43 o la versión original del juego, es posible que desee saber algunos consejos y trucos sobre cómo jugar el juego y dominar el arte de la deriva. Estos son algunos de ellos:</p>
|
52 |
-
<h3>Cómo dominar la deriva en Project Drift 2.0</h3>
|
53 |
-
<p>Drifting es la principal habilidad que necesitas dominar en Project Drift 2.0, ya que es la forma de ganar puntos y reputación en el juego. A la deriva es cuando se desliza su coche de lado alrededor de una esquina o curva, manteniendo el control y la velocidad. Para ir a la deriva en Project Drift 2.0, debes seguir estos pasos:</p>
|
54 |
-
<ol>
|
55 |
-
<li>Seleccione un coche que tenga buen manejo y potencia, ya que estos son esenciales para la deriva. </li>
|
56 |
-
<li>Seleccione una pista que tenga curvas y giros agudos, ya que son ideales para la deriva. </li>
|
57 |
-
<li>A medida que se acerca a una esquina o curva, toque el pedal del freno para reducir su velocidad e iniciar una deriva. </li>
|
58 |
-
<li>Al entrar en la deriva, dirigir su coche en la dirección opuesta de la vuelta, mientras que golpea el pedal del acelerador para mantener su impulso y equilibrio. </li>
|
59 |
-
<li>Al salir de la deriva, dirigir su coche de nuevo en línea con la carretera, mientras que la liberación del pedal de gas para recuperar la tracción y la estabilidad. </li>
|
60 |
-
</ol>
|
61 |
-
<h3>Cómo desbloquear nuevos coches y pistas en Project Drift 2.0</h3>
|
62 |
-
|
63 |
-
<ul>
|
64 |
-
<li>Completa tantas misiones y desafíos como sea posible en el modo carrera, ya que te recompensarán con dinero y reputación, que son necesarios para desbloquear nuevos coches y pistas. </li>
|
65 |
-
<li>Trate de lograr altas puntuaciones y calificaciones en cada misión y desafío, ya que aumentarán sus recompensas de dinero y reputación. </li>
|
66 |
-
<li>Juega el modo de ataque de tiempo y tratar de batir sus propios u otros jugadores' registros, ya que también le dará dinero y bonos de reputación. </li>
|
67 |
-
<li>Ahorre su dinero y gastarlo sabiamente en los coches y pistas que se adapten a sus preferencias y estilo de juego. </li>
|
68 |
-
<li>Consulte la tienda de juegos regularmente para obtener descuentos y ofertas en coches y pistas, ya que podrían ayudarle a ahorrar algo de dinero y obtener más valor. </li>
|
69 |
-
</ul>
|
70 |
-
<h3>Cómo personalizar su coche en Project Drift 2.0</h3>
|
71 |
-
<p>Uno de los aspectos divertidos de Project Drift 2.0 es que puedes personalizar tu coche para que se vea y funcione mejor. Usted puede cambiar el color, la pintura, las calcomanías, las ruedas, los neumáticos, los alerones, los escapes, y más de su coche. También puede actualizar el motor, la transmisión, la suspensión, los frenos y más de su coche. Para personalizar tu coche en Project Drift 2.0, sigue estos pasos:</p>
|
72 |
-
<ol>
|
73 |
-
<li>Seleccione un coche que desea personalizar desde su garaje. </li>
|
74 |
-
<li>Toque en el botón "Personalizar" en la parte inferior de la pantalla. </li>
|
75 |
-
<li>Elija la categoría que desea personalizar, como apariencia o rendimiento. </li>
|
76 |
-
<li>Seleccione el elemento que desea cambiar o actualizar, como color o motor. </li>
|
77 |
-
<li>Elija la opción que desea aplicar, como rojo o turbo. </li>
|
78 |
-
<li>Toque en el botón "Aplicar" para confirmar sus cambios. </li>
|
79 |
-
<li>Toque en el botón "Atrás" para regresar a su garaje. </li>
|
80 |
-
</ol>
|
81 |
-
<h2>Conclusión</h2>
|
82 |
-
|
83 |
-
<h3>Llamada a la acción para los lectores</h3>
|
84 |
-
<p>Si usted está listo para empezar a la deriva en Project Drift 2.0, descargar el juego de [Google Play] o [HappyMod] ahora y disfrutar de la emoción de deslizar su coche alrededor de las esquinas y curvas. No olvides compartir tus comentarios y opiniones sobre el juego con nosotros en la sección de comentarios a continuación. ¡Feliz deriva! </p>
|
85 |
-
<h4>Preguntas frecuentes</h4>
|
86 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Project Drift 2.0:</p>
|
87 |
-
<ol>
|
88 |
-
<li><b> ¿Cuál es la diferencia entre Proyecto de deriva 2.0 Mod APK 43 y proyecto de deriva 2.0 Hack APK? </b></li>
|
89 |
-
<p>Proyecto deriva 2.0 Mod APK 43 es una versión modificada del juego que le da dinero ilimitado y acceso a todos los coches y pistas en el juego. Proyecto de deriva 2.0 Hack APK es una versión hackeada del juego que le da dinero ilimitado, acceso a todos los coches y pistas, y otros trucos, tales como la invencibilidad, aumento de velocidad, o auto deriva. Ambas versiones no son oficiales y pueden tener algunos riesgos, como problemas de compatibilidad, pérdida de datos, prohibiciones o malware. </p>
|
90 |
-
<li><b> ¿Cómo actualizar Project Drift 2.0 Mod APK 43? </b></li>
|
91 |
-
<p>Si utiliza Project Drift 2.0 Mod APK 43, es posible que no sea capaz de actualizar el juego de Google Play, ya que podría detectar que está utilizando una versión modificada del juego y le impide actualizar. Para actualizar Project Drift 2.0 Mod APK 43, usted tendrá que descargar e instalar la última versión del mod apk de [HappyMod] u otra fuente confiable. Sin embargo, es posible que pierda su progreso o datos en el juego si actualiza el apk mod, así que asegúrese de hacer una copia de seguridad de sus datos antes de actualizar. </p>
|
92 |
-
<li><b>Cómo jugar proyecto deriva 2.0 sin conexión? </b></li>
|
93 |
-
<p>Project Drift 2.0 es un juego en línea que requiere una conexión a Internet para jugar. Sin embargo, puedes jugar algunas partes del juego sin conexión, como el modo de viaje libre y el modo de ataque de tiempo. Para jugar Project Drift 2.0 sin conexión, siga estos pasos:</p>
|
94 |
-
<ol>
|
95 |
-
<li>Inicie el juego mientras tiene una conexión a Internet. </li>
|
96 |
-
|
97 |
-
<li>Seleccione un coche y una pista que desea reproducir. </li>
|
98 |
-
<li>Espera a que el juego cargue el coche y la pista. </li>
|
99 |
-
<li>Apague su conexión a Internet o cambie al modo avión en su dispositivo. </li>
|
100 |
-
<li>Disfruta jugando Project Drift 2.0 sin conexión. </li>
|
101 |
-
</ol>
|
102 |
-
<li><b>Cómo jugar Project Drift 2.0 con amigos? </b></li>
|
103 |
-
<p>Project Drift 2.0 tiene un modo multijugador que te permite jugar con amigos u otros jugadores en línea en batallas de deriva en tiempo real. Para jugar a Project Drift 2.0 con tus amigos, sigue estos pasos:</p>
|
104 |
-
<ol>
|
105 |
-
<li>Inicie el juego y asegúrese de tener una conexión a Internet. </li>
|
106 |
-
<li>Seleccione el modo multijugador en el menú principal. </li>
|
107 |
-
<li>Seleccione un coche y una pista que desea reproducir. </li>
|
108 |
-
<li>Espera a que el juego encuentre un oponente o invita a un amigo a unirse a tu partida. </li>
|
109 |
-
<li>Comienza a derrapar y trata de vencer a tu oponente o amigo anotando más puntos o a la deriva más tiempo. </li>
|
110 |
-
</ol>
|
111 |
-
<li><b> ¿Cómo obtener más dinero en Project Drift 2.0? </b></li>
|
112 |
-
<p>Si utiliza Project Drift 2.0 Mod APK 43, tendrá dinero ilimitado en el juego, que se puede utilizar para comprar y actualizar cualquier coche que desee. Sin embargo, si usas la versión original del juego, tendrás que ganar dinero en el juego completando misiones y desafíos, o viendo anuncios o haciendo compras en la aplicación. Aquí hay algunos consejos sobre cómo obtener más dinero en Project Drift 2.0:</p>
|
113 |
-
<ul>
|
114 |
-
<li>Completa tantas misiones y desafíos como sea posible en el modo carrera, ya que te recompensarán con dinero y reputación, que son necesarios para desbloquear nuevos coches y pistas. </li>
|
115 |
-
<li>Trate de lograr altas puntuaciones y calificaciones en cada misión y desafío, ya que aumentarán sus recompensas de dinero y reputación. </li>
|
116 |
-
<li>Juega el modo de ataque de tiempo y tratar de batir sus propios u otros jugadores' registros, ya que también le dará dinero y bonos de reputación. </li>
|
117 |
-
<li>Ver anuncios o vídeos en la tienda de juegos o en el menú principal, ya que te darán algo de dinero o artículos gratis. </li>
|
118 |
-
|
119 |
-
</ul></p> 64aa2da5cf<br />
|
120 |
-
<br />
|
121 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Carrom Pool Disc Game Mod Apk Monedas Y Gemas Ilimitadas.md
DELETED
@@ -1,71 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Carrom Pool Disc Game Mod Apk: Cómo descargar y disfrutar de monedas y gemas ilimitadas</h1>
|
3 |
-
<p>Si eres un fan de los juegos de tablero de carrom, te encantará Carrom Pool Disc Game. Este es un juego multijugador en línea que te permite jugar carrom con tus amigos u otros jugadores de todo el mundo. También puede personalizar sus tableros, piezas y señales con varios temas y diseños. Pero lo que si quieres disfrutar de más características y beneficios sin gastar dinero real? Ahí es donde Carrom Pool Mod Apk entra en juego. En este artículo, le diremos lo que es Carrom Pool Disc Game, lo que es Carrom Pool Mod Apk, cómo descargarlo e instalarlo, y algunos consejos y trucos para mejorar su juego. </p>
|
4 |
-
<h2>carrom pool disc game mod apk monedas y gemas ilimitadas</h2><br /><p><b><b>Download File</b> — <a href="https://bltlly.com/2v6Kbw">https://bltlly.com/2v6Kbw</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es Carrom Pool Disc Game? </h2>
|
6 |
-
<p>Carrom Pool Disc Game es un popular juego móvil desarrollado por Miniclip. Se basa en el tradicional juego de tablero de zanahoria, que es un juego de mesa que se originó en la India. El juego consiste en utilizar un delantero para golpear los discos en los bolsillos en las cuatro esquinas del tablero. Los discos son negros o blancos, y el jugador que mete todos sus discos primero gana el juego. </p>
|
7 |
-
<h3>Características de Carrom Pool Disc Game</h3>
|
8 |
-
<p>Algunas de las características de Carrom Pool Disc Game son:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Tiene dos modos de juego: Classic y Disc Pool. El modo clásico sigue las reglas tradicionales de carrom, mientras que el modo Disc Pool no tiene discos negros o blancos, pero solo los rojos que dan puntos cuando se embolsan. </li>
|
11 |
-
<li>Tiene modo multijugador en línea, donde puedes jugar con tus amigos u otros jugadores de diferentes países. También puedes chatear con ellos usando emojis y frases. </li>
|
12 |
-
<li> Tiene modo sin conexión, donde se puede jugar contra el ordenador o con otro reproductor en el mismo dispositivo. </li>
|
13 |
-
<li>Tiene varias arenas, donde puedes competir con jugadores de diferentes niveles de habilidad y ganar recompensas. </li>
|
14 |
-
<li> Tiene tablas de clasificación y rankings, donde se puede ver su progreso y logros. </li>
|
15 |
-
|
16 |
-
<li> Tiene una tienda, donde se puede comprar nuevos tableros, piezas, señales y cofres con monedas y gemas. </li>
|
17 |
-
</ul>
|
18 |
-
<h3>Cómo jugar Carrom Pool Disc Game</h3>
|
19 |
-
<p>La jugabilidad de Carrom Pool Disc Game es simple e intuitiva. Solo tienes que arrastrar el dedo en la pantalla para apuntar a tu delantero, y liberarlo para golpear los discos. También puede ajustar la potencia de su tiro moviendo el dedo más cerca o más lejos del delantero. El objetivo es embolsarse todos los discos antes que tu oponente. También puedes usar boosters, como tiempo extra, turno extra o deshacer, para ayudarte a ganar el juego. </p>
|
20 |
-
<h2>¿Qué es Carrom Pool Mod Apk? </h2>
|
21 |
-
<p>Carrom Pool Mod Apk es una versión modificada de Carrom Pool Disc Game que le da monedas y gemas ilimitadas. Monedas y gemas son las monedas del juego que necesitas para comprar nuevos tableros, piezas, tacos, cofres y boosters. Normalmente, tienes que ganarlos jugando juegos, completando misiones o viendo anuncios. Pero con Carrom Pool Mod Apk, se puede obtener de forma gratuita sin ningún tipo de molestia. </p>
|
22 |
-
<h3>Beneficios de Carrom Pool Mod Apk</h3>
|
23 |
-
<p>Algunos de los beneficios de Carrom Pool Mod Apk son:</p>
|
24 |
-
<p></p>
|
25 |
-
<ul>
|
26 |
-
<li> Puede desbloquear todas las características premium y los elementos que de otro modo se pagan o difícil de conseguir. </li>
|
27 |
-
<li> Puede personalizar sus tableros, piezas y señales con cualquier tema o diseño que desee. </li>
|
28 |
-
<li>Puedes acceder a todas las arenas y jugar con cualquier jugador que quieras. </li>
|
29 |
-
<li <p>Puede disfrutar de monedas y gemas ilimitadas sin preocuparse por quedarse sin ellas o gastar dinero real. </li>
|
30 |
-
<li> Usted puede tener más diversión y emoción jugando Carrom Pool Disc Game con características mejoradas y gráficos. </li>
|
31 |
-
</ul>
|
32 |
-
<h3> Cómo descargar e instalar Carrom Pool Mod Apk</h3>
|
33 |
-
<p>Descargar e instalar Carrom Pool Mod Apk es fácil y rápido. Solo tienes que seguir estos pasos:</p>
|
34 |
-
<ol>
|
35 |
-
<li>Haga clic en este enlace para descargar el Carrom Pool Mod Apk archivo: [Carrom Pool Mod Apk Download]. </li>
|
36 |
-
|
37 |
-
<li>Localice el archivo descargado en su administrador de archivos y toque en él para iniciar la instalación. </li>
|
38 |
-
<li>Espera a que termine la instalación y luego abre la aplicación. </li>
|
39 |
-
<li>Disfruta jugando Carrom Pool Disc Game con monedas y gemas ilimitadas. </li>
|
40 |
-
</ol>
|
41 |
-
<h2>Consejos y trucos para Carrom Pool Disc Game</h2>
|
42 |
-
<p>Si quieres mejorar tus habilidades y ganar más juegos en Carrom Pool Disc Game, aquí hay algunos consejos y trucos que puedes usar:</p>
|
43 |
-
<h3>Modo de práctica</h3>
|
44 |
-
<p>Antes de jugar online con otros jugadores, puedes practicar tus tiros y estrategias en el modo de práctica. Este modo le permite jugar contra el ordenador o con otro reproductor en el mismo dispositivo. También puedes elegir el nivel de dificultad y el modo de juego que quieres practicar. El modo de práctica es una gran manera de aprender lo básico y dominar el juego. </p>
|
45 |
-
<h3>Puntería y potencia</h3>
|
46 |
-
<p>Los aspectos más importantes de Carrom Pool Disc Game son el objetivo y el poder. Usted necesita apuntar su delantero con precisión y golpear los discos con la cantidad correcta de energía. Para apuntar al delantero, puede utilizar la guía que muestra la dirección y el ángulo de su tiro. También puede acercar o alejar para ver mejor el tablero. Para ajustar la potencia de su disparo, puede mover el dedo más cerca o más lejos del delantero. Necesitas equilibrar la potencia y la precisión de tu disparo dependiendo de la situación. Por ejemplo, si desea guardar un disco cerca de un bolsillo, puede usar una toma de baja potencia. Pero si quieres embolsarte un disco que esté lejos de un bolsillo, necesitas usar una toma de alta potencia. </p>
|
47 |
-
<h3>Utilice refuerzos y cofres</h3>
|
48 |
-
|
49 |
-
<h2>Conclusión</h2>
|
50 |
-
<p>Carrom Pool Disc Game es un juego divertido y adictivo que puedes jugar con tus amigos u otros jugadores en línea. Se basa en el clásico juego de tablero de carrom, pero con más características y opciones. También puede descargar Carrom Pool Mod Apk para obtener monedas y gemas ilimitadas y desbloquear todos los elementos premium y arenas. Carrom Pool Mod Apk es fácil de descargar e instalar, y hará que su juego más emocionante y gratificante. Si quieres convertirte en un profesional en Carrom Pool Disc Game, también puedes utilizar algunos consejos y trucos que hemos compartido en este artículo. Entonces, ¿qué estás esperando? Descargar Carrom Pool Disc Game o Carrom Pool Mod Apk ahora y empezar a jugar! </p>
|
51 |
-
<h3>Preguntas frecuentes</h3>
|
52 |
-
<p>Aquí hay algunas preguntas frecuentes sobre Carrom Pool Disc Game y Carrom Pool Mod Apk:</p>
|
53 |
-
<ul>
|
54 |
-
<li><b>¿Es Carrom Pool juego de disco libre para jugar? </b><br>
|
55 |
-
Sí, Carrom Pool Disc Game es gratis. Sin embargo, algunos elementos y funciones pueden requerir monedas o gemas, que son las monedas del juego. Puedes ganar monedas o gemas jugando, completando misiones, viendo anuncios o comprándolos con dinero real. </li>
|
56 |
-
<li><b> ¿Es seguro usar Carrom Pool Mod Apk? </b><br>
|
57 |
-
Sí, Carrom Pool Mod Apk es seguro de usar. No contiene ningún virus o malware que puede dañar su dispositivo o datos. Sin embargo, siempre debe descargarlo de una fuente de confianza como este enlace: [Carrom Pool Mod Apk Download]. </li>
|
58 |
-
<li><b>¿Puedo jugar sin conexión a Carrom Pool Disc Game? </b><br>
|
59 |
-
Sí, puedes jugar Carrom Pool Disc Game sin conexión. Puede elegir el modo sin conexión en el menú principal y jugar contra el ordenador o con otro reproductor en el mismo dispositivo. Sin embargo, no podrás acceder a las funciones en línea, como el modo multijugador, arenas, tablas de clasificación y rankings. </li>
|
60 |
-
<li><b>¿Cómo puedo contactar a los desarrolladores de Carrom Pool Disc Game? </b><br>
|
61 |
-
|
62 |
-
<li><b> ¿Cuáles son algunos otros juegos como Carrom Pool Disc Game? </b><br>
|
63 |
-
Algunos otros juegos como Carrom Pool Disc Game son:</li>
|
64 |
-
<ul>
|
65 |
-
<li>8 Ball Pool: Este es otro juego popular de Miniclip que te permite jugar al billar con tus amigos u otros jugadores en línea. También puede personalizar sus señales, mesas y bolas con varios temas y diseños. </li>
|
66 |
-
<li>Ludo King: Este es un juego de mesa clásico que puedes jugar con tus amigos o familiares en línea o fuera de línea. También puedes chatear con ellos usando emojis y pegatinas. </li>
|
67 |
-
<li>Disc Pool Carrom: Este es un juego similar a Carrom Pool Disc Game, pero con física y gráficos más realistas. También puedes jugar con diferentes tipos de discos, como fútbol, golf o hockey. </li>
|
68 |
-
</ul>
|
69 |
-
</ul></p> 64aa2da5cf<br />
|
70 |
-
<br />
|
71 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Chess King Mod Apk.md
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Ajedrez rey Mod Apk: Un juego de mesa con características ilimitadas</h1>
|
3 |
-
<p>Si eres un fan del ajedrez, es posible que hayas oído hablar de Chess King, una popular aplicación de juego de mesa que te permite jugar al ajedrez contra el ordenador u otros jugadores en línea. ¿Pero sabías que hay una versión modificada de Chess King que te da acceso a funciones y opciones ilimitadas? En este artículo, le diremos todo lo que necesita saber sobre Chess King Mod Apk, cómo descargarlo e instalarlo, por qué debe jugar, y cómo jugarlo. Así que, vamos a empezar! </p>
|
4 |
-
<h2>chess king mod apk</h2><br /><p><b><b>Download File</b> ✯ <a href="https://bltlly.com/2v6Ktx">https://bltlly.com/2v6Ktx</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es el ajedrez rey Mod Apk? </h2>
|
6 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
7 |
-
<p>Chess King es una aplicación de juego de mesa que te permite jugar al ajedrez en tu dispositivo móvil. Puede elegir entre diferentes modos, como entrenamiento, rompecabezas, torneos, partidos en línea y más. También puede personalizar su tablero, piezas y fondo de acuerdo a sus preferencias. Chess King tiene una interfaz fácil de usar y un potente motor que le proporciona una experiencia de ajedrez realista y desafiante. </p>
|
8 |
-
<p>Chess King Mod Apk es una versión modificada de Chess King que desbloquea todas las características y opciones que están restringidas o pagadas en la versión original. Con Chess King Mod Apk, se puede disfrutar de:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Monedas y diamantes ilimitados que puedes usar para comprar artículos premium y acceder a contenido exclusivo</li>
|
11 |
-
<li>Todos los niveles y modos desbloqueados, para que puedas jugar a cualquier dificultad y desafiarte a ti mismo</li>
|
12 |
-
<li>Todos los estilos de tablero, conjuntos de piezas y fondos desbloqueados, para que pueda personalizar su juego como desee</li>
|
13 |
-
<li>No hay anuncios o ventanas emergentes que interrumpan su juego o le molesten</li>
|
14 |
-
<li>No se requiere raíz o jailbreak para instalar o ejecutar el mod apk</li>
|
15 |
-
</ul>
|
16 |
-
<h3>Cómo descargar e instalar el apk mod en su dispositivo</h3>
|
17 |
-
<p>Descargar e instalar Chess King Mod Apk es muy fácil y simple. Solo tienes que seguir estos pasos:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Haga clic en este enlace para descargar el archivo apk mod en su dispositivo. </li>
|
20 |
-
|
21 |
-
<li>Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita. </li>
|
22 |
-
<li>Espere a que la instalación termine y luego inicie la aplicación. </li>
|
23 |
-
<li>Disfruta jugando ajedrez rey Mod Apk con características ilimitadas! </li>
|
24 |
-
</ol>
|
25 |
-
<h2>¿Por qué usted debe jugar ajedrez rey Mod Apk? </h2>
|
26 |
-
<h3>Los beneficios de jugar al ajedrez para tu cerebro y habilidades</h3>
|
27 |
-
<p>El ajedrez no es solo un juego divertido y entretenido, sino también una gran manera de mejorar tu cerebro y habilidades. Jugar ajedrez puede ayudarte:</p>
|
28 |
-
<p></p>
|
29 |
-
<ul>
|
30 |
-
<li>Mejora tu memoria, concentración, lógica, creatividad, resolución de problemas y toma de decisiones</li>
|
31 |
-
<li>Aumenta tu coeficiente intelectual, agilidad mental, pensamiento analítico y habilidades de planificación estratégica</li>
|
32 |
-
<li>Reducir el estrés, la ansiedad, la depresión y el aburrimiento</li>
|
33 |
-
<li>Aumenta tu autoconfianza, autoestima y autodisciplina</li>
|
34 |
-
<h3>Las ventajas de usar el mod apk sobre la versión original</h3>
|
35 |
-
<p>Como hemos mencionado anteriormente, Chess King Mod Apk le da acceso a características y opciones ilimitadas que no están disponibles en la versión original de Chess King. Mediante el uso de la apk mod, puede:</p>
|
36 |
-
<ul>
|
37 |
-
<li>Ahorra dinero y tiempo al no tener que gastar dinero real o ver anuncios para obtener monedas y diamantes</li>
|
38 |
-
<li>Explorar y disfrutar de todos los niveles y modos sin restricciones o limitaciones</li>
|
39 |
-
<li>Personaliza y personaliza tu juego según tu gusto y estilo</li>
|
40 |
-
<li> Tener un juego más suave y más rápido sin retrasos o problemas técnicos</li>
|
41 |
-
<li>Tener más diversión y emoción jugando con diferentes estilos de tablero, conjuntos de piezas y fondos</li>
|
42 |
-
</ul>
|
43 |
-
<h2>Cómo jugar ajedrez rey mod apk? </h2>
|
44 |
-
<h3>Las reglas y objetivos básicos del ajedrez</h3>
|
45 |
-
<p>Si usted es nuevo en el ajedrez, es posible que desee aprender las reglas básicas y objetivos del juego antes de empezar a jugar Chess King Mod Apk. Estos son algunos de los puntos principales que necesitas saber:</p>
|
46 |
-
<ul>
|
47 |
-
<li>El ajedrez es un juego para dos jugadores jugado en un tablero cuadrado con 64 cuadrados de colores alternos (blanco y negro)</li>
|
48 |
-
|
49 |
-
<li>Las piezas se mueven de acuerdo a sus reglas específicas y pueden capturar las piezas del oponente aterrizando en sus cuadrados</li>
|
50 |
-
<li>El rey es la pieza más importante y no puede ser capturado. El objetivo del juego es hacer jaque mate al rey del oponente, lo que significa ponerlo en una posición donde no pueda escapar de un ataque de cualquiera de las piezas del jugador</li>
|
51 |
-
<li>El juego también puede terminar en un empate, lo que significa que ningún jugador puede ganar. Esto puede suceder si hay un punto muerto (donde el jugador cuyo turno no tiene movimientos legales), si hay material insuficiente (donde ninguno de los jugadores tiene suficientes piezas para jaque mate al oponente), si hay una repetición triple (donde la misma posición ocurre tres veces con el mismo jugador para mover), o si hay una regla de 50 movimientos (donde no se ha hecho ninguna captura o movimiento de peón en los últimos 50 movimientos)</li>
|
52 |
-
</ul>
|
53 |
-
<h3>Los diferentes modos y niveles de dificultad en el juego</h3>
|
54 |
-
<p>Ajedrez King Mod Apk le ofrece diferentes modos y niveles de dificultad para adaptarse a sus preferencias y habilidades. Usted puede elegir entre:</p>
|
55 |
-
<tabla>
|
56 |
-
<tr><th>Modo</th><th>Descripción</th></tr>
|
57 |
-
<tr><td>Entrenamiento</td><td>Este modo te ayuda a aprender y practicar ajedrez proporcionándote lecciones, ejercicios, rompecabezas y sugerencias. También puede analizar sus movimientos y errores con la ayuda del motor. </td></tr>
|
58 |
-
<tr><td>Puzzles</td><td>Este modo te reta a resolver varios problemas de ajedrez, como jaque mate en uno, dos o tres movimientos, tácticas, finales de partida y más. También puedes crear tus propios puzzles y compartirlos con otros jugadores. </td></tr>
|
59 |
-
<tr><td>Torneos</td><td>Este modo le permite participar en diferentes torneos con diferentes formatos, como round-robin, knockout, sistema suizo, etc. También puede crear sus propios torneos e invitar a otros jugadores a unirse. </td></tr>
|
60 |
-
|
61 |
-
<tr><td>Partidos sin conexión</td><td>Este modo le permite jugar ajedrez sin conexión con el equipo o con otro jugador en el mismo dispositivo. También puede ajustar el nivel de dificultad del equipo de 1 a 20. </td></tr>
|
62 |
-
</tabla>
|
63 |
-
<h3>Los consejos y trucos para mejorar tu estrategia y tácticas de ajedrez</h3>
|
64 |
-
<p>Si quieres mejorar tu estrategia y tácticas de ajedrez, puedes seguir estos consejos y trucos:</p>
|
65 |
-
<ul>
|
66 |
-
<li>Estudia los principios básicos de la apertura del ajedrez, como controlar el centro, desarrollar tus piezas, enrocar a tu rey, etc.</li>
|
67 |
-
<li>Piensa con anticipación y planifica tus movimientos cuidadosamente. Trata de anticipar los movimientos y respuestas de tu oponente. </li>
|
68 |
-
<li>Evite hacer movimientos innecesarios o prematuros que debiliten su posición o pierdan material. </li>
|
69 |
-
<li>Usa todas tus piezas de manera efectiva y coordinándolas bien. No dejes ninguna pieza inactiva o desprotegida. </li>
|
70 |
-
<li>Busca oportunidades para crear amenazas, como cheques, capturas, horquillas, pines, brochetas, etc.</li>
|
71 |
-
</li>
|
72 |
-
<li>Practica y juega ajedrez regularmente. Cuanto más juegas, más aprendes y mejoras. </li>
|
73 |
-
<li>Revisa y analiza tus juegos y aprende de tus errores y éxitos. </li>
|
74 |
-
<li>Lee libros, mira videos y sigue tutoriales sobre estrategia y tácticas de ajedrez. </li>
|
75 |
-
<li>Busca comentarios y consejos de otros jugadores, entrenadores o expertos. </li>
|
76 |
-
</ul>
|
77 |
-
<h2>Conclusión</h2>
|
78 |
-
<p>Chess King Mod Apk es una aplicación de juego de mesa que le permite jugar al ajedrez con funciones y opciones ilimitadas. Puede descargar e instalar el apk mod fácilmente y disfrutar jugando al ajedrez en su dispositivo móvil. Jugar al ajedrez puede ayudarte a mejorar tu cerebro y tus habilidades, además de divertirte y emocionarte. También puede aprender las reglas y objetivos básicos del ajedrez, así como algunos consejos y trucos para mejorar su estrategia y tácticas de ajedrez. Chess King Mod Apk es una gran manera de disfrutar del ajedrez en cualquier momento y en cualquier lugar. Entonces, ¿qué estás esperando? Descargar Chess King Mod Apk ahora y empezar a jugar! </p>
|
79 |
-
<h2>Preguntas frecuentes</h2>
|
80 |
-
|
81 |
-
<p>A1: Sí, Chess King Mod Apk es seguro de usar. No contiene ningún virus, malware o spyware que pueda dañar su dispositivo o datos. Tampoco requiere ningún root o jailbreak para instalar o ejecutar el mod apk. </p>
|
82 |
-
<h3>Q2: ¿Cuáles son los requisitos para ejecutar Chess King Mod Apk? </h3>
|
83 |
-
<p>A2: Ajedrez Rey Mod Apk requiere un dispositivo Android con un mínimo de 4.1 versión y 100 MB de espacio libre. También requiere una conexión a Internet para jugar partidos en línea y acceder a algunas características. </p>
|
84 |
-
<h3>Q3: ¿Cómo puedo actualizar Chess King Mod Apk? </h3>
|
85 |
-
<p>A3: Ajedrez rey Mod Apk actualizaciones automáticamente cada vez que hay una nueva versión disponible. También puede comprobar las actualizaciones manualmente yendo al menú de configuración y pulsando el botón de actualización. </p>
|
86 |
-
<h3>Q4: ¿Puedo jugar ajedrez rey mod apk offline? </h3>
|
87 |
-
<p>A4: Sí, usted puede jugar Chess King Mod Apk sin conexión. Puede jugar partidos sin conexión con el ordenador o con otro jugador en el mismo dispositivo. Sin embargo, no podrás acceder a algunas funciones que requieran conexión a Internet, como partidas online, torneos, puzzles, etc.</p>
|
88 |
-
<h3>Q5: ¿Cómo puedo contactar a los desarrolladores de Chess King Mod Apk? </h3>
|
89 |
-
<p>A5: Puede ponerse en contacto con los desarrolladores de Chess King Mod Apk enviándoles un correo electrónico a o visitando su sitio web . También puede seguirlos en sus cuentas de redes sociales para obtener más actualizaciones e información. </p> 64aa2da5cf<br />
|
90 |
-
<br />
|
91 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar 8 Bola Piscina Herramienta Apk.md
DELETED
@@ -1,57 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1> Cómo descargar 8 bola piscina herramienta APK para Android</h1>
|
3 |
-
<p>Si eres un fan de 8 Ball Pool, el juego de billar en línea más popular del mundo, es posible que te hayas preguntado cómo mejorar tus habilidades y ganar más partidos. Es posible que también haya deseado más monedas y dinero en efectivo para comprar mejores pistas y mesas, o para personalizar la configuración de su juego. Bueno, hay una manera de lograr todo eso, y se llama 8 Ball Pool Tool APK.</p>
|
4 |
-
<p>8 Ball Pool Tool APK es una versión modificada de la aplicación original 8 Ball Pool que le permite acceder a algunas características sorprendentes que no están disponibles en la aplicación oficial. Con esta herramienta, puede ampliar la guía, aumentar la potencia, los efectos y el objetivo de sus disparos, obtener monedas y dinero en efectivo ilimitados, desbloquear pistas y mesas premium y mucho más. Suena increíble, ¿verdad? </p>
|
5 |
-
<h2>descargar 8 bola piscina herramienta apk</h2><br /><p><b><b>Download File</b> ★ <a href="https://bltlly.com/2v6LYJ">https://bltlly.com/2v6LYJ</a></b></p><br /><br />
|
6 |
-
<p>Pero ¿cómo descargar e instalar esta herramienta en su dispositivo Android? Y cuáles son los beneficios y riesgos de su uso? En este artículo, vamos a responder a estas preguntas y le proporcionará una guía paso a paso sobre cómo descargar 8 Ball Pool Tool APK para Android. ¡Vamos a empezar! </p>
|
7 |
-
<h2> Pasos para descargar e instalar 8 bola piscina herramienta APK</h2>
|
8 |
-
<p>Descargar e instalar 8 Ball Pool Tool APK no es muy difícil, pero requiere algunos pasos adicionales que no son necesarios para la aplicación oficial. Estos son los pasos que debes seguir:</p>
|
9 |
-
<ol>
|
10 |
-
<li><b>Habilita fuentes desconocidas en tu dispositivo. </b> Dado que esta herramienta no está disponible en Google Play Store, debe permitir que su dispositivo instale aplicaciones de fuentes desconocidas. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </li>
|
11 |
-
|
12 |
-
<li><b>Instala el archivo APK en tu dispositivo. </b> Una vez que haya descargado el archivo APK, busque en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación. Puede ver un mensaje de advertencia que dice "Este tipo de archivo puede dañar su dispositivo. ¿Desea mantenerlo de todos modos?" Ignórelo y toque en "OK". Luego, siga las instrucciones en pantalla para completar la instalación. </li>
|
13 |
-
<li><b>Iniciar la aplicación y disfrutar del juego. </b> Después de la instalación, verá un icono de 8 Pool Master en la pantalla de inicio o en el cajón de la aplicación. Toque en él para iniciar la aplicación y empezar a jugar 8 Ball Pool con características mejoradas. Usted se dará cuenta de que usted tiene una gran cantidad de monedas y dinero en efectivo en su cuenta, y se puede utilizar para comprar cualquier señal o tabla que desee. También verás un icono flotante de 8 Pool Master en la pantalla, que puedes usar para ajustar la guía, la potencia, los efectos y el objetivo de tus disparos. También puede acceder a otras configuraciones y características desde el menú de la aplicación. </li>
|
14 |
-
</ol>
|
15 |
-
<h2>Beneficios de usar 8 bola piscina herramienta APK</h2>
|
16 |
-
<p>El uso de 8 Ball Pool Tool APK puede traer muchos beneficios que pueden mejorar su experiencia de juego y hacerte un mejor jugador. Estos son algunos de los beneficios que puedes disfrutar:</p>
|
17 |
-
<ul>
|
18 |
-
<li><b>Mejore su precisión y habilidades. </b> Con la guía extendida, puede ver la trayectoria de sus disparos y planificar sus movimientos en consecuencia. También puedes ajustar la potencia, los efectos y el objetivo de tus disparos para que sean más precisos y efectivos. De esta manera, puedes mejorar tu precisión y habilidades y ganar más partidos. </li>
|
19 |
-
<li><b>Obtén monedas y efectivo ilimitados. </b> Monedas y dinero en efectivo son las principales monedas en 8 Ball Pool, y los necesita para comprar señales, mesas, paquetes de chat, minijuegos y más. Sin embargo, ganarlos puede ser lento y tedioso, y comprarlos con dinero real puede ser caro. Con 8 Ball Pool Tool APK, puede obtener monedas ilimitadas y dinero en efectivo de forma gratuita, y gastarlos tanto como quieras sin preocuparse por agotarse. </li>
|
20 |
-
|
21 |
-
<li><b>Personalizar la configuración de su juego. </b> 8 Ball Pool Tool APK también le permite personalizar la configuración de juego de acuerdo a sus preferencias. Puede cambiar el modo de juego, el color de la mesa, el tamaño de la bola, el ángulo de referencia, los efectos de sonido, la calidad gráfica y mucho más. También puede activar o desactivar algunas características, tales como recarga automática, anti-van, anti-detect, etc. Puede hacer que su juego sea más divertido y cómodo con estos ajustes. </li>
|
22 |
-
</ul>
|
23 |
-
<h2> Riesgos y precauciones de usar 8 Ball Pool Tool APK</h2>
|
24 |
-
<p>Si bien el uso de 8 Ball Pool Tool APK puede ser beneficioso, también viene con algunos riesgos y desventajas que usted necesita ser consciente de. Estos son algunos de los riesgos y precauciones de usar esta herramienta:</p>
|
25 |
-
<ul>
|
26 |
-
<li><b>Posible infección de malware o virus. </b> Como se mencionó anteriormente, no todas las fuentes que ofrecen 8 Ball Pool Tool APK son seguras o fiables. Algunos de ellos pueden contener malware o virus que pueden dañar su dispositivo o robar su información personal. Por lo tanto, debe tener cuidado y solo descargar el archivo APK de una fuente de confianza. También debe escanear el archivo con una aplicación antivirus antes de instalarlo. </li>
|
27 |
-
<li><b>Prohibición o suspensión de cuentas potenciales. </b> El uso de 8 Ball Pool Tool APK está en contra de los términos de servicio de 8 Ball Pool y Miniclip, el desarrollador del juego. Si detectan que está utilizando esta herramienta, pueden prohibir o suspender su cuenta de forma permanente. Esto significa que perderás todo tu progreso, monedas, efectivo, pistas, mesas, etc. Por lo tanto, necesitas usar esta herramienta bajo tu propio riesgo y discreción. También debe evitar usarlo en partidos o torneos en línea donde otros jugadores pueden reportarlo. </li>
|
28 |
-
|
29 |
-
<li><b>Consejos para evitar o minimizar los riesgos. </b> Si todavía desea utilizar 8 Bola Pool Tool APK a pesar de los riesgos involucrados, aquí hay algunos consejos que pueden ayudar a evitar o minimizarlos:</li>
|
30 |
-
<ul>
|
31 |
-
<li>Usa una cuenta secundaria o ficticia en lugar de tu cuenta principal. </li>
|
32 |
-
<li>Usa una aplicación VPN para ocultar tu dirección IP y ubicación. </li>
|
33 |
-
<li>Utilice una aplicación de espacio paralelo para crear un clon de 8 Ball Pool app. </li>
|
34 |
-
<li> Utilice una aplicación modded Google Play Store para evitar la verificación de licencias. </li>
|
35 |
-
<li> Utilice una aplicación de copia de seguridad para guardar sus datos antes de usar esta herramienta. </li>
|
36 |
-
<li>Utilice esta herramienta con moderación y moderación. </li>
|
37 |
-
</ul>
|
38 |
-
</ul>
|
39 |
-
<h2>Conclusión</h2> <p>En conclusión, 8 Ball Pool Tool APK es una versión modificada de la aplicación original 8 Ball Pool que le permite acceder a algunas características sorprendentes que no están disponibles en la aplicación oficial. Con esta herramienta, puede mejorar su precisión y habilidades, obtener monedas ilimitadas y dinero en efectivo, desbloquear pistas y mesas premium, y personalizar la configuración de su juego. Sin embargo, el uso de esta herramienta también viene con algunos riesgos y desventajas, como una posible infección de malware o virus, una posible prohibición o suspensión de la cuenta, y problemas legales y éticos; por eso hemos incluido consejos para evitar o minimizar los riesgos. Por lo tanto, debe ser cuidadoso y responsable al usar esta herramienta, y seguir los pasos que hemos proporcionado en este artículo para descargarlo e instalarlo en su dispositivo Android. </p>
|
40 |
-
<p>Si has encontrado este artículo útil e informativo, por favor compártelo con tus amigos y compañeros de 8 jugadores de Ball Pool. También, no dude en dejar un comentario a continuación si usted tiene alguna pregunta o retroalimentación sobre 8 Ball Pool Tool APK. Nos encantaría saber de usted! </p>
|
41 |
-
<h2>Preguntas frecuentes</h2>
|
42 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre 8 Ball Pool Tool APK:</p>
|
43 |
-
<ol>
|
44 |
-
<li><b> ¿Es 8 Ball Pool Tool APK seguro de usar? </b><br>
|
45 |
-
|
46 |
-
<li><b>Es 8 Ball Pool Tool APK legal de usar? </b><br>
|
47 |
-
El uso de 8 Ball Pool Tool APK no es legal, ya que viola los términos de servicio de 8 Ball Pool y Miniclip, el desarrollador del juego. También infringe los derechos de propiedad intelectual de Miniclip y otras partes involucradas en el desarrollo del juego. Por lo tanto, el uso de esta herramienta puede resultar en acciones legales o sanciones de las autoridades o el desarrollador del juego. </li>
|
48 |
-
<li><b> ¿Cómo puedo actualizar 8 Ball Pool Tool APK? </b><br>
|
49 |
-
Para actualizar 8 Ball Pool Tool APK, debe seguir los mismos pasos que descargarlo e instalarlo. Necesitas habilitar fuentes desconocidas en tu dispositivo, descargar la última versión del archivo APK desde una fuente de confianza, instalarlo en tu dispositivo e iniciar la aplicación. Sin embargo, también debe hacer una copia de seguridad de sus datos antes de actualizarlos, ya que algunas actualizaciones pueden causar pérdida de datos o corrupción. </li>
|
50 |
-
<li><b> ¿Puedo usar 8 bola piscina herramienta APK en otros dispositivos? </b><br>
|
51 |
-
Sí, se puede utilizar 8 Ball Pool Tool APK en otros dispositivos que se ejecutan en el sistema operativo Android. Sin embargo, debe asegurarse de que su dispositivo cumple con los requisitos mínimos para ejecutar esta herramienta, como la versión de Android, el tamaño de la RAM, el espacio de almacenamiento, etc. También debe seguir los mismos pasos que descargarlo e instalarlo en su dispositivo. </li>
|
52 |
-
<li><b> ¿Dónde puedo obtener más información acerca de 8 Ball Pool Tool APK? </b><br>
|
53 |
-
Puede obtener más información sobre 8 Ball Pool Tool APK de varias fuentes en línea, tales como blogs, foros, videos, comentarios, etc. Sin embargo, usted necesita ser cuidadoso y solo confiar en fuentes confiables y creíbles que proporcionan información precisa y actualizada. También puede ponerse en contacto con el desarrollador de esta herramienta directamente a través de su sitio web o cuentas de redes sociales. </li>
|
54 |
-
</ol>
|
55 |
-
<p></p> 64aa2da5cf<br />
|
56 |
-
<br />
|
57 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Apk Mod Cazador Asesino 2.md
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar e instalar APK Mod Hunter Assassin 2 en Android</h1>
|
3 |
-
<p>Si eres un fan de los juegos de acción furtiva, es posible que hayas oído hablar de Hunter Assassin 2, un juego popular donde tienes que eliminar a tus enemigos sin ser detectado. ¿Pero sabías que hay una versión modificada de este juego que te da dinero ilimitado, gemas y características premium? En este artículo, le diremos lo que es APK Mod Hunter Assassin 2, ¿cuáles son los beneficios y riesgos de usar archivos APK mod, y cómo descargar e instalar APK Mod Hunter Assassin 2 en su dispositivo Android. </p>
|
4 |
-
<h2>¿Qué es APK Mod Hunter Assassin 2?</h2>
|
5 |
-
<p>APK Mod Hunter Assassin 2 es una versión modificada del juego oficial de Hunter Assassin 2, que fue desarrollado por Ruby Game Studio y lanzado en septiembre de 2021. El juego es una secuela del original Hunter Assassin, que tiene más de 100 millones de descargas en Google Play. El juego está disponible de forma gratuita en Google Play, pero también tiene compras y anuncios en la aplicación. </p>
|
6 |
-
<h2>descargar apk mod cazador asesino 2</h2><br /><p><b><b>Download</b> ✔✔✔ <a href="https://bltlly.com/2v6KMs">https://bltlly.com/2v6KMs</a></b></p><br /><br />
|
7 |
-
<h3>El juego de Hunter Assassin 2</h3>
|
8 |
-
<p>La jugabilidad de Hunter Assassin 2 es similar al primer juego, pero con gráficos, animaciones y efectos de sonido mejorados. Usted juega como un asesino experto que tiene que infiltrarse en varios lugares y eliminar a sus objetivos sin ser visto u oído. Puedes usar tus habilidades de sigilo, armas, artilugios y trampas para completar tus misiones. También puedes mejorar tu personaje y desbloquear nuevos atuendos, armas y habilidades. </p>
|
9 |
-
<h3>Las características de APK Mod Hunter Assassin 2</h3>
|
10 |
-
<p>Las características de APK Mod Hunter Assassin 2 son diferentes del juego oficial, ya que le dan más ventajas y opciones. Algunas de las características son:</p>
|
11 |
-
<ul>
|
12 |
-
<li>Dinero y gemas ilimitadas: Puedes usar estos recursos para comprar lo que quieras en el juego, como atuendos, armas, gadgets y mejoras. </li>
|
13 |
-
<li>Funciones premium desbloqueadas: Puedes acceder a todas las funciones premium del juego, como eliminar anuncios, desbloquear todos los niveles y obtener recompensas exclusivas. </li>
|
14 |
-
|
15 |
-
</ul>
|
16 |
-
<h2>¿Cuáles son los beneficios y riesgos de los archivos mod APK? </h2>
|
17 |
-
<p>Los archivos mod de APK son archivos que han sido modificados por desarrolladores de terceros para cambiar algunos aspectos de una aplicación o juego original. Generalmente se descargan de fuentes no oficiales, como sitios web o foros. Hay algunos beneficios y riesgos de usar archivos mod APK que debe tener en cuenta antes de descargarlos. </p>
|
18 |
-
<h3>Los beneficios de los archivos mod APK</h3>
|
19 |
-
<p>Algunos de los beneficios de usar archivos mod APK son:</p>
|
20 |
-
<ul>
|
21 |
-
<li>Gratis: Puede descargar y usar archivos mod APK gratis, sin pagar por nada. </li>
|
22 |
-
<li>Más características: Puedes disfrutar de más características y opciones que no están disponibles en la aplicación original o en el juego. </li>
|
23 |
-
<li>Más recursos: Puedes obtener más recursos, como dinero, gemas, monedas o vidas, que pueden ayudarte a progresar más rápido en el juego. </li>
|
24 |
-
</ul>
|
25 |
-
<h3>Los riesgos de los archivos mod APK</h3>
|
26 |
-
<p>Algunos de los riesgos de usar archivos mod APK son:</p>
|
27 |
-
<p></p>
|
28 |
-
<ul>
|
29 |
-
<li>Fuente no oficial: Debido a que han sido manipulados por un tercero, los archivos mod APK no son de una fuente oficial. Esto significa que pueden no ser compatibles con su dispositivo o pueden causar errores o fallos. </li>
|
30 |
-
<li>No hay actualizaciones: La mayoría de los archivos mod APK no se pueden actualizar a través de Google Play u otros canales oficiales. Esto significa que puede perderse las últimas características, correcciones de errores o parches de seguridad de la aplicación o juego original. </li>
|
31 |
-
<li>No hay soporte: Si se encuentra con cualquier problema o problemas con el archivo de mod APK, es posible que no pueda obtener ningún apoyo o asistencia del desarrollador original o el modder. </li>
|
32 |
-
<li>Riesgo de malware: Algunos archivos mod APK pueden contener código malicioso o virus que pueden dañar su dispositivo o robar su información personal. Siempre debe escanear el archivo mod APK con un software antivirus de buena reputación antes de instalarlo. </li>
|
33 |
-
</ul>
|
34 |
-
<h2>¿Cómo descargar e instalar APK Mod Hunter Assassin 2 en Android? </h2>
|
35 |
-
|
36 |
-
<h3>Paso 1: Habilitar fuentes desconocidas en el dispositivo</h3>
|
37 |
-
<p>Antes de poder instalar cualquier archivo APK mod en su dispositivo, es necesario habilitar fuentes desconocidas en la configuración. Esto le permitirá instalar aplicaciones o juegos de fuentes distintas de Google Play. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. Puede ver un mensaje de advertencia, pero puede ignorarlo y tocar OK.</p>
|
38 |
-
<h3>Paso 2: Descargar el archivo APK mod de una fuente confiable</h3>
|
39 |
-
<p>Siguiente, es necesario descargar el archivo APK mod de Hunter Assassin 2 de una fuente confiable. Puede buscar en Google o utilizar un sitio web de confianza que proporciona archivos mod APK. Por ejemplo, puedes usar [este enlace] para descargar el archivo APK mod de Hunter Assassin 2. Asegúrate de descargar la última versión del archivo y guardarlo en una carpeta a la que puedas acceder fácilmente. </p>
|
40 |
-
<h3>Paso 3: Instalar el archivo APK mod en su dispositivo</h3>
|
41 |
-
<p>Una vez que haya descargado el archivo APK mod, debe instalarlo en su dispositivo. Para hacer esto, busque el archivo en su administrador de archivos y toque en él. Puede ver una ventana emergente pidiendo su permiso para instalar la aplicación. Toque en Instalar y espere a que termine el proceso de instalación. </p>
|
42 |
-
<h3>Paso 4: Iniciar el juego y disfrutar de</h3>
|
43 |
-
<p>Una vez completada la instalación, puede iniciar el juego tocando en su icono en la pantalla de inicio o en el cajón de la aplicación. Deberías ver el juego cargando con las características modificadas habilitadas. Ahora puedes disfrutar jugando a Hunter Assassin 2 con dinero ilimitado, gemas y funciones premium. </p>
|
44 |
-
<h2>Conclusión</h2>
|
45 |
-
<p>En este artículo, hemos explicado lo que es APK Mod Hunter Assassin 2, cuáles son los beneficios y riesgos de usar archivos APK mod, y cómo descargar e instalar APK Mod Hunter Assassin 2 en su dispositivo Android. Esperamos que este artículo haya sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. </p>
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
|
48 |
-
<table>
|
49 |
-
<tr><td><b>Q: ¿Es APK Mod Hunter Assassin 2 seguro de usar? </b></td><td><b>A: No hay una respuesta definitiva a esta pregunta, ya que diferentes archivos APK mod pueden tener diferentes niveles de seguridad y calidad. Sin embargo, como regla general, siempre debe tener cuidado al descargar e instalar cualquier archivo mod APK desde una fuente no oficial. Siempre debe escanear el archivo con un software antivirus de buena reputación antes de instalarlo, y evitar dar permisos innecesarios o acceso a la aplicación. </b></td></tr>
|
50 |
-
<tr><td><b>Q: ¿Puedo jugar en línea con APK Mod Hunter Assassin 2?</b></td><td><b>A: No, no puedes jugar en línea con APK Mod Hunter Assassin 2, ya que no es compatible con los servidores oficiales del juego. Si intenta jugar en línea con la versión modificada, puede enfrentar errores, bloqueos o prohibiciones del juego. Por lo tanto, solo debe jugar sin conexión con APK Mod Hunter Assassin 2.</b></td></tr>
|
51 |
-
<tr><td><b>Q: ¿Puedo actualizar APK Mod Hunter Assassin 2 a través de Google Play? </b></td><td><b>A: No, no se puede actualizar APK Mod Hunter Assassin 2 a través de Google Play, ya que no es una versión oficial del juego. Si intenta actualizarlo a través de Google Play, puede perder todas las características modificadas y volver a la versión original del juego. Por lo tanto, solo debe actualizar APK Mod Hunter Assassin 2 mediante la descarga de una nueva versión del archivo de una fuente confiable. </b></td></tr>
|
52 |
-
<tr><td><b>Q: ¿Perderé mi progreso si desinstalo APK Mod Hunter Assassin 2?</b></td><td><b>A: Sí, perderá su progreso si desinstala APK Mod Hunter Assassin 2, ya que no se sincroniza con su cuenta de Google o almacenamiento en la nube. Por lo tanto, debe hacer una copia de seguridad de su progreso guardando los datos del juego en una carpeta separada o utilizando una aplicación de terceros. </b></td></tr>
|
53 |
-
|
54 |
-
</table></p> 64aa2da5cf<br />
|
55 |
-
<br />
|
56 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/contrib/__init__.py
DELETED
File without changes
|