Commit: 619df3b
1 Parent(s): 9543646
Update parquet files (step 61 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/101-5/gpt4free/g4f/.v1/testing/poe_account_create_test.py +0 -109
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 Crack Download !!HOT!!.md +0 -27
- spaces/1gistliPinn/ChatGPT4/Examples/62 117 68 199 8055 Viewerframe Mode Motion.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Cm Browser For Pc Free WORK Download Softonic.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Game Edukasi Anak Lengkap untuk PC Aplikasi dan Tips.md +0 -123
- spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 and Play with Your Friends on Android.md +0 -178
- spaces/1phancelerku/anime-remove-background/Enjoy GTA 5 Prologue on Your Phone APK and Cache Download Links.md +0 -84
- spaces/1phancelerku/anime-remove-background/Facebook 32 Bit How to Download and Install the Latest Version for Android.md +0 -111
- spaces/1toTree/lora_test/ppdiffusers/models/unet_2d.py +0 -271
- spaces/801artistry/RVC801/infer/lib/infer_pack/commons.py +0 -167
- spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -97
- spaces/AI4PD/hexviz/hexviz/pages/1_🗺️Identify_Interesting_Heads.py +0 -152
- spaces/AIConsultant/MusicGen/audiocraft/modules/codebooks_patterns.py +0 -539
- spaces/AIGC-Audio/AudioGPT/text_to_speech/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/tensor_utils.py +0 -92
- spaces/AIWaves/Software_Company/src/agents/Environment/__init__.py +0 -1
- spaces/Abdullahw72/bark-voice-cloning/app.py +0 -98
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GptForLove.py +0 -82
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunLayout.js +0 -47
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/input/OnTouchPad.js +0 -40
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/namevaluelabel/NameValueLabel.js +0 -161
- spaces/AixiaGreyatt/QQsign/Dockerfile +0 -23
- spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/__init__.py +0 -2
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/utilities.md +0 -23
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/detr/README.md +0 -27
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/dist_utils.py +0 -164
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/iter_based_runner.py +0 -273
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/__init__.py +0 -0
- spaces/Apex-X/Tm/roop/utilities.py +0 -141
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/johabprober.py +0 -47
- spaces/Avkash/WhisperUI/app.py +0 -10
- spaces/AzumaSeren100/XuanShen-Bert-VITS2/train_ms.py +0 -396
- spaces/Banbri/zcvzcv/src/components/ui/vertical-slider.tsx +0 -27
- spaces/Bart92/RVC_HF/infer/lib/train/mel_processing.py +0 -132
- spaces/Benson/text-generation/Examples/5apps.md +0 -57
- spaces/Benson/text-generation/Examples/Capcut Video Editor Apk Free Download.md +0 -87
- spaces/Billyosoro/ESRGAN/inference_realesrgan.py +0 -128
- spaces/BreetheRun/stabilityai-stable-diffusion-xl-base-1.0/app.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/launch.py +0 -84
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/README.md +0 -8
- spaces/CVPR/WALT/mmdet/models/necks/yolo_neck.py +0 -136
- spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/grid_head.py +0 -359
- spaces/CVPR/lama-example/models/ade20k/mobilenet.py +0 -154
- spaces/CVPR/monoscene_lite/monoscene/.ipynb_checkpoints/CRP3D-checkpoint.py +0 -97
- spaces/Chukwuka/Dog_Breed_ImageWoof/data_setup.py +0 -36
- spaces/ClementBM/connectfour/README.md +0 -12
- spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp +0 -17
- spaces/Curranj/chatbot/app.py +0 -79
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/openapi/__init__.py +0 -0
spaces/101-5/gpt4free/g4f/.v1/testing/poe_account_create_test.py
DELETED
@@ -1,109 +0,0 @@

from hashlib import md5
from json import dumps
from re import findall
from typing import Optional

from tls_client import Session as TLS
from twocaptcha import TwoCaptcha

from gpt4free.quora import extract_formkey
from gpt4free.quora.mail import Emailnator

solver = TwoCaptcha('')


class Account:
    @staticmethod
    def create(proxy: Optional[str] = None, logging: bool = False, enable_bot_creation: bool = False):
        client = TLS(client_identifier='chrome110')
        client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None

        mail_client = Emailnator()
        mail_address = mail_client.get_mail()

        if logging:
            print('email', mail_address)

        client.headers = {
            'authority': 'poe.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'content-type': 'application/json',
            'origin': 'https://poe.com',
            'poe-formkey': 'null',
            'poe-tag-id': 'null',
            'poe-tchannel': 'null',
            'referer': 'https://poe.com/login',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        client.headers["poe-formkey"] = extract_formkey(client.get('https://poe.com/login').text)
        client.headers["poe-tchannel"] = client.get('https://poe.com/api/settings').json()['tchannelData']['channel']

        # token = reCaptchaV3('https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal')
        token = solver.recaptcha(
            sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
            url='https://poe.com/login?redirect_url=%2F',
            version='v3',
            enterprise=1,
            invisible=1,
            action='login',
        )['code']

        payload = dumps(
            separators=(',', ':'),
            obj={
                'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
                'variables': {'emailAddress': mail_address, 'phoneNumber': None, 'recaptchaToken': token},
                'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
            },
        )

        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        print(dumps(client.headers, indent=4))

        response = client.post('https://poe.com/api/gql_POST', data=payload)

        if 'automated_request_detected' in response.text:
            print('please try using a proxy / wait for fix')

        if 'Bad Request' in response.text:
            if logging:
                print('bad request, retrying...', response.json())
            quit()

        if logging:
            print('send_code', response.json())

        mail_content = mail_client.get_message()
        mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]

        if logging:
            print('code', mail_token)

        payload = dumps(
            separators=(',', ':'),
            obj={
                "queryName": "SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation",
                "variables": {"verificationCode": str(mail_token), "emailAddress": mail_address, "phoneNumber": None},
                "query": "mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n",
            },
        )

        base_string = payload + client.headers["poe-formkey"] + 'WpuLMiXEKKE98j56k'
        client.headers["poe-tag-id"] = md5(base_string.encode()).hexdigest()

        response = client.post('https://poe.com/api/gql_POST', data=payload)
        if logging:
            print('verify_code', response.json())


Account.create(proxy='', logging=True)
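The one non-obvious step in the deleted script above is its request signing: before every GraphQL POST, the poe-tag-id header is recomputed as the MD5 of the compact JSON payload, the scraped poe-formkey, and a hard-coded salt. A minimal sketch isolating that scheme from the code above (the salt and header name are taken verbatim from the deleted file; they reflect Poe's API at the time the script was written and may no longer be valid):

```python
from hashlib import md5
from json import dumps

def poe_tag_id(payload_obj: dict, formkey: str, salt: str = 'WpuLMiXEKKE98j56k') -> str:
    # Serialize with compact separators, exactly as the script does before POSTing.
    payload = dumps(payload_obj, separators=(',', ':'))
    # poe-tag-id = md5(payload + formkey + salt), hex-encoded.
    return md5((payload + formkey + salt).encode()).hexdigest()
```

Both mutations in the script (send_code and verify_code) go through this same computation, which is why the base_string/hexdigest pair appears twice.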
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 Crack Download !!HOT!!.md
DELETED
@@ -1,27 +0,0 @@

<br />
<h1>Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 Crack Download: A Comprehensive Guide</h1>
<p>If you are looking for a powerful and versatile structural analysis and design software, you might want to consider Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33. This software is widely used by engineers and architects around the world for designing various types of structures, such as buildings, bridges, towers, stadiums, and more.</p>
<p>However, if you want to use this software without paying for a license, you might be tempted to download a cracked version from the internet. But is this a good idea? What are the risks and benefits of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33? In this article, we will answer these questions and provide you with a comprehensive guide on how to install and activate/crack Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33.</p>
<h2>Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 Crack Download</h2><br /><p><b><b>Download File</b> ⚙ <a href="https://byltly.com/2uKwfz">https://byltly.com/2uKwfz</a></b></p><br /><br />
<h2>What is Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33?</h2>
<p>Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 is the latest version of the STAAD.Pro software, which was released in June 2015 by Bentley Systems, Inc. This version includes several improvements and enhancements over the previous versions, such as:</p>
<ul>
<li>New and updated design codes for steel, concrete, aluminum, and timber structures.</li>
<li>New features for seismic analysis and design, such as rigid diaphragm, IS 1893 response spectrum analysis, and Eurocode EN 1993-1-1.</li>
<li>New features for dynamic analysis and design, such as modal damping ratio, harmonic load cases, and time history analysis.</li>
<li>New features for finite element analysis and design, such as plate buckling analysis, ASME NF 3000 code, and advanced meshing options.</li>
<li>New features for interoperability and integration with other Bentley products, such as RAM Connection Mode, Advanced Slab Design Mode, and Piping Mode.</li>
<li>New features for documentation and printing, such as enhanced report generation, PDF export, and watermarking.</li>
</ul>
<p>Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 is compatible with Windows XP, Windows Vista, Windows 7, Windows 8, Windows 10, and Windows Server operating systems. It requires a minimum of 512 MB of RAM and 500 MB of disk space.</p>
<h2>What are the advantages of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33?</h2>
<p>The main advantage of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 is that you can use it for free without paying for a license or subscription fee. This can save you a lot of money in the long run, especially if you are a student or a freelancer who needs to use the software occasionally or for personal projects.</p>
<p>Another advantage of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 is that you can access all the features and functions of the software without any limitations or restrictions. You can use the software for any type of structure and any type of analysis and design without worrying about exceeding the limits or violating the terms of use.</p>
<h2>What are the disadvantages of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33?</h2>
<p>The main disadvantage of using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 is that you are exposing yourself to various risks and problems that can affect your computer system and your work quality.</p>
<p>Some of the risks and problems that you might encounter when using a cracked version of Bentley STAAD.Pro V8i (SELECTSeries 6) 20.07.11.33 are:</p>
<ul>
<li>Viruses and malware: The crack files that you download from the internet might contain malicious code that can infect your computer with viruses or</p>
<p></p> 7b8c122e87<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/62 117 68 199 8055 Viewerframe Mode Motion.md
DELETED
@@ -1,9 +0,0 @@

<br />
<p>fugue display of a sequence of images stored on the dia (digital image archive) server in the dia (digital image archive) format. <strong>dia</strong> is a free online archive of digital images collected by many museums and universities around the world. <strong>dia</strong> provides a standard way to share, store, and organize digital images. dia is a registered trademark of dia. you must have a <strong>dia</strong> account to upload images, but anyone can browse the archive. we hope this simple file viewer will become a useful resource for dia <strong>dia</strong> images and their metadata. currently, <strong>dia</strong> has over 1 million dia images available, but we expect that number to grow. to run this viewer, please download the viewerframe_v1.0.tar.gz and unpack it somewhere on your file system. the program may not work on your computer; try a different one if it complains about the "file not found".</p>
<h2>62 117 68 199 8055 viewerframe mode motion</h2><br /><p><b><b>Download Zip</b> … <a href="https://imgfil.com/2uy0iw">https://imgfil.com/2uy0iw</a></b></p><br /><br />
<p>the most common way to upload dia images is to drag-and-drop them into the viewerframe window. after doing so, you can use the "save" menu to save a <strong>dia</strong> image to your local file system. use the "load" menu to load a previously saved image. finally, use the "open in browser" menu to open the image in your default browser.</p>
<p>you can use a keyboard shortcut to fire up the viewerframe. here is a list of some keyboard shortcuts. most keys work with the movement of the viewer frame, but certain keys affect the browser window directly:</p>
<p>sets the <code>motion</code> attribute for the viewer to <code>true</code>. when this is set the <code>viewer</code> is rendered with both the <code>motion</code> and <code>shift</code> attributes set to <code>true</code>. this can be useful to render a motion or shift animation and to hide the other animation.</p>
<p></p> 899543212b<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Cm Browser For Pc Free WORK Download Softonic.md
DELETED
@@ -1,6 +0,0 @@

<h2>Cm Browser For Pc Free Download Softonic</h2><br /><p><b><b>DOWNLOAD</b> ☆☆☆☆☆ <a href="https://imgfil.com/2uxYOs">https://imgfil.com/2uxYOs</a></b></p><br /><br />

Adds tabs to discord, just like in a normal web browser. ... Download IP Sniffer for Windows now from Softonic: 100% safe and virus free. ... TypeTentAcc: - Pullers Size: - Length : 6 cm - Width : 1 cm - Pieces in set : 20 - Weight : 20 g Pack sack: ... 4d29de3e1b<br />
<br />
<br />
<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Game Edukasi Anak Lengkap untuk PC Aplikasi dan Tips.md
DELETED
@@ -1,123 +0,0 @@
(content translated from Indonesian)

<br />
<h1>Download Complete Children's Educational Games for PC</h1>
<p>Are you looking for children's educational games that can be played on a PC? You are in the right place. In this article, we will discuss what children's educational games are, how to download and install them on a PC, and our recommendations for interesting and useful children's educational games for PC. Read the full review below.</p>
<h2>download complete children's educational games for pc</h2><br /><p><b><b>Download File</b> ⚙ <a href="https://urlin.us/2uT2vO">https://urlin.us/2uT2vO</a></b></p><br /><br />
<h2>What Are Children's Educational Games?</h2>
<h3>Definition of Children's Educational Games</h3>
<p>Children's educational games are games designed to give children a fun and effective learning experience. They usually contain elements such as puzzles, quizzes, simulations, stories, or exploration that can stimulate children's cognitive, social, emotional, and motor development. They can also help children develop skills such as critical thinking, problem solving, creativity, collaboration, and communication.</p>
<h3>Benefits of Children's Educational Games</h3>
<p>Playing children's educational games is not just entertainment; it also has many benefits for children's growth and development. Here are some benefits of children's educational games that have been studied by experts:</p>
<ul>
<li>Increasing learning motivation. Educational games can make children more interested and enthusiastic about studying material presented in the form of a game. They can also provide feedback and rewards that boost children's confidence and satisfaction.</li>
<li>Improving concentration and attention. Educational games can challenge children to stay focused and alert while playing. They can also help children manage their time and resources efficiently.</li>
<li>Improving memory and understanding. Educational games can help children repeat and reinforce the information they have learned. They can also help children connect different concepts and apply them in real contexts.</li>
<li>Increasing creativity and imagination. Educational games can give children room to experiment, try new things, and express themselves. They can also inspire children to create their own artwork or stories.</li>
<li>Improving hand-eye coordination. Educational games can train children's fine motor skills through moving a mouse, keyboard, or controller. They can also train children's spatial awareness and orientation through navigating virtual environments.</li>
<li>Improving social and emotional skills. Educational games can help children learn about different values, norms, and cultures. They can also help children interact, cooperate, and compete with others in a healthy and sportsmanlike way.</li>
</ul>
<h2>How to Download and Install Children's Educational Games on a PC</h2>
<p>There are several ways to download and install children's educational games on a PC, depending on the source and format of the game you choose. Here are some commonly used methods:</p>
<h3>Downloading from the Microsoft Store</h3>
<p>The Microsoft Store is Microsoft's official app store, offering a wide variety of children's educational games for PCs running Windows 10. You can access the Microsoft Store through the icon on the taskbar or the Start menu. To download and install a children's educational game from the Microsoft Store, follow these steps:</p>
<ol>
<li>Open the Microsoft Store and type the name of the game you want into the search box.</li>
<li>Select the game you want from the search results and click the Get or Buy button. If the game is free, you do not need to pay anything. If it is paid, you need to enter your payment information first.</li>
<li>Wait for the download and installation to finish. You can check the status under Downloads and updates.</li>
<li>Once finished, you can open the downloaded game from the Start menu or the Microsoft Store home screen.</li>
</ol>
<h3>Downloading from the Developer's Official Website</h3>
<p>If you cannot find the game you want in the Microsoft Store, you can search for the game developer's official website on the internet. Usually, the developer's official website provides a download link for the games they make. To download and install a game from a developer's official website, follow these steps:</p>
<ol>
<li>Open your web browser and type the name of the game you want into a search engine such as Google or Bing.</li>
<li>Find the developer's official website in the search results. Make sure the site is safe and trustworthy by checking for the padlock symbol or https at the beginning of the site address.</li>
<li>Open the developer's official website and look for the download link for the game. Usually, the download link is in a section labeled Download, Get, Buy, or similar.</li>
<li>Click the download link and choose where to save the game file on your PC. Wait for the download to finish.</li>
<li>Open the downloaded file and follow the instructions to install the game on your PC. You may need to accept the terms of service, choose a language, or set other preferences.</li>
<li>Once finished, you can open the game from the shortcut on your desktop or from the folder you chose.</li>
</ol>
<h3>Downloading from a Digital Distribution Platform</h3>
<p>Digital distribution platforms are online services that offer a wide variety of PC games at affordable prices or even for free. Some popular examples are Steam, Epic Games Store, GOG.com, and Origin. To download and install a children's educational game from a digital distribution platform, follow these steps:</p>
<p>download free children's educational games for pc<br />
download children's educational games for kindergarten for pc<br />
download children's educational games for elementary school for pc<br />
download English children's educational games for pc<br />
download math children's educational games for pc<br />
download cooking children's educational games for pc<br />
download drawing children's educational games for pc<br />
download counting children's educational games for pc<br />
download reading children's educational games for pc<br />
download writing children's educational games for pc<br />
download Islamic children's educational games for pc<br />
download preschool children's educational games for pc<br />
download offline children's educational games for pc<br />
download online children's educational games for pc<br />
download toddler educational games for pc<br />
download puzzle children's educational games for pc<br />
download coloring children's educational games for pc<br />
download music children's educational games for pc<br />
download logic children's educational games for pc<br />
download creative children's educational games for pc<br />
download science children's educational games for pc<br />
download history children's educational games for pc<br />
download geography children's educational games for pc<br />
download biology children's educational games for pc<br />
download chemistry children's educational games for pc<br />
download physics children's educational games for pc<br />
download astronomy children's educational games for pc<br />
download robotics children's educational games for pc<br />
download coding children's educational games for pc<br />
download Arabic children's educational games for pc<br />
download Japanese children's educational games for pc<br />
download Korean children's educational games for pc<br />
download Mandarin children's educational games for pc<br />
download French children's educational games for pc<br />
download Spanish children's educational games for pc<br />
download Italian children's educational games for pc<br />
download German children's educational games for pc<br />
download Dutch children's educational games for pc<br />
download Russian children's educational games for pc<br />
download Turkish children's educational games for pc<br />
download Indonesian culture children's educational games for pc<br />
download world culture children's educational games for pc<br />
download sports children's educational games for pc<br />
download visual arts children's educational games for pc<br />
download musical arts children's educational games for pc<br />
download dance children's educational games for pc<br />
download theater children's educational games for pc<br />
download literature children's educational games for pc</p>
<ol>
<li>Open the website of the digital distribution platform you chose and create an account if you do not have one.</li>
<li>Download and install the platform's client application on your PC. The client application is the program that lets you access, manage, and play the games you buy or get from that platform.</li>
<li>Open the client application on your PC and sign in to your account.</li> <li>Search for the children's educational game you want in the client application. You can use the search feature, categories, or the recommendations provided.</li>
<li>Select the game you want and click the Add to Cart or Get button. If the game is free, you do not need to pay anything. If it is paid, you need to enter your payment information first.</li>
<li>Wait for the download and installation to finish. You can check the status in the Library section.</li>
<li>Once finished, you can open the downloaded game from the platform's client application.</li>
</ol>
<h2>Recommended Children's Educational Games for PC</h2>
<p>Now that you know how to download and install children's educational games on a PC, you may be wondering which games are suitable for children to play. Here are some recommendations that we picked based on their ratings, reviews, and popularity:</p>
<h3>ABC Mouse</h3>
<p>ABC Mouse is an educational game designed for children aged 2-8. It offers more than 10,000 learning activities covering subjects such as reading, math, science, art, and music. It also has adjustable difficulty levels, progress reports, and virtual rewards that can boost children's motivation to learn. You can download and install ABC Mouse from the Microsoft Store for free.</p>
<h3>Coloring Book</h3>
<p>Coloring Book is an educational game designed for children aged 3-5. It provides more than 100 pictures that children can color using various tools such as pencils, brushes, markers, and stickers. It also has sound and music features that can stimulate children's hearing. You can download and install Coloring Book from the Microsoft Store for free.</p>
<h3>Educational Games for Kids</h3>
<p>Educational Games for Kids is an educational game designed for children aged 4-10. It provides more than 50 mini games that teach various skills such as recognizing letters, numbers, colors, shapes, animals, fruits, vegetables, professions, countries, flags, and more. It also has attractive and funny graphics and animations. You can download and install Educational Games for Kids from the developer's official website for free.</p>
<h3>World of Zoo</h3>
<p>World of Zoo is an educational game designed for children aged 6-12. It lets children create, manage, and explore their dream zoo with more than 90 different kinds of animals. It also teaches children about the behavior, needs, and interesting facts of those animals. You can download and install World of Zoo from the digital distribution platform Steam for Rp 69,999.</p>
<h2>Conclusion</h2>
<p>Children's educational games are games that can give children a fun and effective learning experience. They have many benefits for children's growth and development, such as improving learning motivation, concentration, memory, creativity, hand-eye coordination, and social and emotional skills. To download and install children's educational games on a PC, you can use the Microsoft Store, a developer's official website, or a digital distribution platform. Some of our recommended children's educational games for PC are ABC Mouse, Coloring Book, Educational Games for Kids, and World of Zoo. We hope this article is useful for anyone who wants to download a complete set of children's educational games for PC.</p>
<h2>FAQ</h2>
<p>Here are some frequently asked questions about children's educational games for PC:</p>
<ul>
<li><b>Are children's educational games safe for children to play?</b><br>
Children's educational games are generally safe for children to play, as long as you choose games that suit the child's age, interests, and abilities. You should also pay attention to the rating, reviews, and reputation of the game you choose. In addition, you should supervise and guide children while they play, and limit their playing time so it does not become excessive.</li>
<li><b>Can children's educational games replace formal learning at school?</b><br>
Children's educational games cannot replace formal learning at school, but they can be a supporting learning method. They can be an engaging and interactive medium for introducing or reviewing material learned at school, and a source of new information and inspiration for children. However, they cannot replace the role of teachers, parents, or peers in providing the explanations, guidance, or feedback that children need.</li>
<li><b>Can children's educational games be played offline?</b><br>
It depends on the type and source of the game you choose. Some games require an internet connection to be accessed, downloaded, or played. Others can be played offline after being downloaded and installed on your PC. Check the system requirements and the availability of an offline mode before downloading or playing a game.</li>
<li><b>Can children's educational games be played in multiplayer?</b><br>
It depends on the type and features of the game you choose. Some games are single-player only, while others can be played by two or more players at the same time. Multiplayer modes can be cooperative, where players work together toward a common goal, or competitive, where players compete for the highest score. Check the availability and type of multiplayer mode before playing.</li>
<li><b>Are there free children's educational games for PC?</b><br>
Yes, there are many free children's educational games for PC that you can download and install from various sources such as the Microsoft Store, developers' official websites, or digital distribution platforms. Some examples are ABC Mouse, Coloring Book, Educational Games for Kids, and others. However, be careful of the ads, in-app purchases, or viruses that may come with free games.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Download Parking Master Multiplayer 2 and Play with Your Friends on Android.md
DELETED
@@ -1,178 +0,0 @@

<h1>Parking Master Multiplayer 2: A Review of the Android Game</h1>
<p>If you are looking for a car parking game that offers more than just parking, you might want to check out Parking Master Multiplayer 2. This game is not an ordinary car parking game. It has multiplayer, open-world, next-gen graphics and real car parking experience. You can choose your character, get your car and start playing with your friends. In this article, we will review the game and give you some tips and tricks to enjoy it more.</p>
<h2>parking master multiplayer 2 android apk</h2><br /><p><b><b>Download File</b> →→→ <a href="https://jinyurl.com/2uNRTK">https://jinyurl.com/2uNRTK</a></b></p><br /><br />
<h2>Introduction</h2>
<h3>What is Parking Master Multiplayer 2?</h3>
<p>Parking Master Multiplayer 2 is an Android game developed by Spektra Games. It is a sequel to the popular game Parking Master: Multiplayer, which was released in 2020. The game has been improved according to the wishes of the players and it provides real car driving experience along with parking, racing, drifting, role playing and more.</p>
<h3>Why should you play it?</h3>
<p>There are many reasons why you should play Parking Master Multiplayer 2. Here are some of them:</p>
<ul>
<li>It is free to download and play.</li>
<li>It has a huge map with different locations such as cities, highways, mountains and more.</li>
<li>It has over 120 cars and vehicles that you can drive, including bus, truck, ambulance, fire truck, police car, taxi, school bus etc.</li>
<li>It has lots of customization options for your car, such as engine, brakes, gearbox, exhaust and drivetrain.</li>
<li>It has more than 150 levels of parking missions that will challenge your skills.</li>
<li>It has a multiplayer mode where you can play with your friends in the open world.</li>
<li>It has a singleplayer mode where you can play events such as time trial, drift and parkour.</li>
<li>It has next-gen graphics and sound that will make you feel like you are in a real car.</li>
</ul>
<h2>Features of the game</h2>
<h3>Multiplayer mode</h3>
<h4>How to play with your friends</h4>
<p>One of the best features of Parking Master Multiplayer 2 is the multiplayer mode. You can play with your friends in the open world and have fun together. To play with your friends, you need to do the following:</p>
<ol>
<li>Create an account or log in with your existing account.</li>
<li>Select the multiplayer mode from the main menu.</li>
<li>Select a server from the list or create your own server.</li>
<li>Select a character and a car from your garage.</li>
<li>Invite your friends to join your server or join their server.</li>
<li>Enjoy playing with your friends in the open world.</li>
</ol>
<h4>What to do in the open world</h4>
<p>The open world of Parking Master Multiplayer 2 is full of possibilities. You can do whatever you want in the open world. Here are some of the things you can do:</p>
<ul>
<li>Race with other players and show them who is the boss.</li>
<li>Drift in the streets and make some smoke.</li>
<li>Role play with other players using different characters, vehicles and missions.</li>
<li>Explore new areas and find secret chests with rewards.</li>
<li>Buy and sell cars in the multiplayer mode.</li>
<li>Chat with other players and make new friends.</li>
<li>Have fun and enjoy the game.</li>
</ul>
<h3>Singleplayer mode</h3>
<h4>How to complete parking missions</h4>
<p>If you want to test your parking skills, you can play the singleplayer mode. In this mode, you have to complete parking missions that will challenge your abilities. To complete parking missions, you need to do the following:</p>
<p>parking master multiplayer 2 game download<br />
parking master multiplayer 2 mod apk<br />
parking master multiplayer 2 online<br />
parking master multiplayer 2 free<br />
parking master multiplayer 2 latest version<br />
parking master multiplayer 2 hack<br />
parking master multiplayer 2 cheats<br />
parking master multiplayer 2 review<br />
parking master multiplayer 2 tips<br />
parking master multiplayer 2 guide<br />
parking master multiplayer 2 gameplay<br />
parking master multiplayer 2 update<br />
parking master multiplayer 2 features<br />
parking master multiplayer 2 cars<br />
parking master multiplayer 2 missions<br />
parking master multiplayer 2 graphics<br />
parking master multiplayer 2 simulator<br />
parking master multiplayer 2 racing<br />
parking master multiplayer 2 drifting<br />
parking master multiplayer 2 customization<br />
parking master multiplayer 2 tuning<br />
parking master multiplayer 2 events<br />
parking master multiplayer 2 role playing<br />
parking master multiplayer 2 open world<br />
parking master multiplayer 2 map<br />
parking master multiplayer 2 locations<br />
parking master multiplayer 2 challenges<br />
parking master multiplayer 2 rewards<br />
parking master multiplayer 2 secrets<br />
parking master multiplayer 2 fun<br />
parking master multiplayer 2 best car<br />
parking master multiplayer 2 how to play<br />
parking master multiplayer 2 tutorial<br />
parking master multiplayer 2 walkthrough<br />
parking master multiplayer 2 levels<br />
parking master multiplayer 2 difficulty<br />
parking master multiplayer 2 controls<br />
parking master multiplayer 2 settings<br />
parking master multiplayer 2 sound effects<br />
parking master multiplayer 2 music<br />
parking master multiplayer 2 trailer<br />
parking master multiplayer 2 screenshots<br />
parking master multiplayer 2 videos<br />
parking master multiplayer 2 ratings<br />
parking master multiplayer 2 feedbacks<br />
parking master multiplayer 2 comments<br />
parking master multiplayer 2 questions<br />
parking master multiplayer 2 answers</p>
<ol>
<li>Select the singleplayer mode from the main menu.</li>
<li>Select a car from your garage.</li>
<li>Select a level from the map.</li>
<li>Follow the instructions and park your car in the designated spot.</li>
<li>Avoid hitting obstacles and other cars.</li>
<li>Earn stars and coins for completing the mission.</li>
</ol>
<h4>What are the events and rewards</h4>
<p>In addition to parking missions, you can also play events in the singleplayer mode. Events are special challenges that will give you more fun and rewards. There are three types of events: time trial, drift and parkour. To play events, you need to do the following:</p>
<ol>
<li>Select the events tab from the singleplayer mode.</li>
<li>Select an event from the list.</li>
<li>Select a car from your garage.</li>
<li>Complete the event as fast as possible or with as much drift as possible or with as much parkour as possible.</li>
<li>Earn stars and coins for completing the event.</li>
</ol>
<p>Some of the rewards you can get from playing events are:</p>
<ul>
<li>New cars and vehicles.</li>
<li>New customization options for your car.</li>
<li>New characters and outfits.</li>
<li>New locations and maps.</li>
</ul>
<h3>Graphics and sound</h3>
<h4>How realistic is the game</h4>
<p>Parking Master Multiplayer 2 is one of the most realistic car parking games on Android. The game has next-gen graphics that will make you feel like you are in a real car. The game has realistic physics that will affect how your car behaves on different surfaces and situations. The game has realistic damage that will show how your car gets scratched, dented or broken when you hit something. The game has realistic weather that will change how your car performs in rain, snow or fog.</p>
<h4>How immersive is the game</h4>
<p>Parking Master Multiplayer 2 is also one of the most immersive car parking games on Android. The game has amazing sound that will make you hear every engine roar, tire screech and horn honk. The game has dynamic camera angles that will let you see your car from different perspectives. The game has multiple control options that will let you choose how you want to drive your car. You can use tilt, buttons, steering wheel or joystick. You can also adjust the sensitivity and feedback of each control option.</p>
<h2>Tips and tricks for the game</h2>
<h3>How to customize and upgrade your car</h3>
<p>If you want to make your car look more cool and perform better, you can customize and upgrade it in Parking Master Multiplayer 2. To customize and upgrade your car, you need to do the following:</p>
<ol>
<li>Select the garage tab from the main menu.</li>
<li>Select a car from your garage.</li>
<li>Select the customize option from the bottom menu.</li>
<li>Select a category from the top menu, such as color, wheels, spoiler etc.</li>
<li>Select an item from the list and apply it to your car.</li>
<li>Some items are free and some items cost coins or diamonds.</li>
<li>Select the upgrade option from the bottom menu.</li>
<li>Select a part from the list, such as engine, brakes, gearbox etc.</li>
<li>Select an upgrade level from 1 to 5 and apply it to your part.</li>
<li>Some upgrades are free and some upgrades cost coins or diamonds.</li>
</ol>
<h3>How to earn money and buy new cars</h3>
<p>If you want to earn money and buy new cars in Parking Master Multiplayer 2, you have to play more and complete more missions and events. To earn money and buy new cars, you need to do the following:</p>
<ul>
<li>Earn coins by completing parking missions and events in singleplayer mode or by playing with other players in multiplayer mode.</li>
<li>Earn diamonds by watching ads or by buying them with real money.</li>
<li>Select the shop tab from the main menu.</li>
<li>Select a car from the list that you want to buy.</li>
<li>Some cars are free and some cars cost coins or diamonds or both.</li>
<li>Buy the car with your coins or diamonds or both.</li>
<li>Enjoy driving your new car in the game.</li>
</ul>
<h3>How to race and drift like a pro</h3>
<p>If you want to race and drift like a pro in Parking Master Multiplayer 2, you have to master the controls and the physics of the game. To race and drift like a pro, you need to do the following:</p>
<ul>
<li>Select the best control option for you from the settings menu. You can choose between tilt, buttons, steering wheel or joystick.</li>
<li>Adjust the sensitivity and feedback of your control option to suit your preference.</li>
<li>Learn how to use the accelerator, brake, handbrake and nitro buttons effectively.</li>
<li>Learn how to steer your car smoothly and accurately.</li>
<li>Learn how to use the camera angles to see your car from different perspectives.</li>
<li>Learn how to use the drift mode to slide your car sideways and make sharp turns.</li>
<li>Learn how to use the race mode to boost your speed and overtake other cars.</li>
<li>Practice your skills in the open world or in the events.</li>
</ul>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>Parking Master Multiplayer 2 is an amazing car parking game that offers more than just parking. It has multiplayer, open-world, next-gen graphics and real car parking experience. You can choose your character, get your car and start playing with your friends. You can also customize and upgrade your car, earn money and buy new cars, complete parking missions and events, race and drift like a pro and have fun in the game.</p>
<h3>Call to action</h3>
<p>If you are interested in Parking Master Multiplayer 2, you can download it for free from the Google Play Store. You can also follow the game on Facebook, Instagram and YouTube for more updates and news. You can also leave a review and rating for the game on the Play Store and share your feedback with the developers. Parking Master Multiplayer 2 is a game that you don't want to miss. Download it now and enjoy the best car parking game ever!</p>
<h2>Frequently Asked Questions</h2>
<h3>Q: How can I play Parking Master Multiplayer 2 on PC?</h3>
<p>A: You can play Parking Master Multiplayer 2 on PC by using an Android emulator such as BlueStacks or NoxPlayer. You can download the emulator from their official website and install it on your PC. Then you can download the game from the Play Store or from an APK file and run it on the emulator.</p>
<h3>Q: How can I contact the developers of Parking Master Multiplayer 2?</h3>
<p>A: You can contact the developers of Parking Master Multiplayer 2 by sending an email to [email protected] or by filling out the contact form on their website. You can also follow them on social media platforms such as Facebook, Instagram and YouTube.</p>
<h3>Q: How can I report a bug or a problem in Parking Master Multiplayer 2?</h3>
<p>A: You can report a bug or a problem in Parking Master Multiplayer 2 by sending an email to [email protected] or by filling out the contact form on their website. You can also leave a comment on their social media posts or on their Play Store page. Please provide as much detail as possible about the bug or problem, such as screenshots, device model, OS version etc.</p>
<h3>Q: How can I get more coins and diamonds in Parking Master Multiplayer 2?</h3>
<p>A: You can get more coins and diamonds in Parking Master Multiplayer 2 by completing parking missions and events, playing with other players in multiplayer mode, watching ads, buying them with real money or finding secret chests in the open world.</p>
<h3>Q: How can I unlock more cars and vehicles in Parking Master Multiplayer 2?</h3>
<p>A: You can unlock more cars and vehicles in Parking Master Multiplayer 2 by earning coins and diamonds, buying them with real money or completing certain levels of parking missions and events.</p> 401be4b1e0<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Enjoy GTA 5 Prologue on Your Phone APK and Cache Download Links.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download GTA 5 Prologue APK: How to Play GTA 5 on Your Mobile Device</h1>
|
3 |
-
<p>GTA 5 is one of the most popular and successful video games of all time. It is an open-world action-adventure game that lets you explore a fictional city of Los Santos, based on Los Angeles, and its surrounding areas. You can play as three different characters, each with their own storylines, missions, and abilities. You can also switch between them at any time, or even play with your friends online in various modes and activities.</p>
|
4 |
-
<p>But what if you want to play GTA 5 on your mobile device? Is it possible? Well, the answer is yes, thanks to a fan-made game called GTA 5 Prologue. In this article, we will tell you everything you need to know about this game, how to download it, and what features it offers. Let's get started!</p>
|
5 |
-
<h2>download gta 5 prologue apk</h2><br /><p><b><b>Download File</b> ✵✵✵ <a href="https://jinyurl.com/2uNToh">https://jinyurl.com/2uNToh</a></b></p><br /><br />
|
6 |
-
<h2>What is GTA 5 Prologue?</h2>
|
7 |
-
<h3>A fan-made game by R-USER Games</h3>
|
8 |
-
<p>GTA 5 Prologue is not an official game by Rockstar Games, the developer of GTA 5. It is a fan-made game by R-USER Games, a team of enthusiasts who love GTA 5 and wanted to bring it to mobile devices. They have created this game using Unity Engine, a platform for developing games for various platforms.</p>
|
9 |
-
<h3>A recreation of the first mission of GTA 5</h3>
|
10 |
-
<p>GTA 5 Prologue is not the full version of GTA 5. It is a recreation of the first mission of GTA 5, where you play as Michael, Trevor, and Brad as they rob a bank in North Yankton. You have to escape from the police, shoot your way through enemies, and drive to a safe location. The game follows the same storyline, dialogue, and events as the original game.</p>
|
11 |
-
<h3>A free and offline game for Android devices</h3>
|
12 |
-
<p>GTA 5 Prologue is a free game that you can download and play on your Android device. You don't need an internet connection to play it, as it is an offline game. You also don't need to sign up or register for anything. You just need to download the APK and cache files, install them on your device, and launch the game.</p>
<h2>How to Download GTA 5 Prologue APK?</h2>
<h3>Step 1: Visit the official website of R-USER Games</h3>
<p>The first step to download GTA 5 Prologue APK is to visit the official page of R-USER Games at <a href="https://archive.org/details/com.rusergames.gta5prologue">https://archive.org/details/com.rusergames.gta5prologue</a>. This is where you can find the latest version of the game, as well as other information and updates.</p>
<p>download gta 5 prologue apk+cache<br />
download gta 5 prologue apk for android<br />
download gta 5 prologue apk free<br />
download gta 5 prologue apk offline<br />
download gta 5 prologue apk mod<br />
download gta 5 prologue apk data<br />
download gta 5 prologue apk obb<br />
download gta 5 prologue apk latest version<br />
download gta 5 prologue apk highly compressed<br />
download gta 5 prologue apk no verification<br />
download gta 5 prologue apk fan made<br />
download gta 5 prologue apk r-user games<br />
download gta 5 prologue apk full game<br />
download gta 5 prologue apk and cache zip<br />
download gta 5 prologue apk mediafire<br />
download gta 5 prologue apk mega<br />
download gta 5 prologue apk google drive<br />
download gta 5 prologue apk file<br />
download gta 5 prologue apk android 10<br />
download gta 5 prologue apk android 11<br />
download gta 5 prologue apk android pie<br />
download gta 5 prologue apk android oreo<br />
download gta 5 prologue apk android nougat<br />
download gta 5 prologue apk android marshmallow<br />
download gta 5 prologue apk android lollipop<br />
download gta 5 prologue apk without cache<br />
download gta 5 prologue apk without obb<br />
download gta 5 prologue apk without data<br />
download gta 5 prologue apk without verification<br />
download gta 5 prologue apk without internet<br />
how to download gta 5 prologue apk<br />
how to install gta 5 prologue apk<br />
how to play gta 5 prologue apk<br />
how to run gta 5 prologue apk<br />
how to update gta 5 prologue apk<br />
where to download gta 5 prologue apk<br />
where to find gta 5 prologue apk<br />
where to get gta 5 prologue apk<br />
where to put gta 5 prologue cache<br />
where to put gta 5 prologue obb<br />
where to put gta 5 prologue data<br />
is it safe to download gta 5 prologue apk<br />
is it legal to download gta 5 prologue apk<br />
is it possible to download gta 5 prologue apk<br />
is it real to download gta 5 prologue apk<br />
is it working to download gta 5 prologue apk</p>
<h3>Step 2: Download the APK and cache files</h3>
<p>The next step is to download the APK and cache files from the website. The APK file is the application package that you install on your device. The cache file is the data archive that contains the game's graphics, sound effects, and other resources. You need both files for the game to run properly.</p>
<p>The APK file is about 200 MB, while the cache file is about 1 GB. Make sure you have enough free space on your device before downloading them. You can use a download manager or a browser that supports resuming downloads to get them faster and without interruption.</p>
<h3>Step 3: Install the APK and place the cache folder in SDcard/Android/obb/</h3>
<p>After downloading both files, install the APK on your device by tapping on it and following the instructions. You may need to enable the option to install apps from unknown sources in your device settings.</p>
<p>Next, place the cache folder in the right location on your device. The cache folder is named com.rusergames.gta5prologue. Copy or move this folder to SDcard/Android/obb/. This is where the game looks for its data files. If the obb folder does not exist, you can create it manually, as in the sketch below.</p>
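<p>If you prefer to copy the cache over from a computer, adb offers a quick route. This is a minimal sketch, not an official step: it assumes adb is installed, USB debugging is enabled on the phone, and the extracted cache folder sits in the current directory; the folder name is the one given above.</p>
<pre><code># Hedged example: push the extracted cache folder into the obb directory.
import subprocess

# Make sure the obb directory exists on the device.
subprocess.run(["adb", "shell", "mkdir", "-p", "/sdcard/Android/obb"], check=True)
# Copy the cache folder into place.
subprocess.run(["adb", "push", "com.rusergames.gta5prologue", "/sdcard/Android/obb/"], check=True)
</code></pre>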
<h3>Step 4: Launch the game and enjoy</h3>
<p>Now you are ready to launch the game and enjoy playing GTA 5 Prologue on your mobile device. Find the game icon on your home screen or in your app drawer, tap on it, and wait for the game to load. You will see the same intro and menu as the original game. Start playing by selecting Story Mode and choosing Prologue.</p>
<h2>What are the Features of GTA 5 Prologue?</h2>
<h3>High-quality graphics and sound effects</h3>
<p>One of the features of GTA 5 Prologue is its high-quality graphics and sound effects. The game looks very similar to the original, with detailed textures, realistic lighting, and shadows. It also has convincing sound effects, such as gunshots, explosions, car engines, and voices. You will feel like you are playing GTA 5 on a console or PC.</p>
<h3>Smooth and realistic gameplay and controls</h3>
<p>Another feature is its smooth and realistic gameplay and controls. The game runs smoothly on most Android devices, without lag or glitches, and has realistic physics and animations such as ragdoll effects, bullet impacts, and car damage. The controls are intuitive and customizable: touch-screen buttons, a virtual joystick, gyroscope, and accelerometer. You can adjust the sensitivity, layout, and size of the controls to your preference.</p>
<h3>Multiple camera angles and perspectives</h3>
<p>A third feature is its multiple camera angles and perspectives. The game lets you switch between first-person, third-person, cinematic, and free camera modes. You can also zoom in or out of the action, or rotate the camera around your character, for more freedom and immersion.</p>
<h3>Compatible with most Android devices</h3>
<p>A final feature is its compatibility with most Android devices. The game does not require a high-end phone to run smoothly; it works on devices with at least 1 GB of RAM and Android 4.4 or higher. It also offers options to adjust graphics quality, such as resolution, texture quality, shadow quality, and anti-aliasing, which helps you optimize performance for your device.</p>
<h2>Conclusion</h2>
<p>GTA 5 Prologue is a fan-made game that lets you play GTA 5 on your mobile device. It is a recreation of the first mission of GTA 5, where you rob a bank in North Yankton. It is a free, offline game that you can download from the official page of R-USER Games. It offers high-quality graphics and sound effects, smooth and realistic gameplay and controls, multiple camera angles and perspectives, and compatibility with most Android devices.</p>
<p>If you are a fan of GTA 5 and want to experience it on your mobile device, you should definitely try GTA 5 Prologue. It is a fun and exciting game. Just follow the steps above to download and enjoy it.</p>
<h3>FAQs</h3>
<ul>
<li><b>Q: Is GTA 5 Prologue an official game by Rockstar Games?</b> A: No, it is a fan-made game by R-USER Games.</li>
<li><b>Q: Is GTA 5 Prologue safe to download and play?</b> A: Yes, it does not contain any viruses or malware.</li>
<li><b>Q: How long is GTA 5 Prologue?</b> A: About 15 minutes; it covers the first mission of GTA 5.</li>
<li><b>Q: Can I play GTA 5 Prologue online with my friends?</b> A: No, GTA 5 Prologue has no online mode; it is an offline game.</li>
<li><b>Q: Can I play GTA 5 Prologue on iOS devices?</b> A: No, it is only available for Android devices.</li>
</ul>
spaces/1phancelerku/anime-remove-background/Facebook 32 Bit How to Download and Install the Latest Version for Android.md
DELETED
@@ -1,111 +0,0 @@
<h1>Facebook APK 32 Bit: What Is It and How to Download It</h1>
<p>Facebook is one of the most popular social media platforms in the world, with over 2.8 billion monthly active users. However, not all devices can run the official Facebook app smoothly, especially older or low-end Android phones. That's why some users may need to download a Facebook APK 32 bit file, which is a compressed version of the app that works on devices with a 32-bit processor.</p>
<h2>facebook apk 32 bit</h2><p><b><b>DOWNLOAD</b> ★★★ <a href="https://jinyurl.com/2uNNio">https://jinyurl.com/2uNNio</a></b></p>
<h2>Introduction</h2>
<p>In this article, we will explain what Facebook APK 32 bit is, why you may need it, and how to download it from two different sources. By the end of this article, you will be able to enjoy Facebook on your Android device without any hassle.</p>
<h3>What is Facebook APK 32 bit?</h3>
<p>An APK file is an Android Package file that contains all the components of an app, such as code, resources, and a manifest. It is used to install apps on Android devices without the Google Play Store. A 32-bit APK file is one that is compatible with devices that have a 32-bit processor, a type of CPU that handles data in chunks of 32 bits at a time.</p>
<p>Facebook APK 32 bit is an APK file that contains a version of the Facebook app optimized for devices with a 32-bit processor. It has a smaller size, uses less data, and loads faster than the regular Facebook app. It also works on older Android versions that are not supported by the official app.</p>
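<p>If you are not sure whether your phone is 32-bit, you can query its ABI over adb. This is a minimal sketch, assuming adb is installed and the device is connected; the ABI strings are standard Android values.</p>
<pre><code># Hedged example: report whether the connected device uses a 32-bit ABI.
import subprocess

abi = subprocess.run(
    ["adb", "shell", "getprop", "ro.product.cpu.abi"],
    capture_output=True, text=True, check=True,
).stdout.strip()

if abi in ("armeabi-v7a", "armeabi", "x86"):
    print(f"{abi}: 32-bit device, use the 32-bit APK")
else:
    print(f"{abi}: 64-bit device")
</code></pre>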
<p>facebook lite apk 32 bit<br />
facebook messenger apk 32 bit<br />
facebook app download for android 32 bit<br />
facebook mod apk 32 bit<br />
facebook apk for pc windows 7 32 bit<br />
facebook apk old version 32 bit<br />
facebook dark mode apk 32 bit<br />
facebook video downloader apk 32 bit<br />
facebook gameroom apk 32 bit<br />
facebook auto liker apk 32 bit<br />
facebook beta apk 32 bit<br />
facebook business suite apk 32 bit<br />
facebook creator studio apk 32 bit<br />
facebook dating apk 32 bit<br />
facebook emoji keyboard apk 32 bit<br />
facebook events apk 32 bit<br />
facebook for android tv apk 32 bit<br />
facebook groups apk 32 bit<br />
facebook hacker apk 32 bit<br />
facebook home apk 32 bit<br />
facebook instagram whatsapp apk 32 bit<br />
facebook java app download for android 32 bit<br />
facebook katana apk 32 bit<br />
facebook lite mod apk 32 bit<br />
facebook login app download for android 32 bit<br />
facebook marketplace apk 32 bit<br />
facebook messenger lite apk 32 bit<br />
facebook news app download for android 32 bit<br />
facebook orca apk 32 bit<br />
facebook page manager apk 32 bit<br />
facebook password hacker app download for android 32 bit<br />
facebook photo editor app download for android 32 bit<br />
facebook qr code scanner app download for android 32 bit<br />
facebook rooms app download for android 32 bit<br />
facebook story saver app download for android 32 bit<br />
facebook transparent apk 32 bit<br />
facebook update app download for android 32 bit<br />
facebook video call recorder app download for android 32 bit<br />
facebook watch app download for android 32 bit<br />
facebook xapk installer app download for android 32 bit<br />
free download of latest version of the official Facebook APK (Android App) - APKCombo<br />
how to install Facebook APK (Android App) - APKCombo on your device<br />
how to update Facebook APK (Android App) - APKCombo to the latest version<br />
how to uninstall Facebook APK (Android App) - APKCombo from your device<br />
how to use Facebook APK (Android App) - APKCombo features and settings<br />
how to fix Facebook APK (Android App) - APKCombo errors and issues<br />
how to contact Facebook APK (Android App) - APKCombo support team<br />
how to rate and review Facebook APK (Android App) - APKCombo on Google Play Store<br />
how to share Facebook APK (Android App) - APKCombo with your friends and family<br />
how to download other apps from Meta Platforms, Inc. on Google Play Store</p>
<h3>Why do you need Facebook APK 32 bit?</h3>
<p>You may need Facebook APK 32 bit if you have an Android device with a 32-bit processor that cannot run the official Facebook app smoothly. Some of the reasons are:</p>
<ul>
<li>Your device has low storage space and cannot accommodate the large size of the official app.</li>
<li>Your device has low RAM and cannot handle the high memory usage of the official app.</li>
<li>Your device has a slow internet connection and cannot load the heavy content of the official app.</li>
<li>Your device has an old Android version that is not compatible with the latest features of the official app.</li>
</ul>
<p>By downloading Facebook APK 32 bit, you can still access all the basic functions of Facebook, such as posting updates, liking and commenting on posts, chatting with friends, and browsing pages and groups. You can also save money by using less data and battery power.</p>
<h2>How to download Facebook APK 32 bit</h2>
<p>There are two ways to download Facebook APK 32 bit for your Android device: from a third-party website like APKCombo, or from the official Facebook Lite website. Here are the steps for each option:</p>
<h3>Option 1: Download from APKCombo</h3>
<p>APKCombo is a website that offers free downloads of various APK files for Android apps and games. You can use it to download Facebook APK 32 bit by following these steps:</p>
<h4>Step 1: Visit the APKCombo website</h4>
<p>Open your browser and go to <a href="https://apkcombo.com/search/facebook-32-bit">https://apkcombo.com/search/facebook-32-bit</a>. This will take you to the search results page for facebook 32 bit.</p>
<h4>Step 2: Search for facebook 32 bit</h4>
<p>On the search results page, you will see a list of different versions of Facebook APK 32 bit, such as Facebook Lite, Facebook Messenger Lite, and Facebook for Android. Choose the one that suits your needs: for example, Facebook Lite if you want a lighter and faster version, or Facebook for Android if you want the full-featured app.</p>
<h4>Step 3: Choose the version and click download</h4>
<p>Once you have chosen the version you want, click the download button next to it. This takes you to a page with more details about the app, such as the size, the developer, the rating, and the description, along with screenshots and user reviews. To download the APK file, click the green download button at the top of the page.</p>
<h4>Step 4: Install the APK file on your device</h4>
<p>After you have downloaded the APK file, you need to install it on your device. First enable the installation of apps from unknown sources: go to Settings > Security > Unknown Sources and toggle it on. Then locate the APK file in your device storage and tap on it to start the installation. Follow the instructions on the screen and wait for the app to be installed.</p>
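<p>Alternatively, you can install a downloaded APK from a computer over adb. A minimal sketch, assuming adb is installed and the device is connected; the filename is illustrative.</p>
<pre><code># Hedged example: install (or reinstall with -r) a downloaded APK over adb.
import subprocess

subprocess.run(["adb", "install", "-r", "facebook-32bit.apk"], check=True)
</code></pre>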
<h3>Option 2: Download from Facebook Lite</h3>
<p>Facebook Lite is an official version of Facebook designed for devices with low specifications and slow internet connections. It has a smaller size, uses less data, and loads faster than the regular Facebook app, and it works on older Android versions that the official app does not support. You can download Facebook Lite from its website by following these steps:</p>
<h4>Step 1: Visit the Facebook Lite website</h4>
<p>Open your browser and go to <a href="https://www.facebook.com/lite">https://www.facebook.com/lite</a>. This will take you to the official website of Facebook Lite.</p>
<h4>Step 2: Click on the download button</h4>
<p>On the website, you will see a blue download button that says "Get Facebook Lite". Click on it to start downloading the APK file.</p>
<h4>Step 3: Install the APK file on your device</h4>
<p>The installation process is the same as in option 1: enable unknown sources in your device settings, locate the APK file in your device storage, and tap on it to start the installation. Follow the instructions on the screen and wait for the app to be installed.</p>
<h2>Conclusion</h2>
<p>In this article, we have explained what Facebook APK 32 bit is, why you may need it, and how to download it from two different sources. We hope that this article has helped you to enjoy Facebook on your Android device without any hassle.</p>
<p>If you have any questions or feedback about this article, please feel free to leave a comment below. We would love to hear from you!</p>
<p>Also, if you liked this article, please share it with your friends and family who may find it useful. Thank you for reading!</p>
<h3>Frequently Asked Questions</h3>
<ol>
<li><b>What is the difference between Facebook APK 32 bit and 64 bit?</b>
<p>A 32-bit APK file is compatible with devices that have a 32-bit processor, while a 64-bit APK file targets devices with a 64-bit processor. A 64-bit processor can handle more data at once and perform faster than a 32-bit processor, but it also requires more memory and storage space.</p></li>
<li><b>Is Facebook APK 32 bit safe to download?</b>
<p>Yes, as long as you download it from a trusted source like APKCombo or the Facebook Lite website. However, always be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device or compromise your privacy.</p></li>
<li><b>How do I update Facebook APK 32 bit?</b>
<p>Download and install the latest version from the same source you got it from. Alternatively, enable automatic updates in your device settings so that your apps are updated whenever a new version is available.</p></li>
<li><b>How do I uninstall Facebook APK 32 bit?</b>
<p>Go to Settings > Apps > Facebook > Uninstall. This removes the app from your device and frees up some storage space.</p></li>
<li><b>What are the alternatives to Facebook APK 32 bit?</b>
<p>If you are looking for other ways to use Facebook on your Android device, you can try the following alternatives:</p>
<ul>
<li>Use the Facebook mobile website: access Facebook from your browser at <a href="https://m.facebook.com">https://m.facebook.com</a>. This gives you a similar experience to the app without taking up any space on your device.</li>
<li>Use a third-party app that integrates with Facebook, such as Friendly, Swipe, or Maki. These apps offer additional features and customization options that the official app does not have.</li>
</ul></li>
</ol>
spaces/1toTree/lora_test/ppdiffusers/models/unet_2d.py
DELETED
@@ -1,271 +0,0 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import paddle
import paddle.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..modeling_utils import ModelMixin
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class UNet2DOutput(BaseOutput):
    """
    Args:
        sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Hidden states output. Output of last layer of model.
    """

    sample: paddle.Tensor


class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    UNet2DModel is a 2D UNet model that takes in a noisy sample and a timestep and returns a sample-shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the models (such as downloading or saving, etc.)

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to
            `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`): Tuple of downsample block
            types.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
            The mid block type. Choose from `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.
        up_block_types (`Tuple[str]`, *optional*, defaults to
            `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`): Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to
            `(224, 448, 672, 896)`): Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for the normalization.
        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for the normalization.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for resnet blocks, see [`~models.resnet.ResnetBlock2D`]. Choose from `default` or `scale_shift`.
        add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention in the mid block.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[Union[int, Tuple[int, int]]] = None,
        in_channels: int = 3,
        out_channels: int = 3,
        center_input_sample: bool = False,
        time_embedding_type: str = "positional",
        freq_shift: int = 0,
        flip_sin_to_cos: bool = True,
        down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
        up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
        block_out_channels: Tuple[int] = (224, 448, 672, 896),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        act_fn: str = "silu",
        attention_head_dim: int = 8,
        norm_num_groups: int = 32,
        norm_eps: float = 1e-5,
        resnet_time_scale_shift: str = "default",
        add_attention: bool = True,
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # input
        self.conv_in = nn.Conv2D(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        self.down_blocks = nn.LayerList([])
        self.mid_block = None
        self.up_blocks = nn.LayerList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attn_num_head_channels=attention_head_dim,
                downsample_padding=downsample_padding,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attn_num_head_channels=attention_head_dim,
            resnet_groups=norm_num_groups,
            add_attention=add_attention,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attn_num_head_channels=attention_head_dim,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.conv_norm_out = nn.GroupNorm(
            num_channels=block_out_channels[0], num_groups=num_groups_out, epsilon=norm_eps
        )
        self.conv_act = nn.Silu()
        self.conv_out = nn.Conv2D(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def forward(
        self,
        sample: paddle.Tensor,
        timestep: Union[paddle.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet2DOutput, Tuple]:
        r"""
        Args:
            sample (`paddle.Tensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`paddle.Tensor` or `float` or `int`): (batch) timesteps
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.

        Returns:
            [`~models.unet_2d.UNet2DOutput`] or `tuple`: [`~models.unet_2d.UNet2DOutput`] if `return_dict` is True,
            otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
        """
        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not paddle.is_tensor(timesteps):
            timesteps = paddle.to_tensor([timesteps], dtype="int64")
        elif paddle.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None]

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * paddle.ones((sample.shape[0],), dtype=timesteps.dtype)

        t_emb = self.time_proj(timesteps).cast(self.dtype)
        emb = self.time_embedding(t_emb)

        # 2. pre-process
        skip_sample = sample
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "skip_conv"):
                sample, res_samples, skip_sample = downsample_block(
                    hidden_states=sample, temb=emb, skip_sample=skip_sample
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, emb)

        # 5. up
        skip_sample = None
        for upsample_block in self.up_blocks:
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            if hasattr(upsample_block, "skip_conv"):
                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if skip_sample is not None:
            sample += skip_sample

        if self.config.time_embedding_type == "fourier":
            timesteps = timesteps.reshape([sample.shape[0], *([1] * len(sample.shape[1:]))])
            sample = sample / timesteps

        if not return_dict:
            return (sample,)

        return UNet2DOutput(sample=sample)
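# Usage sketch (not part of the original file; assumes paddle and ppdiffusers
# are installed; the small channel sizes below are illustrative, not the defaults):
import paddle
from ppdiffusers.models.unet_2d import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = paddle.randn([1, 3, 32, 32])
out = model(noise, timestep=10).sample  # same shape as the input noise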
spaces/801artistry/RVC801/infer/lib/infer_pack/commons.py
DELETED
@@ -1,167 +0,0 @@
import math

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += (
        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    )
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def slice_segments2(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
        num_timescales - 1
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1.0 / norm_type)
    return total_norm
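# Usage sketch (not part of the original file; run with the functions above in
# scope). sequence_mask builds boolean masks from lengths, and generate_path
# turns per-token durations into a monotonic alignment path:
lengths = torch.tensor([3, 5])
print(sequence_mask(lengths))  # [2, 5] mask, True on the first 3 and 5 steps

duration = torch.tensor([[[1.0, 2.0, 2.0]]])  # [b=1, 1, t_x=3], sums to t_y
attn_mask = torch.ones(1, 1, 5, 3)            # [b, 1, t_y=5, t_x=3]
print(generate_path(duration, attn_mask))     # [1, 1, 5, 3] alignment path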
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules/F0Predictor/PMF0Predictor.py
DELETED
@@ -1,97 +0,0 @@
from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
import parselmouth
import numpy as np


class PMF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        self.sampling_rate = sampling_rate

    def interpolate_f0(self, f0):
        """
        Interpolate the F0 contour over unvoiced frames.
        """

        data = np.reshape(f0, (f0.size, 1))

        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
        vuv_vector[data > 0.0] = 1.0
        vuv_vector[data <= 0.0] = 0.0

        ip_data = data

        frame_number = data.size
        last_value = 0.0
        for i in range(frame_number):
            if data[i] <= 0.0:
                j = i + 1
                for j in range(i + 1, frame_number):
                    if data[j] > 0.0:
                        break
                if j < frame_number - 1:
                    if last_value > 0.0:
                        step = (data[j] - data[i - 1]) / float(j - i)
                        for k in range(i, j):
                            ip_data[k] = data[i - 1] + step * (k - i + 1)
                    else:
                        for k in range(i, j):
                            ip_data[k] = data[j]
                else:
                    for k in range(i, frame_number):
                        ip_data[k] = last_value
            else:
                ip_data[i] = data[i]  # this copy may be unnecessary
                last_value = data[i]

        return ip_data[:, 0], vuv_vector[:, 0]

    def compute_f0(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0

    def compute_f0_uv(self, wav, p_len=None):
        x = wav
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        else:
            assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        time_step = self.hop_length / self.sampling_rate * 1000
        f0 = (
            parselmouth.Sound(x, self.sampling_rate)
            .to_pitch_ac(
                time_step=time_step / 1000,
                voicing_threshold=0.6,
                pitch_floor=self.f0_min,
                pitch_ceiling=self.f0_max,
            )
            .selected_array["frequency"]
        )

        pad_size = (p_len - len(f0) + 1) // 2
        if pad_size > 0 or p_len - len(f0) - pad_size > 0:
            f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
        f0, uv = self.interpolate_f0(f0)
        return f0, uv
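# Usage sketch (not part of the original file; assumes numpy and parselmouth
# are installed, with PMF0Predictor defined as above). Estimate the F0 of a
# synthetic 220 Hz tone:
sr = 44100
t = np.linspace(0, 1.0, sr, endpoint=False)
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)

predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)
print(f0.shape, float(f0[uv > 0].mean()))  # voiced frames average near 220 Hz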
spaces/AI4PD/hexviz/hexviz/pages/1_🗺️Identify_Interesting_Heads.py
DELETED
@@ -1,152 +0,0 @@
import re

import streamlit as st

from hexviz.attention import clean_and_validate_sequence, get_attention, res_to_1letter
from hexviz.config import URL
from hexviz.models import Model, ModelType
from hexviz.plot import plot_single_heatmap, plot_tiled_heatmap
from hexviz.view import (
    menu_items,
    select_heads_and_layers,
    select_model,
    select_pdb,
    select_protein,
    select_sequence_slice,
)

st.set_page_config(layout="wide", menu_items=menu_items)
st.title("Identify Interesting Heads")


for k, v in st.session_state.items():
    st.session_state[k] = v

models = [
    Model(name=ModelType.TAPE_BERT, layers=12, heads=12),
    Model(name=ModelType.ZymCTRL, layers=36, heads=16),
    Model(name=ModelType.PROT_BERT, layers=30, heads=16),
    Model(name=ModelType.PROT_T5, layers=24, heads=32),
]

with st.expander("Input a PDB id, upload a PDB file or input a sequence", expanded=True):
    pdb_id = select_pdb()
    uploaded_file = st.file_uploader("2.Upload PDB", type=["pdb"])
    input_sequence = st.text_area("3.Input sequence", "", key="input_sequence", max_chars=400)
    sequence, error = clean_and_validate_sequence(input_sequence)
    if error:
        st.error(error)
    pdb_str, structure, source = select_protein(pdb_id, uploaded_file, sequence)
    st.write(f"Visualizing: {source}")

selected_model = select_model(models)


chains = list(structure.get_chains())
chain_ids = [chain.id for chain in chains]
if "selected_chain" not in st.session_state:
    st.session_state.selected_chain = chain_ids[0]
chain_selection = st.sidebar.selectbox(
    label="Select Chain",
    options=chain_ids,
    key="selected_chain",
)

selected_chain = next(chain for chain in chains if chain.id == chain_selection)

ec_number = ""
if selected_model.name == ModelType.ZymCTRL:
    st.sidebar.markdown(
        """
        ZymCTRL EC number
        ---
        """
    )
    try:
        ec_number = structure.header["compound"]["1"]["ec"]
    except KeyError:
        pass
    ec_number = st.sidebar.text_input("Enzyme Comission number (EC)", ec_number)

    # Validate EC number
    if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ec_number):
        st.sidebar.error(
            """Please enter a valid Enzyme Commission number in the format of 4
            integers separated by periods (e.g., 1.2.3.21)"""
        )


residues = [res for res in selected_chain.get_residues()]
sequence = res_to_1letter(residues)

l = len(sequence)
slice_start, slice_end = select_sequence_slice(l)
truncated_sequence = sequence[slice_start - 1 : slice_end]
remove_special_tokens = st.sidebar.checkbox(
    "Hide attention to special tokens", key="remove_special_tokens"
)
if "fixed_scale" not in st.session_state:
    st.session_state.fixed_scale = True
fixed_scale = st.sidebar.checkbox(
    "Fixed scale",
    help="For long sequences the default fixed 0 to 1 scale can have very low contrast"
    " heatmaps, consider using a relative scale to increase the contrast between high"
    " attention and low attention areas. Note that each subplot will have separate"
    " color scales so don't compare colors between attention heads if using a"
    " non-fixed scale.",
    key="fixed_scale",
)
if not fixed_scale:
    st.sidebar.warning(
        "With `Fixed scale` set to False each cell in the grid has a dynamic color"
        " scale where the highest attention value in that cell is bright yellow."
        " Colors can not be compared between cells."
    )


layer_sequence, head_sequence = select_heads_and_layers(st.sidebar, selected_model)

st.markdown(
    f"""Each tile is a heatmap of attention for a section of the {source} chain
    ({chain_selection}) from residue {slice_start} to {slice_end}. Adjust the
    section length and starting point in the sidebar."""
)

# TODO: Decide if you should get attention for the full sequence or just the truncated sequence
# Attention values will change depending on what we do.
attention, tokens = get_attention(
    sequence=truncated_sequence,
    model_type=selected_model.name,
    remove_special_tokens=remove_special_tokens,
    ec_number=ec_number,
)

fig = plot_tiled_heatmap(
    attention, layer_sequence=layer_sequence, head_sequence=head_sequence, fixed_scale=fixed_scale
)


st.pyplot(fig)

st.subheader("Plot single head")

if selected_model.name == ModelType.PROT_T5:
    # Remove leading underscores from residue tokens
    tokens = [token[1:] if str(token) != "</s>" else token for token in tokens]

left, mid, right = st.columns(3)
with left:
    if "selected_layer" not in st.session_state:
        st.session_state["selected_layer"] = 5
    layer_one = st.selectbox(
        "Layer",
        options=[i for i in range(1, selected_model.layers + 1)],
        key="selected_layer",
    )
    layer = layer_one - 1
with mid:
    if "selected_head" not in st.session_state:
        st.session_state["selected_head"] = 1
    head_one = st.selectbox(
        "Head",
        options=[i for i in range(1, selected_model.heads + 1)],
        key="selected_head",
    )
    head = head_one - 1
with right:
    if "label_tokens" not in st.session_state:
        st.session_state.label_tokens = []
    tokens_to_label = st.multiselect("Label tokens", options=tokens, key="label_tokens")

if len(tokens_to_label) > 0:
    tokens = [token if token in tokens_to_label else "" for token in tokens]


single_head_fig = plot_single_heatmap(attention, layer, head, tokens=tokens, fixed_scale=fixed_scale)
st.pyplot(single_head_fig)
spaces/AIConsultant/MusicGen/audiocraft/modules/codebooks_patterns.py
DELETED
@@ -1,539 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from collections import namedtuple
from dataclasses import dataclass
from functools import lru_cache
import logging
import typing as tp

from abc import ABC, abstractmethod
import torch

LayoutCoord = namedtuple('LayoutCoord', ['t', 'q'])  # (timestep, codebook index)
PatternLayout = tp.List[tp.List[LayoutCoord]]  # Sequence of coordinates
logger = logging.getLogger(__name__)


@dataclass
class Pattern:
    """Base implementation of a pattern over a sequence with multiple codebooks.

    The codebook pattern consists in a layout, defining for each sequence step
    the list of coordinates of each codebook timestep in the resulting interleaved sequence.
    The first item of the pattern is always an empty list in order to properly insert a special token
    to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern
    and ``timesteps`` the number of timesteps corresponding to the original sequence.

    The pattern provides convenient methods to build and revert interleaved sequences from it:
    ``build_pattern_sequence`` maps a given dense input tensor of multi-codebook sequence from [B, K, T]
    to the interleaved sequence of shape [B, K, S] applying the pattern, with B being the batch size,
    K being the number of codebooks, T the number of original timesteps and S the number of sequence steps
    for the output sequence. The unfilled positions are replaced with a special token and the built sequence
    is returned along with a mask indicating valid tokens.
    ``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment
    of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask
    to fill and specify invalid positions if needed.
    See the dedicated methods for more details.
    """
    # Pattern layout, for each sequence step, we have a list of coordinates
    # corresponding to the original codebook timestep and position.
    # The first list is always an empty list in order to properly insert
    # a special token to start with.
    layout: PatternLayout
    timesteps: int
    n_q: int

    def __post_init__(self):
        assert len(self.layout) > 0
        assert self.layout[0] == []
        self._validate_layout()
        self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes)
        self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes)
        logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout))

    def _validate_layout(self):
        """Runs checks on the layout to ensure a valid pattern is defined.
        A pattern is considered invalid if:
            - Multiple timesteps for a same codebook are defined in the same sequence step
            - The timesteps for a given codebook are not in ascending order as we advance in the sequence
              (this would mean that we have future timesteps before past timesteps).
        """
        q_timesteps = {q: 0 for q in range(self.n_q)}
        for s, seq_coords in enumerate(self.layout):
            if len(seq_coords) > 0:
                qs = set()
                for coord in seq_coords:
                    qs.add(coord.q)
                    last_q_timestep = q_timesteps[coord.q]
                    assert coord.t >= last_q_timestep, \
                        f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}"
                    q_timesteps[coord.q] = coord.t
                # each sequence step contains at max 1 coordinate per codebook
                assert len(qs) == len(seq_coords), \
                    f"Multiple entries for a same codebook are found at step {s}"

    @property
    def num_sequence_steps(self):
        return len(self.layout) - 1

    @property
    def max_delay(self):
        max_t_in_seq_coords = 0
        for seq_coords in self.layout[1:]:
            for coords in seq_coords:
                max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1)
        return max_t_in_seq_coords - self.timesteps

    @property
    def valid_layout(self):
        valid_step = len(self.layout) - self.max_delay
        return self.layout[:valid_step]

    def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None):
        """Get codebook coordinates in the layout that corresponds to the specified timestep t
        and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step
        and the actual codebook coordinates.
        """
        assert t <= self.timesteps, "provided timesteps is greater than the pattern's number of timesteps"
        if q is not None:
            assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks"
        coords = []
        for s, seq_codes in enumerate(self.layout):
            for code in seq_codes:
                if code.t == t and (q is None or code.q == q):
                    coords.append((s, code))
        return coords

    def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]:
        return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)]

    def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]:
        steps_with_timesteps = self.get_steps_with_timestep(t, q)
        return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None

    def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool,
                                                device: tp.Union[torch.device, str] = 'cpu'):
        """Build scatter indexes corresponding to the pattern, up to the provided sequence_steps.

        Args:
            timesteps (int): Maximum number of timesteps steps to consider.
            keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps.
            device (torch.device or str): Device for created tensors.
        Returns:
            indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S].
            mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S].
        """
        assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
        assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern"
        # use the proper layout based on whether we limit ourselves to valid steps only or not,
        # note that using the valid_layout will result in a truncated sequence up to the valid steps
        ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
        # single item indexing being super slow with pytorch vs. numpy, so we use numpy here
        indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy()
        mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy()
        # fill indexes with last sequence step value that will correspond to our special token
        # the last value is n_q * timesteps as we have flattened z and append special token as the last token
        # which will correspond to the index: n_q * timesteps
        indexes[:] = n_q * timesteps
        # iterate over the pattern and fill scattered indexes and mask
        for s, sequence_coords in enumerate(ref_layout):
            for coords in sequence_coords:
                if coords.t < timesteps:
                    indexes[coords.q, s] = coords.t + coords.q * timesteps
                    mask[coords.q, s] = 1
        indexes = torch.from_numpy(indexes).to(device)
        mask = torch.from_numpy(mask).to(device)
        return indexes, mask

    def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
        """Build sequence corresponding to the pattern from the input tensor z.
        The sequence is built using up to sequence_steps if specified, and non-pattern
        coordinates are filled with the special token.

        Args:
            z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T].
            special_token (int): Special token used to fill non-pattern coordinates in the new sequence.
            keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
                Steps that are beyond valid steps will be replaced by the special_token in that case.
        Returns:
            values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S
                corresponding either to the sequence_steps if provided, otherwise to the length of the pattern.
            indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S].
            mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S].
        """
        B, K, T = z.shape
        indexes, mask = self._build_pattern_sequence_scatter_indexes(
            T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device)
        )
        z = z.view(B, -1)
        # we append the special token as the last index of our flattened z tensor
        z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1)
        values = z[:, indexes.view(-1)]
        values = values.view(B, K, indexes.shape[-1])
        return values, indexes, mask

    def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int,
                                                 keep_only_valid_steps: bool = False,
                                                 is_model_output: bool = False,
                                                 device: tp.Union[torch.device, str] = 'cpu'):
        """Builds scatter indexes required to retrieve the original multi-codebook sequence
        from interleaving pattern.

        Args:
            sequence_steps (int): Sequence steps.
            n_q (int): Number of codebooks.
            keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
                Steps that are beyond valid steps will be replaced by the special_token in that case.
            is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not.
|
192 |
-
device (torch.device or str): Device for created tensors.
|
193 |
-
Returns:
|
194 |
-
indexes (torch.Tensor): Indexes for reconstructing the output, of shape [K, T].
|
195 |
-
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
|
196 |
-
"""
|
197 |
-
ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
|
198 |
-
# TODO(jade): Do we want to further truncate to only valid timesteps here as well?
|
199 |
-
timesteps = self.timesteps
|
200 |
-
assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
|
201 |
-
assert sequence_steps <= len(ref_layout), \
|
202 |
-
f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}"
|
203 |
-
|
204 |
-
# ensure we take the appropriate indexes to keep the model output from the first special token as well
|
205 |
-
if is_model_output:
|
206 |
-
ref_layout = ref_layout[1:]
|
207 |
-
|
208 |
-
# single item indexing being super slow with pytorch vs. numpy, so we use numpy here
|
209 |
-
indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy()
|
210 |
-
mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy()
|
211 |
-
# fill indexes with last sequence step value that will correspond to our special token
|
212 |
-
indexes[:] = n_q * sequence_steps
|
213 |
-
for s, sequence_codes in enumerate(ref_layout):
|
214 |
-
if s < sequence_steps:
|
215 |
-
for code in sequence_codes:
|
216 |
-
if code.t < timesteps:
|
217 |
-
indexes[code.q, code.t] = s + code.q * sequence_steps
|
218 |
-
mask[code.q, code.t] = 1
|
219 |
-
indexes = torch.from_numpy(indexes).to(device)
|
220 |
-
mask = torch.from_numpy(mask).to(device)
|
221 |
-
return indexes, mask
|
222 |
-
|
223 |
-
def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
|
224 |
-
"""Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving.
|
225 |
-
The sequence is reverted using up to timesteps if specified, and non-pattern coordinates
|
226 |
-
are filled with the special token.
|
227 |
-
|
228 |
-
Args:
|
229 |
-
s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S].
|
230 |
-
special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence.
|
231 |
-
Returns:
|
232 |
-
values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, T] with T
|
233 |
-
corresponding either to the timesteps if provided, or the total timesteps in pattern otherwise.
|
234 |
-
indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T].
|
235 |
-
mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
|
236 |
-
"""
|
237 |
-
B, K, S = s.shape
|
238 |
-
indexes, mask = self._build_reverted_sequence_scatter_indexes(
|
239 |
-
S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device)
|
240 |
-
)
|
241 |
-
s = s.view(B, -1)
|
242 |
-
# we append the special token as the last index of our flattened z tensor
|
243 |
-
s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1)
|
244 |
-
values = s[:, indexes.view(-1)]
|
245 |
-
values = values.view(B, K, indexes.shape[-1])
|
246 |
-
return values, indexes, mask
|
247 |
-
|
248 |
-
def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False):
|
249 |
-
"""Revert model logits obtained on a sequence built from the pattern
|
250 |
-
back to a tensor matching the original sequence.
|
251 |
-
|
252 |
-
This method is similar to ``revert_pattern_sequence`` with the following specificities:
|
253 |
-
1. It is designed to work with the extra cardinality dimension
|
254 |
-
2. We return the logits for the first sequence item that matches the special_token and
|
255 |
-
which matching target in the original sequence is the first item of the sequence,
|
256 |
-
while we skip the last logits as there is no matching target
|
257 |
-
"""
|
258 |
-
B, card, K, S = logits.shape
|
259 |
-
indexes, mask = self._build_reverted_sequence_scatter_indexes(
|
260 |
-
S, K, keep_only_valid_steps, is_model_output=True, device=logits.device
|
261 |
-
)
|
262 |
-
logits = logits.reshape(B, card, -1)
|
263 |
-
# we append the special token as the last index of our flattened z tensor
|
264 |
-
logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S]
|
265 |
-
values = logits[:, :, indexes.view(-1)]
|
266 |
-
values = values.view(B, card, K, indexes.shape[-1])
|
267 |
-
return values, indexes, mask
|
268 |
-
|
269 |
-
|
270 |
-
class CodebooksPatternProvider(ABC):
|
271 |
-
"""Abstraction around providing pattern for interleaving codebooks.
|
272 |
-
|
273 |
-
The CodebooksPatternProvider abstraction allows to implement various strategies to
|
274 |
-
define interleaving pattern of sequences composed of multiple codebooks. For a given
|
275 |
-
number of codebooks `n_q`, the pattern provider can generate a specified pattern
|
276 |
-
corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern
|
277 |
-
can be used to construct a new sequence from the original codes respecting the specified
|
278 |
-
pattern. The pattern is defined as a list of list of code coordinates, code coordinate
|
279 |
-
being a tuple with the original timestep and codebook to build the new sequence.
|
280 |
-
Note that all patterns must start with an empty list that is then used to insert a first
|
281 |
-
sequence step of special tokens in the newly generated sequence.
|
282 |
-
|
283 |
-
Args:
|
284 |
-
n_q (int): number of codebooks.
|
285 |
-
cached (bool): if True, patterns for a given length are cached. In general
|
286 |
-
that should be true for efficiency reason to avoid synchronization points.
|
287 |
-
"""
|
288 |
-
def __init__(self, n_q: int, cached: bool = True):
|
289 |
-
assert n_q > 0
|
290 |
-
self.n_q = n_q
|
291 |
-
self.get_pattern = lru_cache(100)(self.get_pattern) # type: ignore
|
292 |
-
|
293 |
-
@abstractmethod
|
294 |
-
def get_pattern(self, timesteps: int) -> Pattern:
|
295 |
-
"""Builds pattern with specific interleaving between codebooks.
|
296 |
-
|
297 |
-
Args:
|
298 |
-
timesteps (int): Total number of timesteps.
|
299 |
-
"""
|
300 |
-
raise NotImplementedError()
|
301 |
-
|
302 |
-
|
303 |
-
class DelayedPatternProvider(CodebooksPatternProvider):
|
304 |
-
"""Provider for delayed pattern across delayed codebooks.
|
305 |
-
Codebooks are delayed in the sequence and sequence steps will contain codebooks
|
306 |
-
from different timesteps.
|
307 |
-
|
308 |
-
Example:
|
309 |
-
Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence:
|
310 |
-
[[1, 2, 3, 4],
|
311 |
-
[1, 2, 3, 4],
|
312 |
-
[1, 2, 3, 4]]
|
313 |
-
The resulting sequence obtained from the returned pattern is:
|
314 |
-
[[S, 1, 2, 3, 4],
|
315 |
-
[S, S, 1, 2, 3],
|
316 |
-
[S, S, S, 1, 2]]
|
317 |
-
(with S being a special token)
|
318 |
-
|
319 |
-
Args:
|
320 |
-
n_q (int): Number of codebooks.
|
321 |
-
delays (list of int, optional): Delay for each of the codebooks.
|
322 |
-
If delays not defined, each codebook is delayed by 1 compared to the previous one.
|
323 |
-
flatten_first (int): Flatten the first N timesteps.
|
324 |
-
empty_initial (int): Prepend with N empty list of coordinates.
|
325 |
-
"""
|
326 |
-
def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None,
|
327 |
-
flatten_first: int = 0, empty_initial: int = 0):
|
328 |
-
super().__init__(n_q)
|
329 |
-
if delays is None:
|
330 |
-
delays = list(range(n_q))
|
331 |
-
self.delays = delays
|
332 |
-
self.flatten_first = flatten_first
|
333 |
-
self.empty_initial = empty_initial
|
334 |
-
assert len(self.delays) == self.n_q
|
335 |
-
assert sorted(self.delays) == self.delays
|
336 |
-
|
337 |
-
def get_pattern(self, timesteps: int) -> Pattern:
|
338 |
-
out: PatternLayout = [[]]
|
339 |
-
max_delay = max(self.delays)
|
340 |
-
if self.empty_initial:
|
341 |
-
out += [[] for _ in range(self.empty_initial)]
|
342 |
-
if self.flatten_first:
|
343 |
-
for t in range(min(timesteps, self.flatten_first)):
|
344 |
-
for q in range(self.n_q):
|
345 |
-
out.append([LayoutCoord(t, q)])
|
346 |
-
for t in range(self.flatten_first, timesteps + max_delay):
|
347 |
-
v = []
|
348 |
-
for q, delay in enumerate(self.delays):
|
349 |
-
t_for_q = t - delay
|
350 |
-
if t_for_q >= self.flatten_first:
|
351 |
-
v.append(LayoutCoord(t_for_q, q))
|
352 |
-
out.append(v)
|
353 |
-
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
|
354 |
-
|
355 |
-
|
356 |
-
class ParallelPatternProvider(DelayedPatternProvider):
|
357 |
-
"""Provider for parallel pattern across codebooks.
|
358 |
-
This pattern provider is a special case of the delayed pattern with actually no delay,
|
359 |
-
hence delays=repeat(0, n_q).
|
360 |
-
|
361 |
-
Args:
|
362 |
-
n_q (int): Number of codebooks.
|
363 |
-
"""
|
364 |
-
def __init__(self, n_q: int):
|
365 |
-
super().__init__(n_q, [0] * n_q)
|
366 |
-
|
367 |
-
|
368 |
-
class UnrolledPatternProvider(CodebooksPatternProvider):
|
369 |
-
"""Provider for unrolling codebooks pattern.
|
370 |
-
This pattern provider enables to represent the codebook flattened completely or only to some extend
|
371 |
-
while also specifying a given delay between the flattened codebooks representation, allowing to
|
372 |
-
unroll the codebooks in the sequence.
|
373 |
-
|
374 |
-
Example:
|
375 |
-
1. Flattening of the codebooks.
|
376 |
-
By default, the pattern provider will fully flatten the codebooks such as flattening=range(n_q),
|
377 |
-
taking n_q = 3 and timesteps = 4:
|
378 |
-
[[1, 2, 3, 4],
|
379 |
-
[1, 2, 3, 4],
|
380 |
-
[1, 2, 3, 4]]
|
381 |
-
will result into:
|
382 |
-
[[S, S, 1, S, S, 2, S, S, 3, S, S, 4],
|
383 |
-
[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
|
384 |
-
[1, S, S, 2, S, S, 3, S, S, 4, S, S]]
|
385 |
-
2. Partial flattening of the codebooks. The ``flattening`` parameter allows to specify the inner step
|
386 |
-
for each of the codebook, allowing to define which codebook to flatten (or keep in parallel), for example
|
387 |
-
taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]:
|
388 |
-
[[1, 2, 3, 4],
|
389 |
-
[1, 2, 3, 4],
|
390 |
-
[1, 2, 3, 4]]
|
391 |
-
will result into:
|
392 |
-
[[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
|
393 |
-
[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
|
394 |
-
[1, S, S, 2, S, S, 3, S, S, 4, S, S]]
|
395 |
-
3. Flattening with delay. The ``delay`` parameter allows to further unroll the sequence of codebooks
|
396 |
-
allowing to specify the delay per codebook. Note that the delay between codebooks flattened to the
|
397 |
-
same inner timestep should be coherent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1]
|
398 |
-
and delays = [0, 3, 3]:
|
399 |
-
[[1, 2, 3, 4],
|
400 |
-
[1, 2, 3, 4],
|
401 |
-
[1, 2, 3, 4]]
|
402 |
-
will result into:
|
403 |
-
[[S, S, S, 1, S, 2, S, 3, S, 4],
|
404 |
-
[S, S, S, 1, S, 2, S, 3, S, 4],
|
405 |
-
[1, 2, 3, S, 4, S, 5, S, 6, S]]
|
406 |
-
|
407 |
-
Args:
|
408 |
-
n_q (int): Number of codebooks.
|
409 |
-
flattening (list of int, optional): Flattening schema over the codebooks. If not defined,
|
410 |
-
the codebooks will be flattened to 1 codebook per step, meaning that the sequence will
|
411 |
-
have n_q extra steps for each timestep.
|
412 |
-
delays (list of int, optional): Delay for each of the codebooks. If not defined,
|
413 |
-
no delay is added and therefore will default to [0] * ``n_q``.
|
414 |
-
Note that two codebooks that will be flattened to the same inner step
|
415 |
-
should have the same delay, otherwise the pattern is considered as invalid.
|
416 |
-
"""
|
417 |
-
FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay'])
|
418 |
-
|
419 |
-
def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None,
|
420 |
-
delays: tp.Optional[tp.List[int]] = None):
|
421 |
-
super().__init__(n_q)
|
422 |
-
if flattening is None:
|
423 |
-
flattening = list(range(n_q))
|
424 |
-
if delays is None:
|
425 |
-
delays = [0] * n_q
|
426 |
-
assert len(flattening) == n_q
|
427 |
-
assert len(delays) == n_q
|
428 |
-
assert sorted(flattening) == flattening
|
429 |
-
assert sorted(delays) == delays
|
430 |
-
self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening)
|
431 |
-
self.max_delay = max(delays)
|
432 |
-
|
433 |
-
def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]):
|
434 |
-
"""Build a flattened codebooks representation as a dictionary of inner step
|
435 |
-
and the actual codebook indices corresponding to the flattened codebook. For convenience, we
|
436 |
-
also store the delay associated to the flattened codebook to avoid maintaining an extra mapping.
|
437 |
-
"""
|
438 |
-
flattened_codebooks: dict = {}
|
439 |
-
for q, (inner_step, delay) in enumerate(zip(flattening, delays)):
|
440 |
-
if inner_step not in flattened_codebooks:
|
441 |
-
flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay)
|
442 |
-
else:
|
443 |
-
flat_codebook = flattened_codebooks[inner_step]
|
444 |
-
assert flat_codebook.delay == delay, (
|
445 |
-
"Delay and flattening between codebooks is inconsistent: ",
|
446 |
-
"two codebooks flattened to the same position should have the same delay."
|
447 |
-
)
|
448 |
-
flat_codebook.codebooks.append(q)
|
449 |
-
flattened_codebooks[inner_step] = flat_codebook
|
450 |
-
return flattened_codebooks
|
451 |
-
|
452 |
-
@property
|
453 |
-
def _num_inner_steps(self):
|
454 |
-
"""Number of inner steps to unroll between timesteps in order to flatten the codebooks.
|
455 |
-
"""
|
456 |
-
return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1
|
457 |
-
|
458 |
-
def num_virtual_steps(self, timesteps: int) -> int:
|
459 |
-
return timesteps * self._num_inner_steps + 1
|
460 |
-
|
461 |
-
def get_pattern(self, timesteps: int) -> Pattern:
|
462 |
-
"""Builds pattern for delay across codebooks.
|
463 |
-
|
464 |
-
Args:
|
465 |
-
timesteps (int): Total number of timesteps.
|
466 |
-
"""
|
467 |
-
# the PatternLayout is built as a tuple of sequence position and list of coordinates
|
468 |
-
# so that it can be reordered properly given the required delay between codebooks of given timesteps
|
469 |
-
indexed_out: list = [(-1, [])]
|
470 |
-
max_timesteps = timesteps + self.max_delay
|
471 |
-
for t in range(max_timesteps):
|
472 |
-
# for each timestep, we unroll the flattened codebooks,
|
473 |
-
# emitting the sequence step with the corresponding delay
|
474 |
-
for step in range(self._num_inner_steps):
|
475 |
-
if step in self._flattened_codebooks:
|
476 |
-
# we have codebooks at this virtual step to emit
|
477 |
-
step_codebooks = self._flattened_codebooks[step]
|
478 |
-
t_for_q = t + step_codebooks.delay
|
479 |
-
coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks]
|
480 |
-
if t_for_q < max_timesteps and t < max_timesteps:
|
481 |
-
indexed_out.append((t_for_q, coords))
|
482 |
-
else:
|
483 |
-
# there is no codebook in this virtual step so we emit an empty list
|
484 |
-
indexed_out.append((t, []))
|
485 |
-
out = [coords for _, coords in sorted(indexed_out)]
|
486 |
-
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
|
487 |
-
|
488 |
-
|
489 |
-
class VALLEPattern(CodebooksPatternProvider):
|
490 |
-
"""Almost VALL-E style pattern.
|
491 |
-
We further allow some delays for the codebooks other than the first one.
|
492 |
-
|
493 |
-
Args:
|
494 |
-
n_q (int): Number of codebooks.
|
495 |
-
delays (list of int, optional): Delay for each of the codebooks.
|
496 |
-
If delays not defined, each codebook is delayed by 1 compared to the previous one.
|
497 |
-
"""
|
498 |
-
def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None):
|
499 |
-
super().__init__(n_q)
|
500 |
-
if delays is None:
|
501 |
-
delays = [0] * (n_q - 1)
|
502 |
-
self.delays = delays
|
503 |
-
assert len(self.delays) == self.n_q - 1
|
504 |
-
assert sorted(self.delays) == self.delays
|
505 |
-
|
506 |
-
def get_pattern(self, timesteps: int) -> Pattern:
|
507 |
-
out: PatternLayout = [[]]
|
508 |
-
for t in range(timesteps):
|
509 |
-
out.append([LayoutCoord(t, 0)])
|
510 |
-
max_delay = max(self.delays)
|
511 |
-
for t in range(timesteps + max_delay):
|
512 |
-
v = []
|
513 |
-
for q, delay in enumerate(self.delays):
|
514 |
-
t_for_q = t - delay
|
515 |
-
if t_for_q >= 0:
|
516 |
-
v.append(LayoutCoord(t_for_q, q + 1))
|
517 |
-
out.append(v)
|
518 |
-
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
|
519 |
-
|
520 |
-
|
521 |
-
class MusicLMPattern(CodebooksPatternProvider):
|
522 |
-
"""Almost MusicLM style pattern. This is equivalent to full flattening
|
523 |
-
but in a different order.
|
524 |
-
|
525 |
-
Args:
|
526 |
-
n_q (int): Number of codebooks.
|
527 |
-
group_by (int): Number of codebooks to group together.
|
528 |
-
"""
|
529 |
-
def __init__(self, n_q: int, group_by: int = 2):
|
530 |
-
super().__init__(n_q)
|
531 |
-
self.group_by = group_by
|
532 |
-
|
533 |
-
def get_pattern(self, timesteps: int) -> Pattern:
|
534 |
-
out: PatternLayout = [[]]
|
535 |
-
for offset in range(0, self.n_q, self.group_by):
|
536 |
-
for t in range(timesteps):
|
537 |
-
for q in range(offset, offset + self.group_by):
|
538 |
-
out.append([LayoutCoord(t, q)])
|
539 |
-
return Pattern(out, n_q=self.n_q, timesteps=timesteps)
|
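For context, a minimal sketch of how the pattern providers in this deleted file are typically driven, assuming only the classes defined above and torch; the codebook cardinality of 1024 and batch size of 1 are illustrative values, not taken from the file:

```python
import torch

n_q, T = 3, 4
provider = DelayedPatternProvider(n_q=n_q)
pattern = provider.get_pattern(T)

z = torch.randint(0, 1024, (1, n_q, T))  # codebook indices, shape [B, K, T]
special_token = 1024                     # one id past the codebook range

# Interleave to [B, K, S]; S covers the T timesteps plus the per-codebook
# delays and the leading special step described in the docstrings above.
values, indexes, mask = pattern.build_pattern_sequence(z, special_token)

# Revert back to [B, K, T]; positions the pattern could not recover carry
# the special token and are flagged False in the returned mask.
reverted, _, rev_mask = pattern.revert_pattern_sequence(values, special_token)
assert ((reverted == z) | ~rev_mask.unsqueeze(0)).all()
```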
spaces/AIGC-Audio/AudioGPT/text_to_speech/__init__.py
DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/tensor_utils.py
DELETED
@@ -1,92 +0,0 @@
-import torch
-import torch.distributed as dist
-
-
-def reduce_tensors(metrics):
-    new_metrics = {}
-    for k, v in metrics.items():
-        if isinstance(v, torch.Tensor):
-            dist.all_reduce(v)
-            v = v / dist.get_world_size()
-        if type(v) is dict:
-            v = reduce_tensors(v)
-        new_metrics[k] = v
-    return new_metrics
-
-
-def tensors_to_scalars(tensors):
-    if isinstance(tensors, torch.Tensor):
-        tensors = tensors.item()
-        return tensors
-    elif isinstance(tensors, dict):
-        new_tensors = {}
-        for k, v in tensors.items():
-            v = tensors_to_scalars(v)
-            new_tensors[k] = v
-        return new_tensors
-    elif isinstance(tensors, list):
-        return [tensors_to_scalars(v) for v in tensors]
-    else:
-        return tensors
-
-
-def tensors_to_np(tensors):
-    if isinstance(tensors, dict):
-        new_np = {}
-        for k, v in tensors.items():
-            if isinstance(v, torch.Tensor):
-                v = v.cpu().numpy()
-            if type(v) is dict:
-                v = tensors_to_np(v)
-            new_np[k] = v
-    elif isinstance(tensors, list):
-        new_np = []
-        for v in tensors:
-            if isinstance(v, torch.Tensor):
-                v = v.cpu().numpy()
-            if type(v) is dict:
-                v = tensors_to_np(v)
-            new_np.append(v)
-    elif isinstance(tensors, torch.Tensor):
-        v = tensors
-        if isinstance(v, torch.Tensor):
-            v = v.cpu().numpy()
-        if type(v) is dict:
-            v = tensors_to_np(v)
-        new_np = v
-    else:
-        raise Exception(f'tensors_to_np does not support type {type(tensors)}.')
-    return new_np
-
-
-def move_to_cpu(tensors):
-    ret = {}
-    for k, v in tensors.items():
-        if isinstance(v, torch.Tensor):
-            v = v.cpu()
-        if type(v) is dict:
-            v = move_to_cpu(v)
-        ret[k] = v
-    return ret
-
-
-def move_to_cuda(batch, gpu_id=0):
-    # base case: object can be directly moved using `cuda` or `to`
-    if callable(getattr(batch, 'cuda', None)):
-        return batch.cuda(gpu_id, non_blocking=True)
-    elif callable(getattr(batch, 'to', None)):
-        return batch.to(torch.device('cuda', gpu_id), non_blocking=True)
-    elif isinstance(batch, list):
-        for i, x in enumerate(batch):
-            batch[i] = move_to_cuda(x, gpu_id)
-        return batch
-    elif isinstance(batch, tuple):
-        batch = list(batch)
-        for i, x in enumerate(batch):
-            batch[i] = move_to_cuda(x, gpu_id)
-        return tuple(batch)
-    elif isinstance(batch, dict):
-        for k, v in batch.items():
-            batch[k] = move_to_cuda(v, gpu_id)
-        return batch
-    return batch
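For context, a minimal sketch of how these helpers compose in a training loop, assuming a CUDA device is available and `batch` mirrors a typical collated dict; the tensor contents are illustrative:

```python
import torch

batch = {
    'tokens': torch.zeros(2, 16, dtype=torch.long),
    'meta': {'lengths': torch.tensor([16, 12])},
}
batch = move_to_cuda(batch)            # recursively moves tensors to cuda:0
loss = batch['tokens'].float().mean()
metrics = {'loss': loss}
scalars = tensors_to_scalars(metrics)  # {'loss': <python float>}
print(scalars)
```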
spaces/AIWaves/Software_Company/src/agents/Environment/__init__.py
DELETED
@@ -1 +0,0 @@
-from .base_environment import Environment
spaces/Abdullahw72/bark-voice-cloning/app.py
DELETED
@@ -1,98 +0,0 @@
-import math
-import os.path
-import uuid
-
-import gradio
-import numpy
-import torch
-
-from hubert.hubert_manager import HuBERTManager
-from hubert.pre_kmeans_hubert import CustomHubert
-from hubert.customtokenizer import CustomTokenizer
-from encodec import EncodecModel
-from encodec.utils import convert_audio
-
-
-hubert_model = CustomHubert(HuBERTManager.make_sure_hubert_installed())
-tokenizer_model = CustomTokenizer.load_from_checkpoint(
-    HuBERTManager.make_sure_tokenizer_installed(model='quantifier_V1_hubert_base_ls960_23.pth'),
-    map_location=torch.device('cpu')
-)
-encodec_model = EncodecModel.encodec_model_24khz()
-
-
-
-def clone(audio, *args):
-    sr, wav = audio
-
-    wav = torch.tensor(wav)
-
-    if wav.dtype == torch.int16:
-        wav = wav.float() / 32767.0
-
-    if len(wav.shape) == 2:
-        if wav.shape[0] == 2:  # Stereo to mono if needed
-            wav = wav.mean(0, keepdim=True)
-        if wav.shape[1] == 2:
-            wav = wav.mean(1, keepdim=False).unsqueeze(-1)
-
-    wav = wav[-int(sr*20):]  # Take only the last 20 seconds
-
-    wav = wav.reshape(1, -1)  # Reshape from gradio style to HuBERT shape. (N, 1) to (1, N)
-
-    semantic_vectors = hubert_model.forward(wav, input_sample_hz=sr)
-    semantic_tokens = tokenizer_model.get_token(semantic_vectors)
-
-    encodec_model.set_target_bandwidth(6.0)
-    wav = convert_audio(wav, sr, encodec_model.sample_rate, 1)
-    wav = wav.unsqueeze(0)
-
-    with torch.no_grad():
-        encoded_frames = encodec_model.encode(wav)
-
-    codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze()  # [B, n_q, T]
-
-    if not os.path.isdir('data/speakers'):
-        os.makedirs('data/speakers')
-
-    file_path = f'data/speakers/{uuid.uuid4().hex}.npz'
-
-    numpy.savez(
-        file_path,
-        semantic_prompt=semantic_tokens,
-        fine_prompt=codes,
-        coarse_prompt=codes[:2, :]
-    )
-
-    return file_path
-
-
-
-iface = gradio.interface.Interface(fn=clone, inputs=[
-    'audio',
-    gradio.Markdown(
-        '''
-        # Bark text to speech voice cloning
-        [Model](https://huggingface.co/GitMylo/bark-voice-cloning/), [Model GitHub](https://github.com/gitmylo/bark-voice-cloning-HuBERT-quantizer), [Webui GitHub](https://github.com/gitmylo/audio-webui)
-
-        For faster creation of voice clones [Duplicate this space](https://huggingface.co/spaces/GitMylo/bark-voice-cloning?duplicate=true)
-
-        Uploaded audio files get cut to 20 seconds in order to keep it fast for everyone. Only the last 20 seconds will be used. (Bark only uses the last 14 seconds anyway)
-
-        ## Tips for better cloning
-        ### Make sure these things are **NOT** in your voice input: (in no particular order)
-        * Noise (You can use a noise remover before)
-        * Music (There are also music remover tools) (Unless you want music in the background)
-        * A cut-off at the end (This will cause it to try and continue on the generation)
-        * Under 1 second of training data (i personally suggest around 10 seconds for good potential, but i've had great results with 5 seconds as well.)
-
-        ### What makes for good prompt audio? (in no particular order)
-        * Clearly spoken
-        * No weird background noises
-        * Only one speaker
-        * Audio which ends after a sentence ends
-        * Regular/common voice (They usually have more success, it's still capable of cloning complex voices, but not as good at it)
-        * Around 10 seconds of data
-        ''')
-], outputs='file')
-iface.launch()
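For context: the `.npz` written by `clone()` above is a Bark speaker prompt. A minimal sketch of loading one back; the key names match the `numpy.savez` call above, while the glob path assumes a prior local run of the app:

```python
import glob
import numpy

# Pick the most recently created speaker file from a prior run of clone().
path = sorted(glob.glob('data/speakers/*.npz'))[-1]
speaker = numpy.load(path)
semantic = speaker['semantic_prompt']  # HuBERT-derived semantic tokens
fine = speaker['fine_prompt']          # full EnCodec codes, [n_q, T]
coarse = speaker['coarse_prompt']      # first two codebooks, [2, T]
```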
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GptForLove.py
DELETED
@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import execjs, os, json
-
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
-class GptForLove(AsyncGeneratorProvider):
-    url = "https://ai18.gptforlove.com"
-    supports_gpt_35_turbo = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        **kwargs
-    ) -> AsyncGenerator:
-        if not model:
-            model = "gpt-3.5-turbo"
-        headers = {
-            "authority": "api.gptplus.one",
-            "accept": "application/json, text/plain, */*",
-            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": "\"Google Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "Linux",
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "cross-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "prompt": prompt,
-                "options": {},
-                "systemMessage": "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
-                "temperature": 0.8,
-                "top_p": 1,
-                "secret": get_secret(),
-                **kwargs
-            }
-            async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    try:
-                        line = json.loads(line)
-                    except:
-                        raise RuntimeError(f"Broken line: {line}")
-                    if "detail" in line:
-                        content = line["detail"]["choices"][0]["delta"].get("content")
-                        if content:
-                            yield content
-                    elif "10分钟内提问超过了5次" in line:
-                        raise RuntimeError("Rate limit reached")
-                    else:
-                        raise RuntimeError(f"Response: {line}")
-
-
-def get_secret() -> str:
-    dir = os.path.dirname(__file__)
-    dir += '/npm/node_modules/crypto-js'
-    source = """
-CryptoJS = require('{dir}/crypto-js')
-var k = '14487141bvirvvG'
-    , e = Math.floor(new Date().getTime() / 1e3);
-var t = CryptoJS.enc.Utf8.parse(e)
-    , o = CryptoJS.AES.encrypt(t, k, {
-    mode: CryptoJS.mode.ECB,
-    padding: CryptoJS.pad.Pkcs7
-});
-return o.toString()
-"""
-    source = source.replace('{dir}', dir)
-    return execjs.compile(source).call('')
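For context, a sketch of driving the provider's async generator, assuming the g4f package layout above; it also assumes the execjs/node dependency used by `get_secret()` is installed and the remote endpoint is reachable:

```python
import asyncio

async def main() -> None:
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator is an async generator classmethod; iterate it
    # with `async for` to stream response chunks as they arrive.
    async for chunk in GptForLove.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```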
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunLayout.js
DELETED
@@ -1,47 +0,0 @@
-// Override
-var RunLayout = function (parent, newWidth, newHeight) {
-    // Skip hidden or !dirty sizer
-    if (this.ignoreLayout) {
-        return this;
-    }
-
-    var isTopmostParent = !parent;
-    // Preprocessor, top parent only
-    if (isTopmostParent) {
-        this.preLayout();
-    }
-
-    // Calculate parent width
-    newWidth = this.resolveWidth(newWidth);
-    // Calculate all children width, run width wrap
-    if (isTopmostParent) {
-        this.resolveChildrenWidth(newWidth);
-        this.runWidthWrap(newWidth);
-    }
-    // Calculate parent height
-    newHeight = this.resolveHeight(newHeight);
-    // The last chance of resolving size
-    this.postResolveSize(newWidth, newHeight);
-    // Resize parent
-    this.resize(newWidth, newHeight);
-
-    if (this.sizerEventsEnable) {
-        if (this.layoutedChildren === undefined) {
-            this.layoutedChildren = [];
-        }
-    }
-
-    // Layout children
-    this.layoutChildren();
-
-    // Layout background children
-    this.layoutBackgrounds();
-
-    if (this.sizerEventsEnable) {
-        this.emit('postlayout', this.layoutedChildren, this);
-        this.layoutedChildren.length = 0;
-    }
-
-    return this.postLayout();
-}
-export default RunLayout;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/input/OnTouchPad.js
DELETED
@@ -1,40 +0,0 @@
-import IsLocalPointInKnob from './IsLocalPointInKnob.js';
-
-const GetAngle = Phaser.Math.Angle.Between;
-const NormalizeAngle = Phaser.Math.Angle.Normalize;
-
-var OnTouchPad = function (pointer, localX, localY) {
-    if (!this.enable) {
-        return;
-    }
-    if (!pointer.isDown) {
-        return;
-    }
-    var knob = this.sizerChildren.knob;
-    if (!IsLocalPointInKnob(knob, localX, localY)) {
-        return;
-    }
-
-    var centerX = knob.width / 2;
-    var startAngle = knob.startAngle;
-    var endAngle = GetAngle(centerX, centerX, localX, localY);
-    var deltaAngle = (knob.anticlockwise) ? (startAngle - endAngle) : (endAngle - startAngle);
-    var value = NormalizeAngle(deltaAngle) / (2 * Math.PI);
-
-    this.stopEaseValue();
-    if ((this.easeValueDuration === 0) || (Math.abs(this.value - value) < 0.1)) {
-        this.value = value;
-    } else {
-        this.easeValueTo(value);
-    }
-}
-
-var InstallEvents = function () {
-    var knob = this.sizerChildren.knob;
-    knob
-        .on('pointerdown', OnTouchPad, this)
-        .on('pointermove', OnTouchPad, this)
-        .setInteractive()
-}
-
-export default InstallEvents;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/namevaluelabel/NameValueLabel.js
DELETED
@@ -1,161 +0,0 @@
-import Sizer from '../sizer/Sizer.js';
-import Build from './methods/Build.js';
-import SetValueMethods from './methods/SetValueMethods.js';
-
-class NameValueLabel extends Sizer {
-    constructor(scene, config) {
-        // Create sizer
-        super(scene, config);
-        this.type = 'rexNameValueLabel';
-
-        Build.call(this, scene, config);
-    }
-
-    // Access nameText game object
-    get nameText() {
-        var textObject = this.childrenMap.name;
-        if (textObject === undefined) {
-            return '';
-        }
-        return textObject.text;
-    }
-
-    set nameText(value) {
-        var textObject = this.childrenMap.name;
-        if (textObject === undefined) {
-            return;
-        }
-        textObject.setText(value);
-    }
-
-    setNameText(value) {
-        this.nameText = value;
-        return this;
-    }
-
-    // Access valueText game object
-    get valueText() {
-        var textObject = this.childrenMap.value;
-        if (textObject === undefined) {
-            return '';
-        }
-        return textObject.text;
-    }
-
-    set valueText(value) {
-        var textObject = this.childrenMap.value;
-        if (textObject === undefined) {
-            return;
-        }
-        textObject.setText(value);
-    }
-
-    setValueText(value) {
-        this.valueText = value;
-        return this;
-    }
-
-    // Access bar game object
-    get barValue() {
-        var bar = this.childrenMap.bar;
-        if (bar === undefined) {
-            return;
-        }
-        return bar.value;
-    }
-
-    set barValue(value) {
-        var bar = this.childrenMap.bar;
-        if (bar === undefined) {
-            return;
-        }
-        bar.setValue(value);
-    }
-
-    setBarValue(value, min, max) {
-        var bar = this.childrenMap.bar;
-        if (bar === undefined) {
-            return this;
-        }
-        bar.setValue(value, min, max);
-        return this;
-    }
-
-    easeBarValueTo(value, min, max) {
-        var bar = this.childrenMap.bar;
-        if (bar === undefined) {
-            return this;
-        }
-        bar.easeValueTo(value, min, max);
-        return this;
-    }
-
-    // Access icon game object
-    setTexture(key, frame) {
-        var imageObject = this.childrenMap.icon;
-        if (imageObject === undefined) {
-            return;
-        }
-        imageObject.setTexture(key, frame);
-        return this;
-    }
-
-    get texture() {
-        var imageObject = this.childrenMap.icon;
-        if (imageObject === undefined) {
-            return undefined;
-        }
-        return imageObject.texture;
-    }
-
-    get frame() {
-        var imageObject = this.childrenMap.icon;
-        if (imageObject === undefined) {
-            return undefined;
-        }
-        return imageObject.frame;
-    }
-
-    runLayout(parent, newWidth, newHeight) {
-        if (this.ignoreLayout) {
-            return this;
-        }
-
-        super.runLayout(parent, newWidth, newHeight);
-        // Pin icon-mask to icon game object
-        var iconMask = this.childrenMap.iconMask;
-        if (iconMask) {
-            iconMask.setPosition();
-            this.resetChildPositionState(iconMask);
-        }
-        // Pin action-mask to action game object
-        var actionMask = this.childrenMap.actionMask;
-        if (actionMask) {
-            actionMask.setPosition();
-            this.resetChildPositionState(actionMask);
-        }
-        return this;
-    }
-
-    resize(width, height) {
-        super.resize(width, height);
-        // Resize icon-mask to icon game object
-        var iconMask = this.childrenMap.iconMask;
-        if (iconMask) {
-            iconMask.resize();
-        }
-        // Resize action-mask to icon game object
-        var actionMask = this.childrenMap.actionMask;
-        if (actionMask) {
-            actionMask.resize();
-        }
-        return this;
-    }
-}
-
-Object.assign(
-    NameValueLabel.prototype,
-    SetValueMethods,
-)
-
-export default NameValueLabel;
spaces/AixiaGreyatt/QQsign/Dockerfile
DELETED
@@ -1,23 +0,0 @@
-FROM openjdk:17-slim
-
-# Set the timezone
-ENV TZ Asia/Shanghai
-
-# Set the working directory
-WORKDIR /app
-
-# Copy files into the working directory
-COPY bin /app/bin
-COPY lib /app/lib
-COPY txlib /app/txlib
-
-# Set permissions and inject the key into the config
-RUN chmod -R 777 /tmp
-RUN chmod -R 777 /app
-RUN sed 's/"key": ".*"/"key": "'"$KEY_VALUE"'"/' txlib/$TXLIB_VERSION/config.json > /app/txlib/$TXLIB_VERSION/config.json
-
-# Run
-CMD bash bin/unidbg-fetch-qsign --basePath=txlib/$TXLIB_VERSION
-
-# Expose the port
-EXPOSE 7860
spaces/Amrrs/DragGan-Inversion/PTI/models/e4e/stylegan2/op/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-from .upfirdn2d import upfirdn2d
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/utilities.md
DELETED
@@ -1,23 +0,0 @@
-# Utilities
-
-Utility and helper functions for working with 🤗 Diffusers.
-
-## randn_tensor
-
-[[autodoc]] diffusers.utils.randn_tensor
-
-## numpy_to_pil
-
-[[autodoc]] utils.pil_utils.numpy_to_pil
-
-## pt_to_pil
-
-[[autodoc]] utils.pil_utils.pt_to_pil
-
-## load_image
-
-[[autodoc]] utils.testing_utils.load_image
-
-## export_to_video
-
-[[autodoc]] utils.testing_utils.export_to_video
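For context, a hedged sketch of the two most common helpers documented above, assuming a diffusers version of this era where `randn_tensor` and `numpy_to_pil` are importable from `diffusers.utils` (later releases moved `randn_tensor` to `diffusers.utils.torch_utils`):

```python
import torch
from diffusers.utils import randn_tensor, numpy_to_pil

# Reproducible noise tensor, then converted to a PIL image for inspection.
latents = randn_tensor((1, 3, 64, 64), generator=torch.manual_seed(0))
images = numpy_to_pil(latents.clamp(0, 1).permute(0, 2, 3, 1).numpy())
images[0].save("noise.png")
```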
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/controlnet/__init__.py
DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/detr/README.md
DELETED
@@ -1,27 +0,0 @@
-# DETR
-
-## Introduction
-
-[ALGORITHM]
-
-We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872).
-
-```BibTeX
-@inproceedings{detr,
-  author    = {Nicolas Carion and
-               Francisco Massa and
-               Gabriel Synnaeve and
-               Nicolas Usunier and
-               Alexander Kirillov and
-               Sergey Zagoruyko},
-  title     = {End-to-End Object Detection with Transformers},
-  booktitle = {ECCV},
-  year      = {2020}
-}
-```
-
-## Results and Models
-
-| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
-|:------:|:--------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
-| R-50 | DETR | 150e | 7.9 | | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detr/detr_r50_8x2_150e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835.log.json) |
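For context, a hedged sketch of running the listed checkpoint through mmdetection's high-level API (mmdet 2.x); the config path and checkpoint file name come from the table above, while `demo.jpg` is a placeholder input image:

```python
from mmdet.apis import inference_detector, init_detector

config = 'configs/detr/detr_r50_8x2_150e_coco.py'
checkpoint = 'detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth'

# Build the DETR model and run single-image inference.
model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')
```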
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/dist_utils.py
DELETED
@@ -1,164 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import functools
-import os
-import subprocess
-from collections import OrderedDict
-
-import torch
-import torch.multiprocessing as mp
-from torch import distributed as dist
-from torch._utils import (_flatten_dense_tensors, _take_tensors,
-                          _unflatten_dense_tensors)
-
-
-def init_dist(launcher, backend='nccl', **kwargs):
-    if mp.get_start_method(allow_none=True) is None:
-        mp.set_start_method('spawn')
-    if launcher == 'pytorch':
-        _init_dist_pytorch(backend, **kwargs)
-    elif launcher == 'mpi':
-        _init_dist_mpi(backend, **kwargs)
-    elif launcher == 'slurm':
-        _init_dist_slurm(backend, **kwargs)
-    else:
-        raise ValueError(f'Invalid launcher type: {launcher}')
-
-
-def _init_dist_pytorch(backend, **kwargs):
-    # TODO: use local_rank instead of rank % num_gpus
-    rank = int(os.environ['RANK'])
-    num_gpus = torch.cuda.device_count()
-    torch.cuda.set_device(rank % num_gpus)
-    dist.init_process_group(backend=backend, **kwargs)
-
-
-def _init_dist_mpi(backend, **kwargs):
-    # TODO: use local_rank instead of rank % num_gpus
-    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
-    num_gpus = torch.cuda.device_count()
-    torch.cuda.set_device(rank % num_gpus)
-    dist.init_process_group(backend=backend, **kwargs)
-
-
-def _init_dist_slurm(backend, port=None):
-    """Initialize slurm distributed training environment.
-
-    If argument ``port`` is not specified, then the master port will be system
-    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
-    environment variable, then a default port ``29500`` will be used.
-
-    Args:
-        backend (str): Backend of torch.distributed.
-        port (int, optional): Master port. Defaults to None.
-    """
-    proc_id = int(os.environ['SLURM_PROCID'])
-    ntasks = int(os.environ['SLURM_NTASKS'])
-    node_list = os.environ['SLURM_NODELIST']
-    num_gpus = torch.cuda.device_count()
-    torch.cuda.set_device(proc_id % num_gpus)
-    addr = subprocess.getoutput(
-        f'scontrol show hostname {node_list} | head -n1')
-    # specify master port
-    if port is not None:
-        os.environ['MASTER_PORT'] = str(port)
-    elif 'MASTER_PORT' in os.environ:
-        pass  # use MASTER_PORT in the environment variable
-    else:
-        # 29500 is torch.distributed default port
-        os.environ['MASTER_PORT'] = '29500'
-    # use MASTER_ADDR in the environment variable if it already exists
-    if 'MASTER_ADDR' not in os.environ:
-        os.environ['MASTER_ADDR'] = addr
-    os.environ['WORLD_SIZE'] = str(ntasks)
-    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
-    os.environ['RANK'] = str(proc_id)
-    dist.init_process_group(backend=backend)
-
-
-def get_dist_info():
-    if dist.is_available() and dist.is_initialized():
-        rank = dist.get_rank()
-        world_size = dist.get_world_size()
-    else:
-        rank = 0
-        world_size = 1
-    return rank, world_size
-
-
-def master_only(func):
-
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        rank, _ = get_dist_info()
-        if rank == 0:
-            return func(*args, **kwargs)
-
-    return wrapper
-
-
-def allreduce_params(params, coalesce=True, bucket_size_mb=-1):
-    """Allreduce parameters.
-
-    Args:
-        params (list[torch.Parameters]): List of parameters or buffers of a
-            model.
-        coalesce (bool, optional): Whether allreduce parameters as a whole.
-            Defaults to True.
-        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
-            Defaults to -1.
-    """
-    _, world_size = get_dist_info()
-    if world_size == 1:
-        return
-    params = [param.data for param in params]
-    if coalesce:
-        _allreduce_coalesced(params, world_size, bucket_size_mb)
-    else:
-        for tensor in params:
-            dist.all_reduce(tensor.div_(world_size))
-
-
-def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
-    """Allreduce gradients.
-
-    Args:
-        params (list[torch.Parameters]): List of parameters of a model
-        coalesce (bool, optional): Whether allreduce parameters as a whole.
-            Defaults to True.
-        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
-            Defaults to -1.
-    """
-    grads = [
-        param.grad.data for param in params
-        if param.requires_grad and param.grad is not None
-    ]
-    _, world_size = get_dist_info()
-    if world_size == 1:
-        return
-    if coalesce:
-        _allreduce_coalesced(grads, world_size, bucket_size_mb)
-    else:
-        for tensor in grads:
-            dist.all_reduce(tensor.div_(world_size))
-
-
-def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
-    if bucket_size_mb > 0:
-        bucket_size_bytes = bucket_size_mb * 1024 * 1024
-        buckets = _take_tensors(tensors, bucket_size_bytes)
-    else:
-        buckets = OrderedDict()
-        for tensor in tensors:
-            tp = tensor.type()
-            if tp not in buckets:
-                buckets[tp] = []
-            buckets[tp].append(tensor)
-        buckets = buckets.values()
-
-    for bucket in buckets:
-        flat_tensors = _flatten_dense_tensors(bucket)
-        dist.all_reduce(flat_tensors)
-        flat_tensors.div_(world_size)
-        for tensor, synced in zip(
-                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
-            tensor.copy_(synced)
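For context, a minimal sketch of the intended call pattern for these helpers, assuming the script is started through a distributed launcher that sets RANK, WORLD_SIZE, MASTER_ADDR, and MASTER_PORT (e.g. torch.distributed.launch with `--launcher pytorch`):

```python
# Hypothetical training-script entry point using the helpers above.
init_dist('pytorch', backend='nccl')  # requires RANK etc. in the environment
rank, world_size = get_dist_info()

@master_only
def log_once(msg):
    # Runs on rank 0 only; other ranks return None.
    print(f'[{rank}/{world_size}] {msg}')

log_once('distributed environment initialized')
```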
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/iter_based_runner.py
DELETED
@@ -1,273 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import os.path as osp
-import platform
-import shutil
-import time
-import warnings
-
-import torch
-from torch.optim import Optimizer
-
-import annotator.uniformer.mmcv as mmcv
-from .base_runner import BaseRunner
-from .builder import RUNNERS
-from .checkpoint import save_checkpoint
-from .hooks import IterTimerHook
-from .utils import get_host_info
-
-
-class IterLoader:
-
-    def __init__(self, dataloader):
-        self._dataloader = dataloader
-        self.iter_loader = iter(self._dataloader)
-        self._epoch = 0
-
-    @property
-    def epoch(self):
-        return self._epoch
-
-    def __next__(self):
-        try:
-            data = next(self.iter_loader)
-        except StopIteration:
-            self._epoch += 1
-            if hasattr(self._dataloader.sampler, 'set_epoch'):
-                self._dataloader.sampler.set_epoch(self._epoch)
-            time.sleep(2)  # Prevent possible deadlock during epoch transition
-            self.iter_loader = iter(self._dataloader)
-            data = next(self.iter_loader)
-
-        return data
-
-    def __len__(self):
-        return len(self._dataloader)
-
-
-@RUNNERS.register_module()
-class IterBasedRunner(BaseRunner):
-    """Iteration-based Runner.
-
-    This runner train models iteration by iteration.
-    """
-
-    def train(self, data_loader, **kwargs):
-        self.model.train()
-        self.mode = 'train'
-        self.data_loader = data_loader
-        self._epoch = data_loader.epoch
-        data_batch = next(data_loader)
-        self.call_hook('before_train_iter')
-        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
-        if not isinstance(outputs, dict):
-            raise TypeError('model.train_step() must return a dict')
-        if 'log_vars' in outputs:
-            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
-        self.outputs = outputs
-        self.call_hook('after_train_iter')
-        self._inner_iter += 1
-        self._iter += 1
-
-    @torch.no_grad()
-    def val(self, data_loader, **kwargs):
-        self.model.eval()
-        self.mode = 'val'
-        self.data_loader = data_loader
-        data_batch = next(data_loader)
-        self.call_hook('before_val_iter')
-        outputs = self.model.val_step(data_batch, **kwargs)
-        if not isinstance(outputs, dict):
-            raise TypeError('model.val_step() must return a dict')
-        if 'log_vars' in outputs:
-            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
-        self.outputs = outputs
-        self.call_hook('after_val_iter')
-        self._inner_iter += 1
-
-    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
-        """Start running.
-
-        Args:
-            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
-                and validation.
-            workflow (list[tuple]): A list of (phase, iters) to specify the
-                running order and iterations. E.g, [('train', 10000),
-                ('val', 1000)] means running 10000 iterations for training and
-                1000 iterations for validation, iteratively.
-        """
-        assert isinstance(data_loaders, list)
-        assert mmcv.is_list_of(workflow, tuple)
-        assert len(data_loaders) == len(workflow)
-        if max_iters is not None:
-            warnings.warn(
-                'setting max_iters in run is deprecated, '
-                'please set max_iters in runner_config', DeprecationWarning)
-            self._max_iters = max_iters
-        assert self._max_iters is not None, (
-            'max_iters must be specified during instantiation')
-
-        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
-        self.logger.info('Start running, host: %s, work_dir: %s',
-                         get_host_info(), work_dir)
-        self.logger.info('Hooks will be executed in the following order:\n%s',
-                         self.get_hook_info())
-        self.logger.info('workflow: %s, max: %d iters', workflow,
-                         self._max_iters)
-        self.call_hook('before_run')
-
-        iter_loaders = [IterLoader(x) for x in data_loaders]
-
-        self.call_hook('before_epoch')
-
-        while self.iter < self._max_iters:
-            for i, flow in enumerate(workflow):
-                self._inner_iter = 0
-                mode, iters = flow
-                if not isinstance(mode, str) or not hasattr(self, mode):
-                    raise ValueError(
-                        'runner has no method named "{}" to run a workflow'.
-                        format(mode))
-                iter_runner = getattr(self, mode)
-                for _ in range(iters):
-                    if mode == 'train' and self.iter >= self._max_iters:
-                        break
-                    iter_runner(iter_loaders[i], **kwargs)
-
-        time.sleep(1)  # wait for some hooks like loggers to finish
-        self.call_hook('after_epoch')
-        self.call_hook('after_run')
-
-    def resume(self,
-               checkpoint,
-               resume_optimizer=True,
-               map_location='default'):
-        """Resume model from checkpoint.
-
-        Args:
-            checkpoint (str): Checkpoint to resume from.
-            resume_optimizer (bool, optional): Whether resume the optimizer(s)
-                if the checkpoint file includes optimizer(s). Default to True.
-            map_location (str, optional): Same as :func:`torch.load`.
-                Default to 'default'.
-        """
-        if map_location == 'default':
-            device_id = torch.cuda.current_device()
-            checkpoint = self.load_checkpoint(
-                checkpoint,
-                map_location=lambda storage, loc: storage.cuda(device_id))
-        else:
-            checkpoint = self.load_checkpoint(
-                checkpoint, map_location=map_location)
-
-        self._epoch = checkpoint['meta']['epoch']
-        self._iter = checkpoint['meta']['iter']
-        self._inner_iter = checkpoint['meta']['iter']
-        if 'optimizer' in checkpoint and resume_optimizer:
-            if isinstance(self.optimizer, Optimizer):
-                self.optimizer.load_state_dict(checkpoint['optimizer'])
-            elif isinstance(self.optimizer, dict):
-                for k in self.optimizer.keys():
-                    self.optimizer[k].load_state_dict(
-                        checkpoint['optimizer'][k])
-            else:
-                raise TypeError(
-                    'Optimizer should be dict or torch.optim.Optimizer '
-                    f'but got {type(self.optimizer)}')
-
-        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
-
-    def save_checkpoint(self,
-                        out_dir,
-                        filename_tmpl='iter_{}.pth',
-                        meta=None,
-                        save_optimizer=True,
-                        create_symlink=True):
-        """Save checkpoint to file.
-
-        Args:
-            out_dir (str): Directory to save checkpoint files.
-            filename_tmpl (str, optional): Checkpoint file template.
-                Defaults to 'iter_{}.pth'.
-            meta (dict, optional): Metadata to be saved in checkpoint.
-                Defaults to None.
-            save_optimizer (bool, optional): Whether save optimizer.
-                Defaults to True.
-            create_symlink (bool, optional): Whether create symlink to the
-                latest checkpoint file. Defaults to True.
-        """
-        if meta is None:
-            meta = {}
-        elif not isinstance(meta, dict):
-            raise TypeError(
-                f'meta should be a dict or None, but got {type(meta)}')
-        if self.meta is not None:
-            meta.update(self.meta)
-            # Note: meta.update(self.meta) should be done before
-            # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
-            # there will be problems with resumed checkpoints.
-            # More details in https://github.com/open-mmlab/mmcv/pull/1108
-        meta.update(epoch=self.epoch + 1, iter=self.iter)
-
-        filename = filename_tmpl.format(self.iter + 1)
-        filepath = osp.join(out_dir, filename)
-        optimizer = self.optimizer if save_optimizer else None
-        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
-        # in some environments, `os.symlink` is not supported, you may need to
-        # set `create_symlink` to False
-        if create_symlink:
-            dst_file = osp.join(out_dir, 'latest.pth')
-            if platform.system() != 'Windows':
-                mmcv.symlink(filename, dst_file)
-            else:
-                shutil.copy(filepath, dst_file)
-
-    def register_training_hooks(self,
-                                lr_config,
-                                optimizer_config=None,
-                                checkpoint_config=None,
-                                log_config=None,
-                                momentum_config=None,
-                                custom_hooks_config=None):
-        """Register default hooks for iter-based training.
-
-        Checkpoint hook, optimizer stepper hook and logger hooks will be set to
-        `by_epoch=False` by default.
-
-        Default hooks include:
-
-        +----------------------+-------------------------+
-        | Hooks                | Priority                |
-        +======================+=========================+
-        | LrUpdaterHook        | VERY_HIGH (10)          |
-        +----------------------+-------------------------+
-        | MomentumUpdaterHook  | HIGH (30)               |
-        +----------------------+-------------------------+
-        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
-        +----------------------+-------------------------+
-        | CheckpointSaverHook  | NORMAL (50)             |
-        +----------------------+-------------------------+
-        | IterTimerHook        | LOW (70)                |
-        +----------------------+-------------------------+
-        | LoggerHook(s)        | VERY_LOW (90)           |
-        +----------------------+-------------------------+
-        | CustomHook(s)        | defaults to NORMAL (50) |
-        +----------------------+-------------------------+
-
-        If custom hooks have same priority with default hooks, custom hooks
-        will be triggered after default hooks.
-        """
-        if checkpoint_config is not None:
-            checkpoint_config.setdefault('by_epoch', False)
-        if lr_config is not None:
-            lr_config.setdefault('by_epoch', False)
-        if log_config is not None:
-            for info in log_config['hooks']:
-                info.setdefault('by_epoch', False)
-        super(IterBasedRunner, self).register_training_hooks(
-            lr_config=lr_config,
-            momentum_config=momentum_config,
-            optimizer_config=optimizer_config,
-            checkpoint_config=checkpoint_config,
-            log_config=log_config,
-            timer_config=IterTimerHook(),
-            custom_hooks_config=custom_hooks_config)
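
An aside on the `IterLoader` class in the diff above: it turns a finite `DataLoader` into an endless iterator, counting epochs and re-seeding a distributed sampler on each wrap-around. Below is a minimal standalone sketch of the same pattern; the `InfiniteLoader` name and the toy dataset are illustrative, not part of mmcv.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.arange(10)), batch_size=4)  # 3 batches/epoch

class InfiniteLoader:
    def __init__(self, dataloader):
        self._dataloader = dataloader
        self._iter = iter(dataloader)
        self.epoch = 0

    def __next__(self):
        try:
            return next(self._iter)
        except StopIteration:
            # End of epoch: bump the counter, re-seed the sampler if it
            # supports set_epoch (as DistributedSampler does), and restart.
            self.epoch += 1
            if hasattr(self._dataloader.sampler, 'set_epoch'):
                self._dataloader.sampler.set_epoch(self.epoch)
            self._iter = iter(self._dataloader)
            return next(self._iter)

inf = InfiniteLoader(loader)
for _ in range(7):       # more steps than one epoch holds
    batch = next(inf)
print(inf.epoch)         # -> 2: the loader wrapped around twice
```
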
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/midas/__init__.py
DELETED
File without changes

spaces/Apex-X/Tm/roop/utilities.py
DELETED
@@ -1,141 +0,0 @@
-import glob
-import mimetypes
-import os
-import platform
-import shutil
-import ssl
-import subprocess
-import urllib
-from pathlib import Path
-from typing import List, Any
-from tqdm import tqdm
-
-import roop.globals
-
-TEMP_FILE = 'temp.mp4'
-TEMP_DIRECTORY = 'temp'
-
-# monkey patch ssl for mac
-if platform.system().lower() == 'darwin':
-    ssl._create_default_https_context = ssl._create_unverified_context
-
-
-def run_ffmpeg(args: List[str]) -> bool:
-    commands = ['ffmpeg', '-hide_banner', '-hwaccel', 'auto', '-loglevel', roop.globals.log_level]
-    commands.extend(args)
-    try:
-        subprocess.check_output(commands, stderr=subprocess.STDOUT)
-        return True
-    except Exception:
-        pass
-    return False
-
-
-def detect_fps(target_path: str) -> float:
-    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path]
-    output = subprocess.check_output(command).decode().strip().split('/')
-    try:
-        numerator, denominator = map(int, output)
-        return numerator / denominator
-    except Exception:
-        pass
-    return 30.0
-
-
-def extract_frames(target_path: str) -> None:
-    temp_directory_path = get_temp_directory_path(target_path)
-    run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')])
-
-
-def create_video(target_path: str, fps: float = 30.0) -> None:
-    temp_output_path = get_temp_output_path(target_path)
-    temp_directory_path = get_temp_directory_path(target_path)
-    run_ffmpeg(['-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.png'), '-c:v', roop.globals.video_encoder, '-crf', str(roop.globals.video_quality), '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', temp_output_path])
-
-
-def restore_audio(target_path: str, output_path: str) -> None:
-    temp_output_path = get_temp_output_path(target_path)
-    done = run_ffmpeg(['-i', temp_output_path, '-i', target_path, '-c:v', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-y', output_path])
-    if not done:
-        move_temp(target_path, output_path)
-
-
-def get_temp_frame_paths(target_path: str) -> List[str]:
-    temp_directory_path = get_temp_directory_path(target_path)
-    return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.png')))
-
-
-def get_temp_directory_path(target_path: str) -> str:
-    target_name, _ = os.path.splitext(os.path.basename(target_path))
-    target_directory_path = os.path.dirname(target_path)
-    return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name)
-
-
-def get_temp_output_path(target_path: str) -> str:
-    temp_directory_path = get_temp_directory_path(target_path)
-    return os.path.join(temp_directory_path, TEMP_FILE)
-
-
-def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any:
-    if source_path and target_path:
-        source_name, _ = os.path.splitext(os.path.basename(source_path))
-        target_name, target_extension = os.path.splitext(os.path.basename(target_path))
-        if os.path.isdir(output_path):
-            return os.path.join(output_path, source_name + '-' + target_name + target_extension)
-    return output_path
-
-
-def create_temp(target_path: str) -> None:
-    temp_directory_path = get_temp_directory_path(target_path)
-    Path(temp_directory_path).mkdir(parents=True, exist_ok=True)
-
-
-def move_temp(target_path: str, output_path: str) -> None:
-    temp_output_path = get_temp_output_path(target_path)
-    if os.path.isfile(temp_output_path):
-        if os.path.isfile(output_path):
-            os.remove(output_path)
-        shutil.move(temp_output_path, output_path)
-
-
-def clean_temp(target_path: str) -> None:
-    temp_directory_path = get_temp_directory_path(target_path)
-    parent_directory_path = os.path.dirname(temp_directory_path)
-    if not roop.globals.keep_frames and os.path.isdir(temp_directory_path):
-        shutil.rmtree(temp_directory_path)
-    if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
-        os.rmdir(parent_directory_path)
-
-
-def has_image_extension(image_path: str) -> bool:
-    return image_path.lower().endswith(('png', 'jpg', 'jpeg', 'webp'))
-
-
-def is_image(image_path: str) -> bool:
-    if image_path and os.path.isfile(image_path):
-        mimetype, _ = mimetypes.guess_type(image_path)
-        return bool(mimetype and mimetype.startswith('image/'))
-    return False
-
-
-def is_video(video_path: str) -> bool:
-    if video_path and os.path.isfile(video_path):
-        mimetype, _ = mimetypes.guess_type(video_path)
-        return bool(mimetype and mimetype.startswith('video/'))
-    return False
-
-
-def conditional_download(download_directory_path: str, urls: List[str]) -> None:
-    if not os.path.exists(download_directory_path):
-        os.makedirs(download_directory_path)
-    for url in urls:
-        download_file_path = os.path.join(download_directory_path, os.path.basename(url))
-        if not os.path.exists(download_file_path):
-            request = urllib.request.urlopen(url)  # type: ignore[attr-defined]
-            total = int(request.headers.get('Content-Length', 0))
-            with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress:
-                urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size))  # type: ignore[attr-defined]
-
-
-def resolve_relative_path(path: str) -> str:
-    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
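
A note on `detect_fps` above: it shells out to ffprobe for the stream's rational frame rate and falls back to 30.0 on any failure. Here is a standalone sketch of that approach, assuming `ffprobe` is on PATH; `'clip.mp4'` is a placeholder input.

```python
import subprocess

def probe_fps(video_path: str) -> float:
    # ffprobe prints the rational frame rate, e.g. "30000/1001" for 29.97 fps
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
               '-show_entries', 'stream=r_frame_rate',
               '-of', 'default=noprint_wrappers=1:nokey=1', video_path]
    try:
        numerator, denominator = map(
            int, subprocess.check_output(command).decode().strip().split('/'))
        return numerator / denominator
    except Exception:
        return 30.0  # same conservative fallback as the deleted module

print(probe_fps('clip.mp4'))
```
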
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/johabprober.py
DELETED
@@ -1,47 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is mozilla.org code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Mark Pilgrim - port to Python
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301  USA
-######################### END LICENSE BLOCK #########################
-
-from .chardistribution import JOHABDistributionAnalysis
-from .codingstatemachine import CodingStateMachine
-from .mbcharsetprober import MultiByteCharSetProber
-from .mbcssm import JOHAB_SM_MODEL
-
-
-class JOHABProber(MultiByteCharSetProber):
-    def __init__(self) -> None:
-        super().__init__()
-        self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
-        self.distribution_analyzer = JOHABDistributionAnalysis()
-        self.reset()
-
-    @property
-    def charset_name(self) -> str:
-        return "Johab"
-
-    @property
-    def language(self) -> str:
-        return "Korean"
spaces/Avkash/WhisperUI/app.py
DELETED
@@ -1,10 +0,0 @@
-import gradio as gr
-from whisperui import WhisperModelUI
-
-
-if __name__ == '__main__':
-    my_app = gr.Blocks()
-    ui_obj = WhisperModelUI(my_app)
-    ui_obj.create_whisper_ui()
-    ui_obj.launch_ui()
-
spaces/AzumaSeren100/XuanShen-Bert-VITS2/train_ms.py
DELETED
@@ -1,396 +0,0 @@
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-from tqdm import tqdm
-import logging
-logging.getLogger('numba').setLevel(logging.WARNING)
-import commons
-import utils
-from data_utils import (
-    TextAudioSpeakerLoader,
-    TextAudioSpeakerCollate,
-    DistributedBucketSampler
-)
-from models import (
-    SynthesizerTrn,
-    MultiPeriodDiscriminator,
-    DurationDiscriminator,
-)
-from losses import (
-    generator_loss,
-    discriminator_loss,
-    feature_loss,
-    kl_loss
-)
-from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-from text.symbols import symbols
-
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-torch.set_float32_matmul_precision('medium')
-torch.backends.cuda.sdp_kernel("flash")
-torch.backends.cuda.enable_flash_sdp(True)
-torch.backends.cuda.enable_mem_efficient_sdp(True)
-torch.backends.cuda.enable_math_sdp(True)
-global_step = 0
-
-
-def main():
-    """Assume Single Node Multi GPUs Training Only"""
-    assert torch.cuda.is_available(), "CPU training is not allowed."
-
-    n_gpus = torch.cuda.device_count()
-    os.environ['MASTER_ADDR'] = 'localhost'
-    os.environ['MASTER_PORT'] = '65280'
-
-    hps = utils.get_hparams()
-    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
-    global global_step
-    if rank == 0:
-        logger = utils.get_logger(hps.model_dir)
-        logger.info(hps)
-        utils.check_git_hash(hps.model_dir)
-        writer = SummaryWriter(log_dir=hps.model_dir)
-        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
-    dist.init_process_group(backend='gloo', init_method='env://', world_size=n_gpus, rank=rank)
-    torch.manual_seed(hps.train.seed)
-    torch.cuda.set_device(rank)
-
-    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
-    train_sampler = DistributedBucketSampler(
-        train_dataset,
-        hps.train.batch_size,
-        [32, 300, 400, 500, 600, 700, 800, 900, 1000],
-        num_replicas=n_gpus,
-        rank=rank,
-        shuffle=True)
-    collate_fn = TextAudioSpeakerCollate()
-    train_loader = DataLoader(train_dataset, num_workers=4, shuffle=False, pin_memory=True,
-                              collate_fn=collate_fn, batch_sampler=train_sampler,
-                              persistent_workers=True, prefetch_factor=4)  # 256G Memory suitable loader.
-    if rank == 0:
-        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
-        eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
-                                 batch_size=1, pin_memory=True,
-                                 drop_last=False, collate_fn=collate_fn)
-    if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True:
-        print("Using noise scaled MAS for VITS2")
-        use_noise_scaled_mas = True
-        mas_noise_scale_initial = 0.01
-        noise_scale_delta = 2e-6
-    else:
-        print("Using normal MAS for VITS1")
-        use_noise_scaled_mas = False
-        mas_noise_scale_initial = 0.0
-        noise_scale_delta = 0.0
-    if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True:
-        print("Using duration discriminator for VITS2")
-        use_duration_discriminator = True
-        net_dur_disc = DurationDiscriminator(
-            hps.model.hidden_channels,
-            hps.model.hidden_channels,
-            3,
-            0.1,
-            gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
-        ).cuda(rank)
-    if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True:
-        if hps.data.n_speakers == 0:
-            raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model")
-        use_spk_conditioned_encoder = True
-    else:
-        print("Using normal encoder for VITS1")
-        use_spk_conditioned_encoder = False
-
-    net_g = SynthesizerTrn(
-        len(symbols),
-        hps.data.filter_length // 2 + 1,
-        hps.train.segment_size // hps.data.hop_length,
-        n_speakers=hps.data.n_speakers,
-        mas_noise_scale_initial=mas_noise_scale_initial,
-        noise_scale_delta=noise_scale_delta,
-        **hps.model).cuda(rank)
-
-    freeze_enc = getattr(hps.model, "freeze_enc", False)
-    if freeze_enc:
-        print("freeze encoder !!!")
-        for param in net_g.enc_p.parameters():
-            param.requires_grad = False
-
-    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
-    optim_g = torch.optim.AdamW(
-        filter(lambda p: p.requires_grad, net_g.parameters()),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    optim_d = torch.optim.AdamW(
-        net_d.parameters(),
-        hps.train.learning_rate,
-        betas=hps.train.betas,
-        eps=hps.train.eps)
-    if net_dur_disc is not None:
-        optim_dur_disc = torch.optim.AdamW(
-            net_dur_disc.parameters(),
-            hps.train.learning_rate,
-            betas=hps.train.betas,
-            eps=hps.train.eps)
-    else:
-        optim_dur_disc = None
-    net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True)
-    net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True)
-    if net_dur_disc is not None:
-        net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True)
-    try:
-        if net_dur_disc is not None:
-            _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=True)
-        _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
-                                                         optim_g, skip_optimizer=True)
-        _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
-                                                         optim_d, skip_optimizer=True)
-
-        epoch_str = max(epoch_str, 1)
-        global_step = (epoch_str - 1) * len(train_loader)
-    except Exception as e:
-        print(e)
-        epoch_str = 1
-        global_step = 0
-
-    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay)
-    scheduler_g.last_epoch = epoch_str - 2
-    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay)
-    scheduler_d.last_epoch = epoch_str - 2
-    if net_dur_disc is not None:
-        scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay)
-        scheduler_dur_disc.last_epoch = epoch_str - 2
-    else:
-        scheduler_dur_disc = None
-    scaler = GradScaler(enabled=hps.train.fp16_run)
-
-    for epoch in range(epoch_str, hps.train.epochs + 1):
-        if rank == 0:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
-        else:
-            train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None)
-        scheduler_g.step()
-        scheduler_d.step()
-        if net_dur_disc is not None:
-            scheduler_dur_disc.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
-    net_g, net_d, net_dur_disc = nets
-    optim_g, optim_d, optim_dur_disc = optims
-    scheduler_g, scheduler_d, scheduler_dur_disc = schedulers
-    train_loader, eval_loader = loaders
-    if writers is not None:
-        writer, writer_eval = writers
-
-    train_loader.batch_sampler.set_epoch(epoch)
-    global global_step
-
-    net_g.train()
-    net_d.train()
-    if net_dur_disc is not None:
-        net_dur_disc.train()
-    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)):
-        if net_g.module.use_noise_scaled_mas:
-            current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step
-            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
-        x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
-        spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
-        y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
-        speakers = speakers.cuda(rank, non_blocking=True)
-        tone = tone.cuda(rank, non_blocking=True)
-        language = language.cuda(rank, non_blocking=True)
-        bert = bert.cuda(rank, non_blocking=True)
-
-        with autocast(enabled=hps.train.fp16_run):
-            y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
-                (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert)
-            mel = spec_to_mel_torch(
-                spec,
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax)
-            y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
-            y_hat_mel = mel_spectrogram_torch(
-                y_hat.squeeze(1),
-                hps.data.filter_length,
-                hps.data.n_mel_channels,
-                hps.data.sampling_rate,
-                hps.data.hop_length,
-                hps.data.win_length,
-                hps.data.mel_fmin,
-                hps.data.mel_fmax
-            )
-
-            y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice
-
-            # Discriminator
-            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-            with autocast(enabled=False):
-                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
-                loss_disc_all = loss_disc
-            if net_dur_disc is not None:
-                y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach())
-                with autocast(enabled=False):
-                    # TODO: I think need to mean using the mask, but for now, just mean all
-                    loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g)
-                    loss_dur_disc_all = loss_dur_disc
-                optim_dur_disc.zero_grad()
-                scaler.scale(loss_dur_disc_all).backward()
-                scaler.unscale_(optim_dur_disc)
-                grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None)
-                scaler.step(optim_dur_disc)
-
-        optim_d.zero_grad()
-        scaler.scale(loss_disc_all).backward()
-        scaler.unscale_(optim_d)
-        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
-        scaler.step(optim_d)
-
-        with autocast(enabled=hps.train.fp16_run):
-            # Generator
-            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
-            if net_dur_disc is not None:
-                y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_)
-            with autocast(enabled=False):
-                loss_dur = torch.sum(l_length.float())
-                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
-                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
-
-                loss_fm = feature_loss(fmap_r, fmap_g)
-                loss_gen, losses_gen = generator_loss(y_d_hat_g)
-                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
-                if net_dur_disc is not None:
-                    loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g)
-                    loss_gen_all += loss_dur_gen
-        optim_g.zero_grad()
-        scaler.scale(loss_gen_all).backward()
-        scaler.unscale_(optim_g)
-        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
-        scaler.step(optim_g)
-        scaler.update()
-
-        if rank == 0:
-            if global_step % hps.train.log_interval == 0:
-                lr = optim_g.param_groups[0]['lr']
-                losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl]
-                logger.info('Train Epoch: {} [{:.0f}%]'.format(
-                    epoch,
-                    100. * batch_idx / len(train_loader)))
-                logger.info([x.item() for x in losses] + [global_step, lr])
-
-                scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
-                               "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
-                scalar_dict.update(
-                    {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl})
-                scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
-                scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
-                scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
-
-                image_dict = {
-                    "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
-                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
-                    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
-                    "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy())
-                }
-                utils.summarize(
-                    writer=writer,
-                    global_step=global_step,
-                    images=image_dict,
-                    scalars=scalar_dict)
-
-            if global_step % hps.train.eval_interval == 0:
-                evaluate(hps, net_g, eval_loader, writer_eval)
-                utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
-                                      os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
-                utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
-                                      os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
-                if net_dur_disc is not None:
-                    utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step)))
-                keep_ckpts = getattr(hps.train, 'keep_ckpts', 5)
-                if keep_ckpts > 0:
-                    utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True)
-
-        global_step += 1
-
-    if rank == 0:
-        logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
-    generator.eval()
-    image_dict = {}
-    audio_dict = {}
-    print("Evaluating ...")
-    with torch.no_grad():
-        for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader):
-            x, x_lengths = x.cuda(), x_lengths.cuda()
-            spec, spec_lengths = spec.cuda(), spec_lengths.cuda()
-            y, y_lengths = y.cuda(), y_lengths.cuda()
-            speakers = speakers.cuda()
-            bert = bert.cuda()
-            tone = tone.cuda()
-            language = language.cuda()
-            for use_sdp in [True, False]:
-                y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0)
-                y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length
-
-                mel = spec_to_mel_torch(
-                    spec,
-                    hps.data.filter_length,
-                    hps.data.n_mel_channels,
-                    hps.data.sampling_rate,
-                    hps.data.mel_fmin,
-                    hps.data.mel_fmax)
-                y_hat_mel = mel_spectrogram_torch(
-                    y_hat.squeeze(1).float(),
-                    hps.data.filter_length,
-                    hps.data.n_mel_channels,
-                    hps.data.sampling_rate,
-                    hps.data.hop_length,
-                    hps.data.win_length,
-                    hps.data.mel_fmin,
-                    hps.data.mel_fmax
-                )
-                image_dict.update({
-                    f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy())
-                })
-                audio_dict.update({
-                    f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]]
-                })
-            image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())})
-            audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]})
-
-    utils.summarize(
-        writer=writer_eval,
-        global_step=global_step,
-        images=image_dict,
-        audios=audio_dict,
-        audio_sampling_rate=hps.data.sampling_rate
-    )
-    generator.train()
-
-if __name__ == "__main__":
-    main()
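
The training loop above follows the usual mixed-precision GAN ordering: a discriminator step on the detached generator output, then a generator step, with a single `GradScaler.update()` per iteration. A minimal sketch of that ordering with dummy modules and losses (the real VITS2 networks, losses, and autocast contexts are omitted for brevity):

```python
import torch
from torch import nn
from torch.cuda.amp import GradScaler

G, D = nn.Linear(8, 8), nn.Linear(8, 1)   # dummy generator / discriminator
opt_g = torch.optim.AdamW(G.parameters(), lr=2e-4)
opt_d = torch.optim.AdamW(D.parameters(), lr=2e-4)
scaler = GradScaler(enabled=False)        # enabled=hps.train.fp16_run on CUDA

x, real = torch.randn(4, 8), torch.randn(4, 8)
fake = G(x)                               # in the real script this runs under autocast

# 1) Discriminator step: fake.detach() keeps gradients out of G.
loss_d = D(real).mean() - D(fake.detach()).mean()
opt_d.zero_grad()
scaler.scale(loss_d).backward()
scaler.unscale_(opt_d)                    # unscale before gradient clipping
scaler.step(opt_d)

# 2) Generator step: reuse the same `fake`, now letting gradients reach G.
loss_g = -D(fake).mean()
opt_g.zero_grad()
scaler.scale(loss_g).backward()
scaler.unscale_(opt_g)
scaler.step(opt_g)

scaler.update()                           # exactly once per iteration, after all steps
```
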
spaces/Banbri/zcvzcv/src/components/ui/vertical-slider.tsx
DELETED
@@ -1,27 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as SliderPrimitive from "@radix-ui/react-slider"
-
-import { cn } from "@/lib/utils"
-
-const VerticalSlider = React.forwardRef<
-  React.ElementRef<typeof SliderPrimitive.Root>,
-  React.ComponentPropsWithoutRef<typeof SliderPrimitive.Root>
->(({ className, ...props }, ref) => (
-  <SliderPrimitive.Root
-    ref={ref}
-    className={cn(
-      "relative flex w-full touch-none select-none items-center",
-      className
-    )}
-    {...props}
-  >
-    <SliderPrimitive.Track className="relative w-2 h-full grow overflow-hidden rounded-full bg-stone-300 dark:bg-stone-700">
-      <SliderPrimitive.Range className="absolute w-full bg-stone-700 dark:bg-stone-50" />
-    </SliderPrimitive.Track>
-    <SliderPrimitive.Thumb className="block -ml-1.5 h-5 w-5 rounded-full border-2 border-stone-700 bg-stone-300 ring-offset-white transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-stone-950 focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 dark:border-stone-50 dark:bg-stone-700 dark:ring-offset-stone-950 dark:focus-visible:ring-stone-300" />
-  </SliderPrimitive.Root>
-))
-VerticalSlider.displayName = "VerticalSlider"
-export { VerticalSlider }
spaces/Bart92/RVC_HF/infer/lib/train/mel_processing.py
DELETED
@@ -1,132 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-import logging
-
-logger = logging.getLogger(__name__)
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    return dynamic_range_compression_torch(magnitudes)
-
-
-def spectral_de_normalize_torch(magnitudes):
-    return dynamic_range_decompression_torch(magnitudes)
-
-
-# Reusable banks
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    """Convert waveform into Linear-frequency Linear-amplitude spectrogram.
-
-    Args:
-        y    :: (B, T)           - Audio waveforms
-        n_fft
-        sampling_rate
-        hop_size
-        win_size
-        center
-    Returns:
-        spec :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
-    """
-    # Validation
-    if torch.min(y) < -1.07:
-        logger.debug("min value is %s", str(torch.min(y)))
-    if torch.max(y) > 1.07:
-        logger.debug("max value is %s", str(torch.max(y)))
-
-    # Window - Cache if needed
-    global hann_window
-    dtype_device = str(y.dtype) + "_" + str(y.device)
-    wnsize_dtype_device = str(win_size) + "_" + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
-            dtype=y.dtype, device=y.device
-        )
-
-    # Padding
-    y = torch.nn.functional.pad(
-        y.unsqueeze(1),
-        (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
-        mode="reflect",
-    )
-    y = y.squeeze(1)
-
-    # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2)
-    spec = torch.stft(
-        y,
-        n_fft,
-        hop_length=hop_size,
-        win_length=win_size,
-        window=hann_window[wnsize_dtype_device],
-        center=center,
-        pad_mode="reflect",
-        normalized=False,
-        onesided=True,
-        return_complex=False,
-    )
-
-    # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    # MelBasis - Cache if needed
-    global mel_basis
-    dtype_device = str(spec.dtype) + "_" + str(spec.device)
-    fmax_dtype_device = str(fmax) + "_" + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(
-            sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
-        )
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
-            dtype=spec.dtype, device=spec.device
-        )
-
-    # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame)
-    melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    melspec = spectral_normalize_torch(melspec)
-    return melspec
-
-
-def mel_spectrogram_torch(
-    y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
-):
-    """Convert waveform into Mel-frequency Log-amplitude spectrogram.
-
-    Args:
-        y       :: (B, T)           - Waveforms
-    Returns:
-        melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
-    """
-    # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
-    spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
-
-    # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
-    melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
-
-    return melspec
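
The module above splits mel extraction into two cached stages: a linear STFT magnitude spectrogram, then projection onto a librosa mel filterbank with log compression. A standalone sketch of the same pipeline follows; it uses `center=True` instead of the module's manual reflect padding, for brevity, and the shapes/parameters are illustrative.

```python
import torch
from librosa.filters import mel as librosa_mel_fn

n_fft, hop, win, sr, n_mels = 1024, 256, 1024, 22050, 80
y = torch.randn(2, 22050)                        # (B, T) batch of 1-second clips

# Stage 1: linear-frequency magnitude spectrogram via STFT
window = torch.hann_window(win)
spec = torch.stft(y, n_fft, hop_length=hop, win_length=win, window=window,
                  center=True, return_complex=True).abs()    # (B, 513, frames)

# Stage 2: project onto a mel filterbank, then log-compress
mel_basis = torch.from_numpy(
    librosa_mel_fn(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=0.0, fmax=None)
).float()                                                     # (80, 513)
melspec = torch.log(torch.clamp(mel_basis @ spec, min=1e-5))  # (B, 80, frames)
print(melspec.shape)
```
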
spaces/Benson/text-generation/Examples/5apps.md
DELETED
@@ -1,57 +0,0 @@
-<br />
-<h1>5apps: A platform for building and hosting client-side web apps</h1>
-<p>If you are a web developer who loves using web platform technologies such as JavaScript, HTML5 and CSS, you might be interested in 5apps. 5apps is a platform that offers three services to help you build, deploy, host and manage your client-side web apps. In this article, we will explain what 5apps is, why you should use it, and how to get started.</p>
-<h2>What is 5apps?</h2>
-<p>5apps is a platform that offers three services for web developers:</p>
-<h2>5apps</h2><br /><p><b><b>Download</b> • <a href="https://bltlly.com/2v6JdK">https://bltlly.com/2v6JdK</a></b></p><br /><br />
-<ul>
-<li><strong>5apps Deploy</strong>: A turnkey deployment and hosting platform for client-side web apps. You can use any framework you want and simply push your code via Git. 5apps will configure and deploy your app in all available formats and get it ready for submission to the stores.</li>
-<li><strong>5apps Storage</strong>: A personal data cloud based on remoteStorage, an open protocol for user data storage. You can let any compatible app access your account, and you can move your data to any compatible provider or server you want, at any time.</li>
-<li><strong>5apps News</strong>: A social news site for HTML5, JS and friends. You can stay up to date on the latest trends and technologies, share and discuss your own projects and ideas, and join a community of like-minded developers.</li>
-</ul>
-<h2>Why use 5apps?</h2>
-<p>Using 5apps for your web development projects has many benefits. Here are some of them:</p>
-<h3>Benefits of 5apps Deploy</h3>
-<ul>
-<li><strong>Professional app delivery</strong>: There is more to delivering web apps than hosting static files. 5apps handles all the technical details for you, such as SSL certificates, caching, compression, CDN delivery, CORS headers, service workers, manifest files, etc.</li>
-
-<li><strong>Free for open source</strong>: If you choose an open-source license for your app, 5apps will host and deploy it for free. No limits, team access included.</li>
-</ul>
-<h3>Benefits of 5apps Storage</h3>
-<ul>
-<li><strong>Data ownership and portability</strong>: You have full control over your data. You can choose where to keep it, how to access it, and who to share it with. You can also switch providers or servers at any time you like, without losing your data or breaking your apps.</li>
-<li><strong>Connect and authorize apps</strong>: You can connect your storage account to any app that supports remoteStorage. You can also give or revoke permission for specific apps to access specific parts of your storage.</li>
-<li><strong>Manage apps and data</strong>: You can see all the apps connected to your storage account and manage their data in a web interface. You can also sync your data across devices and back it up.</li>
-</ul>
-<h3>Benefits of 5apps News</h3>
-<ul>
-<li><strong>Stay up to date on the latest trends and technologies</strong>: You can browse, search and filter news articles from various sources related to HTML5, JS and other web platform technologies. You can also subscribe to RSS feeds and newsletters.</li>
-<li><strong>Share and discuss your own projects and ideas</strong>: You can submit your own articles, projects, tutorials, demos, etc. to 5apps News and get feedback from other developers. You can also comment on other submissions and vote for the ones you like.</li>
-<li><strong>Join a community of like-minded developers</strong>: You can follow other users, join groups, chat with others, and take part in events and challenges. You can also earn badges and reputation points for your contributions.</li>
-</ul>
-<h2>How to get started with 5apps?</h2>
-<p>Getting started with 5apps is quick and easy. Here are the steps to follow:</p>
-<h3>Sign up for a free account</h3>
-
-<h3>Choose a service (Deploy, Storage or News)</h3>
-<p>You can choose which service you want to use first from the dashboard. You can switch between them at any time.</p>
-<h3>Follow the instructions and documentation</h3>
-<p>Each service has its own instructions and documentation to help you get started. You can find them on the website or in the app. For example, for Deploy, you will need to create a repository, add a deploy key, push your code and configure your app. For Storage, you will need to create a storage account, connect apps and manage your data. For News, you will need to browse, submit, comment and vote on articles.</p>
-<h2>Conclusion</h2>
-<p>5apps is a platform that offers three services for web developers who love using web platform technologies: Deploy, Storage and News. With 5apps, you can build, deploy, host and manage your client-side web apps, own and control your data in a personal cloud, and stay up to date and connected with a community of like-minded developers. If you are interested in trying 5apps, sign up for a free account today and start building amazing web apps!</p>
-<h3>Frequently asked questions</h3>
-<ul>
-<li><strong>What are the pricing plans for 5apps?</strong></li>
-<p>5apps offers a free plan for open-source apps and personal data storage. It also offers paid plans for private apps and more storage space. You can check the pricing on the website.</p>
-<p></p>
-<li><strong>What are the technical requirements for using 5apps?</strong></li>
-<p>You will need a modern web browser that supports HTML5, JS and CSS features. You will also need a Git client to push your code to Deploy. For Storage, you will need apps that support the remoteStorage protocol.</p>
-<li><strong>What are some examples of apps that use 5apps?</strong></li>
-
-<li><strong>How can I contact 5apps support?</strong></li>
-<p>You can contact 5apps support by email at [email protected] or on Twitter at @5apps. You can also check the FAQ section on the website or the documentation for each service.</p>
-<li><strong>How can I contribute to 5apps?</strong></li>
-<p>You can contribute to 5apps by using it, sharing it with others, giving feedback, reporting bugs, suggesting features, writing articles, building apps, etc. You can also join the 5apps community on News or GitHub.</p>
-</ul></p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Capcut Video Editor Apk Free Download.md
DELETED
@@ -1,87 +0,0 @@
-
-<h1>CapCut Video Editor APK Free Download: A Powerful and Easy-to-Use Tool for TikTok and More</h1>
-<p>If you are looking for a free video editor and video maker app that can help you create stunning, high-quality videos for TikTok, Instagram, YouTube, or any other social media platform, you may want to check out CapCut Video Editor APK. CapCut is TikTok's official video editing app, and it offers a wide range of features and functions to make your videos stand out. In this article, we will tell you what CapCut Video Editor APK is, what features it has, how to download and install it, and what its pros and cons are.</p>
-<h2>capcut video editor apk free download</h2><br /><p><b><b>Download</b> … <a href="https://bltlly.com/2v6JVY">https://bltlly.com/2v6JVY</a></b></p><br /><br />
-<h2>What Is CapCut Video Editor APK?</h2>
-<p>CapCut Video Editor APK is an Android app that lets you edit and make videos with ease. It is developed by Bytedance Pte. Ltd., the same company that owns TikTok, and it is designed to be compatible with TikTok's format and style. You can use CapCut to trim, cut, merge, speed up, slow down, zoom in, zoom out, reverse, freeze, and animate clips, and to add transitions, effects, filters, stickers, text, music, sound effects, and more to your videos. You can also use CapCut's advanced functions, such as keyframe animation, smooth slow motion, chroma key, picture-in-picture (PIP), stabilization, auto captions, background removal, trending styles, and more to create professional-looking videos. You can export your videos in HD quality (up to 4K 60fps) and share them directly to TikTok or other social media platforms.</p>
-<h3>Features of CapCut Video Editor APK</h3>
-<p>CapCut Video Editor APK has many features that make it a versatile and powerful video editing tool. Here are some of the main features you can enjoy with CapCut:</p>
-<h4>Basic video editing</h4>
-<ul>
-<li>Trim and adjust clips, and split or merge videos.</li>
-<li>Adjust video speed from 0.1x to 100x, and apply speed curves to clips.</li>
-<li>Animate video clips with amazing zoom in/out effects.</li>
-
-<li>Highlight the best moments for clips and vlogs with the freeze feature.</li>
-<li>Explore transition options with impressive effects at the cut points between clips.</li>
-</ul>
-<h4>Advanced video editing</h4>
-<ul>
-<li>Keyframe video animation is available for all settings.</li>
-<li>Edit videos to create smooth slow motion with the optical flow feature and the speed curve tool.</li>
-<li>Use chroma key to remove specific colors from videos.</li>
-<li>Apply the picture-in-picture (PIP) feature to add video and photo layers above the clip and splice them easily.</li>
-<li>The stabilization feature keeps video footage steady.</li>
-</ul>
-<h4>Special features</h4>
-<ul>
-<li>Auto captions: automate speech recognition and subtitles in videos.</li>
-<li>Background removal: automatically remove people from videos for free.</li>
-<li>Trending styles: enjoy creative, constantly updated options such as 3D zoom, auto velocity, and more.</li>
-</ul>
-<h4>Text and stickers</h4>
-<ul>
-<li>Add text to videos with different fonts and styles, and find the best subtitle font with unique text templates. Subtitle font formats can be imported.</li>
-<li>Subtitles can be added to the video track timeline and can be moved and adjusted in one step.</li>
-<li>Add stickers to videos from a huge sticker library, or import your own stickers.</li>
-</ul>
-<h4>Trending effects and filters</h4>
-<ul>
-<li>Match video content with diverse filters that are updated weekly with the latest trends and seasons.</li>
-<li>Add effects to videos with various options such as glitch, VHS, retro, neon, and more.</li>
-</ul>
-<h4>Music and sound effects</h4>
-<ul>
-<li>Add music to videos from a huge song library, or import your own music.</li>
-<li>Adjust the volume of the music and of the video's original sound.</li>
-<li>Add sound effects to videos from a variety of categories, such as animals, cartoons, explosions, and more.</li>
-
-</ul>
-<h3>How to Download and Install CapCut Video Editor APK</h3>
-<p>If you want to download and install CapCut Video Editor APK on your Android device, you can follow these simple steps:</p>
-<ol>
-<li>Go to the official CapCut website or any other trusted source that provides the CapCut APK file.</li>
-<li>Download the CapCut APK file to your device.</li>
-<li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and turning it on.</li>
-<li>Locate the downloaded APK file on your device and tap it to start the installation process.</li>
-<li>Follow the on-screen instructions and wait for the installation to complete.</li>
-<li>Launch CapCut Video Editor APK and enjoy editing and creating videos.</li>
-</ol>
-<h3>Pros and Cons of CapCut Video Editor APK</h3>
-<p>CapCut Video Editor APK is a great app for video editing and video making, but it also has some pros and cons you should be aware of. Here are some of them:</p>
-<table>
-<tr><th>Pros</th><th>Cons</th></tr>
-<tr><td>- Free to use and download</td><td>- May contain ads and in-app purchases</td></tr>
-<tr><td>- Compatible with TikTok and other social media platforms</td><td>- May not work on some devices or in some regions</td></tr>
-<tr><td>- Offers a wide range of features and functions</td><td>- Can consume a lot of storage space and battery power</td></tr>
-<tr><td>- Supports HD-quality (up to 4K 60fps) video export</td><td>- May have some bugs or glitches</td></tr>
-<tr><td>- User-friendly and easy-to-use interface</td><td>- May require an internet connection for some features</td></tr>
-</table>
-<h2>Conclusion</h2>
-
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about CapCut Video Editor APK:</p>
-<ol>
-<li><b>Is CapCut Video Editor APK safe to use?</b></li>
-<p>Yes, CapCut Video Editor APK is safe to use as long as you download it from the official website or any other trusted source. However, you should always be careful when installing apps from unknown sources and check the permissions they require.</p>
-<p></p>
-<li><b>Is CapCut Video Editor APK available for iOS devices?</b></li>
-<p>No, CapCut Video Editor APK is only available for Android devices. However, there is an iOS version of CapCut that you can download from the App Store.</p>
-<li><b>Can I use CapCut Video Editor APK offline?</b></li>
-<p>Yes, you can use CapCut Video Editor APK offline for most of its features. However, some features may require an internet connection, such as downloading music, stickers, effects, filters, etc.</p>
-<li><b>How can I share my videos made with CapCut Video Editor APK?</b></li>
-<p>You can share your videos made with CapCut Video Editor APK directly to TikTok or any other social media platform. You can also save your videos to your device or upload them to cloud storage services.</p>
-<li><b>How can I contact the developers of CapCut Video Editor APK?</b></li>
-<p>You can contact the developers of CapCut Video Editor APK by sending an email to [email protected] or by visiting their official website at https://www.capcut.com/.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Billyosoro/ESRGAN/inference_realesrgan.py
DELETED
@@ -1,128 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-from basicsr.archs.rrdbnet_arch import RRDBNet
-
-from realesrgan import RealESRGANer
-from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-
-def main():
-    """Inference demo for Real-ESRGAN.
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
-    parser.add_argument(
-        '-n',
-        '--model_name',
-        type=str,
-        default='RealESRGAN_x4plus',
-        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
-              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
-              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
-    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
-    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
-    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
-    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
-    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
-    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
-    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
-    parser.add_argument('--half', action='store_true', help='Use half precision during inference')
-    parser.add_argument(
-        '--alpha_upsampler',
-        type=str,
-        default='realesrgan',
-        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
-    parser.add_argument(
-        '--ext',
-        type=str,
-        default='auto',
-        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
-    args = parser.parse_args()
-
-    # determine models according to model names
-    args.model_name = args.model_name.split('.')[0]
-    if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']:  # x4 RRDBNet model
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
-        netscale = 4
-    elif args.model_name in ['RealESRGAN_x4plus_anime_6B']:  # x4 RRDBNet model with 6 blocks
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
-        netscale = 4
-    elif args.model_name in ['RealESRGAN_x2plus']:  # x2 RRDBNet model
-        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-        netscale = 2
-    elif args.model_name in [
-            'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
-    ]:  # x2 VGG-style model (XS size)
-        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
-        netscale = 2
-    elif args.model_name in [
-            'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
-    ]:  # x4 VGG-style model (XS size)
-        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
-        netscale = 4
-
-    # determine model paths
-    model_path = os.path.join('.', args.model_name + '.pth')
-    if not os.path.isfile(model_path):
-        model_path = os.path.join('.', args.model_name + '.pth')
-    if not os.path.isfile(model_path):
-        raise ValueError(f'Model {args.model_name} does not exist.')
-
-    # restorer
-    upsampler = RealESRGANer(
-        scale=netscale,
-        model_path=model_path,
-        model=model,
-        tile=args.tile,
-        tile_pad=args.tile_pad,
-        pre_pad=args.pre_pad,
-        half=args.half)
-
-    if args.face_enhance:  # Use GFPGAN for face enhancement
-        from gfpgan import GFPGANer
-        face_enhancer = GFPGANer(
-            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
-            upscale=args.outscale,
-            arch='clean',
-            channel_multiplier=2,
-            bg_upsampler=upsampler)
-    os.makedirs(args.output, exist_ok=True)
-
-    if os.path.isfile(args.input):
-        paths = [args.input]
-    else:
-        paths = sorted(glob.glob(os.path.join(args.input, '*')))
-
-    for idx, path in enumerate(paths):
-        imgname, extension = os.path.splitext(os.path.basename(path))
-        print('Testing', idx, imgname)
-
-        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
-        if len(img.shape) == 3 and img.shape[2] == 4:
-            img_mode = 'RGBA'
-        else:
-            img_mode = None
-
-        try:
-            if args.face_enhance:
-                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
-            else:
-                output, _ = upsampler.enhance(img, outscale=args.outscale)
-        except RuntimeError as error:
-            print('Error', error)
-            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
-        else:
-            if args.ext == 'auto':
-                extension = extension[1:]
-            else:
-                extension = args.ext
-            if img_mode == 'RGBA':  # RGBA images should be saved in png format
-                extension = 'png'
-            save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
-            cv2.imwrite(save_path, output)
-
-
-if __name__ == '__main__':
-    main()
spaces/BreetheRun/stabilityai-stable-diffusion-xl-base-1.0/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/stabilityai/stable-diffusion-xl-base-1.0").launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/launch.py
DELETED
@@ -1,84 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import logging
-import torch
-import torch.distributed as dist
-import torch.multiprocessing as mp
-
-from detectron2.utils import comm
-
-__all__ = ["launch"]
-
-
-def _find_free_port():
-    import socket
-
-    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    # Binding to port 0 will cause the OS to find an available port for us
-    sock.bind(("", 0))
-    port = sock.getsockname()[1]
-    sock.close()
-    # NOTE: there is still a chance the port could be taken by other processes.
-    return port
-
-
-def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()):
-    """
-    Args:
-        main_func: a function that will be called by `main_func(*args)`
-        num_machines (int): the total number of machines
-        machine_rank (int): the rank of this machine (one per machine)
-        dist_url (str): url to connect to for distributed training, including protocol
-                       e.g. "tcp://127.0.0.1:8686".
-                       Can be set to auto to automatically select a free port on localhost
-        args (tuple): arguments passed to main_func
-    """
-    world_size = num_machines * num_gpus_per_machine
-    if world_size > 1:
-        # https://github.com/pytorch/pytorch/pull/14391
-        # TODO prctl in spawned processes
-
-        if dist_url == "auto":
-            assert num_machines == 1, "dist_url=auto cannot work with distributed training."
-            port = _find_free_port()
-            dist_url = f"tcp://127.0.0.1:{port}"
-
-        mp.spawn(
-            _distributed_worker,
-            nprocs=num_gpus_per_machine,
-            args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args),
-            daemon=False,
-        )
-    else:
-        main_func(*args)
-
-
-def _distributed_worker(
-    local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args
-):
-    assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
-    global_rank = machine_rank * num_gpus_per_machine + local_rank
-    try:
-        dist.init_process_group(
-            backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank
-        )
-    except Exception as e:
-        logger = logging.getLogger(__name__)
-        logger.error("Process group URL: {}".format(dist_url))
-        raise e
-    # synchronize is needed here to prevent a possible timeout after calling init_process_group
-    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
-    comm.synchronize()
-
-    assert num_gpus_per_machine <= torch.cuda.device_count()
-    torch.cuda.set_device(local_rank)
-
-    # Setup the local process group (which contains ranks within the same machine)
-    assert comm._LOCAL_PROCESS_GROUP is None
-    num_machines = world_size // num_gpus_per_machine
-    for i in range(num_machines):
-        ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
-        pg = dist.new_group(ranks_on_i)
-        if i == machine_rank:
-            comm._LOCAL_PROCESS_GROUP = pg
-
-    main_func(*args)
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/README.md
DELETED
@@ -1,8 +0,0 @@
-## Unit Tests
-
-To run the unittests, do:
-```
-python -m unittest discover -v -s tests
-```
-
-There are also end-to-end inference & training tests, in [dev/run_*_tests.sh](../dev).
spaces/CVPR/WALT/mmdet/models/necks/yolo_neck.py
DELETED
@@ -1,136 +0,0 @@
-# Copyright (c) 2019 Western Digital Corporation or its affiliates.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule
-
-from ..builder import NECKS
-
-
-class DetectionBlock(nn.Module):
-    """Detection block in YOLO neck.
-
-    Let out_channels = n, the DetectionBlock contains:
-    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.
-    The first 6 ConvLayers are formed the following way:
-    1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
-    The Conv2D layer is 1x1x255.
-    Some block will have branch after the fifth ConvLayer.
-    The input channel is arbitrary (in_channels)
-
-    Args:
-        in_channels (int): The number of input channels.
-        out_channels (int): The number of output channels.
-        conv_cfg (dict): Config dict for convolution layer. Default: None.
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-            Default: dict(type='BN', requires_grad=True)
-        act_cfg (dict): Config dict for activation layer.
-            Default: dict(type='LeakyReLU', negative_slope=0.1).
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN', requires_grad=True),
-                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
-        super(DetectionBlock, self).__init__()
-        double_out_channels = out_channels * 2
-
-        # shortcut
-        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
-        self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
-        self.conv2 = ConvModule(
-            out_channels, double_out_channels, 3, padding=1, **cfg)
-        self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
-        self.conv4 = ConvModule(
-            out_channels, double_out_channels, 3, padding=1, **cfg)
-        self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
-
-    def forward(self, x):
-        tmp = self.conv1(x)
-        tmp = self.conv2(tmp)
-        tmp = self.conv3(tmp)
-        tmp = self.conv4(tmp)
-        out = self.conv5(tmp)
-        return out
-
-
-@NECKS.register_module()
-class YOLOV3Neck(nn.Module):
-    """The neck of YOLOV3.
-
-    It can be treated as a simplified version of FPN. It
-    will take the result from Darknet backbone and do some upsampling and
-    concatenation. It will finally output the detection result.
-
-    Note:
-        The input feats should be from top to bottom.
-            i.e., from high-lvl to low-lvl
-        But YOLOV3Neck will process them in reversed order.
-            i.e., from bottom (high-lvl) to top (low-lvl)
-
-    Args:
-        num_scales (int): The number of scales / stages.
-        in_channels (int): The number of input channels.
-        out_channels (int): The number of output channels.
-        conv_cfg (dict): Config dict for convolution layer. Default: None.
-        norm_cfg (dict): Dictionary to construct and config norm layer.
-            Default: dict(type='BN', requires_grad=True)
-        act_cfg (dict): Config dict for activation layer.
-            Default: dict(type='LeakyReLU', negative_slope=0.1).
-    """
-
-    def __init__(self,
-                 num_scales,
-                 in_channels,
-                 out_channels,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN', requires_grad=True),
-                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
-        super(YOLOV3Neck, self).__init__()
-        assert (num_scales == len(in_channels) == len(out_channels))
-        self.num_scales = num_scales
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-
-        # shortcut
-        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
-
-        # To support arbitrary scales, the code looks awful, but it works.
-        # Better solution is welcomed.
-        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
-        for i in range(1, self.num_scales):
-            in_c, out_c = self.in_channels[i], self.out_channels[i]
-            self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
-            # in_c + out_c : High-lvl feats will be cat with low-lvl feats
-            self.add_module(f'detect{i+1}',
-                            DetectionBlock(in_c + out_c, out_c, **cfg))
-
-    def forward(self, feats):
-        assert len(feats) == self.num_scales
-
-        # processed from bottom (high-lvl) to top (low-lvl)
-        outs = []
-        out = self.detect1(feats[-1])
-        outs.append(out)
-
-        for i, x in enumerate(reversed(feats[:-1])):
-            conv = getattr(self, f'conv{i+1}')
-            tmp = conv(out)
-
-            # Cat with low-lvl feats
-            tmp = F.interpolate(tmp, scale_factor=2)
-            tmp = torch.cat((tmp, x), 1)
-
-            detect = getattr(self, f'detect{i+2}')
-            out = detect(tmp)
-            outs.append(out)
-
-        return tuple(outs)
-
-    def init_weights(self):
-        """Initialize the weights of module."""
-        # init is done in ConvModule
-        pass
spaces/CVPR/WALT/mmdet/models/roi_heads/mask_heads/grid_head.py
DELETED
@@ -1,359 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, kaiming_init, normal_init
-
-from mmdet.models.builder import HEADS, build_loss
-
-
-@HEADS.register_module()
-class GridHead(nn.Module):
-
-    def __init__(self,
-                 grid_points=9,
-                 num_convs=8,
-                 roi_feat_size=14,
-                 in_channels=256,
-                 conv_kernel_size=3,
-                 point_feat_channels=64,
-                 deconv_kernel_size=4,
-                 class_agnostic=False,
-                 loss_grid=dict(
-                     type='CrossEntropyLoss', use_sigmoid=True,
-                     loss_weight=15),
-                 conv_cfg=None,
-                 norm_cfg=dict(type='GN', num_groups=36)):
-        super(GridHead, self).__init__()
-        self.grid_points = grid_points
-        self.num_convs = num_convs
-        self.roi_feat_size = roi_feat_size
-        self.in_channels = in_channels
-        self.conv_kernel_size = conv_kernel_size
-        self.point_feat_channels = point_feat_channels
-        self.conv_out_channels = self.point_feat_channels * self.grid_points
-        self.class_agnostic = class_agnostic
-        self.conv_cfg = conv_cfg
-        self.norm_cfg = norm_cfg
-        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
-            assert self.conv_out_channels % norm_cfg['num_groups'] == 0
-
-        assert self.grid_points >= 4
-        self.grid_size = int(np.sqrt(self.grid_points))
-        if self.grid_size * self.grid_size != self.grid_points:
-            raise ValueError('grid_points must be a square number')
-
-        # the predicted heatmap is half of whole_map_size
-        if not isinstance(self.roi_feat_size, int):
-            raise ValueError('Only square RoIs are supporeted in Grid R-CNN')
-        self.whole_map_size = self.roi_feat_size * 4
-
-        # compute point-wise sub-regions
-        self.sub_regions = self.calc_sub_regions()
-
-        self.convs = []
-        for i in range(self.num_convs):
-            in_channels = (
-                self.in_channels if i == 0 else self.conv_out_channels)
-            stride = 2 if i == 0 else 1
-            padding = (self.conv_kernel_size - 1) // 2
-            self.convs.append(
-                ConvModule(
-                    in_channels,
-                    self.conv_out_channels,
-                    self.conv_kernel_size,
-                    stride=stride,
-                    padding=padding,
-                    conv_cfg=self.conv_cfg,
-                    norm_cfg=self.norm_cfg,
-                    bias=True))
-        self.convs = nn.Sequential(*self.convs)
-
-        self.deconv1 = nn.ConvTranspose2d(
-            self.conv_out_channels,
-            self.conv_out_channels,
-            kernel_size=deconv_kernel_size,
-            stride=2,
-            padding=(deconv_kernel_size - 2) // 2,
-            groups=grid_points)
-        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
-        self.deconv2 = nn.ConvTranspose2d(
-            self.conv_out_channels,
-            grid_points,
-            kernel_size=deconv_kernel_size,
-            stride=2,
-            padding=(deconv_kernel_size - 2) // 2,
-            groups=grid_points)
-
-        # find the 4-neighbor of each grid point
-        self.neighbor_points = []
-        grid_size = self.grid_size
-        for i in range(grid_size):  # i-th column
-            for j in range(grid_size):  # j-th row
-                neighbors = []
-                if i > 0:  # left: (i - 1, j)
-                    neighbors.append((i - 1) * grid_size + j)
-                if j > 0:  # up: (i, j - 1)
-                    neighbors.append(i * grid_size + j - 1)
-                if j < grid_size - 1:  # down: (i, j + 1)
-                    neighbors.append(i * grid_size + j + 1)
-                if i < grid_size - 1:  # right: (i + 1, j)
-                    neighbors.append((i + 1) * grid_size + j)
-                self.neighbor_points.append(tuple(neighbors))
-        # total edges in the grid
-        self.num_edges = sum([len(p) for p in self.neighbor_points])
-
-        self.forder_trans = nn.ModuleList()  # first-order feature transition
-        self.sorder_trans = nn.ModuleList()  # second-order feature transition
-        for neighbors in self.neighbor_points:
-            fo_trans = nn.ModuleList()
-            so_trans = nn.ModuleList()
-            for _ in range(len(neighbors)):
-                # each transition module consists of a 5x5 depth-wise conv and
-                # 1x1 conv.
-                fo_trans.append(
-                    nn.Sequential(
-                        nn.Conv2d(
-                            self.point_feat_channels,
-                            self.point_feat_channels,
-                            5,
-                            stride=1,
-                            padding=2,
-                            groups=self.point_feat_channels),
-                        nn.Conv2d(self.point_feat_channels,
-                                  self.point_feat_channels, 1)))
-                so_trans.append(
-                    nn.Sequential(
-                        nn.Conv2d(
-                            self.point_feat_channels,
-                            self.point_feat_channels,
-                            5,
-                            1,
-                            2,
-                            groups=self.point_feat_channels),
-                        nn.Conv2d(self.point_feat_channels,
-                                  self.point_feat_channels, 1)))
-            self.forder_trans.append(fo_trans)
-            self.sorder_trans.append(so_trans)
-
-        self.loss_grid = build_loss(loss_grid)
-
-    def init_weights(self):
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
-                # TODO: compare mode = "fan_in" or "fan_out"
-                kaiming_init(m)
-        for m in self.modules():
-            if isinstance(m, nn.ConvTranspose2d):
-                normal_init(m, std=0.001)
-        nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01))
-
-    def forward(self, x):
-        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
-        # RoI feature transformation, downsample 2x
-        x = self.convs(x)
-
-        c = self.point_feat_channels
-        # first-order fusion
-        x_fo = [None for _ in range(self.grid_points)]
-        for i, points in enumerate(self.neighbor_points):
-            x_fo[i] = x[:, i * c:(i + 1) * c]
-            for j, point_idx in enumerate(points):
-                x_fo[i] = x_fo[i] + self.forder_trans[i][j](
-                    x[:, point_idx * c:(point_idx + 1) * c])
-
-        # second-order fusion
-        x_so = [None for _ in range(self.grid_points)]
-        for i, points in enumerate(self.neighbor_points):
-            x_so[i] = x[:, i * c:(i + 1) * c]
-            for j, point_idx in enumerate(points):
-                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
-
-        # predicted heatmap with fused features
-        x2 = torch.cat(x_so, dim=1)
-        x2 = self.deconv1(x2)
-        x2 = F.relu(self.norm1(x2), inplace=True)
-        heatmap = self.deconv2(x2)
-
-        # predicted heatmap with original features (applicable during training)
-        if self.training:
-            x1 = x
-            x1 = self.deconv1(x1)
-            x1 = F.relu(self.norm1(x1), inplace=True)
-            heatmap_unfused = self.deconv2(x1)
-        else:
-            heatmap_unfused = heatmap
-
-        return dict(fused=heatmap, unfused=heatmap_unfused)
-
-    def calc_sub_regions(self):
-        """Compute point specific representation regions.
-
-        See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
-        """
-        # to make it consistent with the original implementation, half_size
-        # is computed as 2 * quarter_size, which is smaller
-        half_size = self.whole_map_size // 4 * 2
-        sub_regions = []
-        for i in range(self.grid_points):
-            x_idx = i // self.grid_size
-            y_idx = i % self.grid_size
-            if x_idx == 0:
-                sub_x1 = 0
-            elif x_idx == self.grid_size - 1:
-                sub_x1 = half_size
-            else:
-                ratio = x_idx / (self.grid_size - 1) - 0.25
-                sub_x1 = max(int(ratio * self.whole_map_size), 0)
-
-            if y_idx == 0:
-                sub_y1 = 0
-            elif y_idx == self.grid_size - 1:
-                sub_y1 = half_size
-            else:
-                ratio = y_idx / (self.grid_size - 1) - 0.25
-                sub_y1 = max(int(ratio * self.whole_map_size), 0)
-            sub_regions.append(
-                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
-        return sub_regions
-
-    def get_targets(self, sampling_results, rcnn_train_cfg):
-        # mix all samples (across images) together.
-        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
-                               dim=0).cpu()
-        pos_gt_bboxes = torch.cat(
-            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
-        assert pos_bboxes.shape == pos_gt_bboxes.shape
-
-        # expand pos_bboxes to 2x of original size
-        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
-        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
-        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
-        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
-        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
-        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
-        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
-
-        num_rois = pos_bboxes.shape[0]
-        map_size = self.whole_map_size
-        # this is not the final target shape
-        targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
-                              dtype=torch.float)
-
-        # pre-compute interpolation factors for all grid points.
-        # the first item is the factor of x-dim, and the second is y-dim.
-        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
-        factors = []
-        for j in range(self.grid_points):
-            x_idx = j // self.grid_size
-            y_idx = j % self.grid_size
-            factors.append((1 - x_idx / (self.grid_size - 1),
-                            1 - y_idx / (self.grid_size - 1)))
-
-        radius = rcnn_train_cfg.pos_radius
-        radius2 = radius**2
-        for i in range(num_rois):
-            # ignore small bboxes
-            if (pos_bbox_ws[i] <= self.grid_size
-                    or pos_bbox_hs[i] <= self.grid_size):
-                continue
-            # for each grid point, mark a small circle as positive
-            for j in range(self.grid_points):
-                factor_x, factor_y = factors[j]
-                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
-                    1 - factor_x) * pos_gt_bboxes[i, 2]
-                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
-                    1 - factor_y) * pos_gt_bboxes[i, 3]
-
-                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
-                         map_size)
-                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
-                         map_size)
-
-                for x in range(cx - radius, cx + radius + 1):
-                    for y in range(cy - radius, cy + radius + 1):
-                        if x >= 0 and x < map_size and y >= 0 and y < map_size:
-                            if (x - cx)**2 + (y - cy)**2 <= radius2:
-                                targets[i, j, y, x] = 1
-        # reduce the target heatmap size by a half
-        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
-        sub_targets = []
-        for i in range(self.grid_points):
-            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
-            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
-        sub_targets = torch.cat(sub_targets, dim=1)
-        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
-        return sub_targets
-
-    def loss(self, grid_pred, grid_targets):
-        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
-        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
-        loss_grid = loss_fused + loss_unfused
-        return dict(loss_grid=loss_grid)
-
-    def get_bboxes(self, det_bboxes, grid_pred, img_metas):
-        # TODO: refactoring
-        assert det_bboxes.shape[0] == grid_pred.shape[0]
-        det_bboxes = det_bboxes.cpu()
-        cls_scores = det_bboxes[:, [4]]
-        det_bboxes = det_bboxes[:, :4]
-        grid_pred = grid_pred.sigmoid().cpu()
-
-        R, c, h, w = grid_pred.shape
-        half_size = self.whole_map_size // 4 * 2
-        assert h == w == half_size
-        assert c == self.grid_points
-
-        # find the point with max scores in the half-sized heatmap
-        grid_pred = grid_pred.view(R * c, h * w)
-        pred_scores, pred_position = grid_pred.max(dim=1)
-        xs = pred_position % w
-        ys = pred_position // w
-
-        # get the position in the whole heatmap instead of half-sized heatmap
-        for i in range(self.grid_points):
-            xs[i::self.grid_points] += self.sub_regions[i][0]
-            ys[i::self.grid_points] += self.sub_regions[i][1]
-
-        # reshape to (num_rois, grid_points)
-        pred_scores, xs, ys = tuple(
-            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
-
-        # get expanded pos_bboxes
-        widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
-        heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
-        x1 = (det_bboxes[:, 0, None] - widths / 2)
-        y1 = (det_bboxes[:, 1, None] - heights / 2)
-        # map the grid point to the absolute coordinates
-        abs_xs = (xs.float() + 0.5) / w * widths + x1
-        abs_ys = (ys.float() + 0.5) / h * heights + y1
-
-        # get the grid points indices that fall on the bbox boundaries
-        x1_inds = [i for i in range(self.grid_size)]
-        y1_inds = [i * self.grid_size for i in range(self.grid_size)]
-        x2_inds = [
-            self.grid_points - self.grid_size + i
-            for i in range(self.grid_size)
-        ]
-        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
-
-        # voting of all grid points on some boundary
-        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
-            dim=1, keepdim=True) / (
-                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
-        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
-            dim=1, keepdim=True) / (
-                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
-        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
-            dim=1, keepdim=True) / (
-                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
-        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
-            dim=1, keepdim=True) / (
-                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
-
-        bbox_res = torch.cat(
-            [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
-        bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
-        bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])
-
-        return bbox_res
spaces/CVPR/lama-example/models/ade20k/mobilenet.py
DELETED
@@ -1,154 +0,0 @@
-"""
-This MobileNetV2 implementation is modified from the following repository:
-https://github.com/tonylins/pytorch-mobilenet-v2
-"""
-
-import torch.nn as nn
-import math
-from .utils import load_url
-from .segm_lib.nn import SynchronizedBatchNorm2d
-
-BatchNorm2d = SynchronizedBatchNorm2d
-
-
-__all__ = ['mobilenetv2']
-
-
-model_urls = {
-    'mobilenetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar',
-}
-
-
-def conv_bn(inp, oup, stride):
-    return nn.Sequential(
-        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
-        BatchNorm2d(oup),
-        nn.ReLU6(inplace=True)
-    )
-
-
-def conv_1x1_bn(inp, oup):
-    return nn.Sequential(
-        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
-        BatchNorm2d(oup),
-        nn.ReLU6(inplace=True)
-    )
-
-
-class InvertedResidual(nn.Module):
-    def __init__(self, inp, oup, stride, expand_ratio):
-        super(InvertedResidual, self).__init__()
-        self.stride = stride
-        assert stride in [1, 2]
-
-        hidden_dim = round(inp * expand_ratio)
-        self.use_res_connect = self.stride == 1 and inp == oup
-
-        if expand_ratio == 1:
-            self.conv = nn.Sequential(
-                # dw
-                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
-                BatchNorm2d(hidden_dim),
-                nn.ReLU6(inplace=True),
-                # pw-linear
-                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
-                BatchNorm2d(oup),
-            )
-        else:
-            self.conv = nn.Sequential(
-                # pw
-                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
-                BatchNorm2d(hidden_dim),
-                nn.ReLU6(inplace=True),
-                # dw
-                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
-                BatchNorm2d(hidden_dim),
-                nn.ReLU6(inplace=True),
-                # pw-linear
-                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
-                BatchNorm2d(oup),
-            )
-
-    def forward(self, x):
-        if self.use_res_connect:
-            return x + self.conv(x)
-        else:
-            return self.conv(x)
-
-
-class MobileNetV2(nn.Module):
-    def __init__(self, n_class=1000, input_size=224, width_mult=1.):
-        super(MobileNetV2, self).__init__()
-        block = InvertedResidual
-        input_channel = 32
-        last_channel = 1280
-        interverted_residual_setting = [
-            # t, c, n, s
-            [1, 16, 1, 1],
-            [6, 24, 2, 2],
-            [6, 32, 3, 2],
-            [6, 64, 4, 2],
-            [6, 96, 3, 1],
-            [6, 160, 3, 2],
-            [6, 320, 1, 1],
-        ]
-
-        # building first layer
-        assert input_size % 32 == 0
-        input_channel = int(input_channel * width_mult)
-        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
-        self.features = [conv_bn(3, input_channel, 2)]
-        # building inverted residual blocks
-        for t, c, n, s in interverted_residual_setting:
-            output_channel = int(c * width_mult)
-            for i in range(n):
-                if i == 0:
-                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
-                else:
-                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
-                input_channel = output_channel
-        # building last several layers
-        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
-        # make it nn.Sequential
-        self.features = nn.Sequential(*self.features)
-
-        # building classifier
-        self.classifier = nn.Sequential(
-            nn.Dropout(0.2),
-            nn.Linear(self.last_channel, n_class),
-        )
-
-        self._initialize_weights()
-
-    def forward(self, x):
-        x = self.features(x)
-        x = x.mean(3).mean(2)
-        x = self.classifier(x)
-        return x
-
-    def _initialize_weights(self):
-        for m in self.modules():
-            if isinstance(m, nn.Conv2d):
-                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-                m.weight.data.normal_(0, math.sqrt(2. / n))
-                if m.bias is not None:
-                    m.bias.data.zero_()
-            elif isinstance(m, BatchNorm2d):
-                m.weight.data.fill_(1)
-                m.bias.data.zero_()
-            elif isinstance(m, nn.Linear):
-                n = m.weight.size(1)
-                m.weight.data.normal_(0, 0.01)
-                m.bias.data.zero_()
-
-
-def mobilenetv2(pretrained=False, **kwargs):
-    """Constructs a MobileNet_V2 model.
-
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
-    model = MobileNetV2(n_class=1000, **kwargs)
-    if pretrained:
-        model.load_state_dict(load_url(model_urls['mobilenetv2']), strict=False)
-    return model
spaces/CVPR/monoscene_lite/monoscene/.ipynb_checkpoints/CRP3D-checkpoint.py
DELETED
@@ -1,97 +0,0 @@
-import torch
-import torch.nn as nn
-from monoscene.modules import (
-    Process,
-    ASPP,
-)
-
-
-class CPMegaVoxels(nn.Module):
-    def __init__(self, feature, size, n_relations=4, bn_momentum=0.0003):
-        super().__init__()
-        self.size = size
-        self.n_relations = n_relations
-        print("n_relations", self.n_relations)
-        self.flatten_size = size[0] * size[1] * size[2]
-        self.feature = feature
-        self.context_feature = feature * 2
-        self.flatten_context_size = (size[0] // 2) * (size[1] // 2) * (size[2] // 2)
-        padding = ((size[0] + 1) % 2, (size[1] + 1) % 2, (size[2] + 1) % 2)
-
-        self.mega_context = nn.Sequential(
-            nn.Conv3d(
-                feature, self.context_feature, stride=2, padding=padding, kernel_size=3
-            ),
-        )
-        self.flatten_context_size = (size[0] // 2) * (size[1] // 2) * (size[2] // 2)
-
-        self.context_prior_logits = nn.ModuleList(
-            [
-                nn.Sequential(
-                    nn.Conv3d(
-                        self.feature,
-                        self.flatten_context_size,
-                        padding=0,
-                        kernel_size=1,
-                    ),
-                )
-                for i in range(n_relations)
-            ]
-        )
-        self.aspp = ASPP(feature, [1, 2, 3])
-
-        self.resize = nn.Sequential(
-            nn.Conv3d(
-                self.context_feature * self.n_relations + feature,
-                feature,
-                kernel_size=1,
-                padding=0,
-                bias=False,
-            ),
-            Process(feature, nn.BatchNorm3d, bn_momentum, dilations=[1]),
-        )
-
-    def forward(self, input):
-        ret = {}
-        bs = input.shape[0]
-
-        x_agg = self.aspp(input)
-
-        # get the mega context
-        x_mega_context_raw = self.mega_context(x_agg)
-        x_mega_context = x_mega_context_raw.reshape(bs, self.context_feature, -1)
-        x_mega_context = x_mega_context.permute(0, 2, 1)
-
-        # get context prior map
-        x_context_prior_logits = []
-        x_context_rels = []
-        for rel in range(self.n_relations):
-
-            # Compute the relation matrices
-            x_context_prior_logit = self.context_prior_logits[rel](x_agg)
-            x_context_prior_logit = x_context_prior_logit.reshape(
-                bs, self.flatten_context_size, self.flatten_size
-            )
-            x_context_prior_logits.append(x_context_prior_logit.unsqueeze(1))
-
-            x_context_prior_logit = x_context_prior_logit.permute(0, 2, 1)
-            x_context_prior = torch.sigmoid(x_context_prior_logit)
-
-            # Multiply the relation matrices with the mega context to gather context features
-            x_context_rel = torch.bmm(x_context_prior, x_mega_context)  # bs, N, f
-            x_context_rels.append(x_context_rel)
-
-        x_context = torch.cat(x_context_rels, dim=2)
-        x_context = x_context.permute(0, 2, 1)
-        x_context = x_context.reshape(
-            bs, x_context.shape[1], self.size[0], self.size[1], self.size[2]
-        )
-
-        x = torch.cat([input, x_context], dim=1)
-        x = self.resize(x)
-
-        x_context_prior_logits = torch.cat(x_context_prior_logits, dim=1)
-        ret["P_logits"] = x_context_prior_logits
-        ret["x"] = x
-
-        return ret
spaces/Chukwuka/Dog_Breed_ImageWoof/data_setup.py
DELETED
@@ -1,36 +0,0 @@
-
-import os
-import argparse
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision.transforms as tt
-import albumentations as A
-from albumentations.pytorch import ToTensorV2
-
-
-stats = (0.4862, 0.4561, 0.3941), (0.2202, 0.2142, 0.2160)
-
-model_tsfm = A.Compose([
-    A.Resize(224, 224),
-    A.Normalize(*stats),
-    ToTensorV2()
-])
-
-classes = ['Australian terrier', 'Border terrier', 'Samoyed', 'Beagle', 'Shih-Tzu', 'English foxhound', 'Rhodesian ridgeback', 'Dingo', 'Golden retriever', 'Old English sheepdog']
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument('-i', '--Image',
-                        help="input image path", required=True)
-
-    args = vars(parser.parse_args())
-    print(args)
-    img_path = args['Image']
-    #plt.imshow(get_image(img_path, model_tsfm).permute(1,2,0))
-    #img_pred = eff_b2(get_image(img_path, model_tsfm).unsqueeze(0).to(device))
-    #print(img_pred)
-    #img_class = torch.argmax(img_pred)
-    #print(img_class)
-    #print(classes[img_class.item()])
spaces/ClementBM/connectfour/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Connectfour
-emoji: 🏢
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.23.0
-app_file: connectfour/app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
DELETED
@@ -1,17 +0,0 @@
-#include "libipc/pool_alloc.h"
-
-#include "libipc/memory/resource.h"
-
-namespace ipc {
-namespace mem {
-
-void* pool_alloc::alloc(std::size_t size) {
-    return async_pool_alloc::alloc(size);
-}
-
-void pool_alloc::free(void* p, std::size_t size) {
-    async_pool_alloc::free(p, size);
-}
-
-} // namespace mem
-} // namespace ipc
spaces/Curranj/chatbot/app.py
DELETED
@@ -1,79 +0,0 @@
-import sklearn
-import sqlite3
-import numpy as np
-from sklearn.metrics.pairwise import cosine_similarity
-import openai
-import os
-import gradio as gr
-
-
-openai.api_key = os.environ["Secret"]
-
-def find_closest_neighbors(vector1, dictionary_of_vectors):
-    """
-    Takes a vector and a dictionary of vectors and returns the three closest neighbors
-    """
-    vector = openai.Embedding.create(
-        input=vector1,
-        engine="text-embedding-ada-002"
-    )['data'][0]['embedding']
-
-    vector = np.array(vector)
-
-    cosine_similarities = {}
-    for key, value in dictionary_of_vectors.items():
-        cosine_similarities[key] = cosine_similarity(vector.reshape(1, -1), value.reshape(1, -1))[0][0]
-
-    sorted_cosine_similarities = sorted(cosine_similarities.items(), key=lambda x: x[1], reverse=True)
-    match_list = sorted_cosine_similarities[0:4]
-
-    return match_list
-
-def predict(message, history):
-    # Connect to the database
-    conn = sqlite3.connect('QRIdatabase7 (1).db')
-    cursor = conn.cursor()
-    cursor.execute('''SELECT text, embedding FROM chunks''')
-    rows = cursor.fetchall()
-
-    dictionary_of_vectors = {}
-    for row in rows:
-        text = row[0]
-        embedding_str = row[1]
-        embedding = np.fromstring(embedding_str, sep=' ')
-        dictionary_of_vectors[text] = embedding
-    conn.close()
-
-    # Find the closest neighbors
-    match_list = find_closest_neighbors(message, dictionary_of_vectors)
-    context = ''
-    for match in match_list:
-        context += str(match[0])
-    context = context[:-1500]
-
-    prep = f"This is an OpenAI model tuned to answer questions specific to the Qualia Research institute, a research institute that focuses on consciousness. Here is some question-specific context, and then the Question to answer, related to consciousness, the human experience, and phenomenology: {context}. Here is a question specific to QRI and consciousness in general Q: {message} A: "
-
-    history_openai_format = []
-    for human, assistant in history:
-        history_openai_format.append({"role": "user", "content": human })
-        history_openai_format.append({"role": "assistant", "content":assistant})
-    history_openai_format.append({"role": "user", "content": prep})
-
-    response = openai.ChatCompletion.create(
-        model='gpt-4',
-        messages= history_openai_format,
-        temperature=1.0,
-        stream=True
-    )
-
-    partial_message = ""
-    for chunk in response:
-        if len(chunk['choices'][0]['delta']) != 0:
-            partial_message = partial_message + chunk['choices'][0]['delta']['content']
-            yield partial_message
-
-demo = gr.ChatInterface(predict).queue()
-
-if __name__ == "__main__":
-    demo.launch()
-
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/openapi/__init__.py
DELETED
File without changes