Commit
·
0694f6e
1
Parent(s):
4a64a5c
Update parquet files (step 66 of 476)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/__init__.py +0 -478
- spaces/123Kumar/vits-uma-genshin-honkai123/text/cleaners.py +0 -475
- spaces/1gistliPinn/ChatGPT4/Examples/Aaja Nachle Eng Sub [CRACKED] Free Downloa.md +0 -10
- spaces/1gistliPinn/ChatGPT4/Examples/Amada Ap100 Software Crack 28.md +0 -7
- spaces/1gistliPinn/ChatGPT4/Examples/Baka Loader 1.4.md +0 -74
- spaces/1gistliPinn/ChatGPT4/Examples/Contoh Surat Undangan Peneguhan Sidi.md +0 -62
- spaces/1gistliPinn/ChatGPT4/Examples/Download Game Need For Speed Undercover FREE.md +0 -9
- spaces/1gistliPinn/ChatGPT4/Examples/Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l ((BETTER)).md +0 -12
- spaces/1pelhydcardo/ChatGPT-prompt-generator/Lo-Que-Varguitas-No-Dijo-Libro-Pdf-11-Fixed.md +0 -58
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blue Hai Pani - Pani MP3 Download Listen to Yo Yo Honey Singh Sandeep Kapoor and Soniya Sharma.md +0 -131
- spaces/1phancelerku/anime-remove-background/Download Gin Rummy Plus Hack APK for Free and Experience the Fun of Gin Rummy with Unlimited Coins.md +0 -78
- spaces/1phancelerku/anime-remove-background/Download NBA 2K14 v1.14 APK for Android Multiplayer Mode HD Graphics and More.md +0 -87
- spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Mod - Snrsz Para ve Altn Hilesi Nasl Yaplr?.md +0 -132
- spaces/1toTree/lora_test/ppdiffusers/models/embeddings.py +0 -199
- spaces/3bdo7ss/Neutron_Chatbot/README.md +0 -13
- spaces/AIConsultant/MusicGen/tests/common_utils/wav_utils.py +0 -32
- spaces/AIFILMS/generate_human_motion/VQ-Trans/README.md +0 -400
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/base.py +0 -76
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/htsat.py +0 -1022
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py +0 -98
- spaces/ASJMO/freegpt/client/css/hljs.css +0 -68
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py +0 -135
- spaces/Abhilashvj/planogram-compliance/data/scripts/get_coco.sh +0 -56
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/ChatgptLogin.py +0 -74
- spaces/AgentVerse/agentVerse/agentverse/agents/simulation_agent/prisoner_dilemma.py +0 -167
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/transforms.py +0 -193
- spaces/Ameaou/academic-chatgpt3.1/config.py +0 -58
- spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/util.py +0 -492
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +0 -532
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/pndm/test_pndm.py +0 -87
- spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py +0 -2
- spaces/Anonymous-123/ImageNet-Editing/resize_obj.py +0 -188
- spaces/Apex-X/GODROOP/roop/ui.py +0 -232
- spaces/Artrajz/vits-simple-api/bert_vits2/text/english_bert_mock.py +0 -5
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/utils.py +0 -49
- spaces/Bart92/RVC_HF/Applio-RVC-Fork/utils/clonerepo_experimental.py +0 -253
- spaces/Benson/text-generation/Examples/Asfalto 8 Mod Apk Dinero Ilimitado Y Fichas ltima Versin 2023.md +0 -80
- spaces/Benson/text-generation/Examples/Bgmi 2.0 90 Fps Archivo De Configuracin.md +0 -105
- spaces/Benson/text-generation/Examples/Cmo Puedo Descargar Candy Crush Saga En Facebook.md +0 -60
- spaces/Benson/text-generation/Examples/Descarga 22h2 Windows 10 Actualizacin.md +0 -76
- spaces/Benson/text-generation/Examples/Descargar Coche Deportivo 3 Apk.md +0 -84
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/unpacking.py +0 -257
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/__main__.py +0 -12
- spaces/BlinkDL/RWKV-World-7B/README.md +0 -13
- spaces/CCOM/README/README.md +0 -11
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/train.py +0 -93
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rpn_outputs.py +0 -453
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/find.h +0 -44
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/tag.h +0 -48
- spaces/CarlDennis/HYTTS/models.py +0 -498
spaces/101-5/gpt4free/g4f/.v1/gpt4free/quora/__init__.py
DELETED
@@ -1,478 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
from datetime import datetime
|
3 |
-
from hashlib import md5
|
4 |
-
from json import dumps
|
5 |
-
from pathlib import Path
|
6 |
-
from random import choice, choices, randint
|
7 |
-
from re import search, findall
|
8 |
-
from string import ascii_letters, digits
|
9 |
-
from typing import Optional, Union, List, Any, Generator
|
10 |
-
from urllib.parse import unquote
|
11 |
-
|
12 |
-
import selenium.webdriver.support.expected_conditions as EC
|
13 |
-
from fake_useragent import UserAgent
|
14 |
-
from pydantic import BaseModel
|
15 |
-
from pypasser import reCaptchaV3
|
16 |
-
from requests import Session
|
17 |
-
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
|
18 |
-
from selenium.webdriver.common.by import By
|
19 |
-
from selenium.webdriver.support.wait import WebDriverWait
|
20 |
-
from tls_client import Session as TLS
|
21 |
-
|
22 |
-
from .api import Client as PoeClient
|
23 |
-
from .mail import Emailnator
|
24 |
-
|
25 |
-
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
|
26 |
-
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
|
27 |
-
to your system\'s PATH.\n\nHere are the steps to resolve the issue:\n\n1. Download the geckodriver for your platform
|
28 |
-
(Windows, macOS, or Linux) from the following link: https://github.com/mozilla/geckodriver/releases\n\n2. Extract the
|
29 |
-
downloaded archive and locate the geckodriver executable.\n\n3. Add the geckodriver executable to your system\'s
|
30 |
-
PATH.\n\nFor macOS and Linux:\n\n- Open a terminal window.\n- Move the geckodriver executable to a directory that is
|
31 |
-
already in your PATH, or create a new directory and add it to your PATH:\n\n```bash\n# Example: Move geckodriver to
|
32 |
-
/usr/local/bin\nmv /path/to/your/geckodriver /usr/local/bin\n```\n\n- If you created a new directory, add it to your
|
33 |
-
PATH:\n\n```bash\n# Example: Add a new directory to PATH\nexport PATH=$PATH:/path/to/your/directory\n```\n\nFor
|
34 |
-
Windows:\n\n- Right-click on "My Computer" or "This PC" and select "Properties".\n- Click on "Advanced system
|
35 |
-
settings".\n- Click on the "Environment Variables" button.\n- In the "System variables" section, find the "Path"
|
36 |
-
variable, select it, and click "Edit".\n- Click "New" and add the path to the directory containing the geckodriver
|
37 |
-
executable.\n\nAfter adding the geckodriver to your PATH, restart your terminal or command prompt and try running
|
38 |
-
your script again. The error should be resolved.'''
|
39 |
-
|
40 |
-
# from twocaptcha import TwoCaptcha
|
41 |
-
# solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')
|
42 |
-
|
43 |
-
MODELS = {
|
44 |
-
'Sage': 'capybara',
|
45 |
-
'GPT-4': 'beaver',
|
46 |
-
'Claude+': 'a2_2',
|
47 |
-
'Claude-instant': 'a2',
|
48 |
-
'ChatGPT': 'chinchilla',
|
49 |
-
'Dragonfly': 'nutria',
|
50 |
-
'NeevaAI': 'hutia',
|
51 |
-
}
|
52 |
-
|
53 |
-
|
54 |
-
def extract_formkey(html):
|
55 |
-
script_regex = r'<script>if\(.+\)throw new Error;(.+)</script>'
|
56 |
-
script_text = search(script_regex, html).group(1)
|
57 |
-
key_regex = r'var .="([0-9a-f]+)",'
|
58 |
-
key_text = search(key_regex, script_text).group(1)
|
59 |
-
cipher_regex = r'.\[(\d+)\]=.\[(\d+)\]'
|
60 |
-
cipher_pairs = findall(cipher_regex, script_text)
|
61 |
-
|
62 |
-
formkey_list = [''] * len(cipher_pairs)
|
63 |
-
for pair in cipher_pairs:
|
64 |
-
formkey_index, key_index = map(int, pair)
|
65 |
-
formkey_list[formkey_index] = key_text[key_index]
|
66 |
-
formkey = ''.join(formkey_list)
|
67 |
-
|
68 |
-
return formkey
|
69 |
-
|
70 |
-
|
71 |
-
class Choice(BaseModel):
|
72 |
-
text: str
|
73 |
-
index: int
|
74 |
-
logprobs: Any
|
75 |
-
finish_reason: str
|
76 |
-
|
77 |
-
|
78 |
-
class Usage(BaseModel):
|
79 |
-
prompt_tokens: int
|
80 |
-
completion_tokens: int
|
81 |
-
total_tokens: int
|
82 |
-
|
83 |
-
|
84 |
-
class PoeResponse(BaseModel):
|
85 |
-
id: int
|
86 |
-
object: str
|
87 |
-
created: int
|
88 |
-
model: str
|
89 |
-
choices: List[Choice]
|
90 |
-
usage: Usage
|
91 |
-
text: str
|
92 |
-
|
93 |
-
|
94 |
-
class ModelResponse:
|
95 |
-
def __init__(self, json_response: dict) -> None:
|
96 |
-
self.id = json_response['data']['poeBotCreate']['bot']['id']
|
97 |
-
self.name = json_response['data']['poeBotCreate']['bot']['displayName']
|
98 |
-
self.limit = json_response['data']['poeBotCreate']['bot']['messageLimit']['dailyLimit']
|
99 |
-
self.deleted = json_response['data']['poeBotCreate']['bot']['deletionState']
|
100 |
-
|
101 |
-
|
102 |
-
class Model:
|
103 |
-
@staticmethod
|
104 |
-
def create(
|
105 |
-
token: str,
|
106 |
-
model: str = 'gpt-3.5-turbo', # claude-instant
|
107 |
-
system_prompt: str = 'You are ChatGPT a large language model. Answer as consisely as possible',
|
108 |
-
description: str = 'gpt-3.5 language model',
|
109 |
-
handle: str = None,
|
110 |
-
) -> ModelResponse:
|
111 |
-
if not handle:
|
112 |
-
handle = f'gptx{randint(1111111, 9999999)}'
|
113 |
-
|
114 |
-
client = Session()
|
115 |
-
client.cookies['p-b'] = token
|
116 |
-
|
117 |
-
formkey = extract_formkey(client.get('https://poe.com').text)
|
118 |
-
settings = client.get('https://poe.com/api/settings').json()
|
119 |
-
|
120 |
-
client.headers = {
|
121 |
-
'host': 'poe.com',
|
122 |
-
'origin': 'https://poe.com',
|
123 |
-
'referer': 'https://poe.com/',
|
124 |
-
'poe-formkey': formkey,
|
125 |
-
'poe-tchannel': settings['tchannelData']['channel'],
|
126 |
-
'user-agent': UserAgent().random,
|
127 |
-
'connection': 'keep-alive',
|
128 |
-
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
|
129 |
-
'sec-ch-ua-mobile': '?0',
|
130 |
-
'sec-ch-ua-platform': '"macOS"',
|
131 |
-
'content-type': 'application/json',
|
132 |
-
'sec-fetch-site': 'same-origin',
|
133 |
-
'sec-fetch-mode': 'cors',
|
134 |
-
'sec-fetch-dest': 'empty',
|
135 |
-
'accept': '*/*',
|
136 |
-
'accept-encoding': 'gzip, deflate, br',
|
137 |
-
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
|
138 |
-
}
|
139 |
-
|
140 |
-
payload = dumps(
|
141 |
-
separators=(',', ':'),
|
142 |
-
obj={
|
143 |
-
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
|
144 |
-
'variables': {
|
145 |
-
'model': MODELS[model],
|
146 |
-
'handle': handle,
|
147 |
-
'prompt': system_prompt,
|
148 |
-
'isPromptPublic': True,
|
149 |
-
'introduction': '',
|
150 |
-
'description': description,
|
151 |
-
'profilePictureUrl': 'https://qph.fs.quoracdn.net/main-qimg-24e0b480dcd946e1cc6728802c5128b6',
|
152 |
-
'apiUrl': None,
|
153 |
-
'apiKey': ''.join(choices(ascii_letters + digits, k=32)),
|
154 |
-
'isApiBot': False,
|
155 |
-
'hasLinkification': False,
|
156 |
-
'hasMarkdownRendering': False,
|
157 |
-
'hasSuggestedReplies': False,
|
158 |
-
'isPrivateBot': False,
|
159 |
-
},
|
160 |
-
'query': 'mutation CreateBotMain_poeBotCreate_Mutation(\n $model: String!\n $handle: String!\n $prompt: String!\n $isPromptPublic: Boolean!\n $introduction: String!\n $description: String!\n $profilePictureUrl: String\n $apiUrl: String\n $apiKey: String\n $isApiBot: Boolean\n $hasLinkification: Boolean\n $hasMarkdownRendering: Boolean\n $hasSuggestedReplies: Boolean\n $isPrivateBot: Boolean\n) {\n poeBotCreate(model: $model, handle: $handle, promptPlaintext: $prompt, isPromptPublic: $isPromptPublic, introduction: $introduction, description: $description, profilePicture: $profilePictureUrl, apiUrl: $apiUrl, apiKey: $apiKey, isApiBot: $isApiBot, hasLinkification: $hasLinkification, hasMarkdownRendering: $hasMarkdownRendering, hasSuggestedReplies: $hasSuggestedReplies, isPrivateBot: $isPrivateBot) {\n status\n bot {\n id\n ...BotHeader_bot\n }\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n messageLimit {\n dailyLimit\n }\n ...BotImage_bot\n ...BotLink_bot\n ...IdAnnotation_node\n ...botHelpers_useViewerCanAccessPrivateBot\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotImage_bot on Bot {\n displayName\n ...botHelpers_useDeletion_bot\n ...BotImage_useProfileImage_bot\n}\n\nfragment BotImage_useProfileImage_bot on Bot {\n image {\n __typename\n ... on LocalBotImage {\n localName\n }\n ... on UrlBotImage {\n url\n }\n }\n ...botHelpers_useDeletion_bot\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment IdAnnotation_node on Node {\n __isNode: __typename\n id\n}\n\nfragment botHelpers_useDeletion_bot on Bot {\n deletionState\n}\n\nfragment botHelpers_useViewerCanAccessPrivateBot on Bot {\n isPrivateBot\n viewerIsCreator\n}\n',
|
161 |
-
},
|
162 |
-
)
|
163 |
-
|
164 |
-
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
165 |
-
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
166 |
-
|
167 |
-
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
168 |
-
|
169 |
-
if 'success' not in response.text:
|
170 |
-
raise Exception(
|
171 |
-
'''
|
172 |
-
Bot creation Failed
|
173 |
-
!! Important !!
|
174 |
-
Bot creation was not enabled on this account
|
175 |
-
please use: quora.Account.create with enable_bot_creation set to True
|
176 |
-
'''
|
177 |
-
)
|
178 |
-
|
179 |
-
return ModelResponse(response.json())
|
180 |
-
|
181 |
-
|
182 |
-
class Account:
|
183 |
-
@staticmethod
|
184 |
-
def create(
|
185 |
-
proxy: Optional[str] = None,
|
186 |
-
logging: bool = False,
|
187 |
-
enable_bot_creation: bool = False,
|
188 |
-
):
|
189 |
-
client = TLS(client_identifier='chrome110')
|
190 |
-
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {}
|
191 |
-
|
192 |
-
mail_client = Emailnator()
|
193 |
-
mail_address = mail_client.get_mail()
|
194 |
-
|
195 |
-
if logging:
|
196 |
-
print('email', mail_address)
|
197 |
-
|
198 |
-
client.headers = {
|
199 |
-
'authority': 'poe.com',
|
200 |
-
'accept': '*/*',
|
201 |
-
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
202 |
-
'content-type': 'application/json',
|
203 |
-
'origin': 'https://poe.com',
|
204 |
-
'poe-tag-id': 'null',
|
205 |
-
'referer': 'https://poe.com/login',
|
206 |
-
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
|
207 |
-
'sec-ch-ua-mobile': '?0',
|
208 |
-
'sec-ch-ua-platform': '"macOS"',
|
209 |
-
'sec-fetch-dest': 'empty',
|
210 |
-
'sec-fetch-mode': 'cors',
|
211 |
-
'sec-fetch-site': 'same-origin',
|
212 |
-
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
|
213 |
-
'poe-formkey': extract_formkey(client.get('https://poe.com/login').text),
|
214 |
-
'poe-tchannel': client.get('https://poe.com/api/settings').json()['tchannelData']['channel'],
|
215 |
-
}
|
216 |
-
|
217 |
-
token = reCaptchaV3(
|
218 |
-
'https://www.recaptcha.net/recaptcha/enterprise/anchor?ar=1&k=6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG&co=aHR0cHM6Ly9wb2UuY29tOjQ0Mw..&hl=en&v=4PnKmGB9wRHh1i04o7YUICeI&size=invisible&cb=bi6ivxoskyal'
|
219 |
-
)
|
220 |
-
# token = solver.recaptcha(sitekey='6LflhEElAAAAAI_ewVwRWI9hsyV4mbZnYAslSvlG',
|
221 |
-
# url = 'https://poe.com/login?redirect_url=%2F',
|
222 |
-
# version = 'v3',
|
223 |
-
# enterprise = 1,
|
224 |
-
# invisible = 1,
|
225 |
-
# action = 'login',)['code']
|
226 |
-
|
227 |
-
payload = dumps(
|
228 |
-
separators=(',', ':'),
|
229 |
-
obj={
|
230 |
-
'queryName': 'MainSignupLoginSection_sendVerificationCodeMutation_Mutation',
|
231 |
-
'variables': {
|
232 |
-
'emailAddress': mail_address,
|
233 |
-
'phoneNumber': None,
|
234 |
-
'recaptchaToken': token,
|
235 |
-
},
|
236 |
-
'query': 'mutation MainSignupLoginSection_sendVerificationCodeMutation_Mutation(\n $emailAddress: String\n $phoneNumber: String\n $recaptchaToken: String\n) {\n sendVerificationCode(verificationReason: login, emailAddress: $emailAddress, phoneNumber: $phoneNumber, recaptchaToken: $recaptchaToken) {\n status\n errorMessage\n }\n}\n',
|
237 |
-
},
|
238 |
-
)
|
239 |
-
|
240 |
-
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
241 |
-
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
242 |
-
|
243 |
-
print(dumps(client.headers, indent=4))
|
244 |
-
|
245 |
-
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
246 |
-
|
247 |
-
if 'automated_request_detected' in response.text:
|
248 |
-
print('please try using a proxy / wait for fix')
|
249 |
-
|
250 |
-
if 'Bad Request' in response.text:
|
251 |
-
if logging:
|
252 |
-
print('bad request, retrying...', response.json())
|
253 |
-
quit()
|
254 |
-
|
255 |
-
if logging:
|
256 |
-
print('send_code', response.json())
|
257 |
-
|
258 |
-
mail_content = mail_client.get_message()
|
259 |
-
mail_token = findall(r';">(\d{6,7})</div>', mail_content)[0]
|
260 |
-
|
261 |
-
if logging:
|
262 |
-
print('code', mail_token)
|
263 |
-
|
264 |
-
payload = dumps(
|
265 |
-
separators=(',', ':'),
|
266 |
-
obj={
|
267 |
-
'queryName': 'SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation',
|
268 |
-
'variables': {
|
269 |
-
'verificationCode': str(mail_token),
|
270 |
-
'emailAddress': mail_address,
|
271 |
-
'phoneNumber': None,
|
272 |
-
},
|
273 |
-
'query': 'mutation SignupOrLoginWithCodeSection_signupWithVerificationCodeMutation_Mutation(\n $verificationCode: String!\n $emailAddress: String\n $phoneNumber: String\n) {\n signupWithVerificationCode(verificationCode: $verificationCode, emailAddress: $emailAddress, phoneNumber: $phoneNumber) {\n status\n errorMessage\n }\n}\n',
|
274 |
-
},
|
275 |
-
)
|
276 |
-
|
277 |
-
base_string = payload + client.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
|
278 |
-
client.headers['poe-tag-id'] = md5(base_string.encode()).hexdigest()
|
279 |
-
|
280 |
-
response = client.post('https://poe.com/api/gql_POST', data=payload)
|
281 |
-
if logging:
|
282 |
-
print('verify_code', response.json())
|
283 |
-
|
284 |
-
def get(self):
|
285 |
-
cookies = open(Path(__file__).resolve().parent / 'cookies.txt', 'r').read().splitlines()
|
286 |
-
return choice(cookies)
|
287 |
-
|
288 |
-
@staticmethod
|
289 |
-
def delete(token: str, proxy: Optional[str] = None):
|
290 |
-
client = PoeClient(token, proxy=proxy)
|
291 |
-
client.delete_account()
|
292 |
-
|
293 |
-
|
294 |
-
class StreamingCompletion:
|
295 |
-
@staticmethod
|
296 |
-
def create(
|
297 |
-
model: str = 'gpt-4',
|
298 |
-
custom_model: bool = None,
|
299 |
-
prompt: str = 'hello world',
|
300 |
-
token: str = '',
|
301 |
-
proxy: Optional[str] = None,
|
302 |
-
) -> Generator[PoeResponse, None, None]:
|
303 |
-
_model = MODELS[model] if not custom_model else custom_model
|
304 |
-
|
305 |
-
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
|
306 |
-
client = PoeClient(token)
|
307 |
-
client.proxy = proxies
|
308 |
-
|
309 |
-
for chunk in client.send_message(_model, prompt):
|
310 |
-
yield PoeResponse(
|
311 |
-
**{
|
312 |
-
'id': chunk['messageId'],
|
313 |
-
'object': 'text_completion',
|
314 |
-
'created': chunk['creationTime'],
|
315 |
-
'model': _model,
|
316 |
-
'text': chunk['text_new'],
|
317 |
-
'choices': [
|
318 |
-
{
|
319 |
-
'text': chunk['text_new'],
|
320 |
-
'index': 0,
|
321 |
-
'logprobs': None,
|
322 |
-
'finish_reason': 'stop',
|
323 |
-
}
|
324 |
-
],
|
325 |
-
'usage': {
|
326 |
-
'prompt_tokens': len(prompt),
|
327 |
-
'completion_tokens': len(chunk['text_new']),
|
328 |
-
'total_tokens': len(prompt) + len(chunk['text_new']),
|
329 |
-
},
|
330 |
-
}
|
331 |
-
)
|
332 |
-
|
333 |
-
|
334 |
-
class Completion:
|
335 |
-
@staticmethod
|
336 |
-
def create(
|
337 |
-
model: str = 'gpt-4',
|
338 |
-
custom_model: str = None,
|
339 |
-
prompt: str = 'hello world',
|
340 |
-
token: str = '',
|
341 |
-
proxy: Optional[str] = None,
|
342 |
-
) -> PoeResponse:
|
343 |
-
_model = MODELS[model] if not custom_model else custom_model
|
344 |
-
|
345 |
-
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
|
346 |
-
client = PoeClient(token)
|
347 |
-
client.proxy = proxies
|
348 |
-
|
349 |
-
chunk = None
|
350 |
-
for response in client.send_message(_model, prompt):
|
351 |
-
chunk = response
|
352 |
-
|
353 |
-
return PoeResponse(
|
354 |
-
**{
|
355 |
-
'id': chunk['messageId'],
|
356 |
-
'object': 'text_completion',
|
357 |
-
'created': chunk['creationTime'],
|
358 |
-
'model': _model,
|
359 |
-
'text': chunk['text'],
|
360 |
-
'choices': [
|
361 |
-
{
|
362 |
-
'text': chunk['text'],
|
363 |
-
'index': 0,
|
364 |
-
'logprobs': None,
|
365 |
-
'finish_reason': 'stop',
|
366 |
-
}
|
367 |
-
],
|
368 |
-
'usage': {
|
369 |
-
'prompt_tokens': len(prompt),
|
370 |
-
'completion_tokens': len(chunk['text']),
|
371 |
-
'total_tokens': len(prompt) + len(chunk['text']),
|
372 |
-
},
|
373 |
-
}
|
374 |
-
)
|
375 |
-
|
376 |
-
|
377 |
-
class Poe:
|
378 |
-
def __init__(
|
379 |
-
self,
|
380 |
-
model: str = 'ChatGPT',
|
381 |
-
driver: str = 'firefox',
|
382 |
-
download_driver: bool = False,
|
383 |
-
driver_path: Optional[str] = None,
|
384 |
-
cookie_path: str = './quora/cookie.json',
|
385 |
-
):
|
386 |
-
# validating the model
|
387 |
-
if model and model not in MODELS:
|
388 |
-
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
|
389 |
-
self.model = MODELS[model]
|
390 |
-
self.cookie_path = cookie_path
|
391 |
-
self.cookie = self.__load_cookie(driver, driver_path=driver_path)
|
392 |
-
self.client = PoeClient(self.cookie)
|
393 |
-
|
394 |
-
def __load_cookie(self, driver: str, driver_path: Optional[str] = None) -> str:
|
395 |
-
if (cookie_file := Path(self.cookie_path)).exists():
|
396 |
-
with cookie_file.open() as fp:
|
397 |
-
cookie = json.load(fp)
|
398 |
-
if datetime.fromtimestamp(cookie['expiry']) < datetime.now():
|
399 |
-
cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)
|
400 |
-
else:
|
401 |
-
print('Loading the cookie from file')
|
402 |
-
else:
|
403 |
-
cookie = self.__register_and_get_cookie(driver, driver_path=driver_path)
|
404 |
-
|
405 |
-
return unquote(cookie['value'])
|
406 |
-
|
407 |
-
def __register_and_get_cookie(self, driver: str, driver_path: Optional[str] = None) -> dict:
|
408 |
-
mail_client = Emailnator()
|
409 |
-
mail_address = mail_client.get_mail()
|
410 |
-
|
411 |
-
driver = self.__resolve_driver(driver, driver_path=driver_path)
|
412 |
-
driver.get("https://www.poe.com")
|
413 |
-
|
414 |
-
# clicking use email button
|
415 |
-
driver.find_element(By.XPATH, '//button[contains(text(), "Use email")]').click()
|
416 |
-
|
417 |
-
email = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//input[@type="email"]')))
|
418 |
-
email.send_keys(mail_address)
|
419 |
-
driver.find_element(By.XPATH, '//button[text()="Go"]').click()
|
420 |
-
|
421 |
-
code = findall(r';">(\d{6,7})</div>', mail_client.get_message())[0]
|
422 |
-
print(code)
|
423 |
-
|
424 |
-
verification_code = WebDriverWait(driver, 30).until(
|
425 |
-
EC.presence_of_element_located((By.XPATH, '//input[@placeholder="Code"]'))
|
426 |
-
)
|
427 |
-
verification_code.send_keys(code)
|
428 |
-
verify_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Verify"]'))
|
429 |
-
login_button = EC.presence_of_element_located((By.XPATH, '//button[text()="Log In"]'))
|
430 |
-
|
431 |
-
WebDriverWait(driver, 30).until(EC.any_of(verify_button, login_button)).click()
|
432 |
-
|
433 |
-
cookie = driver.get_cookie('p-b')
|
434 |
-
|
435 |
-
with open(self.cookie_path, 'w') as fw:
|
436 |
-
json.dump(cookie, fw)
|
437 |
-
|
438 |
-
driver.close()
|
439 |
-
return cookie
|
440 |
-
|
441 |
-
@staticmethod
|
442 |
-
def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
|
443 |
-
options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
|
444 |
-
options.add_argument('-headless')
|
445 |
-
|
446 |
-
if driver_path:
|
447 |
-
options.binary_location = driver_path
|
448 |
-
try:
|
449 |
-
return Firefox(options=options) if driver == 'firefox' else Chrome(options=options)
|
450 |
-
except Exception:
|
451 |
-
raise Exception(SELENIUM_WEB_DRIVER_ERROR_MSG)
|
452 |
-
|
453 |
-
def chat(self, message: str, model: Optional[str] = None) -> str:
|
454 |
-
if model and model not in MODELS:
|
455 |
-
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
|
456 |
-
model = MODELS[model] if model else self.model
|
457 |
-
response = None
|
458 |
-
for chunk in self.client.send_message(model, message):
|
459 |
-
response = chunk['text']
|
460 |
-
return response
|
461 |
-
|
462 |
-
def create_bot(self, name: str, /, prompt: str = '', base_model: str = 'ChatGPT', description: str = '') -> None:
|
463 |
-
if base_model not in MODELS:
|
464 |
-
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
|
465 |
-
|
466 |
-
response = self.client.create_bot(
|
467 |
-
handle=name,
|
468 |
-
prompt=prompt,
|
469 |
-
base_model=MODELS[base_model],
|
470 |
-
description=description,
|
471 |
-
)
|
472 |
-
print(f'Successfully created bot with name: {response["bot"]["displayName"]}')
|
473 |
-
|
474 |
-
def list_bots(self) -> list:
|
475 |
-
return list(self.client.bot_names.values())
|
476 |
-
|
477 |
-
def delete_account(self) -> None:
|
478 |
-
self.client.delete_account()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/123Kumar/vits-uma-genshin-honkai123/text/cleaners.py
DELETED
@@ -1,475 +0,0 @@
|
|
1 |
-
""" from https://github.com/keithito/tacotron """
|
2 |
-
|
3 |
-
'''
|
4 |
-
Cleaners are transformations that run over the input text at both training and eval time.
|
5 |
-
|
6 |
-
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
|
7 |
-
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
|
8 |
-
1. "english_cleaners" for English text
|
9 |
-
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
|
10 |
-
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
|
11 |
-
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
|
12 |
-
the symbols in symbols.py to match your data).
|
13 |
-
'''
|
14 |
-
|
15 |
-
import re
|
16 |
-
from unidecode import unidecode
|
17 |
-
import pyopenjtalk
|
18 |
-
from jamo import h2j, j2hcj
|
19 |
-
from pypinyin import lazy_pinyin, BOPOMOFO
|
20 |
-
import jieba, cn2an
|
21 |
-
|
22 |
-
|
23 |
-
# This is a list of Korean classifiers preceded by pure Korean numerals.
|
24 |
-
_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
|
25 |
-
|
26 |
-
# Regular expression matching whitespace:
|
27 |
-
_whitespace_re = re.compile(r'\s+')
|
28 |
-
|
29 |
-
# Regular expression matching Japanese without punctuation marks:
|
30 |
-
_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
|
31 |
-
|
32 |
-
# Regular expression matching non-Japanese characters or punctuation marks:
|
33 |
-
_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
|
34 |
-
|
35 |
-
# List of (regular expression, replacement) pairs for abbreviations:
|
36 |
-
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
|
37 |
-
('mrs', 'misess'),
|
38 |
-
('mr', 'mister'),
|
39 |
-
('dr', 'doctor'),
|
40 |
-
('st', 'saint'),
|
41 |
-
('co', 'company'),
|
42 |
-
('jr', 'junior'),
|
43 |
-
('maj', 'major'),
|
44 |
-
('gen', 'general'),
|
45 |
-
('drs', 'doctors'),
|
46 |
-
('rev', 'reverend'),
|
47 |
-
('lt', 'lieutenant'),
|
48 |
-
('hon', 'honorable'),
|
49 |
-
('sgt', 'sergeant'),
|
50 |
-
('capt', 'captain'),
|
51 |
-
('esq', 'esquire'),
|
52 |
-
('ltd', 'limited'),
|
53 |
-
('col', 'colonel'),
|
54 |
-
('ft', 'fort'),
|
55 |
-
]]
|
56 |
-
|
57 |
-
# List of (hangul, hangul divided) pairs:
|
58 |
-
_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
|
59 |
-
('ㄳ', 'ㄱㅅ'),
|
60 |
-
('ㄵ', 'ㄴㅈ'),
|
61 |
-
('ㄶ', 'ㄴㅎ'),
|
62 |
-
('ㄺ', 'ㄹㄱ'),
|
63 |
-
('ㄻ', 'ㄹㅁ'),
|
64 |
-
('ㄼ', 'ㄹㅂ'),
|
65 |
-
('ㄽ', 'ㄹㅅ'),
|
66 |
-
('ㄾ', 'ㄹㅌ'),
|
67 |
-
('ㄿ', 'ㄹㅍ'),
|
68 |
-
('ㅀ', 'ㄹㅎ'),
|
69 |
-
('ㅄ', 'ㅂㅅ'),
|
70 |
-
('ㅘ', 'ㅗㅏ'),
|
71 |
-
('ㅙ', 'ㅗㅐ'),
|
72 |
-
('ㅚ', 'ㅗㅣ'),
|
73 |
-
('ㅝ', 'ㅜㅓ'),
|
74 |
-
('ㅞ', 'ㅜㅔ'),
|
75 |
-
('ㅟ', 'ㅜㅣ'),
|
76 |
-
('ㅢ', 'ㅡㅣ'),
|
77 |
-
('ㅑ', 'ㅣㅏ'),
|
78 |
-
('ㅒ', 'ㅣㅐ'),
|
79 |
-
('ㅕ', 'ㅣㅓ'),
|
80 |
-
('ㅖ', 'ㅣㅔ'),
|
81 |
-
('ㅛ', 'ㅣㅗ'),
|
82 |
-
('ㅠ', 'ㅣㅜ')
|
83 |
-
]]
|
84 |
-
|
85 |
-
# List of (Latin alphabet, hangul) pairs:
|
86 |
-
_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
|
87 |
-
('a', '에이'),
|
88 |
-
('b', '비'),
|
89 |
-
('c', '시'),
|
90 |
-
('d', '디'),
|
91 |
-
('e', '이'),
|
92 |
-
('f', '에프'),
|
93 |
-
('g', '지'),
|
94 |
-
('h', '에이치'),
|
95 |
-
('i', '아이'),
|
96 |
-
('j', '제이'),
|
97 |
-
('k', '케이'),
|
98 |
-
('l', '엘'),
|
99 |
-
('m', '엠'),
|
100 |
-
('n', '엔'),
|
101 |
-
('o', '오'),
|
102 |
-
('p', '피'),
|
103 |
-
('q', '큐'),
|
104 |
-
('r', '아르'),
|
105 |
-
('s', '에스'),
|
106 |
-
('t', '티'),
|
107 |
-
('u', '유'),
|
108 |
-
('v', '브이'),
|
109 |
-
('w', '더블유'),
|
110 |
-
('x', '엑스'),
|
111 |
-
('y', '와이'),
|
112 |
-
('z', '제트')
|
113 |
-
]]
|
114 |
-
|
115 |
-
# List of (Latin alphabet, bopomofo) pairs:
|
116 |
-
_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
|
117 |
-
('a', 'ㄟˉ'),
|
118 |
-
('b', 'ㄅㄧˋ'),
|
119 |
-
('c', 'ㄙㄧˉ'),
|
120 |
-
('d', 'ㄉㄧˋ'),
|
121 |
-
('e', 'ㄧˋ'),
|
122 |
-
('f', 'ㄝˊㄈㄨˋ'),
|
123 |
-
('g', 'ㄐㄧˋ'),
|
124 |
-
('h', 'ㄝˇㄑㄩˋ'),
|
125 |
-
('i', 'ㄞˋ'),
|
126 |
-
('j', 'ㄐㄟˋ'),
|
127 |
-
('k', 'ㄎㄟˋ'),
|
128 |
-
('l', 'ㄝˊㄛˋ'),
|
129 |
-
('m', 'ㄝˊㄇㄨˋ'),
|
130 |
-
('n', 'ㄣˉ'),
|
131 |
-
('o', 'ㄡˉ'),
|
132 |
-
('p', 'ㄆㄧˉ'),
|
133 |
-
('q', 'ㄎㄧㄡˉ'),
|
134 |
-
('r', 'ㄚˋ'),
|
135 |
-
('s', 'ㄝˊㄙˋ'),
|
136 |
-
('t', 'ㄊㄧˋ'),
|
137 |
-
('u', 'ㄧㄡˉ'),
|
138 |
-
('v', 'ㄨㄧˉ'),
|
139 |
-
('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
|
140 |
-
('x', 'ㄝˉㄎㄨˋㄙˋ'),
|
141 |
-
('y', 'ㄨㄞˋ'),
|
142 |
-
('z', 'ㄗㄟˋ')
|
143 |
-
]]
|
144 |
-
|
145 |
-
|
146 |
-
# List of (bopomofo, romaji) pairs:
|
147 |
-
_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
|
148 |
-
('ㄅㄛ', 'p⁼wo'),
|
149 |
-
('ㄆㄛ', 'pʰwo'),
|
150 |
-
('ㄇㄛ', 'mwo'),
|
151 |
-
('ㄈㄛ', 'fwo'),
|
152 |
-
('ㄅ', 'p⁼'),
|
153 |
-
('ㄆ', 'pʰ'),
|
154 |
-
('ㄇ', 'm'),
|
155 |
-
('ㄈ', 'f'),
|
156 |
-
('ㄉ', 't⁼'),
|
157 |
-
('ㄊ', 'tʰ'),
|
158 |
-
('ㄋ', 'n'),
|
159 |
-
('ㄌ', 'l'),
|
160 |
-
('ㄍ', 'k⁼'),
|
161 |
-
('ㄎ', 'kʰ'),
|
162 |
-
('ㄏ', 'h'),
|
163 |
-
('ㄐ', 'ʧ⁼'),
|
164 |
-
('ㄑ', 'ʧʰ'),
|
165 |
-
('ㄒ', 'ʃ'),
|
166 |
-
('ㄓ', 'ʦ`⁼'),
|
167 |
-
('ㄔ', 'ʦ`ʰ'),
|
168 |
-
('ㄕ', 's`'),
|
169 |
-
('ㄖ', 'ɹ`'),
|
170 |
-
('ㄗ', 'ʦ⁼'),
|
171 |
-
('ㄘ', 'ʦʰ'),
|
172 |
-
('ㄙ', 's'),
|
173 |
-
('ㄚ', 'a'),
|
174 |
-
('ㄛ', 'o'),
|
175 |
-
('ㄜ', 'ə'),
|
176 |
-
('ㄝ', 'e'),
|
177 |
-
('ㄞ', 'ai'),
|
178 |
-
('ㄟ', 'ei'),
|
179 |
-
('ㄠ', 'au'),
|
180 |
-
('ㄡ', 'ou'),
|
181 |
-
('ㄧㄢ', 'yeNN'),
|
182 |
-
('ㄢ', 'aNN'),
|
183 |
-
('ㄧㄣ', 'iNN'),
|
184 |
-
('ㄣ', 'əNN'),
|
185 |
-
('ㄤ', 'aNg'),
|
186 |
-
('ㄧㄥ', 'iNg'),
|
187 |
-
('ㄨㄥ', 'uNg'),
|
188 |
-
('ㄩㄥ', 'yuNg'),
|
189 |
-
('ㄥ', 'əNg'),
|
190 |
-
('ㄦ', 'əɻ'),
|
191 |
-
('ㄧ', 'i'),
|
192 |
-
('ㄨ', 'u'),
|
193 |
-
('ㄩ', 'ɥ'),
|
194 |
-
('ˉ', '→'),
|
195 |
-
('ˊ', '↑'),
|
196 |
-
('ˇ', '↓↑'),
|
197 |
-
('ˋ', '↓'),
|
198 |
-
('˙', ''),
|
199 |
-
(',', ','),
|
200 |
-
('。', '.'),
|
201 |
-
('!', '!'),
|
202 |
-
('?', '?'),
|
203 |
-
('—', '-')
|
204 |
-
]]
|
205 |
-
|
206 |
-
|
207 |
-
def expand_abbreviations(text):
    """Apply each (pattern, replacement) rule in ``_abbreviations`` to *text* in order."""
    for pattern, expansion in _abbreviations:
        text = re.sub(pattern, expansion, text)
    return text
|
211 |
-
|
212 |
-
|
213 |
-
def lowercase(text):
    """Return *text* with all cased characters converted to lowercase."""
    lowered = text.lower()
    return lowered
|
215 |
-
|
216 |
-
|
217 |
-
def collapse_whitespace(text):
    """Replace each run of whitespace in *text* with a single space."""
    collapsed = re.sub(_whitespace_re, ' ', text)
    return collapsed
|
219 |
-
|
220 |
-
|
221 |
-
def convert_to_ascii(text):
    """Transliterate *text* to its closest ASCII representation via unidecode."""
    ascii_text = unidecode(text)
    return ascii_text
|
223 |
-
|
224 |
-
|
225 |
-
def japanese_to_romaji_with_accent(text):
    '''Convert Japanese text to romaji annotated with pitch-accent marks (↑/↓).

    Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html
    '''
    # Split into accent-bearing sentence chunks and the punctuation between them.
    sentences = re.split(_japanese_marks, text)
    marks = re.findall(_japanese_marks, text)
    text = ''
    for i, sentence in enumerate(sentences):
        if re.match(_japanese_characters, sentence):
            if text!='':
                text+=' '
            # Full-context HTS labels, one per phoneme.
            labels = pyopenjtalk.extract_fullcontext(sentence)
            for n, label in enumerate(labels):
                # The current phoneme sits between '-' and '+' in the label.
                phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
                if phoneme not in ['sil','pau']:
                    # Map romaji digraphs to single symbols (ch->ʧ, sh->ʃ, cl->Q).
                    text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
                else:
                    continue
                # Accent features from the label. NOTE(review): interpretation
                # per the HTS full-context format — n_moras: moras in the accent
                # phrase; a1: position relative to the accent nucleus; a2: mora
                # index in the phrase; a3: moras to the phrase end. Confirm
                # against the pyopenjtalk / HTS label documentation.
                n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
                a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
                a2 = int(re.search(r"\+(\d+)\+", label).group(1))
                a3 = int(re.search(r"\+(\d+)/", label).group(1))
                # Peek at the next label. Assumes the sequence always ends with
                # sil/pau so n + 1 stays in range here — TODO confirm.
                if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
                    a2_next=-1
                else:
                    a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
                # Accent phrase boundary
                if a3 == 1 and a2_next == 1:
                    text += ' '
                # Falling
                elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
                    text += '↓'
                # Rising
                elif a2 == 1 and a2_next == 2:
                    text += '↑'
        if i<len(marks):
            # Re-insert the punctuation that separated the chunks, ASCII-fied.
            text += unidecode(marks[i]).replace(' ','')
    return text
|
261 |
-
|
262 |
-
|
263 |
-
def latin_to_hangul(text):
    """Rewrite Latin letters in *text* using the ``_latin_to_hangul`` rules, in order."""
    for pattern, hangul in _latin_to_hangul:
        text = re.sub(pattern, hangul, text)
    return text
|
267 |
-
|
268 |
-
|
269 |
-
def divide_hangul(text):
    """Apply the ``_hangul_divided`` rewrite rules to *text*, in order."""
    for pattern, divided in _hangul_divided:
        text = re.sub(pattern, divided, text)
    return text
|
273 |
-
|
274 |
-
|
275 |
-
def hangul_number(num, sino=True):
    '''Spell out a decimal number string in Hangul.

    Reference https://github.com/Kyubyong/g2pK

    Args:
        num: the number as a string, possibly with ',' thousands separators.
        sino: if True, use Sino-Korean numerals (일, 이, ...). If False, use
            native Korean numerals (한, 두, ...) for the ones and tens places;
            higher places still fall back to Sino-Korean readings.

    Returns:
        The Hangul spelling of the number. Supports up to 16 digits
        (positions 0-15); longer inputs are out of scope.
    '''
    num = re.sub(',', '', num)

    if num == '0':
        return '영'
    if not sino and num == '20':
        return '스무'

    digits = '123456789'
    names = '일이삼사오육칠팔구'
    digit2name = {d: n for d, n in zip(digits, names)}

    # Native-Korean forms: ones place (한, 두, ...) and tens place (열, 스물, ...).
    modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
    decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
    digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
    digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}

    # Place-value suffix per digit position (counted from the right), plus
    # whether a leading '일' is elided (일백 -> 백). NOTE: positions >= 9
    # deliberately do NOT elide '일', matching the reference implementation's
    # behaviour (e.g. '일십억' rather than '십억').
    place_suffixes = {
        2: ('백', True), 3: ('천', True), 4: ('만', True),
        5: ('십', True), 6: ('백', True), 7: ('천', True),
        8: ('억', False), 9: ('십', False), 10: ('백', False),
        11: ('천', False), 12: ('조', False), 13: ('십', False),
        14: ('백', False), 15: ('천', False),
    }

    spelledout = []
    for i, digit in enumerate(num):
        i = len(num) - i - 1  # position from the right (0 = ones place)
        if sino:
            if i == 0:
                name = digit2name.get(digit, '')
            elif i == 1:
                name = digit2name.get(digit, '') + '십'
                name = name.replace('일십', '십')
        else:
            if i == 0:
                name = digit2mod.get(digit, '')
            elif i == 1:
                name = digit2dec.get(digit, '')
        if digit == '0':
            if i % 4 == 0:
                # At a group boundary (만/억/조), keep the place marker only if
                # the group actually contained a non-zero digit.
                last_three = spelledout[-min(3, len(spelledout)):]
                if ''.join(last_three) == '':
                    spelledout.append('')
                    continue
            else:
                spelledout.append('')
                continue
        if i in place_suffixes:
            suffix, elide = place_suffixes[i]
            name = digit2name.get(digit, '') + suffix
            if elide:
                name = name.replace('일' + suffix, suffix)
        spelledout.append(name)
    return ''.join(spelledout)
|
352 |
-
|
353 |
-
|
354 |
-
def number_to_hangul(text):
    '''Reference https://github.com/Kyubyong/g2pK'''
    # Numbers followed by a Hangul classifier: spell the number out in full,
    # choosing native Korean numerals when the classifier calls for them.
    for num, classifier in set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)):
        use_native = classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers
        spelledout = hangul_number(num, sino=not use_native)
        text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
    # Any digits that remain are read digit by digit.
    for arabic, hangul in zip('0123456789', '영일이삼사오육칠팔구'):
        text = text.replace(arabic, hangul)
    return text
|
370 |
-
|
371 |
-
|
372 |
-
def number_to_chinese(text):
    """Replace each Arabic-numeral run in *text* with its Chinese reading."""
    for number in re.findall(r'\d+(?:\.?\d+)?', text):
        text = text.replace(number, cn2an.an2cn(number), 1)
    return text
|
377 |
-
|
378 |
-
|
379 |
-
def chinese_to_bopomofo(text):
    '''Convert Chinese text to space-separated bopomofo syllable groups.'''
    # Normalize enumeration comma / semicolon / colon to a plain Chinese comma.
    text=text.replace('、',',').replace(';',',').replace(':',',')
    # Segment into words so pinyin lookup can use word context.
    words=jieba.lcut(text,cut_all=False)
    text=''
    for word in words:
        # BOPOMOFO is presumably pypinyin's Style.BOPOMOFO — confirm at the import site.
        bopomofos=lazy_pinyin(word,BOPOMOFO)
        if not re.search('[\u4e00-\u9fff]',word):
            # Non-Han tokens (punctuation, Latin, digits) pass through unchanged.
            text+=word
            continue
        for i in range(len(bopomofos)):
            # A syllable ending in a bopomofo letter carries the unwritten first
            # tone; make it explicit with 'ˉ' so later tone mapping sees it.
            if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
                bopomofos[i]+='ˉ'
        if text!='':
            text+=' '
        text+=''.join(bopomofos)
    return text
|
395 |
-
|
396 |
-
|
397 |
-
def latin_to_bopomofo(text):
    """Rewrite Latin letters in *text* as bopomofo readings via ``_latin_to_bopomofo``."""
    for pattern, bopomofo in _latin_to_bopomofo:
        text = re.sub(pattern, bopomofo, text)
    return text
|
401 |
-
|
402 |
-
|
403 |
-
def bopomofo_to_romaji(text):
    """Rewrite bopomofo symbols in *text* as romaji via ``_bopomofo_to_romaji``."""
    for pattern, romaji in _bopomofo_to_romaji:
        text = re.sub(pattern, romaji, text)
    return text
|
407 |
-
|
408 |
-
|
409 |
-
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    return collapse_whitespace(lowercase(text))
|
414 |
-
|
415 |
-
|
416 |
-
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    return collapse_whitespace(lowercase(convert_to_ascii(text)))
|
422 |
-
|
423 |
-
|
424 |
-
def japanese_cleaners(text):
    '''Pipeline for Japanese text: romaji with accent marks, terminated with '.'.

    Appends '.' when the result ends in a bare Latin letter so downstream
    consumers always see a sentence terminator.
    '''
    text = japanese_to_romaji_with_accent(text)
    # Guard the empty case: text[-1] would raise IndexError on ''.
    if text != '' and re.match('[A-Za-z]', text[-1]):
        text += '.'
    return text
|
429 |
-
|
430 |
-
|
431 |
-
def japanese_cleaners2(text):
    """Like ``japanese_cleaners`` but maps 'ts' -> 'ʦ' and '...' -> '…'."""
    cleaned = japanese_cleaners(text)
    return cleaned.replace('ts', 'ʦ').replace('...', '…')
|
433 |
-
|
434 |
-
|
435 |
-
def korean_cleaners(text):
    '''Pipeline for Korean text: hangulize Latin, spell numbers, split jamo.'''
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    # Decompose syllable blocks into compatibility jamo.
    text = j2hcj(h2j(text))
    text = divide_hangul(text)
    # Guard the empty case: text[-1] would raise IndexError on ''.
    if text != '' and re.match('[\u3131-\u3163]', text[-1]):
        text += '.'
    return text
|
444 |
-
|
445 |
-
|
446 |
-
def chinese_cleaners(text):
    '''Pipeline for Chinese text: numbers, bopomofo, and a terminating '。'.'''
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    # Guard the empty case: text[-1] would raise IndexError on ''.
    if text != '' and re.match('[ˉˊˇˋ˙]', text[-1]):
        text += '。'
    return text
|
454 |
-
|
455 |
-
|
456 |
-
def zh_ja_mixture_cleaners(text):
    '''Pipeline for mixed text with [ZH]...[ZH] and [JA]...[JA] tagged segments.

    Each tagged segment is cleaned with the corresponding language pipeline and
    replaced in place (followed by a separator space); the result is terminated
    with '.' when it ends in a letter or accent symbol.
    '''
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = number_to_chinese(chinese_text[4:-4])
        cleaned_text = chinese_to_bopomofo(cleaned_text)
        cleaned_text = latin_to_bopomofo(cleaned_text)
        cleaned_text = bopomofo_to_romaji(cleaned_text)
        # Medial glide adjustments: i+vowel -> y..., u+vowel -> w...
        cleaned_text = re.sub('i[aoe]', lambda x: 'y' + x.group(0)[1:], cleaned_text)
        cleaned_text = re.sub('u[aoəe]', lambda x: 'w' + x.group(0)[1:], cleaned_text)
        # Insert the syllabic approximant after retroflex / dental sibilants
        # that are immediately followed by a tone arrow.
        cleaned_text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)', lambda x: x.group(1) + 'ɹ`' + x.group(2), cleaned_text).replace('ɻ', 'ɹ`')
        cleaned_text = re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)', lambda x: x.group(1) + 'ɹ' + x.group(2), cleaned_text)
        text = text.replace(chinese_text, cleaned_text + ' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text + ' ', 1)
    # The original unconditionally chopped the last character, which truncated
    # untagged input and raised IndexError on ''. Only drop the trailing
    # separator space added above.
    if text != '' and text[-1] == ' ':
        text = text[:-1]
    if text != '' and re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
        text += '.'
    return text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Aaja Nachle Eng Sub [CRACKED] Free Downloa.md
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>watch aaja nachle eng sub free download hindi movie on internet for free at hindiganadownload.com. as hindiganadownload.com is an best online website to download movies in hd quality. you can stream latest bollywood movies here on your mobile, pc, tablet. </p>
|
3 |
-
<h2>Aaja Nachle Eng Sub Free Downloa</h2><br /><p><b><b>Download File</b> ✔ <a href="https://imgfil.com/2uxZu0">https://imgfil.com/2uxZu0</a></b></p><br /><br />
|
4 |
-
<p>download aaja nachle (2007) full movie in in 480p, 720p, 1080p quality. the short story of this movies is dias dance teacher is dying. she returns to the town where she learnt to live and dance and most importantly to save the endangered ajanta theatre. this movie is based on drama, family, music and available in hindi.</p>
|
5 |
-
<p>aaja nachle free download full hd, 720p 720, 1080p 1080, mp3 audio songs, mp3 songs, divx songs, top star mp3 songs, mayank mp3 songs, kareena kapoor mp3 songs, hrithik mp3 songs, akshay kumar mp3 songs, katrina kaif mp3 songs, katrina kaif, twinkle twinkle song,aravind saagar</p>
|
6 |
-
<p>watch aaja nachle (2007) full movie. aaja nachle mp3 song. aajnachle free download full movie 1080p & 720p, aaja nachle free full movie 1080p, 720p 720, music player free, music songs, playlist track, playlist, music albums, facebook,download, tagalog hindi aajnachle - aaja nachle (2007) full movie. mp3 song. aaja nachle free full movie 1080p & 720p, aaja nachle free full movie 1080p, 720p 720, music player free, music songs, playlist track, playlist, music albums, facebook,download, tagalog hindi aajnachle - aaja nachle (2007) full movie.</p>
|
7 |
-
<p>aaja nachle full movie download 720p, 1080p, download aaja nachle full movie 1080p, aaja nachle full movie 1080p, download aaja nachle full movie 720p, download aaja nachle full movie 1080p 720p, download aaja nachle full movie 720p, download aaja nachle full movie 1080p, download aaja nachle full movie 720p free download, download aaja nachle full movie 720p, download aaja nachle full movie 1080p 720p free download,download aaja nachle full movie free download 720p, download aaja nachle full movie free 720p 1080p, download aaja nachle full movie free 720p 1080p, download aaja nachle full movie free 720p 1080p </p>
|
8 |
-
<p></p> 899543212b<br />
|
9 |
-
<br />
|
10 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Amada Ap100 Software Crack 28.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
<h2>amada ap100 software crack 28</h2><br /><p><b><b>Download Zip</b> ⇒ <a href="https://imgfil.com/2uxYkR">https://imgfil.com/2uxYkR</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Amada Ap100 Software Crack 28 amada software, amada software support, amada software training, amada software download, amada . Amada Ap100 - free download program for recording.
|
4 |
-
Amada Ap100 Software 8a78ff9644<br />
|
5 |
-
<br />
|
6 |
-
<br />
|
7 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Baka Loader 1.4.md
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Baka Loader 1.4: A Review</h1>
|
3 |
-
<p>If you are looking for a tool that can help you convert DIB files, enhance your graphics, and use shaders and effects, you might want to check out Baka Loader 1.4. This is a software application that is developed by Windows Software Developer and is part of the Convertdib program. In this article, we will review what Baka Loader 1.4 is, how it works, and what are its advantages and disadvantages.</p>
|
4 |
-
<h2>What is Baka Loader 1.4?</h2>
|
5 |
-
<p>Baka Loader 1.4 is an executable file that runs on your PC and allows you to convert DIB files to other formats, such as BMP, JPG, PNG, etc. DIB files are device-independent bitmap files that are used to store graphics data. They are often used by Windows applications and games, but they are not compatible with some other programs or devices. Baka Loader 1.4 can help you convert DIB files to more common formats that can be opened by other software or hardware.</p>
|
6 |
-
<h2>Baka Loader 1.4</h2><br /><p><b><b>Download File</b> ››››› <a href="https://imgfil.com/2uy024">https://imgfil.com/2uy024</a></b></p><br /><br />
|
7 |
-
<p>Besides converting DIB files, Baka Loader 1.4 also lets you enhance your graphics by using shaders and effects. Shaders are programs that run on your graphics card and modify the appearance of your images or animations. Effects are visual features that add realism or style to your graphics, such as lighting, shadows, reflections, etc. Baka Loader 1.4 has plenty of shaders and effects built-in and online for free. You can download them from the internet and apply them to your DIB files or other graphics files.</p>
|
8 |
-
<h2>How does Baka Loader 1.4 work?</h2>
|
9 |
-
<p>To use Baka Loader 1.4, you need to download it from the internet and install it on your PC. The installation process is simple and fast, and it does not require any special skills or knowledge. Once you have installed Baka Loader 1.4, you can run it by double-clicking on the baka.loader.exe file in your program folder.</p>
|
10 |
-
<p>When you run Baka Loader 1.4, you will see a pop-up screen that shows its package name, version, the Chinese vendor name and the symbol of the app. You will also see a menu bar with several options, such as File, Edit, View, Tools, Help, etc. You can use these options to open, save, edit, view, convert, apply shaders and effects, and get help for your DIB files or other graphics files.</p>
|
11 |
-
<p>To convert a DIB file to another format, you need to open it with Baka Loader 1.4 by clicking on File -> Open or by dragging and dropping it into the app window. Then you need to choose the output format from the drop-down list at the bottom of the app window. You can also adjust some settings for the output file, such as quality, size, compression, etc. Then you need to click on File -> Save As or press Ctrl+S to save the converted file in your desired location.</p>
|
12 |
-
<p>To apply shaders and effects to a DIB file or another graphics file, you need to open it with Baka Loader 1.4 as well. Then you need to click on Tools -> Shader Library or press Ctrl+L to open the shader library window. Here you can see a list of available shaders and effects that you can download from the internet or use from your local folder. You can preview each shader or effect by clicking on it and see how it changes the appearance of your file in the app window. You can also adjust some parameters for each shader or effect by using the sliders or checkboxes below the preview window.</p>
|
13 |
-
<p>Once you have chosen the shader or effect that you want to apply to your file, you need to click on Apply or press Enter to confirm your choice. You will see a progress bar showing how long it takes to apply the shader or effect to your file. When it is done, you can save the modified file by clicking on File -> Save As or pressing Ctrl+S.</p>
|
14 |
-
<h2>What are the advantages and disadvantages of Baka Loader 1.4?</h2>
|
15 |
-
<p>Baka Loader 1.4 has some advantages and disadvantages that you should consider before using it.</p>
|
16 |
-
<p></p>
|
17 |
-
<p>Some of the advantages of Baka Loader 1.4 are:</p>
|
18 |
-
<ul>
|
19 |
-
<li>It is free and easy to use.</li>
|
20 |
-
<li>It can convert DIB files to other formats quickly and easily.</li>
|
21 |
-
<li>It can enhance your graphics by using shaders and effects.</li>
|
22 |
-
<li>It has a large collection of shaders and effects that you can download from the internet or use from your local folder.</li>
|
23 |
-
<li>It supports multiple languages, such as English, Chinese, Japanese, etc.</li>
|
24 |
-
</ul>
|
25 |
-
<p>Some of the disadvantages of Baka Loader 1.4 are:</p>
|
26 |
-
<ul>
|
27 |
-
<li>It may not be compatible with some Windows versions or devices.</li>
|
28 |
-
<li>It may not be updated regularly or have technical support.</li>
|
29 |
-
<li>It may contain some bugs or errors that may affect its performance or functionality.</li>
|
30 |
-
<li>It may not be safe or secure to download from some sources or websites.</li>
|
31 |
-
<li>It may not be able to convert some DIB files or other graphics files due to their size or format.</li>
|
32 |
-
</ul>
|
33 |
-
<h2>Conclusion</h2>
|
34 |
-
<p>Baka Loader 1.4 is a software application that can help you convert DIB files to other formats and enhance your graphics by using shaders and effects. It is free and easy to use, but it may also have some drawbacks that you should be aware of before using it.</p>
|
35 |
-
<p>If you want to try Baka Loader 1.4 for yourself, you can download it from this link: <a href="http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4">http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4</a></p>
|
36 |
-
<h2>How to download and install Baka Loader 1.4?</h2>
|
37 |
-
<p>If you want to download and install Baka Loader 1.4 on your PC, you need to follow these steps:</p>
|
38 |
-
<ol>
|
39 |
-
<li>Go to this link: <a href="http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4">http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4</a> and click on the download button.</li>
|
40 |
-
<li>Wait for the download to finish and then open the downloaded file.</li>
|
41 |
-
<li>Follow the instructions on the screen to install Baka Loader 1.4 on your PC.</li>
|
42 |
-
<li>Choose the destination folder where you want to install Baka Loader 1.4 and click on Next.</li>
|
43 |
-
<li>Wait for the installation to complete and then click on Finish.</li>
|
44 |
-
<li>You can now run Baka Loader 1.4 by double-clicking on the baka.loader.exe file in your program folder.</li>
|
45 |
-
</ol>
|
46 |
-
<h2>What are some alternatives to Baka Loader 1.4?</h2>
|
47 |
-
<p>Baka Loader 1.4 is not the only tool that can help you convert DIB files and use shaders and effects. There are some other alternatives that you can try if you are not satisfied with Baka Loader 1.4 or if you want to compare different options. Here are some of them:</p>
|
48 |
-
<ul>
|
49 |
-
<li><a href="https://www.xnview.com/en/xnconvert/">XnConvert</a>: This is a powerful and free image converter that supports over 500 formats, including DIB files. You can also apply various filters and effects to your images with this tool.</li>
|
50 |
-
<li><a href="https://www.irfanview.com/">IrfanView</a>: This is a fast and compact image viewer and editor that can also convert DIB files to other formats. You can also use plugins to add more features and functions to this tool.</li>
|
51 |
-
<li><a href="https://www.gimp.org/">GIMP</a>: This is a free and open source image editor that can handle DIB files and many other formats. You can also use various tools and plugins to enhance your graphics with this tool.</li>
|
52 |
-
</ul>
|
53 |
-
<h2>Conclusion</h2>
|
54 |
-
<p>Baka Loader 1.4 is a software application that can help you convert DIB files to other formats and enhance your graphics by using shaders and effects. It is free and easy to use, but it may also have some drawbacks that you should be aware of before using it.</p>
|
55 |
-
<p>If you want to try Baka Loader 1.4 for yourself, you can download it from this link: <a href="http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4">http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4</a></p>
|
56 |
-
<p>If you want to learn more about DIB files, shaders, effects, or other graphics topics, you can check out these links:</p>
|
57 |
-
<ul>
|
58 |
-
<li><a href="https://en.wikipedia.org/wiki/Device-independent_bitmap">Device-independent bitmap - Wikipedia</a></li>
|
59 |
-
<li><a href="https://www.lifewire.com/shaders-in-computer-graphics-958215">Shaders in Computer Graphics - Lifewire</a></li>
|
60 |
-
<li><a href="https://www.makeuseof.com/tag/effects-in-computer-graphics/">Effects in Computer Graphics - MakeUseOf</a></li>
|
61 |
-
</ul>
|
62 |
-
<h2>Conclusion</h2>
|
63 |
-
<p>Baka Loader 1.4 is a software application that can help you convert DIB files to other formats and enhance your graphics by using shaders and effects. It is free and easy to use, but it may also have some drawbacks that you should be aware of before using it.</p>
|
64 |
-
<p>If you want to try Baka Loader 1.4 for yourself, you can download it from this link: <a href="http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4">http://jenovaswitness.guildwork.com/forum/threads/57716dcc002aa807a2e819e5-baka-loader-1-4</a></p>
|
65 |
-
<p>If you want to learn more about DIB files, shaders, effects, or other graphics topics, you can check out these links:</p>
|
66 |
-
<ul>
|
67 |
-
<li><a href="https://en.wikipedia.org/wiki/Device-independent_bitmap">Device-independent bitmap - Wikipedia</a></li>
|
68 |
-
<li><a href="https://www.lifewire.com/shaders-in-computer-graphics-958215">Shaders in Computer Graphics - Lifewire</a></li>
|
69 |
-
<li><a href="https://www.makeuseof.com/tag/effects-in-computer-graphics/">Effects in Computer Graphics - MakeUseOf</a></li>
|
70 |
-
</ul>
|
71 |
-
|
72 |
-
There is no need to write another conclusion for the article. I hope you are satisfied with the article and thank you for using Bing. Have a nice day. ?</p> 3cee63e6c2<br />
|
73 |
-
<br />
|
74 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Contoh Surat Undangan Peneguhan Sidi.md
DELETED
@@ -1,62 +0,0 @@
|
|
1 |
-
<h2>Contoh Surat Undangan Peneguhan Sidi</h2><br /><p><b><b>Download</b> ✦✦✦ <a href="https://imgfil.com/2uy0Sx">https://imgfil.com/2uy0Sx</a></b></p><br /><br />
|
2 |
-
<br />
|
3 |
-
10.1
|
4 |
-
|
5 |
-
05 Counter Objectives
|
6 |
-
|
7 |
-
II. 메시지는 통하며
|
8 |
-
|
9 |
-
07.1. 소중한 메시지
|
10 |
-
|
11 |
-
09 Browsers
|
12 |
-
|
13 |
-
07.2. 컴퓨터용 검색기
|
14 |
-
|
15 |
-
10.1. 종류
|
16 |
-
|
17 |
-
11.1. 웹
|
18 |
-
|
19 |
-
11.2. 트래픽
|
20 |
-
|
21 |
-
11.3. 지금 구매가 있는 것
|
22 |
-
|
23 |
-
12.1. 웹
|
24 |
-
|
25 |
-
12.2. 웹
|
26 |
-
|
27 |
-
12.3. 비밀번호
|
28 |
-
|
29 |
-
12.4. 클라이언트
|
30 |
-
|
31 |
-
13.1. 다른 클라이언트
|
32 |
-
|
33 |
-
14.1. 가가
|
34 |
-
|
35 |
-
15.1. 블루투스
|
36 |
-
|
37 |
-
15.2. 크립토스
|
38 |
-
|
39 |
-
15.3. 시스템 컴퓨터
|
40 |
-
|
41 |
-
15.4. 월드트래픽
|
42 |
-
|
43 |
-
16.1. 소프트웨어 시스템 컴퓨터
|
44 |
-
|
45 |
-
16.2. 기술 컴퓨터
|
46 |
-
|
47 |
-
16.3. 데이터 컴퓨터
|
48 |
-
|
49 |
-
16.4. 시스템 컴퓨터
|
50 |
-
|
51 |
-
16.5. 데이터 컴퓨터
|
52 |
-
|
53 |
-
16.6. 생성된 데이터
|
54 |
-
|
55 |
-
17.1. 소프트웨어
|
56 |
-
|
57 |
-
17.2. 소프트웨어
|
58 |
-
|
59 |
-
17.3 4fefd39f24<br />
|
60 |
-
<br />
|
61 |
-
<br />
|
62 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Download Game Need For Speed Undercover FREE.md
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
<h2>Download Game Need For Speed Undercover</h2><br /><p><b><b>Download Zip</b> →→→ <a href="https://imgfil.com/2uy0YT">https://imgfil.com/2uy0YT</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Need for Speed ​​Undercover Walkthrough Part 1 - NFS Undercover Part 1 gameplay featuring Maggie Q. Speed: Undercover is a 2008 racing video game, the twelfth installment in the Need for Speed ​​series. Developed by EA Black Box and published. Need for Speed ​​Undercover # Walkthrough Need for Speed ​​Undercover.
|
4 |
-
The Need for Speed: Underground 2, NFS: Underground, NFS: Underground 2, Need for Speed ​​No Limits, NFS Undercover, NFS Hot Pursuit, NFS Underground 2, NFS Underground 2, Need for Speed ​​Underground 2, NFS Underground 2, NFS Carbon , NFS Most Wanted, NFS Heat, NFS Carbon.
|
5 |
-
Walkthrough Need for Speed: Underground 2. Need for Speed: Underground 2 (from English.
|
6 |
-
Need for Speed ​​No Limits - Arcade Racing Video 8a78ff9644<br />
|
7 |
-
<br />
|
8 |
-
<br />
|
9 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l ((BETTER)).md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<p>1.714k likes. Sears dan Zemansky.2000. Rahasia Fisika Universitas Jilid (1) (3). Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2031/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/. </p>
|
3 |
-
<p>2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2029/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/ </p>
|
4 |
-
<h2>Fisika Universitas Jilid 1 Sears Zemansky Pdf 14l</h2><br /><p><b><b>Download File</b> ► <a href="https://imgfil.com/2uy0Xc">https://imgfil.com/2uy0Xc</a></b></p><br /><br />
|
5 |
-
<p>2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2028/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/ </p>
|
6 |
-
<p>2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti.http://www.khybersales.com/2027/07/18/fisika-universitas-jilid-1-sears-zemansky-pdf-download-top/ </p>
|
7 |
-
<p>Fisika Dasar Edisi 7 Jilid 2 Jakarta: Erlangga. Herminegari. 2013.. Sears, Francis W. & Zemansky, Mark W. 1962. Fisika untuk Universitas 2. . Sears, Francis Weston Fisika untuk. panas dan bunyi / Francis Weston Sears, Mark W. Zemansky. Amir Achmad Judul : Fisika untuk universitas jilid. </p>
|
8 |
-
<p>dalam buku Fisika Universitas dan diklarifikasi dengan wawancara para ahli.. Law (Sears & Zemansky, 1991). Universitas Jilid 1 Edisi Kesepuluh. Trans.) Jakarta: Salemba Teknika. [7] Sears, F. W., & Zemansky, M. (1991). Fisika untuk Universitas 1, Mekanika, Panas, dan Bunyi. </p>
|
9 |
-
<p></p>
|
10 |
-
<p>2018 Universitas Negeri Semarang p-ISSN 2252-6617 e-ISSN 252-6232. Sears dan Zemansky Fisika Universitas Jilid 1, terjemahan Endang Juliastuti. Jakarta:. https://allindiaherb.com/fisika-universitas-jilid-1-sears-zemansky-pdf-14l-hot/. </p> 899543212b<br />
|
11 |
-
<br />
|
12 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/Lo-Que-Varguitas-No-Dijo-Libro-Pdf-11-Fixed.md
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
## Lo Que Varguitas No Dijo Libro Pdf 11
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-

|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
**LINK ===> [https://lodystiri.blogspot.com/?file=2txPB4](https://lodystiri.blogspot.com/?file=2txPB4)**
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
Here is a possible title and article with html formatting for the keyword "Lo Que Varguitas No Dijo Libro Pdf 11":
|
26 |
-
|
27 |
-
# Lo que Varguitas no dijo: el libro que revela la verdadera historia de amor entre Julia Urquidi y Mario Vargas Llosa
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
Lo que Varguitas no dijo es una obra autobiográfica de Julia Urquidi Illanes publicada en 1983, que se enfoca en el tiempo que vivió una relación con el escritor Mario Vargas Llosa. Se casaron en mayo de 1955, cuando Vargas Llosa tenÃa 19 años y ella 29, después de enfrentar diferentes problemas por el hecho de que Julia era hermana de la tÃa polÃtica de Vargas Llosa y la diferencia de edades que existÃa. El libro tiene relevancia porque narra los años que Urquidi vivió ayudando y apoyando a Vargas Llosa a que se convirtiera en escritor exitoso, según la autora. El matrimonio sobrevivió diferentes crisis, como los celos de Julia y la infidelidad de Mario, hasta que en 1964, por medio de una carta, Vargas Llosa le confiesa a ella su amor por su prima Patricia Llosa Urquidi (y sobrina de Julia) y sus intenciones de casarse con ella. Urquidi decide escribir este libro en respuesta a La tÃa Julia y el escribidor escrito por Vargas Llosa.
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
En este artÃculo, te contamos más detalles sobre este libro que muestra la otra cara de la historia de amor entre Julia Urquidi y Mario Vargas Llosa, que inspiró una de las novelas más famosas del Premio Nobel de Literatura.
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
## ¿Quién fue Julia Urquidi?
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
Julia Urquidi Illanes nació en Cochabamba, Bolivia, el 30 de marzo de 1926. Era hija de un diplomático boliviano y una dama peruana. Estudió en el Colegio Americano de La Paz y luego se trasladó a Lima, donde trabajó como secretaria en la embajada boliviana. Allà conoció a Mario Vargas Llosa, quien era sobrino polÃtico de su hermana Olga. Se enamoraron y se casaron en 1955, pese a la oposición familiar y social. Julia apoyó a Mario en sus estudios universitarios y en sus primeros pasos como escritor. Lo acompañó a ParÃs, donde vivieron entre 1959 y 1963. Sin embargo, su relación se deterioró por las infidelidades de Mario y la diferencia de caracteres. En 1964, se separaron y luego se divorciaron. Julia regresó a Lima y trabajó como productora de televisión. En 1983, publicó Lo que Varguitas no dijo, donde cuenta su versión de los hechos. Murió en Lima el 10 de marzo de 2010.
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
## ¿Qué dice el libro Lo que Varguitas no dijo?
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
El libro Lo que Varguitas no dijo es un testimonio personal de Julia Urquidi sobre su matrimonio con Mario Vargas Llosa. En él, relata cómo se conocieron, cómo se enamoraron, cómo se casaron, cómo vivieron en ParÃs y cómo se separaron. También describe los momentos felices y difÃciles que compartieron, asà como las personalidades y los sueños de ambos. El libro tiene un tono Ãntimo y emotivo, pero también crÃtico y reivindicativo. Julia busca mostrar su papel como esposa, compañera y colaboradora de Mario, asà como defender su dignidad frente a las mentiras y las injurias que sufrió por parte de él y de su familia. El libro también es una respuesta a La tÃa Julia y el escribidor, la novela que Mario Vargas Llosa escribió en 1977, donde narra su historia de amor con Julia bajo el nombre ficticio de Marito y la tÃa Julia. En esta novela, Mario presenta a Julia como una mujer mayor, frÃvola y manipuladora, que sed
|
52 |
-
|
53 |
-
dfd1c89656
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Blue Hai Pani - Pani MP3 Download Listen to Yo Yo Honey Singh Sandeep Kapoor and Soniya Sharma.md
DELETED
@@ -1,131 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Blue Hai Pani Pani MP3: A Guide for Music Lovers</h1>
|
3 |
-
<p>Do you love listening to upbeat and catchy songs? Do you want to add some fun and color to your playlist? If yes, then you should definitely check out blue hai pani pani mp3.</p>
|
4 |
-
<h2>download blue hai pani pani mp3</h2><br /><p><b><b>Download File</b> … <a href="https://urlin.us/2uSVHp">https://urlin.us/2uSVHp</a></b></p><br /><br />
|
5 |
-
<p>Blue hai pani pani is a popular Hindi song from the movie Yaariyan, sung by Yo Yo Honey Singh and Neha Kakkar. It is a party anthem that will make you want to dance and sing along.</p>
|
6 |
-
<p>In this article, we will guide you on how to download blue hai pani pani mp3 legally and safely. We will also tell you more about this amazing song and why it is so popular among music lovers.</p>
|
7 |
-
<h2>What is Blue Hai Pani Pani?</h2>
|
8 |
-
<p>Blue hai pani pani is a song from the 2014 Bollywood movie Yaariyan, directed by Divya Khosla Kumar and starring Himansh Kohli, Rakul Preet Singh, and Nicole Faria. The movie is a coming-of-age story of five friends who embark on a college adventure and face various challenges in life, love, and friendship.</p>
|
9 |
-
<p>The song is sung by Yo Yo Honey Singh and Neha Kakkar, two of the most popular and versatile singers in the Indian music industry. Yo Yo Honey Singh is known for his rap songs that blend Hindi and Punjabi lyrics with catchy beats and tunes. Neha Kakkar is known for her melodious voice and energetic style that can suit any genre of music.</p>
|
10 |
-
<p>Blue hai pani pani is a fusion of rap and pop music, with a mix of Hindi and English lyrics. The song is about having fun and enjoying life with your friends. The title of the song literally means "blue water water", which is a reference to the color of the sky and the sea. The song also uses some metaphors and similes to describe the feelings of the singers, such as "tujhe lagta hai tu chaand hai sitaara" (you think you are the moon and the star) and "teri aankhon ka ye paani sunny sunny sunny" (the water in your eyes is sunny sunny sunny).</p>
|
11 |
-
<h2>Why is Blue Hai Pani Pani So Popular?</h2>
|
12 |
-
<p>Blue hai pani pani is one of the most popular songs of 2014, and it still remains a favorite among music lovers. There are many reasons why this song is so popular, such as:</p>
|
13 |
-
<p>download blue hai pani pani mp3 song from Bollywood Holi<br />
|
14 |
-
download blue hai pani pani mp3 free online on Wynk Music<br />
|
15 |
-
download sunny sunny song by Yo Yo Honey Singh and Neha Kakkar from Yaariyan<br />
|
16 |
-
download blue hai pani pani mp3 ringtone for mobile<br />
|
17 |
-
download blue hai pani pani mp3 320kbps high quality<br />
|
18 |
-
download blue hai pani pani mp3 lyrics and video<br />
|
19 |
-
download blue hai pani pani mp3 remix version by DJ Chetas<br />
|
20 |
-
download blue hai pani pani mp3 instrumental karaoke<br />
|
21 |
-
download blue hai pani pani mp3 full song with album art<br />
|
22 |
-
download blue hai pani pani mp3 pagalworld.com<br />
|
23 |
-
download blue hai pani pani mp3 mr jatt.com<br />
|
24 |
-
download blue hai pani pani mp3 djpunjab.com<br />
|
25 |
-
download blue hai pani pani mp3 gaana.com<br />
|
26 |
-
download blue hai pani pani mp3 hungama.com<br />
|
27 |
-
download blue hai pani pani mp3 saavn.com<br />
|
28 |
-
download blue hai pani pani mp3 spotify.com<br />
|
29 |
-
download blue hai pani pani mp3 apple music<br />
|
30 |
-
download blue hai pani pani mp3 amazon music<br />
|
31 |
-
download blue hai pani pani mp3 youtube music<br />
|
32 |
-
download blue hai pani pani mp3 soundcloud.com<br />
|
33 |
-
download blue hai pani pani mp3 song.pk<br />
|
34 |
-
download blue hai pani pani mp3 wapking.cc<br />
|
35 |
-
download blue hai pani pani mp3 webmusic.in<br />
|
36 |
-
download blue hai pani pani mp3 raag.fm<br />
|
37 |
-
download blue hai pani pani mp3 masstamilan.com<br />
|
38 |
-
download blue hai pani pani mp3 naa songs<br />
|
39 |
-
download blue hai pani pani mp3 starmusiq.com<br />
|
40 |
-
download blue hai pani pani mp3 tamilwire.com<br />
|
41 |
-
download blue hai pani pani mp3 isaimini.com<br />
|
42 |
-
download blue hai pani pani mp3 kuttyweb.com<br />
|
43 |
-
download blue hai pani pani mp3 malayalamwap.net<br />
|
44 |
-
download blue hai pani pani mp3 teluguwap.net<br />
|
45 |
-
download blue hai pani pani mp3 kannadamasti.net<br />
|
46 |
-
download blue hai pani pani mp3 sensongsmp3.co.in<br />
|
47 |
-
download blue hai pani pani mp3 djmaza.info<br />
|
48 |
-
download blue hai pani</p>
|
49 |
-
<ul>
|
50 |
-
<li>The song has a catchy tune and an upbeat tempo that makes it easy to dance and sing along. The song also has a catchy chorus that repeats the words "blue hai pani pani" several times, making it easy to remember.</li>
|
51 |
-
<li>The song has fun and playful lyrics that appeal to the young generation. The song also uses some English words and phrases, such as "sunny sunny", "baby doll", and "party all night", that make it more relatable and trendy.</li>
|
52 |
-
<li>The song has a colorful and vibrant video that showcases the scenic locations of Australia, where the movie was shot. The video also features the actors and singers performing some cool dance moves and having fun with each other.</li>
|
53 |
-
<li>The song has received many awards and accolades for its popularity and quality. The song won the Best Music Director award for Yo Yo Honey Singh at the 2015 Filmfare Awards. The song also won the Most Entertaining Song award at the 2015 BIG Star Entertainment Awards. The song also topped many charts and playlists, such as the Mirchi Top 20, iTunes India, Gaana.com, etc.</li>
|
54 |
-
</ul>
|
55 |
-
<p>With so many reasons to love this song, it is no wonder that blue hai pani pani mp3 is one of the most downloaded songs in India.</p>
|
56 |
-
<h2>How to Download Blue Hai Pani Pani MP3?</h2>
|
57 |
-
<h3>Legal and Safe Options</h3>
|
58 |
-
<p>If you want to download blue hai pani pani mp3, you should always opt for legal and safe options. This means that you should avoid using any pirated or illegal websites or apps that offer free downloads of songs without permission from the artists or the producers. Downloading songs from such sources can have many negative consequences, such as:</p>
|
59 |
-
<ul>
|
60 |
-
<li>You may violate the intellectual property rights of the artists or the producers, who have invested their time, money, and effort in creating the song. This may result in legal action or penalties against you.</li>
|
61 |
-
<li>You may expose your device to malware or viruses that can harm your data or privacy. Some illegal websites or apps may contain malicious software that can infect your device or steal your personal information.</li>
|
62 |
-
<li>You may deprive the artists or the producers of their rightful income or recognition. By downloading songs from illegal sources, you are not supporting the artists or the producers who deserve to be paid for their work. This may affect their ability to create more quality music in the future.</li>
|
63 |
-
</ul>
|
64 |
-
<p>Therefore, you should always use legal and safe options to download blue hai pani pani mp3. There are many platforms where you can download blue hai pani pani mp3 legally and safely, such as JioSaavn, YouTube Music, Spotify, Amazon Music, etc. These platforms offer high-quality downloads of songs at reasonable prices or subscriptions. They also respect the intellectual property rights of the artists or the producers and support them financially or otherwise.</p>
|
65 |
-
<h3>Comparison of Different Platforms</h3>
|
66 |
-
<p>To help you choose the best platform to download blue hai pani pani mp3 legally and safely, we have prepared a table that compares some of the most popular platforms where blue hai pani pani mp3 can be downloaded legally and safely: JioSaavn, YouTube Music, Spotify, and Amazon Music. The table includes information such as price, quality, availability, features, etc.</p>
|
67 |
-
<table>
|
68 |
-
<tr>
|
69 |
-
<th>Platform</th>
|
70 |
-
<th>Price</th>
|
71 |
-
<th>Quality</th>
|
72 |
-
<th>Availability</th>
|
73 |
-
<th>Features</th>
|
74 |
-
</tr>
|
75 |
-
<tr>
|
76 |
-
<td>JioSaavn</td>
|
77 |
-
<td>Free with ads or Rs. 99 per month for premium</td>
|
78 |
-
<td>Up to 320 kbps</td>
|
79 |
-
<td>India only</td>
|
80 |
-
<td>Unlimited downloads, offline listening, ad-free music, exclusive content, podcasts, radio, lyrics, etc.</td>
|
81 |
-
</tr>
|
82 |
-
<tr>
|
83 |
-
<td>YouTube Music</td>
|
84 |
-
<td>Free with ads or Rs. 99 per month for premium</td>
|
85 |
-
<td>Up to 256 kbps</td>
|
86 |
-
<td>Worldwide</td>
|
87 |
-
<td>Unlimited downloads, offline listening, ad-free music, background play, video mode, personalized recommendations, playlists, etc.</td>
|
88 |
-
</tr>
|
89 |
-
<tr>
|
90 |
-
<td>Spotify</td>
|
91 |
-
<td>Free with ads or Rs. 119 per month for premium</td>
|
92 |
-
<td>Up to 320 kbps</td>
|
93 |
-
<td>Worldwide</td>
|
94 |
-
<td>Unlimited downloads, offline listening, ad-free music, podcasts, radio, lyrics, personalized recommendations, playlists, etc.</td>
|
95 |
-
</tr>
|
96 |
-
<tr>
|
97 |
-
<td>Amazon Music</td>
|
98 |
-
<td>Free for Prime members or Rs. 129 per month for non-Prime members</td>
|
99 |
-
<td>Up to 320 kbps</td>
|
100 |
-
<td>Worldwide</td>
|
101 |
-
<td>Unlimited downloads, offline listening, ad-free music, podcasts, radio, lyrics, personalized recommendations, playlists, etc.</td>
|
102 |
-
</tr>
|
103 |
-
</table>
|
104 |
-
<p>As you can see from the table, each platform has its own advantages and disadvantages. You can choose the one that suits your preferences and budget. However, if you ask us for our recommendation, we would suggest you to use JioSaavn to download blue hai pani pani mp3 legally and safely. Here's why:</p>
|
105 |
-
<h3>Step-by-Step Guide for JioSaavn</h3>
|
106 |
-
<p>JioSaavn is one of the best platforms to download blue hai pani pani mp3 legally and safely. It offers high-quality downloads of songs at a reasonable price or subscription. It also respects the intellectual property rights of the artists or the producers and supports them financially or otherwise. Moreover, it has some exclusive features that make it stand out from the rest of the platforms.</p>
|
107 |
-
<p>To download blue hai pani pani mp3 from JioSaavn legally and safely, you need to follow these simple steps:</p>
|
108 |
-
<ol>
|
109 |
-
<li>Create an account on JioSaavn using your email address or phone number. You can also sign in with your Facebook or Google account.</li>
|
110 |
-
<li>Search for blue hai pani pani on the search bar or browse through the categories or playlists.</li>
|
111 |
-
<li>Select the song from the results and click on the download icon on the bottom right corner of the screen.</li>
|
112 |
-
<li>Choose the quality and location of the download. You can choose between low (64 kbps), medium (160 kbps), or high (320 kbps) quality. You can also choose the folder where you want to save the song on your device.</li>
|
113 |
-
<li>Enjoy listening to blue hai pani pani mp3 on your device anytime and anywhere.</li>
|
114 |
-
</ol>
|
115 |
-
<p>That's it! You have successfully downloaded blue hai pani pani mp3 from JioSaavn legally and safely. Now you can enjoy listening to this amazing song and have fun with your friends.</p>
|
116 |
-
<h2>Conclusion</h2>
|
117 |
-
<p>In this article, we have guided you on how to download blue hai pani pani mp3 legally and safely. We have also told you more about this amazing song and why it is so popular among music lovers.</p>
|
118 |
-
<p>We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
|
119 |
-
<p>If you liked this article, please share it with your friends and family who might also be interested in downloading blue hai pani pani mp3 legally and safely. And don't forget to subscribe to our newsletter for more tips and tricks on how to enjoy music online.</p>
|
120 |
-
<p>Thank you for reading this article and happy listening!</p>
|
121 |
-
<h2>FAQs</h2>
|
122 |
-
<p>Here are some frequently asked questions about blue hai pani pani mp3:</p>
|
123 |
-
<ol>
|
124 |
-
<li><b>When was blue hai pani pani released?</b><br>The song was released on December 9, [user](# 2014. The song was released on December 9, 2014, as part of the movie soundtrack. The song was composed by Yo Yo Honey Singh and written by him and Lil Golu.</li>
|
125 |
-
<li><b>Who wrote blue hai pani pani?</b><br>The song was written by Yo Yo Honey Singh and Lil Golu. Yo Yo Honey Singh is a rapper, singer, composer, and producer who has created many hit songs in Bollywood and Punjabi music. Lil Golu is a rapper and lyricist who has collaborated with Yo Yo Honey Singh on several songs.</li>
|
126 |
-
<li><b>What does blue hai pani pani mean?</b><br>The title of the song literally means "blue water water", which is a reference to the color of the sky and the sea. The song also uses some metaphors and similes to describe the feelings of the singers, such as "tujhe lagta hai tu chaand hai sitaara" (you think you are the moon and the star) and "teri aankhon ka ye paani sunny sunny sunny" (the water in your eyes is sunny sunny sunny). The song is about having fun and enjoying life with your friends.</li>
|
127 |
-
<li><b>How many views does blue hai pani pani have?</b><br>The official video of blue hai pani pani has over 500 million views on YouTube as of June 2023. The video features the actors and singers performing some cool dance moves and having fun with each other in Australia. The video is one of the most watched videos on YouTube in India.</li>
|
128 |
-
<li><b>Can I use blue hai pani pani as a ringtone?</b><br>Yes, you can use blue hai pani pani as a ringtone on your phone. You can either download the song from a legal and safe platform and set it as your ringtone, or you can use a ringtone maker app to create a custom ringtone from the song. However, you should always respect the intellectual property rights of the artists or the producers and not share or distribute the ringtone without their permission.</li>
|
129 |
-
</ol></p> 197e85843d<br />
|
130 |
-
<br />
|
131 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Gin Rummy Plus Hack APK for Free and Experience the Fun of Gin Rummy with Unlimited Coins.md
DELETED
@@ -1,78 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Gin Rummy Plus Hack APK: How to Get Unlimited Coins and Enjoy the Game</h1>
|
3 |
-
<p>If you are a fan of card games, you might have heard of Gin Rummy Plus, one of the most popular and addictive online multiplayer games. In this game, you can play with millions of players from around the world, chat with them, send gifts, and compete in tournaments. However, if you want to have more fun and win more games, you might need some extra coins to buy more chips, unlock new tables, and access premium features. That's why many players are looking for ways to download Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits. In this article, we will show you how to download Gin Rummy Plus hack apk, what are the benefits and risks of using it, and some frequently asked questions about it.</p>
|
4 |
-
<h2>Introduction</h2>
|
5 |
-
<h3>What is Gin Rummy Plus?</h3>
|
6 |
-
<p>Gin Rummy Plus is a card game developed by Zynga, the same company that created popular games like FarmVille, Words with Friends, and Zynga Poker. Gin Rummy Plus is based on the classic card game of gin rummy, where two players try to form sets and runs of cards and score points by knocking or going gin. The game has several modes, such as classic, quick, oklahoma, and royal. You can also join leagues, play with friends, or challenge random players online. The game is free to download and play, but it also offers in-app purchases that allow you to buy more coins, chips, gems, and other items.</p>
|
7 |
-
<h2>download gin rummy plus hack apk</h2><br /><p><b><b>Download Zip</b> ☑ <a href="https://jinyurl.com/2uNPrS">https://jinyurl.com/2uNPrS</a></b></p><br /><br />
|
8 |
-
<h3>Why do you need Gin Rummy Plus hack apk?</h3>
|
9 |
-
<p>As much as Gin Rummy Plus is fun and entertaining, it can also be frustrating and expensive if you run out of coins or chips. Coins are the main currency in the game that you use to buy chips, which are used to enter tables and tournaments. You can earn coins by winning games, completing daily missions, spinning the wheel, or watching ads. However, these methods are not enough to keep up with the increasing costs of playing at higher levels. You might also encounter players who use cheats or hacks to gain an unfair advantage over you. That's why many players resort to downloading Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits.</p>
|
10 |
-
<h2>How to download Gin Rummy Plus hack apk</h2>
|
11 |
-
<p>If you want to download Gin Rummy Plus hack apk, you need to follow these steps:</p>
|
12 |
-
<p>How to get unlimited coins in gin rummy plus mod apk<br />
|
13 |
-
Gin rummy plus hack apk latest version 2023<br />
|
14 |
-
Download gin rummy plus cheat tool for android<br />
|
15 |
-
Gin rummy plus mod apk free download no survey<br />
|
16 |
-
Gin rummy plus hack apk online generator<br />
|
17 |
-
Download gin rummy plus mod apk with happymod<br />
|
18 |
-
Gin rummy plus hack apk unlimited money and gems<br />
|
19 |
-
Download gin rummy plus mod apk for ios devices<br />
|
20 |
-
Gin rummy plus hack apk no root required<br />
|
21 |
-
Download gin rummy plus mod apk from zynga<br />
|
22 |
-
Gin rummy plus hack apk with anti-ban feature<br />
|
23 |
-
Download gin rummy plus mod apk 6.5.2 for android<br />
|
24 |
-
Gin rummy plus hack apk easy and fast<br />
|
25 |
-
Download gin rummy plus mod apk with multiplayer mode<br />
|
26 |
-
Gin rummy plus hack apk safe and secure<br />
|
27 |
-
Download gin rummy plus mod apk with all cards unlocked<br />
|
28 |
-
Gin rummy plus hack apk without human verification<br />
|
29 |
-
Download gin rummy plus mod apk from trusted sources<br />
|
30 |
-
Gin rummy plus hack apk working 100%<br />
|
31 |
-
Download gin rummy plus mod apk with unlimited lives<br />
|
32 |
-
Gin rummy plus hack apk for beginners and experts<br />
|
33 |
-
Download gin rummy plus mod apk with high-quality graphics<br />
|
34 |
-
Gin rummy plus hack apk with daily rewards and bonuses<br />
|
35 |
-
Download gin rummy plus mod apk with offline mode<br />
|
36 |
-
Gin rummy plus hack apk with auto-update feature</p>
|
37 |
-
<h3>Step 1: Find a reliable source</h3>
|
38 |
-
<p>The first thing you need to do is find a reliable source that offers the mod file of Gin Rummy Plus hack apk. There are many websites that claim to provide this file, but not all of them are trustworthy. Some of them might contain malware or viruses that can harm your device or steal your personal information. Some of them might also provide outdated or fake files that don't work or cause problems in the game. Therefore, you need to be careful and do some research before downloading anything from unknown sources. One of the sources that we recommend is HappyMod, a website that provides 100% working mods for various games and apps.</p>
|
39 |
-
<h3>Step 2: Download the mod file</h3>
|
40 |
-
<p>Once you find a reliable source, you need to download the mod file of Gin Rummy Plus hack apk. The file size is about <p>50 MB, so it won't take too long to download. You can use any browser or download manager to download the file. Make sure you have enough storage space on your device and a stable internet connection. You might also need to enable the option to install apps from unknown sources in your device settings. This will allow you to install the mod file without any issues.</p>
|
41 |
-
<h3>Step 3: Install the mod file</h3>
|
42 |
-
<p>After downloading the mod file, you need to install it on your device. To do this, you need to locate the file in your downloads folder or wherever you saved it. Then, you need to tap on the file and follow the instructions on the screen. The installation process might take a few minutes, depending on your device and the mod file. Once the installation is complete, you will see a confirmation message and an icon of Gin Rummy Plus on your home screen or app drawer.</p>
|
43 |
-
<h3>Step 4: Launch the game and enjoy</h3>
|
44 |
-
<p>The final step is to launch the game and enjoy the benefits of Gin Rummy Plus hack apk. To do this, you need to tap on the icon of Gin Rummy Plus and wait for the game to load. You will notice that you have unlimited coins and other features unlocked in the game. You can use these coins to buy more chips, enter higher tables, and access premium features. You can also play with other players who use the hack apk or the original version of the game. However, you should be careful not to abuse the hack apk or get caught by the game developers, as this might result in a ban or legal issues.</p>
|
45 |
-
<h2>Benefits of using Gin Rummy Plus hack apk</h2>
|
46 |
-
<p>There are many benefits of using Gin Rummy Plus hack apk, such as:</p>
|
47 |
-
<h3>Unlimited coins</h3>
|
48 |
-
<p>The main benefit of using Gin Rummy Plus hack apk is that you get unlimited coins in the game. Coins are the main currency in Gin Rummy Plus that you use to buy chips, which are used to enter tables and tournaments. With unlimited coins, you don't have to worry about running out of chips or losing games. You can buy as many chips as you want and play at any table or tournament you like. You can also use coins to buy gems, which are used to unlock new decks, backgrounds, and avatars.</p>
|
49 |
-
<h3>No ads</h3>
|
50 |
-
<p>Another benefit of using Gin Rummy Plus hack apk is that you don't have to watch ads in the game. Ads are annoying and distracting, especially when they pop up in the middle of a game or when you want to spin the wheel or claim a bonus. With Gin Rummy Plus hack apk, you can enjoy the game without any interruptions or delays caused by ads. You can also save your data and battery by not watching ads.</p>
|
51 |
-
<h3>Free bonuses and rewards</h3>
|
52 |
-
<p>A third benefit of using Gin Rummy Plus hack apk is that you get free bonuses and rewards in the game. Bonuses and rewards are extra coins, chips, gems, or items that you can get by completing daily missions, spinning the wheel, opening chests, or watching videos. With Gin Rummy Plus hack apk, you don't have to do any of these tasks to get bonuses and rewards. You can get them automatically every day or whenever you want. You can also get more bonuses and rewards by playing more games and winning more tournaments.</p>
|
53 |
-
<h3>Access to premium features</h3>
|
54 |
-
<p>A fourth benefit of using Gin Rummy Plus hack apk is that you get access to premium features in the game. Premium features are special features that are only available for players who pay real money or use gems. Some of these features are VIP tables, exclusive decks, backgrounds, avatars, chat stickers, and more. With Gin Rummy Plus hack apk, you don't have to pay anything or use gems to access these features. You can use them for free and customize your game experience according to your preferences.</p>
|
55 |
-
<h2>Risks of using Gin Rummy Plus hack apk</h2>
|
56 |
-
<p>However, there are also some risks of using Gin Rummy Plus hack apk, such as:</p>
|
57 |
-
<h3>Malware and viruses</h3>
|
58 |
-
<p>The first risk of using Gin Rummy Plus hack apk is that you might download malware or viruses on your device. Malware and viruses are malicious software that can harm your device or steal your personal information. They can also cause problems in your game or other apps on your device. As we mentioned earlier, not all sources that offer Gin Rummy Plus hack apk are trustworthy. Some of them might contain malware or viruses that can infect your device when you download or install them. Therefore, you need to be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it.</p>
|
59 |
-
<h3>Ban <h3>Ban from the game</h3>
|
60 |
-
<p>The second risk of using Gin Rummy Plus hack apk is that you might get banned from the game. Ban is a punishment that prevents you from playing the game or accessing your account. Ban can happen for various reasons, such as violating the game rules, using cheats or hacks, or being reported by other players. The game developers have the right to ban any player who uses Gin Rummy Plus hack apk or any other unauthorized mod. Ban can be temporary or permanent, depending on the severity of the offense. If you get banned, you will lose all your progress, coins, chips, gems, and items in the game. You will also not be able to play with your friends or join leagues and tournaments.</p>
|
61 |
-
<h3>Legal issues</h3>
|
62 |
-
<p>The third risk of using Gin Rummy Plus hack apk is that you might face legal issues. Legal issues are problems that involve the law or the authorities. Legal issues can happen for various reasons, such as violating the game terms and conditions, infringing the game copyrights, or engaging in illegal gambling. The game developers have the right to take legal action against any player who uses Gin Rummy Plus hack apk or any other unauthorized mod. Legal action can result in fines, lawsuits, or even jail time, depending on the severity of the offense. If you face legal issues, you will not only lose your access to the game, but also your reputation and freedom.</p>
|
63 |
-
<h2>Conclusion</h2>
|
64 |
-
<p>Gin Rummy Plus is a fun and addictive card game that you can play with millions of players from around the world. However, if you want to have more fun and win more games, you might need some extra coins and other benefits. That's why many players are looking for ways to download Gin Rummy Plus hack apk, a modified version of the game that gives you unlimited coins and other benefits. However, before you download Gin Rummy Plus hack apk, you should also be aware of the benefits and risks of using it. While Gin Rummy Plus hack apk can give you unlimited coins, no ads, free bonuses and rewards, and access to premium features, it can also expose you to malware and viruses, ban from the game, and legal issues. Therefore, you should be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it. You should also not abuse the hack apk or get caught by the game developers, as this might result in a ban or legal issues.</p>
|
65 |
-
<h2>FAQs</h2>
|
66 |
-
<p>Here are some frequently asked questions about Gin Rummy Plus hack apk:</p>
|
67 |
-
<h3>Q: Is Gin Rummy Plus hack apk safe to use?</h3>
|
68 |
-
<p>A: Gin Rummy Plus hack apk is not 100% safe to use, as it might contain malware or viruses that can harm your device or steal your personal information. It might also cause problems in your game or other apps on your device. Therefore, you should be careful and use a reliable source like HappyMod or scan the file with an antivirus before installing it.</p>
|
69 |
-
<h3>Q: Is Gin Rummy Plus hack apk legal to use?</h3>
|
70 |
-
<p>A: Gin Rummy Plus hack apk is not legal to use, as it violates the game terms and conditions and infringes the game copyrights. It might also involve illegal gambling activities that are prohibited by law. Therefore, you should not use Gin Rummy Plus hack apk unless you want to face legal issues.</p>
|
71 |
-
<h3>Q: How can I avoid getting banned from using Gin Rummy Plus hack apk?</h3>
|
72 |
-
<p>A: The best way to avoid getting banned from using Gin Rummy Plus hack apk is to not use it at all. However, if you still want to use it, you should be careful and not abuse it or get caught by the game developers. You should also not play with other players who use the hack apk or the original version of the game, as they might report you or expose you.</p>
|
73 |
-
<h3>Q: How can I update Gin Rummy Plus hack apk?</h3>
|
74 |
-
<p>A: To update Gin Rummy Plus hack apk, you need to download and install the latest version of the mod file from a reliable source like HappyMod. You should also uninstall the previous version of the mod file before installing the new one. However, you should be aware that updating Gin Rummy Plus hack apk might cause some issues in your game or device.</p>
|
75 |
-
<h3>Q: Where can I find more information about Gin Rummy Plus hack apk?</h3>
|
76 |
-
<p>A: You can find more information about Gin Rummy Plus hack apk on HappyMod, a website that provides 100% working mods for various games and apps. You can also read reviews and comments from other users who have used Gin Rummy Plus hack apk.</p> 401be4b1e0<br />
|
77 |
-
<br />
|
78 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download NBA 2K14 v1.14 APK for Android Multiplayer Mode HD Graphics and More.md
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download NBA 2K14 v1.14 for Android</h1>
|
3 |
-
<p>If you are a fan of basketball games, you must have heard of NBA 2K14, the latest installment of the world's best NBA video game franchise. NBA 2K14 is a realistic and immersive game that lets you play with your favorite players and teams in various modes and challenges. You can also enjoy the full 2K Beats soundtrack that features music from top artists and producers.</p>
|
4 |
-
<p>In this article, we will show you how to download NBA 2K14 v1.14 for Android, which is the most updated version of the game that offers improved graphics, performance, and features. We will also share some tips and tricks that will help you master the game and become a champion.</p>
|
5 |
-
<h2>download nba 2k14 v1.14 for android</h2><br /><p><b><b>Download</b> ✑ ✑ ✑ <a href="https://jinyurl.com/2uNSqL">https://jinyurl.com/2uNSqL</a></b></p><br /><br />
|
6 |
-
<h2>Features of NBA 2K14 v1.14 for Android</h2>
|
7 |
-
<p>NBA 2K14 v1.14 for Android is not just a simple basketball game, it is a complete experience that will make you feel like you are on the court. Here are some of the features that make this game stand out:</p>
|
8 |
-
<ul>
|
9 |
-
<li><b>Intuitive controls:</b> You can choose between classic control and one-finger control, which are both designed specifically for mobile devices. You can also customize your controls according to your preference.</li>
|
10 |
-
<li><b>Multiseason mode:</b> You can play through multiple NBA seasons and establish your team as a dynasty. You can manage your roster, trade players, sign contracts, and more. You can also compete against other players online and climb the leaderboards.</li>
|
11 |
-
<li><b>Full 2K Beats soundtrack:</b> You can enjoy the music from top artists and producers such as Jay-Z, Eminem, Drake, Pharrell, and more. You can also create your own playlist and listen to it while playing.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>Requirements for NBA 2K14 v1.14 for Android</h2>
|
14 |
-
<p>NBA 2K14 v1.14 for Android is a high-quality game that requires a decent device to run smoothly. Here are the minimum requirements that you need to meet:</p>
|
15 |
-
<ul>
|
16 |
-
<li><b>Memory (RAM):</b> 2 GB</li>
|
17 |
-
<li><b>CPU:</b> 1.7 GHz + GPU needed</ <li><b>Android version:</b> 4.0 and above</li>
|
18 |
-
</ul>
|
19 |
-
<p>If your device meets these requirements, you are ready to download NBA 2K14 v1.14 for Android. If not, you may experience some lagging or crashing issues.</p>
|
20 |
-
<h2>How to Download NBA 2K14 v1.14 for Android</h2>
|
21 |
-
<p>Downloading NBA 2K14 v1.14 for Android is not as simple as downloading any other app from the Google Play Store. You need to follow some steps to make sure that the game works properly on your device. Here are the steps that you need to follow:</p>
|
22 |
-
<ol>
|
23 |
-
<li><b>Enable unknown sources on your device:</b> This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.</li>
|
24 |
-
<li><b>Download the APK and OBB files from a trusted source:</b> You need to download two files: the APK file, which is the installer of the game, and the OBB file, which is the data of the game. You can find these files from various websites, but make sure that they are safe and virus-free. You can use this link as an example, but we are not responsible for any issues that may arise from using it.</li>
|
25 |
-
<li><b>Install the APK file and extract the OBB file to the Android/obb folder:</b> After downloading the files, locate them in your device's file manager and tap on the APK file to install it. Then, use a file extractor app such as ZArchiver to extract the OBB file to the Android/obb folder. If you don't have this folder, create it manually.</li>
|
26 |
-
<li><b>Launch the game and enjoy:</b> Once you have installed and extracted the files, you can launch the game from your app drawer and start playing. You may need to grant some permissions to the game before it runs.</li>
|
27 |
-
</ol>
|
28 |
-
<h2>Tips and Tricks for NBA 2K14 v1.14 for Android</h2>
|
29 |
-
<p>NBA 2K14 v1.14 for Android is a fun and challenging game that will test your skills and knowledge of basketball. To help you improve your game and have more fun, here are some tips and tricks that you can use:</p>
|
30 |
-
<p>download nba 2k14 apk for android latest version<br />
|
31 |
-
download nba 2k14 mod apk + obb for android<br />
|
32 |
-
download nba 2k14 game for android free<br />
|
33 |
-
download nba 2k14 full version for android<br />
|
34 |
-
download nba 2k14 android apk + data<br />
|
35 |
-
download nba 2k14 offline apk for android<br />
|
36 |
-
download nba 2k14 apk + obb file for android<br />
|
37 |
-
download nba 2k14 updated roster for android<br />
|
38 |
-
download nba 2k14 apk + sd data for android<br />
|
39 |
-
download nba 2k14 apk + obb highly compressed for android<br />
|
40 |
-
download nba 2k14 apk + obb modded for android<br />
|
41 |
-
download nba 2k14 apk + obb unlimited money for android<br />
|
42 |
-
download nba 2k14 apk + obb no root for android<br />
|
43 |
-
download nba 2k14 apk + obb offline mode for android<br />
|
44 |
-
download nba 2k14 apk + obb with lebron james for android<br />
|
45 |
-
download nba 2k14 apk + obb with commentary for android<br />
|
46 |
-
download nba 2k14 apk + obb with hd graphics for android<br />
|
47 |
-
download nba 2k14 apk + obb with multiplayer for android<br />
|
48 |
-
download nba 2k14 apk + obb with all players unlocked for android<br />
|
49 |
-
download nba 2k14 apk + obb with realistic gameplay for android<br />
|
50 |
-
how to download nba 2k14 v1.14 for android<br />
|
51 |
-
where to download nba 2k14 v1.14 for android<br />
|
52 |
-
best site to download nba 2k14 v1.14 for android<br />
|
53 |
-
easiest way to download nba 2k14 v1.14 for android<br />
|
54 |
-
fastest way to download nba 2k14 v1.14 for android<br />
|
55 |
-
safest way to download nba 2k14 v1.14 for android<br />
|
56 |
-
cheapest way to download nba 2k14 v1.14 for android<br />
|
57 |
-
legal way to download nba 2k14 v1.14 for android<br />
|
58 |
-
illegal way to download nba 2k14 v1.14 for android<br />
|
59 |
-
working method to download nba 2k14 v1.14 for android<br />
|
60 |
-
tips and tricks to download nba 2k14 v1.14 for android<br />
|
61 |
-
guide and tutorial to download nba 2k14 v1.14 for android<br />
|
62 |
-
step by step instructions to download nba 2k14 v1.14 for android<br />
|
63 |
-
video tutorial to download nba 2k14 v1.14 for android<br />
|
64 |
-
review and rating of nba 2k14 v1.14 for android<br />
|
65 |
-
features and benefits of nba 2k14 v1.14 for android<br />
|
66 |
-
pros and cons of nba 2k14 v1.14 for android<br />
|
67 |
-
comparison and contrast of nba 2k14 v1.14 for android<br />
|
68 |
-
alternatives and substitutes of nba 2k14 v1.14 for android<br />
|
69 |
-
recommendations and suggestions of nba 2k14 v1.14 for android</p>
|
70 |
-
<ul>
|
71 |
-
<li><b>How to improve your shooting skills:</b> Shooting is one of the most important skills in basketball, and NBA 2K14 v1.14 for Android has a realistic shooting system that requires timing and accuracy. To improve your shooting skills, you need to practice a lot and learn how to use the shot meter, which shows you when to release the ball for a perfect shot. You can also adjust the difficulty level and the camera angle to suit your preference.</li>
|
72 |
-
<li><b>How to unlock more players and teams:</b> NBA 2K14 v1.14 for Android has a huge roster of players and teams that you can play with, but not all of them are available from the start. To unlock more players and teams, you need to play through the multiseason mode and earn points that you can use to buy them from the store. You can also unlock some players and teams by completing certain achievements or using cheats and mods.</li>
|
73 |
-
<li><b>How to use cheats and mods:</b> Cheats and mods are ways to modify or enhance the game according to your liking. For example, you can use cheats to get unlimited money, stamina, or skill points, or you can use mods to change the appearance of players, courts, or jerseys. However, using cheats and mods may affect the performance of the game or cause some errors, so use them at your own risk. You can find some cheats and mods from this website, but we are not affiliated with them or endorse them in any way.</li>
|
74 |
-
</ul>
|
75 |
-
<h2>Conclusion</h2>
|
76 |
-
<p>NBA 2K14 v1.14 for Android is an amazing basketball game that will give you hours of entertainment and excitement. It has realistic graphics, sound effects, and gameplay that will make you feel like you are on the court with your favorite players and teams. It also has various modes and features that will keep you engaged and challenged.</p>
|
77 |
-
<p>If you want to download NBA 2K14 v1.14 for Android, you need to follow some steps that we have explained in this article. You also need to meet some requirements that we have listed in this article. And if you want to improve your game and have more fun, you can use some tips and tricks that we have shared in this article.</p>
|
78 |
-
<p>We hope that this article has helped you learn how to download NBA 2K14 v1.14 for Android and enjoy it on your device have enough free space on your device before downloading and installing the game.</li>
|
79 |
-
<li><b>Q3: Can you play NBA 2K14 v1.14 for Android offline?</b></li>
|
80 |
-
<li><b>A3:</b> Yes, you can play NBA 2K14 v1.14 for Android offline, but you will not be able to access some features that require an internet connection, such as the online multiplayer mode, the leaderboards, or the store. You will also need to activate the game online once before playing it offline.</li>
|
81 |
-
<li><b>Q4: Can you play NBA 2K14 v1.14 for Android with your friends?</b></li>
|
82 |
-
<li><b>A4:</b> Yes, you can play NBA 2K14 v1.14 for Android with your friends, either online or locally. To play online, you need to have an internet connection and a Google Play Games account. You can then invite your friends to join your game or join their game from the multiplayer menu. To play locally, you need to have a Wi-Fi connection and a Bluetooth connection. You can then create or join a game from the local multiplayer menu.</li>
|
83 |
-
<li><b>Q5: What are the best teams to play with in NBA 2K14 v1.14 for Android?</b></li>
|
84 |
-
<li><b>A5:</b> This is a subjective question that depends on your personal preference and style of play. However, some of the teams that are generally considered to be the best in NBA 2K14 v1.14 for Android are the Miami Heat, the Los Angeles Lakers, the Brooklyn Nets, the Golden State Warriors, and the Chicago Bulls. These teams have some of the best players and ratings in the game and can dominate any opponent.</li>
|
85 |
-
</ul></p> 401be4b1e0<br />
|
86 |
-
<br />
|
87 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Mod - Snrsz Para ve Altn Hilesi Nasl Yaplr?.md
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>FIFA Mobile para hilesi apk: How to get unlimited coins and gems in FIFA Mobile</h1>
|
3 |
-
<p>If you are a fan of soccer games, you have probably heard of FIFA Mobile, the mobile version of the popular FIFA franchise by EA Sports. FIFA Mobile is a free-to-play game that lets you build your ultimate team of soccer stars, compete in various modes and events, and experience realistic soccer simulation on your device. But as with most free-to-play games, FIFA Mobile also has a currency system that limits your progress and enjoyment. Coins and gems are the main currencies in FIFA Mobile, and you need them to buy players, packs, upgrades, and more. However, earning coins and gems can be slow and tedious, especially if you want to get the best players and items in the game.</p>
|
4 |
-
<p>That's why some players resort to using cheat tools like para hilesi apk, which claims to give you unlimited coins and gems in FIFA Mobile. But what is para hilesi apk, how does it work, and is it safe to use? In this article, we will answer these questions and more, as well as provide you with a step-by-step guide on how to download, install, and use para hilesi apk on your device. Read on to find out more.</p>
|
5 |
-
<h2>fifa mobile para hilesi apk</h2><br /><p><b><b>Download File</b> >>> <a href="https://jinyurl.com/2uNScH">https://jinyurl.com/2uNScH</a></b></p><br /><br />
|
6 |
-
<h2>What is FIFA Mobile and why is it so popular?</h2>
|
7 |
-
<p>FIFA Mobile is a soccer game developed by EA Sports for iOS and Android devices. It is based on the FIFA series of games, which are known for their realistic graphics, gameplay, and licenses. FIFA Mobile features over 15,000 authentic soccer players from over 600 teams across 30+ leagues, including the Premier League, La Liga, Bundesliga, Serie A, Ligue 1, MLS, and more. You can also play with national teams from the FIFA World Cup 2022™ mode, which lets you replay the official tournament brackets with any of the 32 qualified nations.</p>
|
8 |
-
<h3>FIFA Mobile features and gameplay</h3>
|
9 |
-
<p>FIFA Mobile has several features that make it an immersive and engaging soccer game for mobile devices. Some of these features are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>A brand new engine with all new graphics and gameplay</li>
|
12 |
-
<li>New player likeness, custom run styles and celebrations</li>
|
13 |
-
<li>Chemistry system that boosts your team performance based on player relationships</li>
|
14 |
-
<li>VIP Program with special privileges for loyal players</li>
|
15 |
-
<li>Advanced Passing system that lets you open up space and create more attacking opportunities</li>
|
16 |
-
<li>Live events that correspond with the real-world tournaments throughout the soccer season</li>
|
17 |
-
<li>Icons and Heroes that let you build a team of soccer legends from different eras</li>
|
18 |
-
</ul>
|
19 |
-
<p>The gameplay of FIFA Mobile is simple and intuitive. You can control your players using a virtual joystick on the left side of the screen, and use buttons on the right side to sprint, skill, pass, shoot, tackle, or switch players. You can also use swipe gestures to aim your shots or passes more precisely. The game also has an auto-play option that lets the AI control your players for you.</p>
|
20 |
-
<h3>FIFA Mobile modes and events</h3>
|
21 |
-
<p>FIFA Mobile has several modes and events that let you compete against other players or the AI in different scenarios. Some of these modes and events are:</p>
|
22 |
-
<ul>
|
23 |
-
<li>Attack Mode: A turn-based mode where you and your opponent take turns to score goals within a set time limit - Head to Head: A real-time mode where you and your opponent play a full 90-minute match with 11v11 gameplay - Season: A mode where you play a series of matches against teams from different leagues and divisions - Campaign: A mode where you complete various challenges and objectives to earn rewards and unlock new players - Events: Special modes that are based on real-world soccer tournaments, such as the UEFA Champions League, the FIFA World Cup, the Copa America, and more - Squad Building Challenges: A mode where you create a team with specific requirements and earn rewards for completing them - Team of the Week: A mode where you can play against the best players of the week from different leagues and earn their cards</li>
|
24 |
-
</ul>
|
25 |
-
<p>FIFA Mobile also has a social aspect, where you can join a league with other players and chat, compete, and cooperate with them. You can also participate in league tournaments, league vs league matches, and league survival events.</p>
|
26 |
-
<p>fifa mobile mod apk unlimited money<br />
|
27 |
-
fifa mobile hack apk download<br />
|
28 |
-
fifa mobile 2022 apk para hilesi<br />
|
29 |
-
fifa mobile apk indir ücretsiz<br />
|
30 |
-
fifa mobile son sürüm apk hile<br />
|
31 |
-
fifa mobile android oyun club apk<br />
|
32 |
-
fifa mobile 18 mod apk para hilesi<br />
|
33 |
-
fifa mobile 21 apk hileli indir<br />
|
34 |
-
fifa mobile apk mod menu<br />
|
35 |
-
fifa mobile apk full sınırsız para<br />
|
36 |
-
fifa mobile apk hile nasıl yapılır<br />
|
37 |
-
fifa mobile apk güncel hile<br />
|
38 |
-
fifa mobile apk mega hileli<br />
|
39 |
-
fifa mobile apk vip hile<br />
|
40 |
-
fifa mobile apk altın hilesi<br />
|
41 |
-
fifa mobile apk elmas hilesi<br />
|
42 |
-
fifa mobile apk oyuncu hilesi<br />
|
43 |
-
fifa mobile apk transfer hilesi<br />
|
44 |
-
fifa mobile apk antrenman hilesi<br />
|
45 |
-
fifa mobile apk enerji hilesi<br />
|
46 |
-
fifa mobile apk online hile<br />
|
47 |
-
fifa mobile apk offline hile<br />
|
48 |
-
fifa mobile apk no root hile<br />
|
49 |
-
fifa mobile apk yeni hileler<br />
|
50 |
-
fifa mobile apk kolay hile yapma<br />
|
51 |
-
fifa mobile mod apk son sürüm indir<br />
|
52 |
-
fifa mobile mod apk android 1<br />
|
53 |
-
fifa mobile mod apk revdl<br />
|
54 |
-
fifa mobile mod apk rexdl<br />
|
55 |
-
fifa mobile mod apk happymod<br />
|
56 |
-
fifa mobile mod apk an1.com<br />
|
57 |
-
fifa mobile mod apk unlimited coins and points<br />
|
58 |
-
fifa mobile mod apk all players unlocked<br />
|
59 |
-
fifa mobile mod apk latest version 2022<br />
|
60 |
-
fifa mobile mod apk free download for android<br />
|
61 |
-
fifa mobile mod apk no verification required<br />
|
62 |
-
fifa mobile mod apk no ban risk<br />
|
63 |
-
fifa mobile mod apk anti ban protection<br />
|
64 |
-
fifa mobile mod apk cheat engine enabled<br />
|
65 |
-
fifa mobile mod apk gameplay video proof</p>
|
66 |
-
<h2>What is para hilesi apk and how does it work?</h2>
|
67 |
-
<p>Para hilesi apk is a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. It is an application that you can download and install on your device, and use it to modify the game data and resources. Para hilesi apk is not an official product of EA Sports or FIFA Mobile, and it is not endorsed or supported by them. It is a third-party tool that is created by unknown developers who may have malicious intentions.</p>
|
68 |
-
<h3>Para hilesi apk features and benefits</h3>
|
69 |
-
<p>Para hilesi apk promises to give you several benefits that can enhance your FIFA Mobile experience. Some of these benefits are:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Unlimited coins and gems: You can get as many coins and gems as you want, without spending any real money or time. You can use them to buy players, packs, upgrades, and more. <li>Unlimited stamina: You can play as many matches as you want, without waiting for your stamina to refill. <li>Unlimited energy: You can participate in as many events as you want, without worrying about running out of energy. <li>Unlimited VIP points: You can access the VIP Program and enjoy its perks, such as exclusive players, packs, rewards, and more. <li>No ads: You can play the game without any interruptions or distractions from ads.</li>
|
72 |
-
</ul>
|
73 |
-
<h3>Para hilesi apk risks and drawbacks</h3>
|
74 |
-
<p>However, para hilesi apk also comes with several risks and drawbacks that can ruin your FIFA Mobile experience. Some of these risks and drawbacks are:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Ban risk: Using para hilesi apk is against the terms of service of FIFA Mobile, and it can be detected by the game's anti-cheat system. If you are caught using para hilesi apk, you may face consequences such as account suspension or deletion, loss of progress and items, or legal action. <li>Virus risk: Downloading para hilesi apk from unknown sources can expose your device to viruses, malware, spyware, or other harmful software. These can damage your device, steal your personal information, or compromise your security. <li>Compatibility risk: Para hilesi apk may not work properly with the latest version of FIFA Mobile, or with different devices or operating systems. It may cause errors, glitches, crashes, or performance issues that can affect your gameplay. <li>Quality risk: Using para hilesi apk may reduce the quality of your gameplay, as it may make the game too easy or boring. It may also take away the fun and challenge of earning coins and gems legitimately, or competing with other players fairly.</li>
|
77 |
-
</ul>
|
78 |
-
<h2>How to download and install para hilesi apk on your device?</h2>
|
79 |
-
<p>If you still want to try para hilesi apk despite its risks and drawbacks, you will need to follow some steps to download and install it on your device. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk.</p>
|
80 |
-
<h3>Step-by-step guide for Android users</h3>
|
81 |
-
<p>If you are using an Android device, here are the steps to download and install para hilesi apk:</p>
|
82 |
-
<ol>
|
83 |
-
<li>Go to the settings of your device and enable the option to install apps from unknown sources.</li>
|
84 |
-
<li>Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.</li>
|
85 |
-
<li>Click on the download button and wait for the file to be downloaded on your device.</li>
|
86 |
-
<li>Locate the file in your device's file manager and tap on it to start the installation process.</li>
|
87 |
-
<li>Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.</ <li>Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.</li>
|
88 |
-
</ol>
|
89 |
-
<h3>Step-by-step guide for iOS users</h3>
|
90 |
-
<p>If you are using an iOS device, here are the steps to download and install para hilesi apk:</p>
|
91 |
-
<ol>
|
92 |
-
<li>Go to the settings of your device and trust the profile of para hilesi apk. You may need to enter your device's passcode to do so.</li>
|
93 |
-
<li>Go to a website that offers para hilesi apk download link. Make sure it is a reliable and trustworthy source.</li>
|
94 |
-
<li>Click on the download button and wait for the file to be downloaded on your device.</li>
|
95 |
-
<li>Locate the file in your device's file manager and tap on it to start the installation process.</li>
|
96 |
-
<li>Follow the instructions on the screen and grant the necessary permissions to para hilesi apk.</li>
|
97 |
-
<li>Once the installation is complete, you can launch para hilesi apk from your device's app drawer or home screen.</li>
|
98 |
-
</ol>
|
99 |
-
<h2>How to use para hilesi apk to get unlimited coins and gems in FIFA Mobile?</h2>
|
100 |
-
<p>After you have downloaded and installed para hilesi apk on your device, you can use it to get unlimited coins and gems in FIFA Mobile. Here are some tips and tricks for using para hilesi apk effectively:</p>
|
101 |
-
<ul>
|
102 |
-
<li>Make sure you have a stable internet connection and enough storage space on your device.</li>
|
103 |
-
<li>Make sure you have the latest version of FIFA Mobile installed on your device.</li>
|
104 |
-
<li>Make sure you have a backup of your FIFA Mobile account and data, in case something goes wrong or you get banned.</li>
|
105 |
-
<li>Launch para hilesi apk and enter your FIFA Mobile username or email address.</li>
|
106 |
-
<li>Select the amount of coins and gems you want to generate. You can also choose other options such as stamina, energy, VIP points, or no ads.</li>
|
107 |
-
<li>Click on the generate button and wait for the process to complete. You may need to verify that you are not a robot by completing a captcha or a survey.</li>
|
108 |
-
<li>Once the process is done, you can close para hilesi apk and open FIFA Mobile. You should see your coins and gems added to your account.</li>
|
109 |
-
</ul>
|
110 |
-
<h3>Alternatives to para hilesi apk</h3>
|
111 |
-
<p>If you are looking for alternatives to para hilesi apk, there are some other ways to get coins and gems in FIFA Mobile without cheating. Some of these ways are:</p>
|
112 |
-
<ul>
|
113 |
-
<li>Playing matches and events: You can earn coins and gems by playing different modes and events in FIFA Mobile, such as Attack Mode, Head to Head, Season, Campaign, Events, Squad Building Challenges, Team of the Week, etc. You can also get bonus coins and gems by completing daily and weekly objectives, achievements, milestones, etc. <li>Buying packs and offers: You can buy coins and gems with real money by purchasing packs and offers in FIFA Mobile. There are different types of packs and offers available, such as player packs, icon packs, hero packs, event packs, special packs, etc. You can also get discounts and deals by checking the store regularly. <li>Selling players and items: You can sell your unwanted players and items in FIFA Mobile by using the market or the quick sell option. You can get coins by selling your players or items to other players or to the game. You can also get gems by selling some rare or special players or items. <li>Joining a league: You can join a league with other players in FIFA Mobile and benefit from their help and support. You can get coins and gems by participating in league tournaments, league vs league matches, league survival events, etc. You can also get rewards by contributing to your league's achievements.</li>
|
114 |
-
</ul>
|
115 |
-
<h2>Conclusion</h2>
|
116 |
-
<h4>Summary of the main points</h4>
|
117 |
-
<p>In this article, we have discussed FIFA Mobile para hilesi apk, a cheat tool that claims to give you unlimited coins and gems in FIFA Mobile. We have explained what FIFA Mobile is and why it is so popular, what para hilesi apk is and how it works, how to download and install para hilesi apk on your device, how to use para hilesi apk to get unlimited coins and gems in FIFA Mobile, and some alternatives to para hilesi apk. We have also highlighted some of the risks and drawbacks of using para hilesi apk, such as ban risk, virus risk, compatibility risk, quality risk, etc.</p>
|
118 |
-
<h4>Call to action and disclaimer</h4>
|
119 |
-
<p>If you want to try para hilesi apk for yourself, you can follow the steps we have provided above. However, we do not recommend doing so, as it may harm your device or your account. Use para hilesi apk at your own risk. We are not responsible for any damage or loss that may occur from using para hilesi apk.</p>
|
120 |
-
<p>Alternatively, you can play FIFA Mobile the legit way and enjoy the game without cheating. You can earn coins and gems by playing matches and events, buying packs and offers, selling players and items, joining a league, etc. You can also improve your skills and strategies by learning from other players, watching tutorials, reading guides, etc. You can have fun and satisfaction by building your ultimate team of soccer stars, competing in various modes and events, and experiencing realistic soccer simulation on your device.</p>
|
121 |
-
<p>Whatever you choose to do, we hope you have a great time playing FIFA Mobile. Thank you for reading this article.</p>
|
122 |
-
<h2>FAQs</h2>
|
123 |
-
<p>Here are some frequently asked questions about FIFA Mobile para hilesi apk:</p>
|
124 |
-
<ol>
|
125 |
-
<li>Q: Is para hilesi apk free to use? A: Yes, para hilesi apk is free to use. However, you may need to complete some verification steps before you can use it, such as completing a captcha or a survey.</li>
|
126 |
-
<li>Q: Is para hilesi apk safe to use? A: No, para hilesi apk is not safe to use. It is a cheat tool that violates the terms of service of FIFA Mobile, and it can be detected by the game's anti-cheat system. It can also expose your device to viruses, malware, spyware, or other harmful software. It can also cause errors, glitches, crashes, or performance issues that can affect your gameplay.</li>
|
127 |
-
<li>Q: Can I use para hilesi apk on any device or operating system? A: No, para hilesi apk may not work properly on any device or operating system. It may be incompatible with the latest version of FIFA Mobile, or with different devices or operating systems. It may also require some settings or permissions that may not be available on your device or operating system.</li>
|
128 |
-
<li>Q: Can I use para hilesi apk with my existing FIFA Mobile account? A: Yes, you can use para hilesi apk with your existing FIFA Mobile account. However, you may risk losing your account or your progress if you are caught using para hilesi apk. You may also lose your items or rewards that you have earned legitimately in the game.</li>
|
129 |
-
<li>Q: Can I use para hilesi apk offline? A: No, you cannot use para hilesi apk offline. You need to have a stable internet connection and enough storage space on your device to use para hilesi apk. You also need to connect to the game's servers to generate coins and gems in FIFA Mobile.</li>
|
130 |
-
</ol></p> 197e85843d<br />
|
131 |
-
<br />
|
132 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1toTree/lora_test/ppdiffusers/models/embeddings.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
import math
|
16 |
-
|
17 |
-
import numpy as np
|
18 |
-
import paddle
|
19 |
-
from paddle import nn
|
20 |
-
|
21 |
-
|
22 |
-
def get_timestep_embedding(
|
23 |
-
timesteps: paddle.Tensor,
|
24 |
-
embedding_dim: int,
|
25 |
-
flip_sin_to_cos: bool = False,
|
26 |
-
downscale_freq_shift: float = 1,
|
27 |
-
scale: float = 1,
|
28 |
-
max_period: int = 10000,
|
29 |
-
):
|
30 |
-
"""
|
31 |
-
This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
|
32 |
-
|
33 |
-
:param timesteps: a 1-D Tensor of N indices, one per batch element.
|
34 |
-
These may be fractional.
|
35 |
-
:param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the
|
36 |
-
embeddings. :return: an [N x dim] Tensor of positional embeddings.
|
37 |
-
"""
|
38 |
-
assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
|
39 |
-
|
40 |
-
half_dim = embedding_dim // 2
|
41 |
-
exponent = -math.log(max_period) * paddle.arange(start=0, end=half_dim, dtype="float32")
|
42 |
-
exponent = exponent / (half_dim - downscale_freq_shift)
|
43 |
-
|
44 |
-
emb = paddle.exp(exponent)
|
45 |
-
emb = timesteps[:, None].cast("float32") * emb[None, :]
|
46 |
-
|
47 |
-
# scale embeddings
|
48 |
-
emb = scale * emb
|
49 |
-
|
50 |
-
# concat sine and cosine embeddings
|
51 |
-
emb = paddle.concat([paddle.sin(emb), paddle.cos(emb)], axis=-1)
|
52 |
-
|
53 |
-
# flip sine and cosine embeddings
|
54 |
-
if flip_sin_to_cos:
|
55 |
-
emb = paddle.concat([emb[:, half_dim:], emb[:, :half_dim]], axis=-1)
|
56 |
-
|
57 |
-
# zero pad
|
58 |
-
if embedding_dim % 2 == 1:
|
59 |
-
emb = paddle.concat(emb, paddle.zeros([emb.shape[0], 1]), axis=-1)
|
60 |
-
return emb
|
61 |
-
|
62 |
-
|
63 |
-
class TimestepEmbedding(nn.Layer):
|
64 |
-
def __init__(self, in_channels: int, time_embed_dim: int, act_fn: str = "silu", out_dim: int = None):
|
65 |
-
super().__init__()
|
66 |
-
|
67 |
-
self.linear_1 = nn.Linear(in_channels, time_embed_dim)
|
68 |
-
self.act = None
|
69 |
-
if act_fn == "silu":
|
70 |
-
self.act = nn.Silu()
|
71 |
-
elif act_fn == "mish":
|
72 |
-
self.act = nn.Mish()
|
73 |
-
|
74 |
-
if out_dim is not None:
|
75 |
-
time_embed_dim_out = out_dim
|
76 |
-
else:
|
77 |
-
time_embed_dim_out = time_embed_dim
|
78 |
-
self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out)
|
79 |
-
|
80 |
-
def forward(self, sample):
|
81 |
-
sample = self.linear_1(sample)
|
82 |
-
|
83 |
-
if self.act is not None:
|
84 |
-
sample = self.act(sample)
|
85 |
-
|
86 |
-
sample = self.linear_2(sample)
|
87 |
-
return sample
|
88 |
-
|
89 |
-
|
90 |
-
class Timesteps(nn.Layer):
|
91 |
-
def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float):
|
92 |
-
super().__init__()
|
93 |
-
self.num_channels = num_channels
|
94 |
-
self.flip_sin_to_cos = flip_sin_to_cos
|
95 |
-
self.downscale_freq_shift = downscale_freq_shift
|
96 |
-
|
97 |
-
def forward(self, timesteps):
|
98 |
-
t_emb = get_timestep_embedding(
|
99 |
-
timesteps,
|
100 |
-
self.num_channels,
|
101 |
-
flip_sin_to_cos=self.flip_sin_to_cos,
|
102 |
-
downscale_freq_shift=self.downscale_freq_shift,
|
103 |
-
)
|
104 |
-
return t_emb
|
105 |
-
|
106 |
-
|
107 |
-
class GaussianFourierProjection(nn.Layer):
|
108 |
-
"""Gaussian Fourier embeddings for noise levels."""
|
109 |
-
|
110 |
-
def __init__(
|
111 |
-
self, embedding_size: int = 256, scale: float = 1.0, set_W_to_weight=True, log=True, flip_sin_to_cos=False
|
112 |
-
):
|
113 |
-
super().__init__()
|
114 |
-
self.register_buffer("weight", paddle.randn((embedding_size,)) * scale)
|
115 |
-
self.log = log
|
116 |
-
self.flip_sin_to_cos = flip_sin_to_cos
|
117 |
-
|
118 |
-
if set_W_to_weight:
|
119 |
-
# to delete later
|
120 |
-
self.register_buffer("W", paddle.randn((embedding_size,)) * scale)
|
121 |
-
|
122 |
-
self.weight = self.W
|
123 |
-
|
124 |
-
def forward(self, x):
|
125 |
-
if self.log:
|
126 |
-
x = paddle.log(x.cast(self.weight.dtype))
|
127 |
-
|
128 |
-
x_proj = x[:, None] * self.weight[None, :] * 2 * np.pi
|
129 |
-
|
130 |
-
if self.flip_sin_to_cos:
|
131 |
-
out = paddle.concat([paddle.cos(x_proj), paddle.sin(x_proj)], axis=-1)
|
132 |
-
else:
|
133 |
-
out = paddle.concat([paddle.sin(x_proj), paddle.cos(x_proj)], axis=-1)
|
134 |
-
return out
|
135 |
-
|
136 |
-
|
137 |
-
class ImagePositionalEmbeddings(nn.Layer):
|
138 |
-
"""
|
139 |
-
Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the
|
140 |
-
height and width of the latent space.
|
141 |
-
|
142 |
-
For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092
|
143 |
-
|
144 |
-
For VQ-diffusion:
|
145 |
-
|
146 |
-
Output vector embeddings are used as input for the transformer.
|
147 |
-
|
148 |
-
Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE.
|
149 |
-
|
150 |
-
Args:
|
151 |
-
num_embed (`int`):
|
152 |
-
Number of embeddings for the latent pixels embeddings.
|
153 |
-
height (`int`):
|
154 |
-
Height of the latent image i.e. the number of height embeddings.
|
155 |
-
width (`int`):
|
156 |
-
Width of the latent image i.e. the number of width embeddings.
|
157 |
-
embed_dim (`int`):
|
158 |
-
Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.
|
159 |
-
"""
|
160 |
-
|
161 |
-
def __init__(
|
162 |
-
self,
|
163 |
-
num_embed: int,
|
164 |
-
height: int,
|
165 |
-
width: int,
|
166 |
-
embed_dim: int,
|
167 |
-
):
|
168 |
-
super().__init__()
|
169 |
-
|
170 |
-
self.height = height
|
171 |
-
self.width = width
|
172 |
-
self.num_embed = num_embed
|
173 |
-
self.embed_dim = embed_dim
|
174 |
-
|
175 |
-
self.emb = nn.Embedding(self.num_embed, embed_dim)
|
176 |
-
self.height_emb = nn.Embedding(self.height, embed_dim)
|
177 |
-
self.width_emb = nn.Embedding(self.width, embed_dim)
|
178 |
-
|
179 |
-
def forward(self, index):
|
180 |
-
emb = self.emb(index)
|
181 |
-
|
182 |
-
height_emb = self.height_emb(paddle.arange(self.height).reshape([1, self.height]))
|
183 |
-
|
184 |
-
# 1 x H x D -> 1 x H x 1 x D
|
185 |
-
height_emb = height_emb.unsqueeze(2)
|
186 |
-
|
187 |
-
width_emb = self.width_emb(paddle.arange(self.width).reshape([1, self.width]))
|
188 |
-
|
189 |
-
# 1 x W x D -> 1 x 1 x W x D
|
190 |
-
width_emb = width_emb.unsqueeze(1)
|
191 |
-
|
192 |
-
pos_emb = height_emb + width_emb
|
193 |
-
|
194 |
-
# 1 x H x W x D -> 1 x L xD
|
195 |
-
pos_emb = pos_emb.reshape([1, self.height * self.width, -1])
|
196 |
-
|
197 |
-
emb = emb + pos_emb[:, : emb.shape[1], :]
|
198 |
-
|
199 |
-
return emb
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/3bdo7ss/Neutron_Chatbot/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Neutron Chatbot
|
3 |
-
emoji: 📊
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.3
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: afl-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIConsultant/MusicGen/tests/common_utils/wav_utils.py
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
from pathlib import Path
|
8 |
-
import typing as tp
|
9 |
-
|
10 |
-
import torch
|
11 |
-
import torchaudio
|
12 |
-
|
13 |
-
|
14 |
-
def get_white_noise(chs: int = 1, num_frames: int = 1):
|
15 |
-
wav = torch.randn(chs, num_frames)
|
16 |
-
return wav
|
17 |
-
|
18 |
-
|
19 |
-
def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1):
|
20 |
-
wav = torch.randn(bs, chs, num_frames)
|
21 |
-
return wav
|
22 |
-
|
23 |
-
|
24 |
-
def save_wav(path: str, wav: torch.Tensor, sample_rate: int):
|
25 |
-
fp = Path(path)
|
26 |
-
kwargs: tp.Dict[str, tp.Any] = {}
|
27 |
-
if fp.suffix == '.wav':
|
28 |
-
kwargs['encoding'] = 'PCM_S'
|
29 |
-
kwargs['bits_per_sample'] = 16
|
30 |
-
elif fp.suffix == '.mp3':
|
31 |
-
kwargs['compression'] = 320
|
32 |
-
torchaudio.save(str(fp), wav, sample_rate, **kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/VQ-Trans/README.md
DELETED
@@ -1,400 +0,0 @@
|
|
1 |
-
# Motion VQ-Trans
|
2 |
-
Pytorch implementation of paper "Generating Human Motion from Textual Descriptions with High Quality Discrete Representation"
|
3 |
-
|
4 |
-
|
5 |
-
[[Notebook Demo]](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing)
|
6 |
-
|
7 |
-
|
8 |
-

|
9 |
-
|
10 |
-
If our project is helpful for your research, please consider citing : (todo)
|
11 |
-
```
|
12 |
-
@inproceedings{shen2020ransac,
|
13 |
-
title={RANSAC-Flow: generic two-stage image alignment},
|
14 |
-
author={Shen, Xi and Darmon, Fran{\c{c}}ois and Efros, Alexei A and Aubry, Mathieu},
|
15 |
-
booktitle={16th European Conference on Computer Vision}
|
16 |
-
year={2020}
|
17 |
-
}
|
18 |
-
```
|
19 |
-
|
20 |
-
|
21 |
-
## Table of Content
|
22 |
-
* [1. Visual Results](#1-visual-results)
|
23 |
-
* [2. Installation](#2-installation)
|
24 |
-
* [3. Quick Start](#3-quick-start)
|
25 |
-
* [4. Train](#4-train)
|
26 |
-
* [5. Evaluation](#5-evaluation)
|
27 |
-
* [6. Motion Render](#6-motion-render)
|
28 |
-
* [7. Acknowledgement](#7-acknowledgement)
|
29 |
-
* [8. ChangLog](#8-changlog)
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
## 1. Visual Results (More results can be found in our project page (todo))
|
35 |
-
|
36 |
-

|
37 |
-
|
38 |
-
|
39 |
-
## 2. Installation
|
40 |
-
|
41 |
-
### 2.1. Environment
|
42 |
-
|
43 |
-
<!-- Our model can be learnt in a **single GPU GeForce GTX 1080Ti** (12G).
|
44 |
-
|
45 |
-
Install Pytorch adapted to your CUDA version :
|
46 |
-
|
47 |
-
* [Pytorch 1.2.0](https://pytorch.org/get-started/previous-versions/#linux-and-windows-1)
|
48 |
-
* [Torchvision 0.4.0](https://pytorch.org/get-started/previous-versions/#linux-and-windows-1)
|
49 |
-
|
50 |
-
Other dependencies (tqdm, visdom, pandas, kornia, opencv-python) :
|
51 |
-
``` Bash
|
52 |
-
bash requirement.sh
|
53 |
-
``` -->
|
54 |
-
|
55 |
-
Our model can be learnt in a **single GPU V100-32G**
|
56 |
-
|
57 |
-
```bash
|
58 |
-
conda env create -f environment.yml
|
59 |
-
conda activate VQTrans
|
60 |
-
```
|
61 |
-
|
62 |
-
The code was tested on Python 3.8 and PyTorch 1.8.1.
|
63 |
-
|
64 |
-
|
65 |
-
### 2.2. Dependencies
|
66 |
-
|
67 |
-
```bash
|
68 |
-
bash dataset/prepare/download_glove.sh
|
69 |
-
```
|
70 |
-
|
71 |
-
|
72 |
-
### 2.3. Datasets
|
73 |
-
|
74 |
-
|
75 |
-
We are using two 3D human motion-language dataset: HumanML3D and KIT-ML. For both datasets, you could find the details as well as download link [[here]](https://github.com/EricGuo5513/HumanML3D).
|
76 |
-
|
77 |
-
Take HumanML3D for an example, the file directory should look like this:
|
78 |
-
```
|
79 |
-
./dataset/HumanML3D/
|
80 |
-
├── new_joint_vecs/
|
81 |
-
├── texts/
|
82 |
-
├── Mean.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D)
|
83 |
-
├── Std.npy # same as in [HumanML3D](https://github.com/EricGuo5513/HumanML3D)
|
84 |
-
├── train.txt
|
85 |
-
├── val.txt
|
86 |
-
├── test.txt
|
87 |
-
├── train_val.txt
|
88 |
-
└──all.txt
|
89 |
-
```
|
90 |
-
|
91 |
-
|
92 |
-
### 2.4. Motion & text feature extractors:
|
93 |
-
|
94 |
-
We use the same extractors provided by [t2m](https://github.com/EricGuo5513/text-to-motion) to evaluate our generated motions. Please download the extractors.
|
95 |
-
|
96 |
-
```bash
|
97 |
-
bash dataset/prepare/download_extractor.sh
|
98 |
-
```
|
99 |
-
|
100 |
-
### 2.5. Pre-trained models
|
101 |
-
|
102 |
-
The pretrained model files will be stored in the 'pretrained' folder:
|
103 |
-
```bash
|
104 |
-
bash dataset/prepare/download_model.sh
|
105 |
-
```
|
106 |
-
|
107 |
-
<!-- Quick download :
|
108 |
-
|
109 |
-
``` Bash
|
110 |
-
cd model/pretrained
|
111 |
-
bash download_model.sh
|
112 |
-
```
|
113 |
-
|
114 |
-
For more details of the pre-trained models, see [here](https://github.com/XiSHEN0220/RANSAC-Flow/blob/master/model/pretrained) -->
|
115 |
-
|
116 |
-
### 2.6. Render motion (optional)
|
117 |
-
|
118 |
-
If you want to render the generated motion, you need to install:
|
119 |
-
|
120 |
-
```bash
|
121 |
-
sudo sh dataset/prepare/download_smpl.sh
|
122 |
-
conda install -c menpo osmesa
|
123 |
-
conda install h5py
|
124 |
-
conda install -c conda-forge shapely pyrender trimesh mapbox_earcut
|
125 |
-
```
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
## 3. Quick Start
|
130 |
-
|
131 |
-
A quick start guide of how to use our code is available in [demo.ipynb](https://colab.research.google.com/drive/1tAHlmcpKcjg_zZrqKku7AfpqdVAIFrF8?usp=sharing)
|
132 |
-
|
133 |
-
<p align="center">
|
134 |
-
<img src="img/demo.png" width="400px" alt="demo">
|
135 |
-
</p>
|
136 |
-
|
137 |
-
|
138 |
-
## 4. Train
|
139 |
-
|
140 |
-
Note that, for kit dataset, just need to set '--dataname kit'.
|
141 |
-
|
142 |
-
### 4.1. VQ-VAE
|
143 |
-
|
144 |
-
The results are saved in the folder output_vqfinal.
|
145 |
-
|
146 |
-
<details>
|
147 |
-
<summary>
|
148 |
-
VQ training
|
149 |
-
</summary>
|
150 |
-
|
151 |
-
```bash
|
152 |
-
python3 train_vq.py \
|
153 |
-
--batch-size 256 \
|
154 |
-
--lr 2e-4 \
|
155 |
-
--total-iter 300000 \
|
156 |
-
--lr-scheduler 200000 \
|
157 |
-
--nb-code 512 \
|
158 |
-
--down-t 2 \
|
159 |
-
--depth 3 \
|
160 |
-
--dilation-growth-rate 3 \
|
161 |
-
--out-dir output \
|
162 |
-
--dataname t2m \
|
163 |
-
--vq-act relu \
|
164 |
-
--quantizer ema_reset \
|
165 |
-
--loss-vel 0.5 \
|
166 |
-
--recons-loss l1_smooth \
|
167 |
-
--exp-name VQVAE
|
168 |
-
```
|
169 |
-
|
170 |
-
</details>
|
171 |
-
|
172 |
-
### 4.2. Motion-Transformer
|
173 |
-
|
174 |
-
The results are saved in the folder output_transformer.
|
175 |
-
|
176 |
-
<details>
|
177 |
-
<summary>
|
178 |
-
MoTrans training
|
179 |
-
</summary>
|
180 |
-
|
181 |
-
```bash
|
182 |
-
python3 train_t2m_trans.py \
|
183 |
-
--exp-name VQTransformer \
|
184 |
-
--batch-size 128 \
|
185 |
-
--num-layers 9 \
|
186 |
-
--embed-dim-gpt 1024 \
|
187 |
-
--nb-code 512 \
|
188 |
-
--n-head-gpt 16 \
|
189 |
-
--block-size 51 \
|
190 |
-
--ff-rate 4 \
|
191 |
-
--drop-out-rate 0.1 \
|
192 |
-
--resume-pth output/VQVAE/net_last.pth \
|
193 |
-
--vq-name VQVAE \
|
194 |
-
--out-dir output \
|
195 |
-
--total-iter 300000 \
|
196 |
-
--lr-scheduler 150000 \
|
197 |
-
--lr 0.0001 \
|
198 |
-
--dataname t2m \
|
199 |
-
--down-t 2 \
|
200 |
-
--depth 3 \
|
201 |
-
--quantizer ema_reset \
|
202 |
-
--eval-iter 10000 \
|
203 |
-
--pkeep 0.5 \
|
204 |
-
--dilation-growth-rate 3 \
|
205 |
-
--vq-act relu
|
206 |
-
```
|
207 |
-
|
208 |
-
</details>
|
209 |
-
|
210 |
-
## 5. Evaluation
|
211 |
-
|
212 |
-
### 5.1. VQ-VAE
|
213 |
-
<details>
|
214 |
-
<summary>
|
215 |
-
VQ eval
|
216 |
-
</summary>
|
217 |
-
|
218 |
-
```bash
|
219 |
-
python3 VQ_eval.py \
|
220 |
-
--batch-size 256 \
|
221 |
-
--lr 2e-4 \
|
222 |
-
--total-iter 300000 \
|
223 |
-
--lr-scheduler 200000 \
|
224 |
-
--nb-code 512 \
|
225 |
-
--down-t 2 \
|
226 |
-
--depth 3 \
|
227 |
-
--dilation-growth-rate 3 \
|
228 |
-
--out-dir output \
|
229 |
-
--dataname t2m \
|
230 |
-
--vq-act relu \
|
231 |
-
--quantizer ema_reset \
|
232 |
-
--loss-vel 0.5 \
|
233 |
-
--recons-loss l1_smooth \
|
234 |
-
--exp-name TEST_VQVAE \
|
235 |
-
--resume-pth output/VQVAE/net_last.pth
|
236 |
-
```
|
237 |
-
|
238 |
-
</details>
|
239 |
-
|
240 |
-
### 5.2. Motion-Transformer
|
241 |
-
|
242 |
-
<details>
|
243 |
-
<summary>
|
244 |
-
MoTrans eval
|
245 |
-
</summary>
|
246 |
-
|
247 |
-
```bash
|
248 |
-
python3 GPT_eval_multi.py \
|
249 |
-
--exp-name TEST_VQTransformer \
|
250 |
-
--batch-size 128 \
|
251 |
-
--num-layers 9 \
|
252 |
-
--embed-dim-gpt 1024 \
|
253 |
-
--nb-code 512 \
|
254 |
-
--n-head-gpt 16 \
|
255 |
-
--block-size 51 \
|
256 |
-
--ff-rate 4 \
|
257 |
-
--drop-out-rate 0.1 \
|
258 |
-
--resume-pth output/VQVAE/net_last.pth \
|
259 |
-
--vq-name VQVAE \
|
260 |
-
--out-dir output \
|
261 |
-
--total-iter 300000 \
|
262 |
-
--lr-scheduler 150000 \
|
263 |
-
--lr 0.0001 \
|
264 |
-
--dataname t2m \
|
265 |
-
--down-t 2 \
|
266 |
-
--depth 3 \
|
267 |
-
--quantizer ema_reset \
|
268 |
-
--eval-iter 10000 \
|
269 |
-
--pkeep 0.5 \
|
270 |
-
--dilation-growth-rate 3 \
|
271 |
-
--vq-act relu \
|
272 |
-
--resume-gpt output/VQTransformer/net_best_fid.pth
|
273 |
-
```
|
274 |
-
|
275 |
-
</details>
|
276 |
-
|
277 |
-
|
278 |
-
## 6. Motion Render
|
279 |
-
|
280 |
-
<details>
|
281 |
-
<summary>
|
282 |
-
Motion Render
|
283 |
-
</summary>
|
284 |
-
|
285 |
-
You should input the npy folder address and the motion names. Here is an example:
|
286 |
-
|
287 |
-
```bash
|
288 |
-
python3 render_final.py --filedir output/TEST_VQTransformer/ --motion-list 000019 005485
|
289 |
-
```
|
290 |
-
|
291 |
-
</details>
|
292 |
-
|
293 |
-
### 7. Acknowledgement
|
294 |
-
|
295 |
-
We appreciate helps from :
|
296 |
-
|
297 |
-
* Public code like [text-to-motion](https://github.com/EricGuo5513/text-to-motion), [TM2T](https://github.com/EricGuo5513/TM2T) etc.
|
298 |
-
|
299 |
-
### 8. ChangLog
|
300 |
-
|
301 |
-
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
<!-- # VQGPT
|
324 |
-
|
325 |
-
```
|
326 |
-
# VQ during training OT
|
327 |
-
/apdcephfs_cq2/share_1290939/jirozhang/anaconda3/envs/motionclip/bin/python3 train_251_cnn_all.py \
|
328 |
-
--batch-size 128 \
|
329 |
-
--exp-name xxxxxx \
|
330 |
-
--lr 2e-4 \
|
331 |
-
--total-iter 300000 \
|
332 |
-
--lr-scheduler 200000 \
|
333 |
-
--nb-code 512 \
|
334 |
-
--down-t 2 \
|
335 |
-
--depth 5 \
|
336 |
-
--out-dir /apdcephfs_cq2/share_1290939/jirozhang/VQCNN_HUMAN/ \
|
337 |
-
--dataname t2m \
|
338 |
-
--vq-act relu \
|
339 |
-
--quantizer ot \
|
340 |
-
--ot-temperature 1 \
|
341 |
-
--ot-eps 0.5 \
|
342 |
-
--commit 0.001 \
|
343 |
-
```
|
344 |
-
|
345 |
-
```
|
346 |
-
# VQ251 training baseline
|
347 |
-
/apdcephfs_cq2/share_1290939/jirozhang/anaconda3/envs/motionclip/bin/python3 train_251_cnn_all.py \
|
348 |
-
--batch-size 128 \
|
349 |
-
--exp-name VQ263_300K_512cb_down4_t2m_ema_relu_test \
|
350 |
-
--lr 2e-4 \
|
351 |
-
--total-iter 300000 \
|
352 |
-
--lr-scheduler 200000 \
|
353 |
-
--nb-code 512 \
|
354 |
-
--down-t 2 \
|
355 |
-
--depth 5 \
|
356 |
-
--out-dir /apdcephfs_cq2/share_1290939/jirozhang/VQCNN_HUMAN/ \
|
357 |
-
--dataname t2m \
|
358 |
-
--vq-act relu \
|
359 |
-
--quantizer ema \
|
360 |
-
```
|
361 |
-
|
362 |
-
|
363 |
-
```bash
|
364 |
-
# gpt training + noise
|
365 |
-
/apdcephfs_cq2/share_1290939/jirozhang/anaconda3/envs/motionclip/bin/python3 train_gpt_cnn_noise.py \
|
366 |
-
--exp-name GPT_VQ_300K_512cb_down4_t2m_ema_relu_bs128_ws64_fid_mask1_08 \
|
367 |
-
--batch-size 128 \
|
368 |
-
--num-layers 4 \
|
369 |
-
--block-size 51 \
|
370 |
-
--n-head-gpt 8 \
|
371 |
-
--ff-rate 4 \
|
372 |
-
--drop-out-rate 0.1 \
|
373 |
-
--resume-pth output_vqhuman/VQ_300K_512cb_down4_t2m_ema_relu_bs128_ws64/net_best_fid.pth \
|
374 |
-
--vq-name VQ_300K_512cb_down4_t2m_ema_relu_bs128_ws64_fid_mask1_08 \
|
375 |
-
--total-iter 300000 \
|
376 |
-
--lr-scheduler 150000 \
|
377 |
-
--lr 0.0001 \
|
378 |
-
--if-auxloss \
|
379 |
-
--dataname t2m \
|
380 |
-
--down-t 2 \
|
381 |
-
--depth 5 \
|
382 |
-
--quantizer ema \
|
383 |
-
--eval-iter 5000 \
|
384 |
-
--pkeep 0.8
|
385 |
-
```
|
386 |
-
|
387 |
-
|
388 |
-
### Visualize VQ (Arch Taming) in HTML
|
389 |
-
|
390 |
-
* Generate motion. This will save generated motions in `./visual_results/vel05_taming_l1s`
|
391 |
-
|
392 |
-
```
|
393 |
-
python vis.py --dataname t2m --resume-pth /apdcephfs_cq2/share_1290939/jirozhang/VQ_t2m_bailando_relu_NoNorm_dilate3_vel05_taming_l1s/net_last.pth --visual-name vel05_taming_l1s --vis-gt --nb-vis 20
|
394 |
-
```
|
395 |
-
|
396 |
-
* Make a Webpage. Go to visual_html.py, modify the name, then run :
|
397 |
-
|
398 |
-
```
|
399 |
-
python visual_html.py
|
400 |
-
``` -->
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/platforms/base.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
import abc
|
2 |
-
|
3 |
-
import six
|
4 |
-
|
5 |
-
|
6 |
-
@six.add_metaclass(abc.ABCMeta)
|
7 |
-
class Platform(object):
|
8 |
-
"""Base class for all OpenGL platforms.
|
9 |
-
|
10 |
-
Parameters
|
11 |
-
----------
|
12 |
-
viewport_width : int
|
13 |
-
The width of the main viewport, in pixels.
|
14 |
-
viewport_height : int
|
15 |
-
The height of the main viewport, in pixels
|
16 |
-
"""
|
17 |
-
|
18 |
-
def __init__(self, viewport_width, viewport_height):
|
19 |
-
self.viewport_width = viewport_width
|
20 |
-
self.viewport_height = viewport_height
|
21 |
-
|
22 |
-
@property
|
23 |
-
def viewport_width(self):
|
24 |
-
"""int : The width of the main viewport, in pixels.
|
25 |
-
"""
|
26 |
-
return self._viewport_width
|
27 |
-
|
28 |
-
@viewport_width.setter
|
29 |
-
def viewport_width(self, value):
|
30 |
-
self._viewport_width = value
|
31 |
-
|
32 |
-
@property
|
33 |
-
def viewport_height(self):
|
34 |
-
"""int : The height of the main viewport, in pixels.
|
35 |
-
"""
|
36 |
-
return self._viewport_height
|
37 |
-
|
38 |
-
@viewport_height.setter
|
39 |
-
def viewport_height(self, value):
|
40 |
-
self._viewport_height = value
|
41 |
-
|
42 |
-
@abc.abstractmethod
|
43 |
-
def init_context(self):
|
44 |
-
"""Create an OpenGL context.
|
45 |
-
"""
|
46 |
-
pass
|
47 |
-
|
48 |
-
@abc.abstractmethod
|
49 |
-
def make_current(self):
|
50 |
-
"""Make the OpenGL context current.
|
51 |
-
"""
|
52 |
-
pass
|
53 |
-
|
54 |
-
@abc.abstractmethod
|
55 |
-
def make_uncurrent(self):
|
56 |
-
"""Make the OpenGL context uncurrent.
|
57 |
-
"""
|
58 |
-
pass
|
59 |
-
|
60 |
-
@abc.abstractmethod
|
61 |
-
def delete_context(self):
|
62 |
-
"""Delete the OpenGL context.
|
63 |
-
"""
|
64 |
-
pass
|
65 |
-
|
66 |
-
@abc.abstractmethod
|
67 |
-
def supports_framebuffers(self):
|
68 |
-
"""Returns True if the method supports framebuffer rendering.
|
69 |
-
"""
|
70 |
-
pass
|
71 |
-
|
72 |
-
def __del__(self):
|
73 |
-
try:
|
74 |
-
self.delete_context()
|
75 |
-
except Exception:
|
76 |
-
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/htsat.py
DELETED
@@ -1,1022 +0,0 @@
|
|
1 |
-
# Ke Chen
|
2 | |
3 |
-
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
|
4 |
-
# Some layers designed on the model
|
5 |
-
# below codes are based and referred from https://github.com/microsoft/Swin-Transformer
|
6 |
-
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
|
7 |
-
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
import torch.nn.functional as F
|
11 |
-
from itertools import repeat
|
12 |
-
import collections.abc
|
13 |
-
import math
|
14 |
-
import warnings
|
15 |
-
|
16 |
-
from torch.nn.init import _calculate_fan_in_and_fan_out
|
17 |
-
import torch.utils.checkpoint as checkpoint
|
18 |
-
|
19 |
-
import random
|
20 |
-
|
21 |
-
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
|
22 |
-
from torchlibrosa.augmentation import SpecAugmentation
|
23 |
-
|
24 |
-
from itertools import repeat
|
25 |
-
from .utils import do_mixup, interpolate
|
26 |
-
|
27 |
-
from .feature_fusion import iAFF, AFF, DAF
|
28 |
-
|
29 |
-
# from PyTorch internals
|
30 |
-
def _ntuple(n):
|
31 |
-
def parse(x):
|
32 |
-
if isinstance(x, collections.abc.Iterable):
|
33 |
-
return x
|
34 |
-
return tuple(repeat(x, n))
|
35 |
-
return parse
|
36 |
-
|
37 |
-
to_1tuple = _ntuple(1)
|
38 |
-
to_2tuple = _ntuple(2)
|
39 |
-
to_3tuple = _ntuple(3)
|
40 |
-
to_4tuple = _ntuple(4)
|
41 |
-
to_ntuple = _ntuple
|
42 |
-
|
43 |
-
def drop_path(x, drop_prob: float = 0., training: bool = False):
|
44 |
-
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
45 |
-
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
|
46 |
-
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
47 |
-
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
|
48 |
-
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
|
49 |
-
'survival rate' as the argument.
|
50 |
-
"""
|
51 |
-
if drop_prob == 0. or not training:
|
52 |
-
return x
|
53 |
-
keep_prob = 1 - drop_prob
|
54 |
-
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
55 |
-
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
|
56 |
-
random_tensor.floor_() # binarize
|
57 |
-
output = x.div(keep_prob) * random_tensor
|
58 |
-
return output
|
59 |
-
|
60 |
-
|
61 |
-
class DropPath(nn.Module):
|
62 |
-
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
63 |
-
"""
|
64 |
-
def __init__(self, drop_prob=None):
|
65 |
-
super(DropPath, self).__init__()
|
66 |
-
self.drop_prob = drop_prob
|
67 |
-
|
68 |
-
def forward(self, x):
|
69 |
-
return drop_path(x, self.drop_prob, self.training)
|
70 |
-
|
71 |
-
class PatchEmbed(nn.Module):
|
72 |
-
""" 2D Image to Patch Embedding
|
73 |
-
"""
|
74 |
-
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, patch_stride = 16,
|
75 |
-
enable_fusion=False, fusion_type='None'):
|
76 |
-
super().__init__()
|
77 |
-
img_size = to_2tuple(img_size)
|
78 |
-
patch_size = to_2tuple(patch_size)
|
79 |
-
patch_stride = to_2tuple(patch_stride)
|
80 |
-
self.img_size = img_size
|
81 |
-
self.patch_size = patch_size
|
82 |
-
self.patch_stride = patch_stride
|
83 |
-
self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
|
84 |
-
self.num_patches = self.grid_size[0] * self.grid_size[1]
|
85 |
-
self.flatten = flatten
|
86 |
-
self.in_chans = in_chans
|
87 |
-
self.embed_dim = embed_dim
|
88 |
-
|
89 |
-
self.enable_fusion = enable_fusion
|
90 |
-
self.fusion_type = fusion_type
|
91 |
-
|
92 |
-
padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
|
93 |
-
|
94 |
-
if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
|
95 |
-
self.proj = nn.Conv2d(in_chans*4, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
|
96 |
-
else:
|
97 |
-
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
|
98 |
-
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
|
99 |
-
|
100 |
-
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
|
101 |
-
self.mel_conv2d = nn.Conv2d(in_chans, embed_dim, kernel_size=(patch_size[0], patch_size[1]*3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding)
|
102 |
-
if self.fusion_type == 'daf_2d':
|
103 |
-
self.fusion_model = DAF()
|
104 |
-
elif self.fusion_type == 'aff_2d':
|
105 |
-
self.fusion_model = AFF(channels=embed_dim, type='2D')
|
106 |
-
elif self.fusion_type == 'iaff_2d':
|
107 |
-
self.fusion_model = iAFF(channels=embed_dim, type='2D')
|
108 |
-
def forward(self, x, longer_idx = None):
|
109 |
-
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
|
110 |
-
global_x = x[:,0:1,:,:]
|
111 |
-
|
112 |
-
|
113 |
-
# global processing
|
114 |
-
B, C, H, W = global_x.shape
|
115 |
-
assert H == self.img_size[0] and W == self.img_size[1], \
|
116 |
-
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
117 |
-
global_x = self.proj(global_x)
|
118 |
-
TW = global_x.size(-1)
|
119 |
-
if len(longer_idx) > 0:
|
120 |
-
# local processing
|
121 |
-
local_x = x[longer_idx,1:,:,:].contiguous()
|
122 |
-
B, C, H, W = local_x.shape
|
123 |
-
local_x = local_x.view(B*C,1,H,W)
|
124 |
-
local_x = self.mel_conv2d(local_x)
|
125 |
-
local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3))
|
126 |
-
local_x = local_x.permute((0,2,3,1,4)).contiguous().flatten(3)
|
127 |
-
TB,TC,TH,_ = local_x.size()
|
128 |
-
if local_x.size(-1) < TW:
|
129 |
-
local_x = torch.cat([local_x, torch.zeros((TB,TC,TH,TW-local_x.size(-1)), device=global_x.device)], dim=-1)
|
130 |
-
else:
|
131 |
-
local_x = local_x[:,:,:,:TW]
|
132 |
-
|
133 |
-
global_x[longer_idx] = self.fusion_model(global_x[longer_idx],local_x)
|
134 |
-
x = global_x
|
135 |
-
else:
|
136 |
-
B, C, H, W = x.shape
|
137 |
-
assert H == self.img_size[0] and W == self.img_size[1], \
|
138 |
-
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
139 |
-
x = self.proj(x)
|
140 |
-
|
141 |
-
if self.flatten:
|
142 |
-
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
|
143 |
-
x = self.norm(x)
|
144 |
-
return x
|
145 |
-
|
146 |
-
class Mlp(nn.Module):
|
147 |
-
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
|
148 |
-
"""
|
149 |
-
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
150 |
-
super().__init__()
|
151 |
-
out_features = out_features or in_features
|
152 |
-
hidden_features = hidden_features or in_features
|
153 |
-
self.fc1 = nn.Linear(in_features, hidden_features)
|
154 |
-
self.act = act_layer()
|
155 |
-
self.fc2 = nn.Linear(hidden_features, out_features)
|
156 |
-
self.drop = nn.Dropout(drop)
|
157 |
-
|
158 |
-
def forward(self, x):
|
159 |
-
x = self.fc1(x)
|
160 |
-
x = self.act(x)
|
161 |
-
x = self.drop(x)
|
162 |
-
x = self.fc2(x)
|
163 |
-
x = self.drop(x)
|
164 |
-
return x
|
165 |
-
|
166 |
-
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill ``tensor`` in-place with samples from a truncated normal distribution.

    Works by drawing from a uniform over the CDF range [cdf(a), cdf(b)] and
    mapping back through the inverse normal CDF (erfinv), all under
    ``torch.no_grad()``.  Returns ``tensor`` for chaining.
    """
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    # Warn (but proceed) when the truncation window is far from the mean:
    # the inverse-CDF trick loses accuracy in that regime.
    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1] (the domain of erfinv).
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range (guards against
        # floating-point spill just outside [a, b])
        tensor.clamp_(min=a, max=b)
        return tensor
|
200 |
-
|
201 |
-
|
202 |
-
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fill ``tensor`` in-place from a truncated normal distribution.

    Values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with anything outside
    :math:`[a, b]` redrawn until it falls inside the bounds.  The generator
    works best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    filled = _no_grad_trunc_normal_(tensor, mean, std, a, b)
    return filled
|
221 |
-
|
222 |
-
|
223 |
-
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize ``tensor`` in-place using variance scaling (JAX/TF style).

    Args:
        tensor: an n-dimensional ``torch.Tensor`` to fill.
        scale: multiplicative factor applied to the variance.
        mode: ``'fan_in'``, ``'fan_out'`` or ``'fan_avg'`` — which fan count
            the variance is normalized by.
        distribution: ``'truncated_normal'``, ``'normal'`` or ``'uniform'``.

    Raises:
        ValueError: if ``mode`` or ``distribution`` is not one of the
            supported values.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Previously an unknown mode left `denom` unbound, surfacing later as
        # a confusing NameError; fail fast with an explicit message instead.
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
|
244 |
-
|
245 |
-
|
246 |
-
def lecun_normal_(tensor):
    """LeCun-normal init: fan_in variance scaling with a truncated-normal draw (in-place)."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
|
248 |
-
|
249 |
-
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    batch, height, width, channels = x.shape
    grid = x.view(
        batch,
        height // window_size, window_size,
        width // window_size, window_size,
        channels,
    )
    # Bring the two intra-window axes next to each other, then fold the batch
    # and window-grid axes into a single leading dimension.
    return (
        grid.permute(0, 1, 3, 2, 4, 5)
            .contiguous()
            .view(-1, window_size, window_size, channels)
    )
|
261 |
-
|
262 |
-
|
263 |
-
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: stitch windows back into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = int(windows.shape[0] / windows_per_image)
    grid = windows.view(
        batch,
        H // window_size, W // window_size,
        window_size, window_size,
        -1,
    )
    # Undo the permute done in window_partition, then flatten back to (B, H, W, C).
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch, H, W, -1)
|
277 |
-
|
278 |
-
|
279 |
-
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias: one learnable
        # bias per head for every possible (dh, dw) offset within a window
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # row-major flattening of the 2D offset into a single table index
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # buffer (not a parameter): fixed lookup indices into the bias table
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """Apply windowed multi-head self-attention.

        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None

        Returns:
            Tuple of (output features (num_windows*B, N, C),
            attention weights after softmax and attention-dropout).
        """
        B_, N, C = x.shape
        # single projection produces q, k, v stacked along a new leading axis
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # add the learned relative position bias, gathered via the fixed index buffer
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # broadcast the per-window mask over batch and heads
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn

    def extra_repr(self):
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
|
361 |
-
|
362 |
-
|
363 |
-
# We use the model based on Swintransformer Block, therefore we can use the swin-transformer pretrained model
|
364 |
-
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resulotion.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.norm_before_mlp = norm_before_mlp
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        if self.norm_before_mlp == 'ln':
            self.norm2 = nn.LayerNorm(dim)
        elif self.norm_before_mlp == 'bn':
            # NOTE(review): this lambda constructs a *fresh, randomly
            # initialized* nn.BatchNorm1d on every forward call, so its
            # parameters are never registered with the module nor trained.
            # This looks like a bug — confirm against the intended behavior
            # before relying on the 'bn' path.
            self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(1, 2)
        else:
            raise NotImplementedError
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            # calculate attention mask for SW-MSA: tokens that come from
            # different (pre-shift) regions must not attend to each other
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            # label each of the 9 regions with a distinct id
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1

            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # non-zero difference of region ids -> pair must be masked out
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None

        # buffer so the mask moves with the module across devices
        self.register_buffer("attn_mask", attn_mask)

    def forward(self, x):
        """Run one (shifted-)window attention + MLP block.

        Args:
            x: (B, H*W, C) token sequence at this stage's resolution.

        Returns:
            Tuple of (updated tokens (B, H*W, C), attention weights from the
            window-attention sub-module).
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows, attn = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN (residual around attention, then residual around MLP)
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
class PatchMerging(nn.Module):
|
489 |
-
r""" Patch Merging Layer.
|
490 |
-
Args:
|
491 |
-
input_resolution (tuple[int]): Resolution of input feature.
|
492 |
-
dim (int): Number of input channels.
|
493 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
494 |
-
"""
|
495 |
-
|
496 |
-
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
497 |
-
super().__init__()
|
498 |
-
self.input_resolution = input_resolution
|
499 |
-
self.dim = dim
|
500 |
-
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
501 |
-
self.norm = norm_layer(4 * dim)
|
502 |
-
|
503 |
-
def forward(self, x):
|
504 |
-
"""
|
505 |
-
x: B, H*W, C
|
506 |
-
"""
|
507 |
-
H, W = self.input_resolution
|
508 |
-
B, L, C = x.shape
|
509 |
-
assert L == H * W, "input feature has wrong size"
|
510 |
-
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
|
511 |
-
|
512 |
-
x = x.view(B, H, W, C)
|
513 |
-
|
514 |
-
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
|
515 |
-
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
|
516 |
-
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
|
517 |
-
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
|
518 |
-
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
|
519 |
-
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
|
520 |
-
|
521 |
-
x = self.norm(x)
|
522 |
-
x = self.reduction(x)
|
523 |
-
|
524 |
-
return x
|
525 |
-
|
526 |
-
def extra_repr(self):
|
527 |
-
return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
528 |
-
|
529 |
-
|
530 |
-
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 norm_before_mlp='ln'):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks: even blocks use plain W-MSA (shift 0), odd blocks use
        # shifted windows (shift = window_size // 2)
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x):
        """Run all blocks of the stage (plus optional downsample).

        Returns:
            Tuple (x, attn).  In eval mode ``attn`` is the mean of the
            per-block attention maps; in training mode it is the last
            block's attention.

        NOTE(review): when ``use_checkpoint`` is True, ``checkpoint.checkpoint``'s
        return value (the block's (x, attn) tuple) is assigned to ``x``
        unchanged and ``attn`` is never bound — that path looks broken
        (NameError at the return / wrong ``x`` type).  Confirm whether the
        checkpoint path is ever exercised before relying on it.
        """
        attns = []
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x, attn = blk(x)
                if not self.training:
                    attns.append(attn.unsqueeze(0))
        if self.downsample is not None:
            x = self.downsample(x)
        if not self.training:
            # average attention over the blocks of this stage
            attn = torch.cat(attns, dim = 0)
            attn = torch.mean(attn, dim = 0)
        return x, attn

    def extra_repr(self):
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
596 |
-
|
597 |
-
|
598 |
-
# The Core of HTSAT
|
599 |
-
class HTSAT_Swin_Transformer(nn.Module):
|
600 |
-
r"""HTSAT based on the Swin Transformer
|
601 |
-
Args:
|
602 |
-
spec_size (int | tuple(int)): Input Spectrogram size. Default 256
|
603 |
-
patch_size (int | tuple(int)): Patch size. Default: 4
|
604 |
-
path_stride (iot | tuple(int)): Patch Stride for Frequency and Time Axis. Default: 4
|
605 |
-
in_chans (int): Number of input image channels. Default: 1 (mono)
|
606 |
-
num_classes (int): Number of classes for classification head. Default: 527
|
607 |
-
embed_dim (int): Patch embedding dimension. Default: 96
|
608 |
-
depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
|
609 |
-
num_heads (tuple(int)): Number of attention heads in different layers.
|
610 |
-
window_size (int): Window size. Default: 8
|
611 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
612 |
-
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
613 |
-
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
|
614 |
-
drop_rate (float): Dropout rate. Default: 0
|
615 |
-
attn_drop_rate (float): Attention dropout rate. Default: 0
|
616 |
-
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
617 |
-
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
618 |
-
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
619 |
-
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
620 |
-
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
621 |
-
config (module): The configuration Module from config.py
|
622 |
-
"""
|
623 |
-
|
624 |
-
def __init__(self, spec_size=256, patch_size=4, patch_stride=(4,4),
|
625 |
-
in_chans=1, num_classes=527,
|
626 |
-
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
|
627 |
-
window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
|
628 |
-
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
|
629 |
-
norm_layer=nn.LayerNorm,
|
630 |
-
ape=False, patch_norm=True,
|
631 |
-
use_checkpoint=False, norm_before_mlp='ln', config = None,
|
632 |
-
enable_fusion = False, fusion_type = 'None', **kwargs):
|
633 |
-
super(HTSAT_Swin_Transformer, self).__init__()
|
634 |
-
|
635 |
-
self.config = config
|
636 |
-
self.spec_size = spec_size
|
637 |
-
self.patch_stride = patch_stride
|
638 |
-
self.patch_size = patch_size
|
639 |
-
self.window_size = window_size
|
640 |
-
self.embed_dim = embed_dim
|
641 |
-
self.depths = depths
|
642 |
-
self.ape = ape
|
643 |
-
self.in_chans = in_chans
|
644 |
-
self.num_classes = num_classes
|
645 |
-
self.num_heads = num_heads
|
646 |
-
self.num_layers = len(self.depths)
|
647 |
-
self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
|
648 |
-
|
649 |
-
self.drop_rate = drop_rate
|
650 |
-
self.attn_drop_rate = attn_drop_rate
|
651 |
-
self.drop_path_rate = drop_path_rate
|
652 |
-
|
653 |
-
self.qkv_bias = qkv_bias
|
654 |
-
self.qk_scale = None
|
655 |
-
|
656 |
-
self.patch_norm = patch_norm
|
657 |
-
self.norm_layer = norm_layer if self.patch_norm else None
|
658 |
-
self.norm_before_mlp = norm_before_mlp
|
659 |
-
self.mlp_ratio = mlp_ratio
|
660 |
-
|
661 |
-
self.use_checkpoint = use_checkpoint
|
662 |
-
|
663 |
-
self.enable_fusion = enable_fusion
|
664 |
-
self.fusion_type = fusion_type
|
665 |
-
|
666 |
-
# process mel-spec ; used only once
|
667 |
-
self.freq_ratio = self.spec_size // self.config.mel_bins
|
668 |
-
window = 'hann'
|
669 |
-
center = True
|
670 |
-
pad_mode = 'reflect'
|
671 |
-
ref = 1.0
|
672 |
-
amin = 1e-10
|
673 |
-
top_db = None
|
674 |
-
self.interpolate_ratio = 32 # Downsampled ratio
|
675 |
-
# Spectrogram extractor
|
676 |
-
self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size,
|
677 |
-
win_length=config.window_size, window=window, center=center, pad_mode=pad_mode,
|
678 |
-
freeze_parameters=True)
|
679 |
-
# Logmel feature extractor
|
680 |
-
self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size,
|
681 |
-
n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax, ref=ref, amin=amin, top_db=top_db,
|
682 |
-
freeze_parameters=True)
|
683 |
-
# Spec augmenter
|
684 |
-
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
|
685 |
-
freq_drop_width=8, freq_stripes_num=2) # 2 2
|
686 |
-
self.bn0 = nn.BatchNorm2d(self.config.mel_bins)
|
687 |
-
|
688 |
-
|
689 |
-
# split spctrogram into non-overlapping patches
|
690 |
-
self.patch_embed = PatchEmbed(
|
691 |
-
img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
|
692 |
-
embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride = patch_stride,
|
693 |
-
enable_fusion=self.enable_fusion, fusion_type=self.fusion_type
|
694 |
-
)
|
695 |
-
|
696 |
-
num_patches = self.patch_embed.num_patches
|
697 |
-
patches_resolution = self.patch_embed.grid_size
|
698 |
-
self.patches_resolution = patches_resolution
|
699 |
-
|
700 |
-
# absolute position embedding
|
701 |
-
if self.ape:
|
702 |
-
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim))
|
703 |
-
trunc_normal_(self.absolute_pos_embed, std=.02)
|
704 |
-
|
705 |
-
self.pos_drop = nn.Dropout(p=self.drop_rate)
|
706 |
-
|
707 |
-
# stochastic depth
|
708 |
-
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))] # stochastic depth decay rule
|
709 |
-
|
710 |
-
# build layers
|
711 |
-
self.layers = nn.ModuleList()
|
712 |
-
for i_layer in range(self.num_layers):
|
713 |
-
layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
|
714 |
-
input_resolution=(patches_resolution[0] // (2 ** i_layer),
|
715 |
-
patches_resolution[1] // (2 ** i_layer)),
|
716 |
-
depth=self.depths[i_layer],
|
717 |
-
num_heads=self.num_heads[i_layer],
|
718 |
-
window_size=self.window_size,
|
719 |
-
mlp_ratio=self.mlp_ratio,
|
720 |
-
qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
|
721 |
-
drop=self.drop_rate, attn_drop=self.attn_drop_rate,
|
722 |
-
drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
|
723 |
-
norm_layer=self.norm_layer,
|
724 |
-
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
|
725 |
-
use_checkpoint=use_checkpoint,
|
726 |
-
norm_before_mlp=self.norm_before_mlp)
|
727 |
-
self.layers.append(layer)
|
728 |
-
|
729 |
-
self.norm = self.norm_layer(self.num_features)
|
730 |
-
self.avgpool = nn.AdaptiveAvgPool1d(1)
|
731 |
-
self.maxpool = nn.AdaptiveMaxPool1d(1)
|
732 |
-
|
733 |
-
SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio
|
734 |
-
self.tscam_conv = nn.Conv2d(
|
735 |
-
in_channels = self.num_features,
|
736 |
-
out_channels = self.num_classes,
|
737 |
-
kernel_size = (SF,3),
|
738 |
-
padding = (0,1)
|
739 |
-
)
|
740 |
-
self.head = nn.Linear(num_classes, num_classes)
|
741 |
-
|
742 |
-
if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']):
|
743 |
-
self.mel_conv1d = nn.Sequential(
|
744 |
-
nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
|
745 |
-
nn.BatchNorm1d(64)
|
746 |
-
)
|
747 |
-
if self.fusion_type == 'daf_1d':
|
748 |
-
self.fusion_model = DAF()
|
749 |
-
elif self.fusion_type == 'aff_1d':
|
750 |
-
self.fusion_model = AFF(channels=64, type='1D')
|
751 |
-
elif self.fusion_type == 'iaff_1d':
|
752 |
-
self.fusion_model = iAFF(channels=64, type='1D')
|
753 |
-
|
754 |
-
self.apply(self._init_weights)
|
755 |
-
|
756 |
-
def _init_weights(self, m):
|
757 |
-
if isinstance(m, nn.Linear):
|
758 |
-
trunc_normal_(m.weight, std=.02)
|
759 |
-
if isinstance(m, nn.Linear) and m.bias is not None:
|
760 |
-
nn.init.constant_(m.bias, 0)
|
761 |
-
elif isinstance(m, nn.LayerNorm):
|
762 |
-
nn.init.constant_(m.bias, 0)
|
763 |
-
nn.init.constant_(m.weight, 1.0)
|
764 |
-
|
765 |
-
    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names the optimizer setup should exclude from weight decay."""
        return {'absolute_pos_embed'}
|
768 |
-
|
769 |
-
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        """Parameter-name keywords the optimizer setup should exclude from weight decay."""
        return {'relative_position_bias_table'}
|
772 |
-
|
773 |
-
|
774 |
-
    def forward_features(self, x, longer_idx = None):
        """Run the Swin backbone and the tscam head on an already-reshaped
        spectrogram image.

        Args:
            x: (B, C, H, W) spectrogram "image" (output of reshape_wav2img).
            longer_idx: passed through to the patch embed's fusion path.

        Returns:
            dict with 'framewise_output' (sigmoided), 'clipwise_output',
            'fine_grained_embedding' and 'embedding'.
        """
        # A deprecated optimization for using a hierarchical output from different blocks

        frames_num = x.shape[2]
        x = self.patch_embed(x, longer_idx = longer_idx)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for i, layer in enumerate(self.layers):
            x, attn = layer(x)
        # for x
        x = self.norm(x)
        B, N, C = x.shape
        # recover the (freq, time) grid of the last stage from the token sequence
        SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
        x = x.permute(0,2,1).contiguous().reshape(B, C, SF, ST)
        B, C, F, T = x.shape
        # group 2D CNN: undo the freq_ratio folding done in reshape_wav2img so
        # frequency bins line up again and time chunks are concatenated
        c_freq_bin = F // self.freq_ratio
        x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
        x = x.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1)
        # get latent_output: frequency-averaged features, upsampled to frame rate
        fine_grained_latent_output = torch.mean(x, dim = 2)
        fine_grained_latent_output = interpolate(fine_grained_latent_output.permute(0,2,1).contiguous(), 8 * self.patch_stride[1])

        # clip-level embedding: average-pool over all (freq, time) positions
        latent_output = self.avgpool(torch.flatten(x,2))
        latent_output = torch.flatten(latent_output, 1)

        # display the attention map, if needed

        # tscam head: per-class (freq x time) map, collapsed over frequency
        x = self.tscam_conv(x)
        x = torch.flatten(x, 2) # B, C, T

        # frame-wise predictions, upsampled back to the input frame rate
        fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous(), 8 * self.patch_stride[1])

        # clip-wise logits: average over time
        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        output_dict = {
            'framewise_output': fpx, # already sigmoided
            'clipwise_output': torch.sigmoid(x),
            'fine_grained_embedding': fine_grained_latent_output,
            'embedding': latent_output
        }

        return output_dict
|
820 |
-
|
821 |
-
def crop_wav(self, x, crop_size, spe_pos = None):
|
822 |
-
time_steps = x.shape[2]
|
823 |
-
tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
|
824 |
-
for i in range(len(x)):
|
825 |
-
if spe_pos is None:
|
826 |
-
crop_pos = random.randint(0, time_steps - crop_size - 1)
|
827 |
-
else:
|
828 |
-
crop_pos = spe_pos
|
829 |
-
tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size,:]
|
830 |
-
return tx
|
831 |
-
|
832 |
-
# Reshape the wavform to a img size, if you want to use the pretrained swin transformer model
|
833 |
-
def reshape_wav2img(self, x):
|
834 |
-
B, C, T, F = x.shape
|
835 |
-
target_T = int(self.spec_size * self.freq_ratio)
|
836 |
-
target_F = self.spec_size // self.freq_ratio
|
837 |
-
assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size"
|
838 |
-
# to avoid bicubic zero error
|
839 |
-
if T < target_T:
|
840 |
-
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
|
841 |
-
if F < target_F:
|
842 |
-
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
|
843 |
-
x = x.permute(0,1,3,2).contiguous()
|
844 |
-
x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio)
|
845 |
-
# print(x.shape)
|
846 |
-
x = x.permute(0,1,3,2,4).contiguous()
|
847 |
-
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
|
848 |
-
return x
|
849 |
-
|
850 |
-
# Repeat the wavform to a img size, if you want to use the pretrained swin transformer model
|
851 |
-
def repeat_wat2img(self, x, cur_pos):
|
852 |
-
B, C, T, F = x.shape
|
853 |
-
target_T = int(self.spec_size * self.freq_ratio)
|
854 |
-
target_F = self.spec_size // self.freq_ratio
|
855 |
-
assert T <= target_T and F <= target_F, "the wav size should less than or equal to the swin input size"
|
856 |
-
# to avoid bicubic zero error
|
857 |
-
if T < target_T:
|
858 |
-
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
|
859 |
-
if F < target_F:
|
860 |
-
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
|
861 |
-
x = x.permute(0,1,3,2).contiguous() # B C F T
|
862 |
-
x = x[:,:,:,cur_pos:cur_pos + self.spec_size]
|
863 |
-
x = x.repeat(repeats = (1,1,4,1))
|
864 |
-
return x
|
865 |
-
|
866 |
-
def forward(self, x: torch.Tensor, mixup_lambda = None, infer_mode = False, device=None):# out_feat_keys: List[str] = None):
|
867 |
-
|
868 |
-
if self.enable_fusion and x["longer"].sum() == 0:
|
869 |
-
# if no audio is longer than 10s, then randomly select one audio to be longer
|
870 |
-
x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True
|
871 |
-
|
872 |
-
if not self.enable_fusion:
|
873 |
-
x = x["waveform"].to(device=device, non_blocking=True)
|
874 |
-
x = self.spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
|
875 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
876 |
-
x = x.transpose(1, 3)
|
877 |
-
x = self.bn0(x)
|
878 |
-
x = x.transpose(1, 3)
|
879 |
-
if self.training:
|
880 |
-
x = self.spec_augmenter(x)
|
881 |
-
|
882 |
-
if self.training and mixup_lambda is not None:
|
883 |
-
x = do_mixup(x, mixup_lambda)
|
884 |
-
|
885 |
-
x = self.reshape_wav2img(x)
|
886 |
-
output_dict = self.forward_features(x)
|
887 |
-
else:
|
888 |
-
longer_list = x["longer"].to(device=device, non_blocking=True)
|
889 |
-
x = x["mel_fusion"].to(device=device, non_blocking=True)
|
890 |
-
x = x.transpose(1, 3)
|
891 |
-
x = self.bn0(x)
|
892 |
-
x = x.transpose(1, 3)
|
893 |
-
longer_list_idx = torch.where(longer_list)[0]
|
894 |
-
if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']:
|
895 |
-
new_x = x[:,0:1,:,:].clone().contiguous()
|
896 |
-
if len(longer_list_idx) > 0:
|
897 |
-
# local processing
|
898 |
-
fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous()
|
899 |
-
FB,FC,FT,FF = fusion_x_local.size()
|
900 |
-
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
|
901 |
-
fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous()
|
902 |
-
fusion_x_local = self.mel_conv1d(fusion_x_local)
|
903 |
-
fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1))
|
904 |
-
fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2)
|
905 |
-
if fusion_x_local.size(-1) < FT:
|
906 |
-
fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1)
|
907 |
-
else:
|
908 |
-
fusion_x_local = fusion_x_local[:,:,:FT]
|
909 |
-
# 1D fusion
|
910 |
-
new_x = new_x.squeeze(1).permute((0,2,1)).contiguous()
|
911 |
-
new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
|
912 |
-
x = new_x.permute((0,2,1)).contiguous()[:,None,:,:]
|
913 |
-
else:
|
914 |
-
x = new_x
|
915 |
-
|
916 |
-
elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']:
|
917 |
-
x = x # no change
|
918 |
-
|
919 |
-
if self.training:
|
920 |
-
x = self.spec_augmenter(x)
|
921 |
-
if self.training and mixup_lambda is not None:
|
922 |
-
x = do_mixup(x, mixup_lambda)
|
923 |
-
|
924 |
-
x = self.reshape_wav2img(x)
|
925 |
-
output_dict = self.forward_features(x, longer_idx = longer_list_idx)
|
926 |
-
|
927 |
-
# if infer_mode:
|
928 |
-
# # in infer mode. we need to handle different length audio input
|
929 |
-
# frame_num = x.shape[2]
|
930 |
-
# target_T = int(self.spec_size * self.freq_ratio)
|
931 |
-
# repeat_ratio = math.floor(target_T / frame_num)
|
932 |
-
# x = x.repeat(repeats=(1,1,repeat_ratio,1))
|
933 |
-
# x = self.reshape_wav2img(x)
|
934 |
-
# output_dict = self.forward_features(x)
|
935 |
-
# else:
|
936 |
-
# if x.shape[2] > self.freq_ratio * self.spec_size:
|
937 |
-
# if self.training:
|
938 |
-
# x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
|
939 |
-
# x = self.reshape_wav2img(x)
|
940 |
-
# output_dict = self.forward_features(x)
|
941 |
-
# else:
|
942 |
-
# # Change: Hard code here
|
943 |
-
# overlap_size = (x.shape[2] - 1) // 4
|
944 |
-
# output_dicts = []
|
945 |
-
# crop_size = (x.shape[2] - 1) // 2
|
946 |
-
# for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
|
947 |
-
# tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos)
|
948 |
-
# tx = self.reshape_wav2img(tx)
|
949 |
-
# output_dicts.append(self.forward_features(tx))
|
950 |
-
# clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
|
951 |
-
# framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
|
952 |
-
# for d in output_dicts:
|
953 |
-
# clipwise_output += d["clipwise_output"]
|
954 |
-
# framewise_output += d["framewise_output"]
|
955 |
-
# clipwise_output = clipwise_output / len(output_dicts)
|
956 |
-
# framewise_output = framewise_output / len(output_dicts)
|
957 |
-
# output_dict = {
|
958 |
-
# 'framewise_output': framewise_output,
|
959 |
-
# 'clipwise_output': clipwise_output
|
960 |
-
# }
|
961 |
-
# else: # this part is typically used, and most easy one
|
962 |
-
# x = self.reshape_wav2img(x)
|
963 |
-
# output_dict = self.forward_features(x)
|
964 |
-
# x = self.head(x)
|
965 |
-
|
966 |
-
# We process the data in the dataloader part, in that here we only consider the input_T < fixed_T
|
967 |
-
|
968 |
-
|
969 |
-
|
970 |
-
return output_dict
|
971 |
-
|
972 |
-
def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type='None'):
|
973 |
-
try:
|
974 |
-
|
975 |
-
assert audio_cfg.model_name in ["tiny", "base", "large"], "model name for HTS-AT is wrong!"
|
976 |
-
if audio_cfg.model_name == "tiny":
|
977 |
-
model = HTSAT_Swin_Transformer(
|
978 |
-
spec_size=256,
|
979 |
-
patch_size=4,
|
980 |
-
patch_stride=(4,4),
|
981 |
-
num_classes=audio_cfg.class_num,
|
982 |
-
embed_dim=96,
|
983 |
-
depths=[2,2,6,2],
|
984 |
-
num_heads=[4,8,16,32],
|
985 |
-
window_size=8,
|
986 |
-
config = audio_cfg,
|
987 |
-
enable_fusion = enable_fusion,
|
988 |
-
fusion_type = fusion_type
|
989 |
-
)
|
990 |
-
elif audio_cfg.model_name == "base":
|
991 |
-
model = HTSAT_Swin_Transformer(
|
992 |
-
spec_size=256,
|
993 |
-
patch_size=4,
|
994 |
-
patch_stride=(4,4),
|
995 |
-
num_classes=audio_cfg.class_num,
|
996 |
-
embed_dim=128,
|
997 |
-
depths=[2,2,12,2],
|
998 |
-
num_heads=[4,8,16,32],
|
999 |
-
window_size=8,
|
1000 |
-
config = audio_cfg,
|
1001 |
-
enable_fusion = enable_fusion,
|
1002 |
-
fusion_type = fusion_type
|
1003 |
-
)
|
1004 |
-
elif audio_cfg.model_name == "large":
|
1005 |
-
model = HTSAT_Swin_Transformer(
|
1006 |
-
spec_size=256,
|
1007 |
-
patch_size=4,
|
1008 |
-
patch_stride=(4,4),
|
1009 |
-
num_classes=audio_cfg.class_num,
|
1010 |
-
embed_dim=256,
|
1011 |
-
depths=[2,2,12,2],
|
1012 |
-
num_heads=[4,8,16,32],
|
1013 |
-
window_size=8,
|
1014 |
-
config = audio_cfg,
|
1015 |
-
enable_fusion = enable_fusion,
|
1016 |
-
fusion_type = fusion_type
|
1017 |
-
)
|
1018 |
-
|
1019 |
-
return model
|
1020 |
-
except:
|
1021 |
-
raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.')
|
1022 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/transforms.py
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
import os
|
3 |
-
from pathlib import Path
|
4 |
-
|
5 |
-
import albumentations
|
6 |
-
import numpy as np
|
7 |
-
import torch
|
8 |
-
from tqdm import tqdm
|
9 |
-
|
10 |
-
logger = logging.getLogger(f'main.{__name__}')
|
11 |
-
|
12 |
-
|
13 |
-
class StandardNormalizeAudio(object):
|
14 |
-
'''
|
15 |
-
Frequency-wise normalization
|
16 |
-
'''
|
17 |
-
def __init__(self, specs_dir, train_ids_path='./data/vggsound_train.txt', cache_path='./data/'):
|
18 |
-
self.specs_dir = specs_dir
|
19 |
-
self.train_ids_path = train_ids_path
|
20 |
-
# making the stats filename to match the specs dir name
|
21 |
-
self.cache_path = os.path.join(cache_path, f'train_means_stds_{Path(specs_dir).stem}.txt')
|
22 |
-
logger.info('Assuming that the input stats are calculated using preprocessed spectrograms (log)')
|
23 |
-
self.train_stats = self.calculate_or_load_stats()
|
24 |
-
|
25 |
-
def __call__(self, item):
|
26 |
-
# just to generalizat the input handling. Useful for FID, IS eval and training other staff
|
27 |
-
if isinstance(item, dict):
|
28 |
-
if 'input' in item:
|
29 |
-
input_key = 'input'
|
30 |
-
elif 'image' in item:
|
31 |
-
input_key = 'image'
|
32 |
-
else:
|
33 |
-
raise NotImplementedError
|
34 |
-
item[input_key] = (item[input_key] - self.train_stats['means']) / self.train_stats['stds']
|
35 |
-
elif isinstance(item, torch.Tensor):
|
36 |
-
# broadcasts np.ndarray (80, 1) to (1, 80, 1) because item is torch.Tensor (B, 80, T)
|
37 |
-
item = (item - self.train_stats['means']) / self.train_stats['stds']
|
38 |
-
else:
|
39 |
-
raise NotImplementedError
|
40 |
-
return item
|
41 |
-
|
42 |
-
def calculate_or_load_stats(self):
|
43 |
-
try:
|
44 |
-
# (F, 2)
|
45 |
-
train_stats = np.loadtxt(self.cache_path)
|
46 |
-
means, stds = train_stats.T
|
47 |
-
logger.info('Trying to load train stats for Standard Normalization of inputs')
|
48 |
-
except OSError:
|
49 |
-
logger.info('Could not find the precalculated stats for Standard Normalization. Calculating...')
|
50 |
-
train_vid_ids = open(self.train_ids_path)
|
51 |
-
specs_paths = [os.path.join(self.specs_dir, f'{i.rstrip()}_mel.npy') for i in train_vid_ids]
|
52 |
-
means = [None] * len(specs_paths)
|
53 |
-
stds = [None] * len(specs_paths)
|
54 |
-
for i, path in enumerate(tqdm(specs_paths)):
|
55 |
-
spec = np.load(path)
|
56 |
-
means[i] = spec.mean(axis=1)
|
57 |
-
stds[i] = spec.std(axis=1)
|
58 |
-
# (F) <- (num_files, F)
|
59 |
-
means = np.array(means).mean(axis=0)
|
60 |
-
stds = np.array(stds).mean(axis=0)
|
61 |
-
# saving in two columns
|
62 |
-
np.savetxt(self.cache_path, np.vstack([means, stds]).T, fmt='%0.8f')
|
63 |
-
means = means.reshape(-1, 1)
|
64 |
-
stds = stds.reshape(-1, 1)
|
65 |
-
return {'means': means, 'stds': stds}
|
66 |
-
|
67 |
-
class ToTensor(object):
|
68 |
-
|
69 |
-
def __call__(self, item):
|
70 |
-
item['input'] = torch.from_numpy(item['input']).float()
|
71 |
-
# if 'target' in item:
|
72 |
-
item['target'] = torch.tensor(item['target'])
|
73 |
-
return item
|
74 |
-
|
75 |
-
class Crop(object):
|
76 |
-
|
77 |
-
def __init__(self, cropped_shape=None, random_crop=False):
|
78 |
-
self.cropped_shape = cropped_shape
|
79 |
-
if cropped_shape is not None:
|
80 |
-
mel_num, spec_len = cropped_shape
|
81 |
-
if random_crop:
|
82 |
-
self.cropper = albumentations.RandomCrop
|
83 |
-
else:
|
84 |
-
self.cropper = albumentations.CenterCrop
|
85 |
-
self.preprocessor = albumentations.Compose([self.cropper(mel_num, spec_len)])
|
86 |
-
else:
|
87 |
-
self.preprocessor = lambda **kwargs: kwargs
|
88 |
-
|
89 |
-
def __call__(self, item):
|
90 |
-
item['input'] = self.preprocessor(image=item['input'])['image']
|
91 |
-
return item
|
92 |
-
|
93 |
-
|
94 |
-
if __name__ == '__main__':
|
95 |
-
cropper = Crop([80, 848])
|
96 |
-
item = {'input': torch.rand([80, 860])}
|
97 |
-
outputs = cropper(item)
|
98 |
-
print(outputs['input'].shape)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ASJMO/freegpt/client/css/hljs.css
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
.hljs {
|
2 |
-
color: #e9e9f4;
|
3 |
-
background: #28293629;
|
4 |
-
border-radius: var(--border-radius-1);
|
5 |
-
border: 1px solid var(--blur-border);
|
6 |
-
font-size: 15px;
|
7 |
-
word-wrap: break-word;
|
8 |
-
white-space: pre-wrap;
|
9 |
-
}
|
10 |
-
|
11 |
-
/* style for hljs copy */
|
12 |
-
.hljs-copy-wrapper {
|
13 |
-
position: relative;
|
14 |
-
overflow: hidden;
|
15 |
-
}
|
16 |
-
|
17 |
-
.hljs-copy-wrapper:hover .hljs-copy-button,
|
18 |
-
.hljs-copy-button:focus {
|
19 |
-
transform: translateX(0);
|
20 |
-
}
|
21 |
-
|
22 |
-
.hljs-copy-button {
|
23 |
-
position: absolute;
|
24 |
-
transform: translateX(calc(100% + 1.125em));
|
25 |
-
top: 1em;
|
26 |
-
right: 1em;
|
27 |
-
width: 2rem;
|
28 |
-
height: 2rem;
|
29 |
-
text-indent: -9999px;
|
30 |
-
color: #fff;
|
31 |
-
border-radius: 0.25rem;
|
32 |
-
border: 1px solid #ffffff22;
|
33 |
-
background-color: #2d2b57;
|
34 |
-
background-image: url('data:image/svg+xml;utf-8,<svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M6 5C5.73478 5 5.48043 5.10536 5.29289 5.29289C5.10536 5.48043 5 5.73478 5 6V20C5 20.2652 5.10536 20.5196 5.29289 20.7071C5.48043 20.8946 5.73478 21 6 21H18C18.2652 21 18.5196 20.8946 18.7071 20.7071C18.8946 20.5196 19 20.2652 19 20V6C19 5.73478 18.8946 5.48043 18.7071 5.29289C18.5196 5.10536 18.2652 5 18 5H16C15.4477 5 15 4.55228 15 4C15 3.44772 15.4477 3 16 3H18C18.7956 3 19.5587 3.31607 20.1213 3.87868C20.6839 4.44129 21 5.20435 21 6V20C21 20.7957 20.6839 21.5587 20.1213 22.1213C19.5587 22.6839 18.7957 23 18 23H6C5.20435 23 4.44129 22.6839 3.87868 22.1213C3.31607 21.5587 3 20.7957 3 20V6C3 5.20435 3.31607 4.44129 3.87868 3.87868C4.44129 3.31607 5.20435 3 6 3H8C8.55228 3 9 3.44772 9 4C9 4.55228 8.55228 5 8 5H6Z" fill="white"/><path fill-rule="evenodd" clip-rule="evenodd" d="M7 3C7 1.89543 7.89543 1 9 1H15C16.1046 1 17 1.89543 17 3V5C17 6.10457 16.1046 7 15 7H9C7.89543 7 7 6.10457 7 5V3ZM15 3H9V5H15V3Z" fill="white"/></svg>');
|
35 |
-
background-repeat: no-repeat;
|
36 |
-
background-position: center;
|
37 |
-
transition: background-color 200ms ease, transform 200ms ease-out;
|
38 |
-
}
|
39 |
-
|
40 |
-
.hljs-copy-button:hover {
|
41 |
-
border-color: #ffffff44;
|
42 |
-
}
|
43 |
-
|
44 |
-
.hljs-copy-button:active {
|
45 |
-
border-color: #ffffff66;
|
46 |
-
}
|
47 |
-
|
48 |
-
.hljs-copy-button[data-copied="true"] {
|
49 |
-
text-indent: 0;
|
50 |
-
width: auto;
|
51 |
-
background-image: none;
|
52 |
-
}
|
53 |
-
|
54 |
-
.hljs-copy-alert {
|
55 |
-
clip: rect(0 0 0 0);
|
56 |
-
clip-path: inset(50%);
|
57 |
-
height: 1px;
|
58 |
-
overflow: hidden;
|
59 |
-
position: absolute;
|
60 |
-
white-space: nowrap;
|
61 |
-
width: 1px;
|
62 |
-
}
|
63 |
-
|
64 |
-
@media (prefers-reduced-motion) {
|
65 |
-
.hljs-copy-button {
|
66 |
-
transition: none;
|
67 |
-
}
|
68 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py
DELETED
@@ -1,135 +0,0 @@
|
|
1 |
-
_base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
|
2 |
-
|
3 |
-
max_epochs = 100 # 训练的最大 epoch
|
4 |
-
data_root = './data-df2/' # 数据集目录的绝对路径
|
5 |
-
# data_root = '/root/workspace/mmyolo/data/cat/' # Docker 容器里面数据集目录的绝对路径
|
6 |
-
|
7 |
-
# 结果保存的路径,可以省略,省略保存的文件名位于 work_dirs 下 config 同名的文件夹中
|
8 |
-
# 如果某个 config 只是修改了部分参数,修改这个变量就可以将新的训练文件保存到其他地方
|
9 |
-
work_dir = './work_dirs/yolov5_s_df2'
|
10 |
-
|
11 |
-
# load_from 可以指定本地路径或者 URL,设置了 URL 会自动进行下载,因为上面已经下载过,我们这里设置本地路径
|
12 |
-
# 因为本教程是在 cat 数据集上微调,故这里需要使用 `load_from` 来加载 MMYOLO 中的预训练模型,这样可以在加快收敛速度的同时保证精度
|
13 |
-
# load_from = './work_dirs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa
|
14 |
-
|
15 |
-
# 根据自己的 GPU 情况,修改 batch size,YOLOv5-s 默认为 8卡 x 16bs
|
16 |
-
train_batch_size_per_gpu = 32
|
17 |
-
train_num_workers = 4 # 推荐使用 train_num_workers = nGPU x 4
|
18 |
-
|
19 |
-
save_epoch_intervals = 2 # 每 interval 轮迭代进行一次保存一次权重
|
20 |
-
|
21 |
-
# 根据自己的 GPU 情况,修改 base_lr,修改的比例是 base_lr_default * (your_bs / default_bs)
|
22 |
-
base_lr = _base_.base_lr / 4
|
23 |
-
|
24 |
-
anchors = [ # 此处已经根据数据集特点更新了 anchor,关于 anchor 的生成,后面小节会讲解
|
25 |
-
[(68, 69), (154, 91), (143, 162)], # P3/8
|
26 |
-
[(242, 160), (189, 287), (391, 207)], # P4/16
|
27 |
-
[(353, 337), (539, 341), (443, 432)] # P5/32
|
28 |
-
]
|
29 |
-
|
30 |
-
class_name = ('short_sleeved_shirt',
|
31 |
-
'long_sleeved_shirt',
|
32 |
-
'short_sleeved_outwear',
|
33 |
-
'long_sleeved_outwear',
|
34 |
-
'vest',
|
35 |
-
'sling',
|
36 |
-
'shorts',
|
37 |
-
'trousers',
|
38 |
-
'skirt',
|
39 |
-
'short_sleeved_dress',
|
40 |
-
'long_sleeved_dress',
|
41 |
-
'vest_dress',
|
42 |
-
'sling_dress') # 根据 class_with_id.txt 类别信息,设置 class_name
|
43 |
-
|
44 |
-
num_classes = len(class_name)
|
45 |
-
metainfo = dict(
|
46 |
-
classes=class_name,
|
47 |
-
palette=[(255, 0, 0),
|
48 |
-
(255, 128, 0),
|
49 |
-
(255, 255, 0),
|
50 |
-
(128, 255, 0),
|
51 |
-
(0, 255, 0),
|
52 |
-
(0, 255, 128),
|
53 |
-
(0, 255, 255),
|
54 |
-
(0, 128, 255),
|
55 |
-
(0, 0, 255),
|
56 |
-
(127, 0, 255),
|
57 |
-
(255, 0, 255),
|
58 |
-
(255, 0, 127),
|
59 |
-
(128, 128, 128)] # 画图时候的颜色,随便设置即可
|
60 |
-
)
|
61 |
-
|
62 |
-
train_cfg = dict(
|
63 |
-
max_epochs=max_epochs,
|
64 |
-
val_begin=20, # 第几个 epoch 后验证,这里设置 20 是因为前 20 个 epoch 精度不高,测试意义不大,故跳过
|
65 |
-
val_interval=save_epoch_intervals # 每 val_interval 轮迭代进行一次测试评估
|
66 |
-
# dynamic_intervals=[(max_epochs-_base_.num_last_epochs, 1)]
|
67 |
-
)
|
68 |
-
|
69 |
-
model = dict(
|
70 |
-
bbox_head=dict(
|
71 |
-
head_module=dict(num_classes=num_classes),
|
72 |
-
prior_generator=dict(base_sizes=anchors),
|
73 |
-
|
74 |
-
# loss_cls 会根据 num_classes 动态调整,但是 num_classes = 1 的时候,loss_cls 恒为 0
|
75 |
-
loss_cls=dict(loss_weight=0.5 *
|
76 |
-
(num_classes / 80 * 3 / _base_.num_det_layers))))
|
77 |
-
|
78 |
-
train_dataloader = dict(
|
79 |
-
batch_size=train_batch_size_per_gpu,
|
80 |
-
num_workers=train_num_workers,
|
81 |
-
dataset=dict(
|
82 |
-
_delete_=True,
|
83 |
-
type='RepeatDataset',
|
84 |
-
# 数据量太少的话,可以使用 RepeatDataset ,在每个 epoch 内重复当前数据集 n 次,这里设置 5 是重复 5 次
|
85 |
-
times=2,
|
86 |
-
dataset=dict(
|
87 |
-
type=_base_.dataset_type,
|
88 |
-
data_root=data_root,
|
89 |
-
metainfo=metainfo,
|
90 |
-
ann_file='annotations/trainval.json',
|
91 |
-
data_prefix=dict(img='smaller-dataset/'),
|
92 |
-
filter_cfg=dict(filter_empty_gt=False, min_size=32),
|
93 |
-
pipeline=_base_.train_pipeline)))
|
94 |
-
|
95 |
-
val_dataloader = dict(
|
96 |
-
dataset=dict(
|
97 |
-
metainfo=metainfo,
|
98 |
-
data_root=data_root,
|
99 |
-
ann_file='annotations/trainval.json',
|
100 |
-
data_prefix=dict(img='smaller-dataset/')))
|
101 |
-
|
102 |
-
test_dataloader = val_dataloader
|
103 |
-
|
104 |
-
val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
|
105 |
-
test_evaluator = val_evaluator
|
106 |
-
|
107 |
-
optim_wrapper = dict(optimizer=dict(lr=base_lr))
|
108 |
-
|
109 |
-
default_hooks = dict(
|
110 |
-
# 设置间隔多少个 epoch 保存模型,以及保存模型最多几个,`save_best` 是另外保存最佳模型(推荐)
|
111 |
-
checkpoint=dict(
|
112 |
-
type='CheckpointHook',
|
113 |
-
interval=save_epoch_intervals,
|
114 |
-
max_keep_ckpts=5,
|
115 |
-
save_best='auto'),
|
116 |
-
param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
|
117 |
-
# logger 输出的间隔
|
118 |
-
logger=dict(type='LoggerHook', interval=10))
|
119 |
-
|
120 |
-
# custom_hooks = [
|
121 |
-
# dict(
|
122 |
-
# type="EMAHook",
|
123 |
-
# ema_type="ExpMomentumEMA",
|
124 |
-
# momentum=0.0001,
|
125 |
-
# update_buffers=True,
|
126 |
-
# strict_load=False,
|
127 |
-
# priority=49),
|
128 |
-
# dict(
|
129 |
-
# type="mmdet.PipelineSwitchHook",
|
130 |
-
# switch_epoch=max_epochs-max_epochs-_base_.num_last_epochs,
|
131 |
-
# switch_pipeline=_base_.train_pipeline_stage2
|
132 |
-
# )
|
133 |
-
# ]
|
134 |
-
|
135 |
-
visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/data/scripts/get_coco.sh
DELETED
@@ -1,56 +0,0 @@
|
|
1 |
-
#!/bin/bash
|
2 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
3 |
-
# Download COCO 2017 dataset http://cocodataset.org
|
4 |
-
# Example usage: bash data/scripts/get_coco.sh
|
5 |
-
# parent
|
6 |
-
# ├── yolov5
|
7 |
-
# └── datasets
|
8 |
-
# └── coco ← downloads here
|
9 |
-
|
10 |
-
# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
|
11 |
-
if [ "$#" -gt 0 ]; then
|
12 |
-
for opt in "$@"; do
|
13 |
-
case "${opt}" in
|
14 |
-
--train) train=true ;;
|
15 |
-
--val) val=true ;;
|
16 |
-
--test) test=true ;;
|
17 |
-
--segments) segments=true ;;
|
18 |
-
esac
|
19 |
-
done
|
20 |
-
else
|
21 |
-
train=true
|
22 |
-
val=true
|
23 |
-
test=false
|
24 |
-
segments=false
|
25 |
-
fi
|
26 |
-
|
27 |
-
# Download/unzip labels
|
28 |
-
d='../datasets' # unzip directory
|
29 |
-
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
|
30 |
-
if [ "$segments" == "true" ]; then
|
31 |
-
f='coco2017labels-segments.zip' # 168 MB
|
32 |
-
else
|
33 |
-
f='coco2017labels.zip' # 46 MB
|
34 |
-
fi
|
35 |
-
echo 'Downloading' $url$f ' ...'
|
36 |
-
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
37 |
-
|
38 |
-
# Download/unzip images
|
39 |
-
d='../datasets/coco/images' # unzip directory
|
40 |
-
url=http://images.cocodataset.org/zips/
|
41 |
-
if [ "$train" == "true" ]; then
|
42 |
-
f='train2017.zip' # 19G, 118k images
|
43 |
-
echo 'Downloading' $url$f '...'
|
44 |
-
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
45 |
-
fi
|
46 |
-
if [ "$val" == "true" ]; then
|
47 |
-
f='val2017.zip' # 1G, 5k images
|
48 |
-
echo 'Downloading' $url$f '...'
|
49 |
-
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
50 |
-
fi
|
51 |
-
if [ "$test" == "true" ]; then
|
52 |
-
f='test2017.zip' # 7G, 41k images (optional)
|
53 |
-
echo 'Downloading' $url$f '...'
|
54 |
-
curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
|
55 |
-
fi
|
56 |
-
wait # finish background tasks
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/ChatgptLogin.py
DELETED
@@ -1,74 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import os, re
|
4 |
-
from aiohttp import ClientSession
|
5 |
-
|
6 |
-
from ..base_provider import AsyncProvider, format_prompt
|
7 |
-
|
8 |
-
|
9 |
-
class ChatgptLogin(AsyncProvider):
|
10 |
-
url = "https://opchatgpts.net"
|
11 |
-
supports_gpt_35_turbo = True
|
12 |
-
working = True
|
13 |
-
_nonce = None
|
14 |
-
|
15 |
-
@classmethod
|
16 |
-
async def create_async(
|
17 |
-
cls,
|
18 |
-
model: str,
|
19 |
-
messages: list[dict[str, str]],
|
20 |
-
**kwargs
|
21 |
-
) -> str:
|
22 |
-
headers = {
|
23 |
-
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
|
24 |
-
"Accept" : "*/*",
|
25 |
-
"Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
|
26 |
-
"Origin" : "https://opchatgpts.net",
|
27 |
-
"Alt-Used" : "opchatgpts.net",
|
28 |
-
"Referer" : "https://opchatgpts.net/chatgpt-free-use/",
|
29 |
-
"Sec-Fetch-Dest" : "empty",
|
30 |
-
"Sec-Fetch-Mode" : "cors",
|
31 |
-
"Sec-Fetch-Site" : "same-origin",
|
32 |
-
}
|
33 |
-
async with ClientSession(
|
34 |
-
headers=headers
|
35 |
-
) as session:
|
36 |
-
if not cls._nonce:
|
37 |
-
async with session.get(
|
38 |
-
"https://opchatgpts.net/chatgpt-free-use/",
|
39 |
-
params={"id": os.urandom(6).hex()},
|
40 |
-
) as response:
|
41 |
-
result = re.search(r'data-nonce="(.*?)"', await response.text())
|
42 |
-
if not result:
|
43 |
-
raise RuntimeError("No nonce value")
|
44 |
-
cls._nonce = result.group(1)
|
45 |
-
data = {
|
46 |
-
"_wpnonce": cls._nonce,
|
47 |
-
"post_id": 28,
|
48 |
-
"url": "https://opchatgpts.net/chatgpt-free-use",
|
49 |
-
"action": "wpaicg_chat_shortcode_message",
|
50 |
-
"message": format_prompt(messages),
|
51 |
-
"bot_id": 0
|
52 |
-
}
|
53 |
-
async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
|
54 |
-
response.raise_for_status()
|
55 |
-
data = await response.json()
|
56 |
-
if "data" in data:
|
57 |
-
return data["data"]
|
58 |
-
elif "msg" in data:
|
59 |
-
raise RuntimeError(data["msg"])
|
60 |
-
else:
|
61 |
-
raise RuntimeError(f"Response: {data}")
|
62 |
-
|
63 |
-
|
64 |
-
@classmethod
|
65 |
-
@property
|
66 |
-
def params(cls):
|
67 |
-
params = [
|
68 |
-
("model", "str"),
|
69 |
-
("messages", "list[dict[str, str]]"),
|
70 |
-
("stream", "bool"),
|
71 |
-
("temperature", "float"),
|
72 |
-
]
|
73 |
-
param = ", ".join([": ".join(p) for p in params])
|
74 |
-
return f"g4f.provider.{cls.__name__} supports: ({param})"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/agentverse/agents/simulation_agent/prisoner_dilemma.py
DELETED
@@ -1,167 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import logging
|
4 |
-
from string import Template
|
5 |
-
from typing import TYPE_CHECKING, List
|
6 |
-
|
7 |
-
from agentverse.message import Message
|
8 |
-
|
9 |
-
# from . import agent_registry
|
10 |
-
# from .base import BaseAgent
|
11 |
-
from agentverse.agents import agent_registry
|
12 |
-
from agentverse.agents.base import BaseAgent
|
13 |
-
|
14 |
-
if TYPE_CHECKING:
|
15 |
-
from agentverse.environments.base import BaseEnvironment
|
16 |
-
|
17 |
-
|
18 |
-
class PrisonerDilemaAgent(BaseAgent):
|
19 |
-
def step(
|
20 |
-
self,
|
21 |
-
environment: BaseEnvironment,
|
22 |
-
env_description: str = "",
|
23 |
-
) -> Message:
|
24 |
-
prompt = self._fill_prompt_template(env_description)
|
25 |
-
|
26 |
-
parsed_response = None
|
27 |
-
for i in range(self.max_retry):
|
28 |
-
try:
|
29 |
-
response = self.llm.generate_response(prompt)
|
30 |
-
parsed_response = self.output_parser.parse(self, environment, response)
|
31 |
-
break
|
32 |
-
except Exception as e:
|
33 |
-
logging.error(e)
|
34 |
-
logging.warning("Retrying...")
|
35 |
-
continue
|
36 |
-
|
37 |
-
if parsed_response is None:
|
38 |
-
logging.error(f"{self.name} failed to generate valid response.")
|
39 |
-
|
40 |
-
message = Message(
|
41 |
-
content=""
|
42 |
-
if parsed_response is None
|
43 |
-
else parsed_response.return_values["output"],
|
44 |
-
sender=self.name,
|
45 |
-
receiver=self.get_receiver(),
|
46 |
-
)
|
47 |
-
return message
|
48 |
-
|
49 |
-
async def astep(
|
50 |
-
self, environment: BaseEnvironment, env_description: str = ""
|
51 |
-
) -> Message:
|
52 |
-
"""Asynchronous version of step"""
|
53 |
-
prompt = self._fill_prompt_template(env_description)
|
54 |
-
|
55 |
-
parsed_response = None
|
56 |
-
for i in range(self.max_retry):
|
57 |
-
try:
|
58 |
-
response = await self.llm.agenerate_response(prompt)
|
59 |
-
parsed_response = self.output_parser.parse(self, environment, response)
|
60 |
-
break
|
61 |
-
except Exception as e:
|
62 |
-
logging.error(e)
|
63 |
-
logging.warning("Retrying...")
|
64 |
-
continue
|
65 |
-
|
66 |
-
if parsed_response is None:
|
67 |
-
logging.error(f"{self.name} failed to generate valid response.")
|
68 |
-
|
69 |
-
message = Message(
|
70 |
-
content=""
|
71 |
-
if parsed_response is None
|
72 |
-
else parsed_response.return_values["output"],
|
73 |
-
sender=self.name,
|
74 |
-
receiver=self.get_receiver(),
|
75 |
-
)
|
76 |
-
return message
|
77 |
-
|
78 |
-
def _fill_prompt_template(self, env_description: str = "") -> str:
|
79 |
-
"""Fill the placeholders in the prompt template
|
80 |
-
|
81 |
-
In the conversation agent, three placeholders are supported:
|
82 |
-
- ${agent_name}: the name of the agent
|
83 |
-
- ${env_description}: the description of the environment
|
84 |
-
- ${role_description}: the description of the role of the agent
|
85 |
-
- ${chat_history}: the chat history of the agent
|
86 |
-
"""
|
87 |
-
input_arguments = {
|
88 |
-
"agent_name": self.name,
|
89 |
-
"env_description": env_description,
|
90 |
-
"role_description": self.role_description,
|
91 |
-
"chat_history": self.memory.to_string(add_sender_prefix=True),
|
92 |
-
}
|
93 |
-
return Template(self.prompt_template).safe_substitute(input_arguments)
|
94 |
-
|
95 |
-
def add_message_to_memory(self, messages: List[Message]) -> None:
|
96 |
-
self.memory.add_message(messages)
|
97 |
-
|
98 |
-
def reset(self) -> None:
|
99 |
-
"""Reset the agent"""
|
100 |
-
self.memory.reset()
|
101 |
-
# TODO: reset receiver
|
102 |
-
|
103 |
-
|
104 |
-
@agent_registry.register("police")
|
105 |
-
class PoliceAgent(PrisonerDilemaAgent):
|
106 |
-
interrogating_form: str
|
107 |
-
|
108 |
-
def _fill_prompt_template(self, env_description: str = "") -> str:
|
109 |
-
"""Fill the placeholders in the prompt template
|
110 |
-
|
111 |
-
In the conversation agent, three placeholders are supported:
|
112 |
-
- ${agent_name}: the name of the agent
|
113 |
-
- ${env_description}: the description of the environment
|
114 |
-
- ${role_description}: the description of the role of the agent
|
115 |
-
- ${chat_history}: the chat history of the agent
|
116 |
-
"""
|
117 |
-
input_arguments = {
|
118 |
-
"agent_name": self.name,
|
119 |
-
"env_description": env_description,
|
120 |
-
"role_description": self.role_description,
|
121 |
-
"chat_history": self.memory.to_string(add_sender_prefix=True),
|
122 |
-
}
|
123 |
-
|
124 |
-
role_argument = {
|
125 |
-
"interrogating_form": self.interrogating_form,
|
126 |
-
}
|
127 |
-
|
128 |
-
role_description = Template(self.role_description).safe_substitute(
|
129 |
-
role_argument
|
130 |
-
)
|
131 |
-
input_arguments["role_description"] = role_description
|
132 |
-
|
133 |
-
return Template(self.prompt_template).safe_substitute(input_arguments)
|
134 |
-
|
135 |
-
|
136 |
-
@agent_registry.register("prisoner")
|
137 |
-
class PrisonerAgent(PrisonerDilemaAgent):
|
138 |
-
personality: str
|
139 |
-
relationship_with_another: str
|
140 |
-
|
141 |
-
def _fill_prompt_template(self, env_description: str = "") -> str:
|
142 |
-
"""Fill the placeholders in the prompt template
|
143 |
-
|
144 |
-
In the conversation agent, three placeholders are supported:
|
145 |
-
- ${agent_name}: the name of the agent
|
146 |
-
- ${env_description}: the description of the environment
|
147 |
-
- ${role_description}: the description of the role of the agent
|
148 |
-
- ${chat_history}: the chat history of the agent
|
149 |
-
"""
|
150 |
-
input_arguments = {
|
151 |
-
"agent_name": self.name,
|
152 |
-
"env_description": env_description,
|
153 |
-
"role_description": self.role_description,
|
154 |
-
"chat_history": self.memory.to_string(add_sender_prefix=True),
|
155 |
-
}
|
156 |
-
|
157 |
-
role_argument = {
|
158 |
-
"personality": self.personality,
|
159 |
-
"relationship_with_another": self.relationship_with_another,
|
160 |
-
}
|
161 |
-
|
162 |
-
role_description = Template(self.role_description).safe_substitute(
|
163 |
-
role_argument
|
164 |
-
)
|
165 |
-
input_arguments["role_description"] = role_description
|
166 |
-
|
167 |
-
return Template(self.prompt_template).safe_substitute(input_arguments)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/transforms.py
DELETED
@@ -1,193 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch.nn import functional as F
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
|
7 |
-
DEFAULT_MIN_BIN_WIDTH = 1e-3
|
8 |
-
DEFAULT_MIN_BIN_HEIGHT = 1e-3
|
9 |
-
DEFAULT_MIN_DERIVATIVE = 1e-3
|
10 |
-
|
11 |
-
|
12 |
-
def piecewise_rational_quadratic_transform(inputs,
|
13 |
-
unnormalized_widths,
|
14 |
-
unnormalized_heights,
|
15 |
-
unnormalized_derivatives,
|
16 |
-
inverse=False,
|
17 |
-
tails=None,
|
18 |
-
tail_bound=1.,
|
19 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
20 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
21 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
22 |
-
|
23 |
-
if tails is None:
|
24 |
-
spline_fn = rational_quadratic_spline
|
25 |
-
spline_kwargs = {}
|
26 |
-
else:
|
27 |
-
spline_fn = unconstrained_rational_quadratic_spline
|
28 |
-
spline_kwargs = {
|
29 |
-
'tails': tails,
|
30 |
-
'tail_bound': tail_bound
|
31 |
-
}
|
32 |
-
|
33 |
-
outputs, logabsdet = spline_fn(
|
34 |
-
inputs=inputs,
|
35 |
-
unnormalized_widths=unnormalized_widths,
|
36 |
-
unnormalized_heights=unnormalized_heights,
|
37 |
-
unnormalized_derivatives=unnormalized_derivatives,
|
38 |
-
inverse=inverse,
|
39 |
-
min_bin_width=min_bin_width,
|
40 |
-
min_bin_height=min_bin_height,
|
41 |
-
min_derivative=min_derivative,
|
42 |
-
**spline_kwargs
|
43 |
-
)
|
44 |
-
return outputs, logabsdet
|
45 |
-
|
46 |
-
|
47 |
-
def searchsorted(bin_locations, inputs, eps=1e-6):
|
48 |
-
bin_locations[..., -1] += eps
|
49 |
-
return torch.sum(
|
50 |
-
inputs[..., None] >= bin_locations,
|
51 |
-
dim=-1
|
52 |
-
) - 1
|
53 |
-
|
54 |
-
|
55 |
-
def unconstrained_rational_quadratic_spline(inputs,
|
56 |
-
unnormalized_widths,
|
57 |
-
unnormalized_heights,
|
58 |
-
unnormalized_derivatives,
|
59 |
-
inverse=False,
|
60 |
-
tails='linear',
|
61 |
-
tail_bound=1.,
|
62 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
63 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
64 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
65 |
-
inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
|
66 |
-
outside_interval_mask = ~inside_interval_mask
|
67 |
-
|
68 |
-
outputs = torch.zeros_like(inputs)
|
69 |
-
logabsdet = torch.zeros_like(inputs)
|
70 |
-
|
71 |
-
if tails == 'linear':
|
72 |
-
unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
|
73 |
-
constant = np.log(np.exp(1 - min_derivative) - 1)
|
74 |
-
unnormalized_derivatives[..., 0] = constant
|
75 |
-
unnormalized_derivatives[..., -1] = constant
|
76 |
-
|
77 |
-
outputs[outside_interval_mask] = inputs[outside_interval_mask]
|
78 |
-
logabsdet[outside_interval_mask] = 0
|
79 |
-
else:
|
80 |
-
raise RuntimeError('{} tails are not implemented.'.format(tails))
|
81 |
-
|
82 |
-
outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
|
83 |
-
inputs=inputs[inside_interval_mask],
|
84 |
-
unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
|
85 |
-
unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
|
86 |
-
unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
|
87 |
-
inverse=inverse,
|
88 |
-
left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
|
89 |
-
min_bin_width=min_bin_width,
|
90 |
-
min_bin_height=min_bin_height,
|
91 |
-
min_derivative=min_derivative
|
92 |
-
)
|
93 |
-
|
94 |
-
return outputs, logabsdet
|
95 |
-
|
96 |
-
def rational_quadratic_spline(inputs,
|
97 |
-
unnormalized_widths,
|
98 |
-
unnormalized_heights,
|
99 |
-
unnormalized_derivatives,
|
100 |
-
inverse=False,
|
101 |
-
left=0., right=1., bottom=0., top=1.,
|
102 |
-
min_bin_width=DEFAULT_MIN_BIN_WIDTH,
|
103 |
-
min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
|
104 |
-
min_derivative=DEFAULT_MIN_DERIVATIVE):
|
105 |
-
if torch.min(inputs) < left or torch.max(inputs) > right:
|
106 |
-
raise ValueError('Input to a transform is not within its domain')
|
107 |
-
|
108 |
-
num_bins = unnormalized_widths.shape[-1]
|
109 |
-
|
110 |
-
if min_bin_width * num_bins > 1.0:
|
111 |
-
raise ValueError('Minimal bin width too large for the number of bins')
|
112 |
-
if min_bin_height * num_bins > 1.0:
|
113 |
-
raise ValueError('Minimal bin height too large for the number of bins')
|
114 |
-
|
115 |
-
widths = F.softmax(unnormalized_widths, dim=-1)
|
116 |
-
widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
|
117 |
-
cumwidths = torch.cumsum(widths, dim=-1)
|
118 |
-
cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
|
119 |
-
cumwidths = (right - left) * cumwidths + left
|
120 |
-
cumwidths[..., 0] = left
|
121 |
-
cumwidths[..., -1] = right
|
122 |
-
widths = cumwidths[..., 1:] - cumwidths[..., :-1]
|
123 |
-
|
124 |
-
derivatives = min_derivative + F.softplus(unnormalized_derivatives)
|
125 |
-
|
126 |
-
heights = F.softmax(unnormalized_heights, dim=-1)
|
127 |
-
heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
|
128 |
-
cumheights = torch.cumsum(heights, dim=-1)
|
129 |
-
cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
|
130 |
-
cumheights = (top - bottom) * cumheights + bottom
|
131 |
-
cumheights[..., 0] = bottom
|
132 |
-
cumheights[..., -1] = top
|
133 |
-
heights = cumheights[..., 1:] - cumheights[..., :-1]
|
134 |
-
|
135 |
-
if inverse:
|
136 |
-
bin_idx = searchsorted(cumheights, inputs)[..., None]
|
137 |
-
else:
|
138 |
-
bin_idx = searchsorted(cumwidths, inputs)[..., None]
|
139 |
-
|
140 |
-
input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
|
141 |
-
input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
|
142 |
-
|
143 |
-
input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
|
144 |
-
delta = heights / widths
|
145 |
-
input_delta = delta.gather(-1, bin_idx)[..., 0]
|
146 |
-
|
147 |
-
input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
|
148 |
-
input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
|
149 |
-
|
150 |
-
input_heights = heights.gather(-1, bin_idx)[..., 0]
|
151 |
-
|
152 |
-
if inverse:
|
153 |
-
a = (((inputs - input_cumheights) * (input_derivatives
|
154 |
-
+ input_derivatives_plus_one
|
155 |
-
- 2 * input_delta)
|
156 |
-
+ input_heights * (input_delta - input_derivatives)))
|
157 |
-
b = (input_heights * input_derivatives
|
158 |
-
- (inputs - input_cumheights) * (input_derivatives
|
159 |
-
+ input_derivatives_plus_one
|
160 |
-
- 2 * input_delta))
|
161 |
-
c = - input_delta * (inputs - input_cumheights)
|
162 |
-
|
163 |
-
discriminant = b.pow(2) - 4 * a * c
|
164 |
-
assert (discriminant >= 0).all()
|
165 |
-
|
166 |
-
root = (2 * c) / (-b - torch.sqrt(discriminant))
|
167 |
-
outputs = root * input_bin_widths + input_cumwidths
|
168 |
-
|
169 |
-
theta_one_minus_theta = root * (1 - root)
|
170 |
-
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
171 |
-
* theta_one_minus_theta)
|
172 |
-
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
|
173 |
-
+ 2 * input_delta * theta_one_minus_theta
|
174 |
-
+ input_derivatives * (1 - root).pow(2))
|
175 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
176 |
-
|
177 |
-
return outputs, -logabsdet
|
178 |
-
else:
|
179 |
-
theta = (inputs - input_cumwidths) / input_bin_widths
|
180 |
-
theta_one_minus_theta = theta * (1 - theta)
|
181 |
-
|
182 |
-
numerator = input_heights * (input_delta * theta.pow(2)
|
183 |
-
+ input_derivatives * theta_one_minus_theta)
|
184 |
-
denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
|
185 |
-
* theta_one_minus_theta)
|
186 |
-
outputs = input_cumheights + numerator / denominator
|
187 |
-
|
188 |
-
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
|
189 |
-
+ 2 * input_delta * theta_one_minus_theta
|
190 |
-
+ input_derivatives * (1 - theta).pow(2))
|
191 |
-
logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
|
192 |
-
|
193 |
-
return outputs, logabsdet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/config.py
DELETED
@@ -1,58 +0,0 @@
|
|
1 |
-
# [step 1]>> 例如: API_KEY = "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" (此key无效)
|
2 |
-
API_KEY = "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" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
|
3 |
-
|
4 |
-
# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改
|
5 |
-
USE_PROXY = True
|
6 |
-
if USE_PROXY:
|
7 |
-
# 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
|
8 |
-
# 例如 "socks5h://localhost:11284"
|
9 |
-
# [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
|
10 |
-
# [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
|
11 |
-
# [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
|
12 |
-
|
13 |
-
# 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
|
14 |
-
proxies = {
|
15 |
-
# [协议]:// [地址] :[端口]
|
16 |
-
"http": "http://127.0.0.1:7890",
|
17 |
-
"https": "http://127.0.0.1:7890",
|
18 |
-
}
|
19 |
-
else:
|
20 |
-
proxies = None
|
21 |
-
|
22 |
-
# [step 3]>> 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
|
23 |
-
# 一言以蔽之:免费用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
|
24 |
-
DEFAULT_WORKER_NUM = 3
|
25 |
-
|
26 |
-
|
27 |
-
# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改
|
28 |
-
# 对话窗的高度
|
29 |
-
CHATBOT_HEIGHT = 1115
|
30 |
-
|
31 |
-
# 代码高亮
|
32 |
-
CODE_HIGHLIGHT = True
|
33 |
-
|
34 |
-
# 窗口布局
|
35 |
-
LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
|
36 |
-
|
37 |
-
# 发送请求到OpenAI后,等待多久判定为超时
|
38 |
-
TIMEOUT_SECONDS = 30
|
39 |
-
|
40 |
-
# 网页的端口, -1代表随机端口
|
41 |
-
WEB_PORT = -1
|
42 |
-
|
43 |
-
# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
|
44 |
-
MAX_RETRY = 2
|
45 |
-
|
46 |
-
# OpenAI模型选择是(gpt4现在只对申请成功的人开放)
|
47 |
-
LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
|
48 |
-
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
|
49 |
-
|
50 |
-
# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
|
51 |
-
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
|
52 |
-
|
53 |
-
# 设置gradio的并行线程数(不需要修改)
|
54 |
-
CONCURRENT_COUNT = 100
|
55 |
-
|
56 |
-
# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
|
57 |
-
# [("username", "password"), ("username2", "password2"), ...]
|
58 |
-
AUTHENTICATION = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/util.py
DELETED
@@ -1,492 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
3 |
-
#
|
4 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
5 |
-
# and proprietary rights in and to this software, related documentation
|
6 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
7 |
-
# distribution of this software and related documentation without an express
|
8 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
9 |
-
|
10 |
-
"""Miscellaneous utility classes and functions."""
|
11 |
-
|
12 |
-
import ctypes
|
13 |
-
import fnmatch
|
14 |
-
import importlib
|
15 |
-
import inspect
|
16 |
-
import numpy as np
|
17 |
-
import os
|
18 |
-
import shutil
|
19 |
-
import sys
|
20 |
-
import types
|
21 |
-
import io
|
22 |
-
import pickle
|
23 |
-
import re
|
24 |
-
import requests
|
25 |
-
import html
|
26 |
-
import hashlib
|
27 |
-
import glob
|
28 |
-
import tempfile
|
29 |
-
import urllib
|
30 |
-
import urllib.request
|
31 |
-
import uuid
|
32 |
-
|
33 |
-
from distutils.util import strtobool
|
34 |
-
from typing import Any, List, Tuple, Union
|
35 |
-
|
36 |
-
|
37 |
-
# Util classes
|
38 |
-
# ------------------------------------------------------------------------------------------
|
39 |
-
|
40 |
-
|
41 |
-
class EasyDict(dict):
|
42 |
-
"""Convenience class that behaves like a dict but allows access with the attribute syntax."""
|
43 |
-
|
44 |
-
def __getattr__(self, name: str) -> Any:
|
45 |
-
try:
|
46 |
-
return self[name]
|
47 |
-
except KeyError:
|
48 |
-
raise AttributeError(name)
|
49 |
-
|
50 |
-
def __setattr__(self, name: str, value: Any) -> None:
|
51 |
-
self[name] = value
|
52 |
-
|
53 |
-
def __delattr__(self, name: str) -> None:
|
54 |
-
del self[name]
|
55 |
-
|
56 |
-
|
57 |
-
class Logger(object):
|
58 |
-
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
|
59 |
-
|
60 |
-
def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
|
61 |
-
self.file = None
|
62 |
-
|
63 |
-
if file_name is not None:
|
64 |
-
self.file = open(file_name, file_mode)
|
65 |
-
|
66 |
-
self.should_flush = should_flush
|
67 |
-
self.stdout = sys.stdout
|
68 |
-
self.stderr = sys.stderr
|
69 |
-
|
70 |
-
sys.stdout = self
|
71 |
-
sys.stderr = self
|
72 |
-
|
73 |
-
def __enter__(self) -> "Logger":
|
74 |
-
return self
|
75 |
-
|
76 |
-
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
77 |
-
self.close()
|
78 |
-
|
79 |
-
def write(self, text: Union[str, bytes]) -> None:
|
80 |
-
"""Write text to stdout (and a file) and optionally flush."""
|
81 |
-
if isinstance(text, bytes):
|
82 |
-
text = text.decode()
|
83 |
-
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
|
84 |
-
return
|
85 |
-
|
86 |
-
if self.file is not None:
|
87 |
-
self.file.write(text)
|
88 |
-
|
89 |
-
self.stdout.write(text)
|
90 |
-
|
91 |
-
if self.should_flush:
|
92 |
-
self.flush()
|
93 |
-
|
94 |
-
def flush(self) -> None:
|
95 |
-
"""Flush written text to both stdout and a file, if open."""
|
96 |
-
if self.file is not None:
|
97 |
-
self.file.flush()
|
98 |
-
|
99 |
-
self.stdout.flush()
|
100 |
-
|
101 |
-
def close(self) -> None:
|
102 |
-
"""Flush, close possible files, and remove stdout/stderr mirroring."""
|
103 |
-
self.flush()
|
104 |
-
|
105 |
-
# if using multiple loggers, prevent closing in wrong order
|
106 |
-
if sys.stdout is self:
|
107 |
-
sys.stdout = self.stdout
|
108 |
-
if sys.stderr is self:
|
109 |
-
sys.stderr = self.stderr
|
110 |
-
|
111 |
-
if self.file is not None:
|
112 |
-
self.file.close()
|
113 |
-
self.file = None
|
114 |
-
|
115 |
-
|
116 |
-
# Cache directories
|
117 |
-
# ------------------------------------------------------------------------------------------
|
118 |
-
|
119 |
-
_dnnlib_cache_dir = None
|
120 |
-
|
121 |
-
|
122 |
-
def set_cache_dir(path: str) -> None:
|
123 |
-
global _dnnlib_cache_dir
|
124 |
-
_dnnlib_cache_dir = path
|
125 |
-
|
126 |
-
|
127 |
-
def make_cache_dir_path(*paths: str) -> str:
|
128 |
-
if _dnnlib_cache_dir is not None:
|
129 |
-
return os.path.join(_dnnlib_cache_dir, *paths)
|
130 |
-
if 'DNNLIB_CACHE_DIR' in os.environ:
|
131 |
-
return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
|
132 |
-
if 'HOME' in os.environ:
|
133 |
-
return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
|
134 |
-
if 'USERPROFILE' in os.environ:
|
135 |
-
return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
|
136 |
-
return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
|
137 |
-
|
138 |
-
# Small util functions
|
139 |
-
# ------------------------------------------------------------------------------------------
|
140 |
-
|
141 |
-
|
142 |
-
def format_time(seconds: Union[int, float]) -> str:
|
143 |
-
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
|
144 |
-
s = int(np.rint(seconds))
|
145 |
-
|
146 |
-
if s < 60:
|
147 |
-
return "{0}s".format(s)
|
148 |
-
elif s < 60 * 60:
|
149 |
-
return "{0}m {1:02}s".format(s // 60, s % 60)
|
150 |
-
elif s < 24 * 60 * 60:
|
151 |
-
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
|
152 |
-
else:
|
153 |
-
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
|
154 |
-
|
155 |
-
|
156 |
-
def ask_yes_no(question: str) -> bool:
|
157 |
-
"""Ask the user the question until the user inputs a valid answer."""
|
158 |
-
while True:
|
159 |
-
try:
|
160 |
-
print("{0} [y/n]".format(question))
|
161 |
-
return strtobool(input().lower())
|
162 |
-
except ValueError:
|
163 |
-
pass
|
164 |
-
|
165 |
-
|
166 |
-
def tuple_product(t: Tuple) -> Any:
|
167 |
-
"""Calculate the product of the tuple elements."""
|
168 |
-
result = 1
|
169 |
-
|
170 |
-
for v in t:
|
171 |
-
result *= v
|
172 |
-
|
173 |
-
return result
|
174 |
-
|
175 |
-
|
176 |
-
_str_to_ctype = {
|
177 |
-
"uint8": ctypes.c_ubyte,
|
178 |
-
"uint16": ctypes.c_uint16,
|
179 |
-
"uint32": ctypes.c_uint32,
|
180 |
-
"uint64": ctypes.c_uint64,
|
181 |
-
"int8": ctypes.c_byte,
|
182 |
-
"int16": ctypes.c_int16,
|
183 |
-
"int32": ctypes.c_int32,
|
184 |
-
"int64": ctypes.c_int64,
|
185 |
-
"float32": ctypes.c_float,
|
186 |
-
"float64": ctypes.c_double
|
187 |
-
}
|
188 |
-
|
189 |
-
|
190 |
-
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
|
191 |
-
"""Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
|
192 |
-
type_str = None
|
193 |
-
|
194 |
-
if isinstance(type_obj, str):
|
195 |
-
type_str = type_obj
|
196 |
-
elif hasattr(type_obj, "__name__"):
|
197 |
-
type_str = type_obj.__name__
|
198 |
-
elif hasattr(type_obj, "name"):
|
199 |
-
type_str = type_obj.name
|
200 |
-
else:
|
201 |
-
raise RuntimeError("Cannot infer type name from input")
|
202 |
-
|
203 |
-
assert type_str in _str_to_ctype.keys()
|
204 |
-
|
205 |
-
my_dtype = np.dtype(type_str)
|
206 |
-
my_ctype = _str_to_ctype[type_str]
|
207 |
-
|
208 |
-
assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
|
209 |
-
|
210 |
-
return my_dtype, my_ctype
|
211 |
-
|
212 |
-
|
213 |
-
def is_pickleable(obj: Any) -> bool:
|
214 |
-
try:
|
215 |
-
with io.BytesIO() as stream:
|
216 |
-
pickle.dump(obj, stream)
|
217 |
-
return True
|
218 |
-
except:
|
219 |
-
return False
|
220 |
-
|
221 |
-
|
222 |
-
# Functionality to import modules/objects by name, and call functions by name
|
223 |
-
# ------------------------------------------------------------------------------------------
|
224 |
-
|
225 |
-
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
|
226 |
-
"""Searches for the underlying module behind the name to some python object.
|
227 |
-
Returns the module and the object name (original name with module part removed)."""
|
228 |
-
|
229 |
-
# allow convenience shorthands, substitute them by full names
|
230 |
-
obj_name = re.sub("^np.", "numpy.", obj_name)
|
231 |
-
obj_name = re.sub("^tf.", "tensorflow.", obj_name)
|
232 |
-
|
233 |
-
# list alternatives for (module_name, local_obj_name)
|
234 |
-
parts = obj_name.split(".")
|
235 |
-
name_pairs = [(".".join(parts[:i]), ".".join(parts[i:]))
|
236 |
-
for i in range(len(parts), 0, -1)]
|
237 |
-
|
238 |
-
# try each alternative in turn
|
239 |
-
for module_name, local_obj_name in name_pairs:
|
240 |
-
try:
|
241 |
-
module = importlib.import_module(
|
242 |
-
module_name) # may raise ImportError
|
243 |
-
# may raise AttributeError
|
244 |
-
get_obj_from_module(module, local_obj_name)
|
245 |
-
return module, local_obj_name
|
246 |
-
except:
|
247 |
-
pass
|
248 |
-
|
249 |
-
# maybe some of the modules themselves contain errors?
|
250 |
-
for module_name, _local_obj_name in name_pairs:
|
251 |
-
try:
|
252 |
-
importlib.import_module(module_name) # may raise ImportError
|
253 |
-
except ImportError:
|
254 |
-
if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
|
255 |
-
raise
|
256 |
-
|
257 |
-
# maybe the requested attribute is missing?
|
258 |
-
for module_name, local_obj_name in name_pairs:
|
259 |
-
try:
|
260 |
-
module = importlib.import_module(
|
261 |
-
module_name) # may raise ImportError
|
262 |
-
# may raise AttributeError
|
263 |
-
get_obj_from_module(module, local_obj_name)
|
264 |
-
except ImportError:
|
265 |
-
pass
|
266 |
-
|
267 |
-
# we are out of luck, but we have no idea why
|
268 |
-
raise ImportError(obj_name)
|
269 |
-
|
270 |
-
|
271 |
-
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
|
272 |
-
"""Traverses the object name and returns the last (rightmost) python object."""
|
273 |
-
if obj_name == '':
|
274 |
-
return module
|
275 |
-
obj = module
|
276 |
-
for part in obj_name.split("."):
|
277 |
-
obj = getattr(obj, part)
|
278 |
-
return obj
|
279 |
-
|
280 |
-
|
281 |
-
def get_obj_by_name(name: str) -> Any:
|
282 |
-
"""Finds the python object with the given name."""
|
283 |
-
module, obj_name = get_module_from_obj_name(name)
|
284 |
-
return get_obj_from_module(module, obj_name)
|
285 |
-
|
286 |
-
|
287 |
-
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
|
288 |
-
"""Finds the python object with the given name and calls it as a function."""
|
289 |
-
assert func_name is not None
|
290 |
-
# print('func_name: ', func_name) #'training.dataset.ImageFolderDataset'
|
291 |
-
func_obj = get_obj_by_name(func_name)
|
292 |
-
assert callable(func_obj)
|
293 |
-
return func_obj(*args, **kwargs)
|
294 |
-
|
295 |
-
|
296 |
-
def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
|
297 |
-
"""Finds the python class with the given name and constructs it with the given arguments."""
|
298 |
-
return call_func_by_name(*args, func_name=class_name, **kwargs)
|
299 |
-
|
300 |
-
|
301 |
-
def get_module_dir_by_obj_name(obj_name: str) -> str:
|
302 |
-
"""Get the directory path of the module containing the given object name."""
|
303 |
-
module, _ = get_module_from_obj_name(obj_name)
|
304 |
-
return os.path.dirname(inspect.getfile(module))
|
305 |
-
|
306 |
-
|
307 |
-
def is_top_level_function(obj: Any) -> bool:
|
308 |
-
"""Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
|
309 |
-
return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
|
310 |
-
|
311 |
-
|
312 |
-
def get_top_level_function_name(obj: Any) -> str:
|
313 |
-
"""Return the fully-qualified name of a top-level function."""
|
314 |
-
assert is_top_level_function(obj)
|
315 |
-
module = obj.__module__
|
316 |
-
if module == '__main__':
|
317 |
-
module = os.path.splitext(os.path.basename(
|
318 |
-
sys.modules[module].__file__))[0]
|
319 |
-
return module + "." + obj.__name__
|
320 |
-
|
321 |
-
|
322 |
-
# File system helpers
|
323 |
-
# ------------------------------------------------------------------------------------------
|
324 |
-
|
325 |
-
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
|
326 |
-
"""List all files recursively in a given directory while ignoring given file and directory names.
|
327 |
-
Returns list of tuples containing both absolute and relative paths."""
|
328 |
-
assert os.path.isdir(dir_path)
|
329 |
-
base_name = os.path.basename(os.path.normpath(dir_path))
|
330 |
-
|
331 |
-
if ignores is None:
|
332 |
-
ignores = []
|
333 |
-
|
334 |
-
result = []
|
335 |
-
|
336 |
-
for root, dirs, files in os.walk(dir_path, topdown=True):
|
337 |
-
for ignore_ in ignores:
|
338 |
-
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
|
339 |
-
|
340 |
-
# dirs need to be edited in-place
|
341 |
-
for d in dirs_to_remove:
|
342 |
-
dirs.remove(d)
|
343 |
-
|
344 |
-
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
|
345 |
-
|
346 |
-
absolute_paths = [os.path.join(root, f) for f in files]
|
347 |
-
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
|
348 |
-
|
349 |
-
if add_base_to_relative:
|
350 |
-
relative_paths = [os.path.join(base_name, p)
|
351 |
-
for p in relative_paths]
|
352 |
-
|
353 |
-
assert len(absolute_paths) == len(relative_paths)
|
354 |
-
result += zip(absolute_paths, relative_paths)
|
355 |
-
|
356 |
-
return result
|
357 |
-
|
358 |
-
|
359 |
-
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
|
360 |
-
"""Takes in a list of tuples of (src, dst) paths and copies files.
|
361 |
-
Will create all necessary directories."""
|
362 |
-
for file in files:
|
363 |
-
target_dir_name = os.path.dirname(file[1])
|
364 |
-
|
365 |
-
# will create all intermediate-level directories
|
366 |
-
if not os.path.exists(target_dir_name):
|
367 |
-
os.makedirs(target_dir_name)
|
368 |
-
|
369 |
-
shutil.copyfile(file[0], file[1])
|
370 |
-
|
371 |
-
|
372 |
-
# URL helpers
|
373 |
-
# ------------------------------------------------------------------------------------------
|
374 |
-
|
375 |
-
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
|
376 |
-
"""Determine whether the given object is a valid URL string."""
|
377 |
-
if not isinstance(obj, str) or not "://" in obj:
|
378 |
-
return False
|
379 |
-
if allow_file_urls and obj.startswith('file://'):
|
380 |
-
return True
|
381 |
-
try:
|
382 |
-
res = requests.compat.urlparse(obj)
|
383 |
-
if not res.scheme or not res.netloc or not "." in res.netloc:
|
384 |
-
return False
|
385 |
-
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
|
386 |
-
if not res.scheme or not res.netloc or not "." in res.netloc:
|
387 |
-
return False
|
388 |
-
except:
|
389 |
-
return False
|
390 |
-
return True
|
391 |
-
|
392 |
-
|
393 |
-
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
|
394 |
-
"""Download the given URL and return a binary-mode file object to access the data."""
|
395 |
-
assert num_attempts >= 1
|
396 |
-
assert not (return_filename and (not cache))
|
397 |
-
|
398 |
-
# Doesn't look like an URL scheme so interpret it as a local filename.
|
399 |
-
if not re.match('^[a-z]+://', url):
|
400 |
-
return url if return_filename else open(url, "rb")
|
401 |
-
|
402 |
-
# Handle file URLs. This code handles unusual file:// patterns that
|
403 |
-
# arise on Windows:
|
404 |
-
#
|
405 |
-
# file:///c:/foo.txt
|
406 |
-
#
|
407 |
-
# which would translate to a local '/c:/foo.txt' filename that's
|
408 |
-
# invalid. Drop the forward slash for such pathnames.
|
409 |
-
#
|
410 |
-
# If you touch this code path, you should test it on both Linux and
|
411 |
-
# Windows.
|
412 |
-
#
|
413 |
-
# Some internet resources suggest using urllib.request.url2pathname() but
|
414 |
-
# but that converts forward slashes to backslashes and this causes
|
415 |
-
# its own set of problems.
|
416 |
-
if url.startswith('file://'):
|
417 |
-
filename = urllib.parse.urlparse(url).path
|
418 |
-
if re.match(r'^/[a-zA-Z]:', filename):
|
419 |
-
filename = filename[1:]
|
420 |
-
return filename if return_filename else open(filename, "rb")
|
421 |
-
|
422 |
-
assert is_url(url)
|
423 |
-
|
424 |
-
# Lookup from cache.
|
425 |
-
if cache_dir is None:
|
426 |
-
cache_dir = make_cache_dir_path('downloads')
|
427 |
-
|
428 |
-
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
|
429 |
-
if cache:
|
430 |
-
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
|
431 |
-
if len(cache_files) == 1:
|
432 |
-
filename = cache_files[0]
|
433 |
-
return filename if return_filename else open(filename, "rb")
|
434 |
-
|
435 |
-
# Download.
|
436 |
-
url_name = None
|
437 |
-
url_data = None
|
438 |
-
with requests.Session() as session:
|
439 |
-
if verbose:
|
440 |
-
print("Downloading %s ..." % url, end="", flush=True)
|
441 |
-
for attempts_left in reversed(range(num_attempts)):
|
442 |
-
try:
|
443 |
-
with session.get(url) as res:
|
444 |
-
res.raise_for_status()
|
445 |
-
if len(res.content) == 0:
|
446 |
-
raise IOError("No data received")
|
447 |
-
|
448 |
-
if len(res.content) < 8192:
|
449 |
-
content_str = res.content.decode("utf-8")
|
450 |
-
if "download_warning" in res.headers.get("Set-Cookie", ""):
|
451 |
-
links = [html.unescape(link) for link in content_str.split(
|
452 |
-
'"') if "export=download" in link]
|
453 |
-
if len(links) == 1:
|
454 |
-
url = requests.compat.urljoin(url, links[0])
|
455 |
-
raise IOError("Google Drive virus checker nag")
|
456 |
-
if "Google Drive - Quota exceeded" in content_str:
|
457 |
-
raise IOError(
|
458 |
-
"Google Drive download quota exceeded -- please try again later")
|
459 |
-
|
460 |
-
match = re.search(
|
461 |
-
r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
|
462 |
-
url_name = match[1] if match else url
|
463 |
-
url_data = res.content
|
464 |
-
if verbose:
|
465 |
-
print(" done")
|
466 |
-
break
|
467 |
-
except KeyboardInterrupt:
|
468 |
-
raise
|
469 |
-
except:
|
470 |
-
if not attempts_left:
|
471 |
-
if verbose:
|
472 |
-
print(" failed")
|
473 |
-
raise
|
474 |
-
if verbose:
|
475 |
-
print(".", end="", flush=True)
|
476 |
-
|
477 |
-
# Save to cache.
|
478 |
-
if cache:
|
479 |
-
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
|
480 |
-
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
|
481 |
-
temp_file = os.path.join(
|
482 |
-
cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
|
483 |
-
os.makedirs(cache_dir, exist_ok=True)
|
484 |
-
with open(temp_file, "wb") as f:
|
485 |
-
f.write(url_data)
|
486 |
-
os.replace(temp_file, cache_file) # atomic
|
487 |
-
if return_filename:
|
488 |
-
return cache_file
|
489 |
-
|
490 |
-
# Return data as file object.
|
491 |
-
assert not return_filename
|
492 |
-
return io.BytesIO(url_data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py
DELETED
@@ -1,532 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
import warnings
|
16 |
-
from functools import partial
|
17 |
-
from typing import Dict, List, Optional, Union
|
18 |
-
|
19 |
-
import jax
|
20 |
-
import jax.numpy as jnp
|
21 |
-
import numpy as np
|
22 |
-
from flax.core.frozen_dict import FrozenDict
|
23 |
-
from flax.jax_utils import unreplicate
|
24 |
-
from flax.training.common_utils import shard
|
25 |
-
from PIL import Image
|
26 |
-
from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel
|
27 |
-
|
28 |
-
from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel
|
29 |
-
from ...schedulers import (
|
30 |
-
FlaxDDIMScheduler,
|
31 |
-
FlaxDPMSolverMultistepScheduler,
|
32 |
-
FlaxLMSDiscreteScheduler,
|
33 |
-
FlaxPNDMScheduler,
|
34 |
-
)
|
35 |
-
from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring
|
36 |
-
from ..pipeline_flax_utils import FlaxDiffusionPipeline
|
37 |
-
from . import FlaxStableDiffusionPipelineOutput
|
38 |
-
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
|
39 |
-
|
40 |
-
|
41 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
42 |
-
|
43 |
-
# Set to True to use python for loop instead of jax.fori_loop for easier debugging
|
44 |
-
DEBUG = False
|
45 |
-
|
46 |
-
EXAMPLE_DOC_STRING = """
|
47 |
-
Examples:
|
48 |
-
```py
|
49 |
-
>>> import jax
|
50 |
-
>>> import numpy as np
|
51 |
-
>>> import jax.numpy as jnp
|
52 |
-
>>> from flax.jax_utils import replicate
|
53 |
-
>>> from flax.training.common_utils import shard
|
54 |
-
>>> import requests
|
55 |
-
>>> from io import BytesIO
|
56 |
-
>>> from PIL import Image
|
57 |
-
>>> from diffusers import FlaxStableDiffusionImg2ImgPipeline
|
58 |
-
|
59 |
-
|
60 |
-
>>> def create_key(seed=0):
|
61 |
-
... return jax.random.PRNGKey(seed)
|
62 |
-
|
63 |
-
|
64 |
-
>>> rng = create_key(0)
|
65 |
-
|
66 |
-
>>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
|
67 |
-
>>> response = requests.get(url)
|
68 |
-
>>> init_img = Image.open(BytesIO(response.content)).convert("RGB")
|
69 |
-
>>> init_img = init_img.resize((768, 512))
|
70 |
-
|
71 |
-
>>> prompts = "A fantasy landscape, trending on artstation"
|
72 |
-
|
73 |
-
>>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained(
|
74 |
-
... "CompVis/stable-diffusion-v1-4",
|
75 |
-
... revision="flax",
|
76 |
-
... dtype=jnp.bfloat16,
|
77 |
-
... )
|
78 |
-
|
79 |
-
>>> num_samples = jax.device_count()
|
80 |
-
>>> rng = jax.random.split(rng, jax.device_count())
|
81 |
-
>>> prompt_ids, processed_image = pipeline.prepare_inputs(
|
82 |
-
... prompt=[prompts] * num_samples, image=[init_img] * num_samples
|
83 |
-
... )
|
84 |
-
>>> p_params = replicate(params)
|
85 |
-
>>> prompt_ids = shard(prompt_ids)
|
86 |
-
>>> processed_image = shard(processed_image)
|
87 |
-
|
88 |
-
>>> output = pipeline(
|
89 |
-
... prompt_ids=prompt_ids,
|
90 |
-
... image=processed_image,
|
91 |
-
... params=p_params,
|
92 |
-
... prng_seed=rng,
|
93 |
-
... strength=0.75,
|
94 |
-
... num_inference_steps=50,
|
95 |
-
... jit=True,
|
96 |
-
... height=512,
|
97 |
-
... width=768,
|
98 |
-
... ).images
|
99 |
-
|
100 |
-
>>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
|
101 |
-
```
|
102 |
-
"""
|
103 |
-
|
104 |
-
|
105 |
-
class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline):
|
106 |
-
r"""
|
107 |
-
Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion.
|
108 |
-
|
109 |
-
This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods
|
110 |
-
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
111 |
-
|
112 |
-
Args:
|
113 |
-
vae ([`FlaxAutoencoderKL`]):
|
114 |
-
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
115 |
-
text_encoder ([`~transformers.FlaxCLIPTextModel`]):
|
116 |
-
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
117 |
-
tokenizer ([`~transformers.CLIPTokenizer`]):
|
118 |
-
A `CLIPTokenizer` to tokenize text.
|
119 |
-
unet ([`FlaxUNet2DConditionModel`]):
|
120 |
-
A `FlaxUNet2DConditionModel` to denoise the encoded image latents.
|
121 |
-
scheduler ([`SchedulerMixin`]):
|
122 |
-
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
123 |
-
[`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or
|
124 |
-
[`FlaxDPMSolverMultistepScheduler`].
|
125 |
-
safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
|
126 |
-
Classification module that estimates whether generated images could be considered offensive or harmful.
|
127 |
-
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
|
128 |
-
about a model's potential harms.
|
129 |
-
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
130 |
-
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
131 |
-
"""
|
132 |
-
|
133 |
-
def __init__(
|
134 |
-
self,
|
135 |
-
vae: FlaxAutoencoderKL,
|
136 |
-
text_encoder: FlaxCLIPTextModel,
|
137 |
-
tokenizer: CLIPTokenizer,
|
138 |
-
unet: FlaxUNet2DConditionModel,
|
139 |
-
scheduler: Union[
|
140 |
-
FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler
|
141 |
-
],
|
142 |
-
safety_checker: FlaxStableDiffusionSafetyChecker,
|
143 |
-
feature_extractor: CLIPImageProcessor,
|
144 |
-
dtype: jnp.dtype = jnp.float32,
|
145 |
-
):
|
146 |
-
super().__init__()
|
147 |
-
self.dtype = dtype
|
148 |
-
|
149 |
-
if safety_checker is None:
|
150 |
-
logger.warn(
|
151 |
-
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
152 |
-
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
153 |
-
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
154 |
-
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
155 |
-
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
156 |
-
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
157 |
-
)
|
158 |
-
|
159 |
-
self.register_modules(
|
160 |
-
vae=vae,
|
161 |
-
text_encoder=text_encoder,
|
162 |
-
tokenizer=tokenizer,
|
163 |
-
unet=unet,
|
164 |
-
scheduler=scheduler,
|
165 |
-
safety_checker=safety_checker,
|
166 |
-
feature_extractor=feature_extractor,
|
167 |
-
)
|
168 |
-
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
169 |
-
|
170 |
-
def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]):
|
171 |
-
if not isinstance(prompt, (str, list)):
|
172 |
-
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
173 |
-
|
174 |
-
if not isinstance(image, (Image.Image, list)):
|
175 |
-
raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}")
|
176 |
-
|
177 |
-
if isinstance(image, Image.Image):
|
178 |
-
image = [image]
|
179 |
-
|
180 |
-
processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image])
|
181 |
-
|
182 |
-
text_input = self.tokenizer(
|
183 |
-
prompt,
|
184 |
-
padding="max_length",
|
185 |
-
max_length=self.tokenizer.model_max_length,
|
186 |
-
truncation=True,
|
187 |
-
return_tensors="np",
|
188 |
-
)
|
189 |
-
return text_input.input_ids, processed_images
|
190 |
-
|
191 |
-
def _get_has_nsfw_concepts(self, features, params):
|
192 |
-
has_nsfw_concepts = self.safety_checker(features, params)
|
193 |
-
return has_nsfw_concepts
|
194 |
-
|
195 |
-
def _run_safety_checker(self, images, safety_model_params, jit=False):
|
196 |
-
# safety_model_params should already be replicated when jit is True
|
197 |
-
pil_images = [Image.fromarray(image) for image in images]
|
198 |
-
features = self.feature_extractor(pil_images, return_tensors="np").pixel_values
|
199 |
-
|
200 |
-
if jit:
|
201 |
-
features = shard(features)
|
202 |
-
has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params)
|
203 |
-
has_nsfw_concepts = unshard(has_nsfw_concepts)
|
204 |
-
safety_model_params = unreplicate(safety_model_params)
|
205 |
-
else:
|
206 |
-
has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params)
|
207 |
-
|
208 |
-
images_was_copied = False
|
209 |
-
for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
|
210 |
-
if has_nsfw_concept:
|
211 |
-
if not images_was_copied:
|
212 |
-
images_was_copied = True
|
213 |
-
images = images.copy()
|
214 |
-
|
215 |
-
images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image
|
216 |
-
|
217 |
-
if any(has_nsfw_concepts):
|
218 |
-
warnings.warn(
|
219 |
-
"Potential NSFW content was detected in one or more images. A black image will be returned"
|
220 |
-
" instead. Try again with a different prompt and/or seed."
|
221 |
-
)
|
222 |
-
|
223 |
-
return images, has_nsfw_concepts
|
224 |
-
|
225 |
-
def get_timestep_start(self, num_inference_steps, strength):
|
226 |
-
# get the original timestep using init_timestep
|
227 |
-
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
228 |
-
|
229 |
-
t_start = max(num_inference_steps - init_timestep, 0)
|
230 |
-
|
231 |
-
return t_start
|
232 |
-
|
233 |
-
def _generate(
|
234 |
-
self,
|
235 |
-
prompt_ids: jnp.array,
|
236 |
-
image: jnp.array,
|
237 |
-
params: Union[Dict, FrozenDict],
|
238 |
-
prng_seed: jax.random.KeyArray,
|
239 |
-
start_timestep: int,
|
240 |
-
num_inference_steps: int,
|
241 |
-
height: int,
|
242 |
-
width: int,
|
243 |
-
guidance_scale: float,
|
244 |
-
noise: Optional[jnp.array] = None,
|
245 |
-
neg_prompt_ids: Optional[jnp.array] = None,
|
246 |
-
):
|
247 |
-
if height % 8 != 0 or width % 8 != 0:
|
248 |
-
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
249 |
-
|
250 |
-
# get prompt text embeddings
|
251 |
-
prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0]
|
252 |
-
|
253 |
-
# TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0`
|
254 |
-
# implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0`
|
255 |
-
batch_size = prompt_ids.shape[0]
|
256 |
-
|
257 |
-
max_length = prompt_ids.shape[-1]
|
258 |
-
|
259 |
-
if neg_prompt_ids is None:
|
260 |
-
uncond_input = self.tokenizer(
|
261 |
-
[""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np"
|
262 |
-
).input_ids
|
263 |
-
else:
|
264 |
-
uncond_input = neg_prompt_ids
|
265 |
-
negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0]
|
266 |
-
context = jnp.concatenate([negative_prompt_embeds, prompt_embeds])
|
267 |
-
|
268 |
-
latents_shape = (
|
269 |
-
batch_size,
|
270 |
-
self.unet.config.in_channels,
|
271 |
-
height // self.vae_scale_factor,
|
272 |
-
width // self.vae_scale_factor,
|
273 |
-
)
|
274 |
-
if noise is None:
|
275 |
-
noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32)
|
276 |
-
else:
|
277 |
-
if noise.shape != latents_shape:
|
278 |
-
raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}")
|
279 |
-
|
280 |
-
# Create init_latents
|
281 |
-
init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist
|
282 |
-
init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2))
|
283 |
-
init_latents = self.vae.config.scaling_factor * init_latents
|
284 |
-
|
285 |
-
def loop_body(step, args):
|
286 |
-
latents, scheduler_state = args
|
287 |
-
# For classifier free guidance, we need to do two forward passes.
|
288 |
-
# Here we concatenate the unconditional and text embeddings into a single batch
|
289 |
-
# to avoid doing two forward passes
|
290 |
-
latents_input = jnp.concatenate([latents] * 2)
|
291 |
-
|
292 |
-
t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step]
|
293 |
-
timestep = jnp.broadcast_to(t, latents_input.shape[0])
|
294 |
-
|
295 |
-
latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t)
|
296 |
-
|
297 |
-
# predict the noise residual
|
298 |
-
noise_pred = self.unet.apply(
|
299 |
-
{"params": params["unet"]},
|
300 |
-
jnp.array(latents_input),
|
301 |
-
jnp.array(timestep, dtype=jnp.int32),
|
302 |
-
encoder_hidden_states=context,
|
303 |
-
).sample
|
304 |
-
# perform guidance
|
305 |
-
noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0)
|
306 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
|
307 |
-
|
308 |
-
# compute the previous noisy sample x_t -> x_t-1
|
309 |
-
latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple()
|
310 |
-
return latents, scheduler_state
|
311 |
-
|
312 |
-
scheduler_state = self.scheduler.set_timesteps(
|
313 |
-
params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape
|
314 |
-
)
|
315 |
-
|
316 |
-
latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size)
|
317 |
-
|
318 |
-
latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep)
|
319 |
-
|
320 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
321 |
-
latents = latents * params["scheduler"].init_noise_sigma
|
322 |
-
|
323 |
-
if DEBUG:
|
324 |
-
# run with python for loop
|
325 |
-
for i in range(start_timestep, num_inference_steps):
|
326 |
-
latents, scheduler_state = loop_body(i, (latents, scheduler_state))
|
327 |
-
else:
|
328 |
-
latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state))
|
329 |
-
|
330 |
-
# scale and decode the image latents with vae
|
331 |
-
latents = 1 / self.vae.config.scaling_factor * latents
|
332 |
-
image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample
|
333 |
-
|
334 |
-
image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1)
|
335 |
-
return image
|
336 |
-
|
337 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
338 |
-
def __call__(
|
339 |
-
self,
|
340 |
-
prompt_ids: jnp.array,
|
341 |
-
image: jnp.array,
|
342 |
-
params: Union[Dict, FrozenDict],
|
343 |
-
prng_seed: jax.random.KeyArray,
|
344 |
-
strength: float = 0.8,
|
345 |
-
num_inference_steps: int = 50,
|
346 |
-
height: Optional[int] = None,
|
347 |
-
width: Optional[int] = None,
|
348 |
-
guidance_scale: Union[float, jnp.array] = 7.5,
|
349 |
-
noise: jnp.array = None,
|
350 |
-
neg_prompt_ids: jnp.array = None,
|
351 |
-
return_dict: bool = True,
|
352 |
-
jit: bool = False,
|
353 |
-
):
|
354 |
-
r"""
|
355 |
-
The call function to the pipeline for generation.
|
356 |
-
|
357 |
-
Args:
|
358 |
-
prompt_ids (`jnp.array`):
|
359 |
-
The prompt or prompts to guide image generation.
|
360 |
-
image (`jnp.array`):
|
361 |
-
Array representing an image batch to be used as the starting point.
|
362 |
-
params (`Dict` or `FrozenDict`):
|
363 |
-
Dictionary containing the model parameters/weights.
|
364 |
-
prng_seed (`jax.random.KeyArray` or `jax.Array`):
|
365 |
-
Array containing random number generator key.
|
366 |
-
strength (`float`, *optional*, defaults to 0.8):
|
367 |
-
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
368 |
-
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
369 |
-
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
370 |
-
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
371 |
-
essentially ignores `image`.
|
372 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
373 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
374 |
-
expense of slower inference. This parameter is modulated by `strength`.
|
375 |
-
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
376 |
-
The height in pixels of the generated image.
|
377 |
-
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
378 |
-
The width in pixels of the generated image.
|
379 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
380 |
-
A higher guidance scale value encourages the model to generate images closely linked to the text
|
381 |
-
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
382 |
-
noise (`jnp.array`, *optional*):
|
383 |
-
Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image
|
384 |
-
generation. Can be used to tweak the same generation with different prompts. The array is generated by
|
385 |
-
sampling using the supplied random `generator`.
|
386 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
387 |
-
Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of
|
388 |
-
a plain tuple.
|
389 |
-
jit (`bool`, defaults to `False`):
|
390 |
-
Whether to run `pmap` versions of the generation and safety scoring functions.
|
391 |
-
|
392 |
-
<Tip warning={true}>
|
393 |
-
|
394 |
-
This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a
|
395 |
-
future release.
|
396 |
-
|
397 |
-
</Tip>
|
398 |
-
|
399 |
-
Examples:
|
400 |
-
|
401 |
-
Returns:
|
402 |
-
[`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`:
|
403 |
-
If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is
|
404 |
-
returned, otherwise a `tuple` is returned where the first element is a list with the generated images
|
405 |
-
and the second element is a list of `bool`s indicating whether the corresponding generated image
|
406 |
-
contains "not-safe-for-work" (nsfw) content.
|
407 |
-
"""
|
408 |
-
# 0. Default height and width to unet
|
409 |
-
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
410 |
-
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
411 |
-
|
412 |
-
if isinstance(guidance_scale, float):
|
413 |
-
# Convert to a tensor so each device gets a copy. Follow the prompt_ids for
|
414 |
-
# shape information, as they may be sharded (when `jit` is `True`), or not.
|
415 |
-
guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0])
|
416 |
-
if len(prompt_ids.shape) > 2:
|
417 |
-
# Assume sharded
|
418 |
-
guidance_scale = guidance_scale[:, None]
|
419 |
-
|
420 |
-
start_timestep = self.get_timestep_start(num_inference_steps, strength)
|
421 |
-
|
422 |
-
if jit:
|
423 |
-
images = _p_generate(
|
424 |
-
self,
|
425 |
-
prompt_ids,
|
426 |
-
image,
|
427 |
-
params,
|
428 |
-
prng_seed,
|
429 |
-
start_timestep,
|
430 |
-
num_inference_steps,
|
431 |
-
height,
|
432 |
-
width,
|
433 |
-
guidance_scale,
|
434 |
-
noise,
|
435 |
-
neg_prompt_ids,
|
436 |
-
)
|
437 |
-
else:
|
438 |
-
images = self._generate(
|
439 |
-
prompt_ids,
|
440 |
-
image,
|
441 |
-
params,
|
442 |
-
prng_seed,
|
443 |
-
start_timestep,
|
444 |
-
num_inference_steps,
|
445 |
-
height,
|
446 |
-
width,
|
447 |
-
guidance_scale,
|
448 |
-
noise,
|
449 |
-
neg_prompt_ids,
|
450 |
-
)
|
451 |
-
|
452 |
-
if self.safety_checker is not None:
|
453 |
-
safety_params = params["safety_checker"]
|
454 |
-
images_uint8_casted = (images * 255).round().astype("uint8")
|
455 |
-
num_devices, batch_size = images.shape[:2]
|
456 |
-
|
457 |
-
images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3)
|
458 |
-
images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit)
|
459 |
-
images = np.asarray(images)
|
460 |
-
|
461 |
-
# block images
|
462 |
-
if any(has_nsfw_concept):
|
463 |
-
for i, is_nsfw in enumerate(has_nsfw_concept):
|
464 |
-
if is_nsfw:
|
465 |
-
images[i] = np.asarray(images_uint8_casted[i])
|
466 |
-
|
467 |
-
images = images.reshape(num_devices, batch_size, height, width, 3)
|
468 |
-
else:
|
469 |
-
images = np.asarray(images)
|
470 |
-
has_nsfw_concept = False
|
471 |
-
|
472 |
-
if not return_dict:
|
473 |
-
return (images, has_nsfw_concept)
|
474 |
-
|
475 |
-
return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
476 |
-
|
477 |
-
|
478 |
-
# Static argnums are pipe, start_timestep, num_inference_steps, height, width. A change would trigger recompilation.
|
479 |
-
# Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`).
|
480 |
-
@partial(
|
481 |
-
jax.pmap,
|
482 |
-
in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0),
|
483 |
-
static_broadcasted_argnums=(0, 5, 6, 7, 8),
|
484 |
-
)
|
485 |
-
def _p_generate(
|
486 |
-
pipe,
|
487 |
-
prompt_ids,
|
488 |
-
image,
|
489 |
-
params,
|
490 |
-
prng_seed,
|
491 |
-
start_timestep,
|
492 |
-
num_inference_steps,
|
493 |
-
height,
|
494 |
-
width,
|
495 |
-
guidance_scale,
|
496 |
-
noise,
|
497 |
-
neg_prompt_ids,
|
498 |
-
):
|
499 |
-
return pipe._generate(
|
500 |
-
prompt_ids,
|
501 |
-
image,
|
502 |
-
params,
|
503 |
-
prng_seed,
|
504 |
-
start_timestep,
|
505 |
-
num_inference_steps,
|
506 |
-
height,
|
507 |
-
width,
|
508 |
-
guidance_scale,
|
509 |
-
noise,
|
510 |
-
neg_prompt_ids,
|
511 |
-
)
|
512 |
-
|
513 |
-
|
514 |
-
@partial(jax.pmap, static_broadcasted_argnums=(0,))
|
515 |
-
def _p_get_has_nsfw_concepts(pipe, features, params):
|
516 |
-
return pipe._get_has_nsfw_concepts(features, params)
|
517 |
-
|
518 |
-
|
519 |
-
def unshard(x: jnp.ndarray):
|
520 |
-
# einops.rearrange(x, 'd b ... -> (d b) ...')
|
521 |
-
num_devices, batch_size = x.shape[:2]
|
522 |
-
rest = x.shape[2:]
|
523 |
-
return x.reshape(num_devices * batch_size, *rest)
|
524 |
-
|
525 |
-
|
526 |
-
def preprocess(image, dtype):
|
527 |
-
w, h = image.size
|
528 |
-
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
529 |
-
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
530 |
-
image = jnp.array(image).astype(dtype) / 255.0
|
531 |
-
image = image[None].transpose(0, 3, 1, 2)
|
532 |
-
return 2.0 * image - 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/pndm/test_pndm.py
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import unittest
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
import torch
|
20 |
-
|
21 |
-
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
|
22 |
-
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
|
23 |
-
|
24 |
-
|
25 |
-
enable_full_determinism()
|
26 |
-
|
27 |
-
|
28 |
-
class PNDMPipelineFastTests(unittest.TestCase):
|
29 |
-
@property
|
30 |
-
def dummy_uncond_unet(self):
|
31 |
-
torch.manual_seed(0)
|
32 |
-
model = UNet2DModel(
|
33 |
-
block_out_channels=(32, 64),
|
34 |
-
layers_per_block=2,
|
35 |
-
sample_size=32,
|
36 |
-
in_channels=3,
|
37 |
-
out_channels=3,
|
38 |
-
down_block_types=("DownBlock2D", "AttnDownBlock2D"),
|
39 |
-
up_block_types=("AttnUpBlock2D", "UpBlock2D"),
|
40 |
-
)
|
41 |
-
return model
|
42 |
-
|
43 |
-
def test_inference(self):
|
44 |
-
unet = self.dummy_uncond_unet
|
45 |
-
scheduler = PNDMScheduler()
|
46 |
-
|
47 |
-
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
|
48 |
-
pndm.to(torch_device)
|
49 |
-
pndm.set_progress_bar_config(disable=None)
|
50 |
-
|
51 |
-
generator = torch.manual_seed(0)
|
52 |
-
image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
|
53 |
-
|
54 |
-
generator = torch.manual_seed(0)
|
55 |
-
image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
|
56 |
-
|
57 |
-
image_slice = image[0, -3:, -3:, -1]
|
58 |
-
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
|
59 |
-
|
60 |
-
assert image.shape == (1, 32, 32, 3)
|
61 |
-
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
|
62 |
-
|
63 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
64 |
-
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
|
65 |
-
|
66 |
-
|
67 |
-
@slow
|
68 |
-
@require_torch
|
69 |
-
class PNDMPipelineIntegrationTests(unittest.TestCase):
|
70 |
-
def test_inference_cifar10(self):
|
71 |
-
model_id = "google/ddpm-cifar10-32"
|
72 |
-
|
73 |
-
unet = UNet2DModel.from_pretrained(model_id)
|
74 |
-
scheduler = PNDMScheduler()
|
75 |
-
|
76 |
-
pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
|
77 |
-
pndm.to(torch_device)
|
78 |
-
pndm.set_progress_bar_config(disable=None)
|
79 |
-
generator = torch.manual_seed(0)
|
80 |
-
image = pndm(generator=generator, output_type="numpy").images
|
81 |
-
|
82 |
-
image_slice = image[0, -3:, -3:, -1]
|
83 |
-
|
84 |
-
assert image.shape == (1, 32, 32, 3)
|
85 |
-
expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
|
86 |
-
|
87 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Anonymous-123/ImageNet-Editing/resize_obj.py
DELETED
@@ -1,188 +0,0 @@
|
|
1 |
-
#!/usr/bin/python
|
2 |
-
#****************************************************************#
|
3 |
-
# ScriptName: analysis_data.py
|
4 |
-
# Author: Anonymous_123
|
5 |
-
# Create Date: 2022-07-25 19:54
|
6 |
-
# Modify Author: Anonymous_123
|
7 |
-
# Modify Date: 2022-09-25 12:04
|
8 |
-
# Function:
|
9 |
-
#***************************************************************#
|
10 |
-
|
11 |
-
import os
|
12 |
-
import sys
|
13 |
-
import numpy as np
|
14 |
-
import cv2
|
15 |
-
import torch
|
16 |
-
from tqdm import tqdm
|
17 |
-
import shutil
|
18 |
-
import pdb
|
19 |
-
|
20 |
-
import argparse
|
21 |
-
|
22 |
-
parser = argparse.ArgumentParser(description='resize object')
|
23 |
-
parser.add_argument('--scale', type=float, default=None, help='object scale')
|
24 |
-
parser.add_argument('--img_path', type=str, help='image path')
|
25 |
-
parser.add_argument('--mask_path', type=str, help='mask path')
|
26 |
-
|
27 |
-
|
28 |
-
def get_bbox_and_rate(mask):
|
29 |
-
gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
|
30 |
-
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
|
31 |
-
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
|
32 |
-
if len(contours) == 0:
|
33 |
-
return None, None
|
34 |
-
max_area = 0
|
35 |
-
max_idx = 0
|
36 |
-
for i, cnt in enumerate(contours):
|
37 |
-
x,y,w,h = cv2.boundingRect(cnt)
|
38 |
-
if w*h > max_area:
|
39 |
-
max_idx = i
|
40 |
-
max_area = w*h
|
41 |
-
# 外接矩形
|
42 |
-
x,y,w,h = cv2.boundingRect(contours[max_idx])
|
43 |
-
mask_new = np.zeros(mask.shape, dtype='uint8')
|
44 |
-
mask_new[y:y+h, x:x+w, :] = mask[y:y+h, x:x+w, :]
|
45 |
-
|
46 |
-
rate = (mask_new[:,:,0]>127.5).sum()/mask.shape[0]/mask.shape[1]
|
47 |
-
|
48 |
-
return (x,y,w,h), rate
|
49 |
-
|
50 |
-
def resize_around_the_center(img, mask, bbox, operation, scale_step=1.2):
|
51 |
-
x,y,w,h = bbox
|
52 |
-
H,W,C = mask.shape
|
53 |
-
obj_mask = mask[y:y+h, x:x+w, :].copy()
|
54 |
-
# obj_mask = cv2.resize(obj_mask, (int(w*scale_step),int(h*scale_step)) if operation == 'upsample' else (int(w/scale_step), int(h/scale_step)))
|
55 |
-
obj_mask = cv2.resize(obj_mask, (int(w*scale_step),int(h*scale_step)))
|
56 |
-
start_point_x = max(x+w//2 - obj_mask.shape[1]//2, 0) # center - w
|
57 |
-
start_point_y = max(y+h//2 - obj_mask.shape[0]//2, 0) # center - h
|
58 |
-
end_point_x = min(x+w//2 + obj_mask.shape[1]//2, W) # center+w
|
59 |
-
end_point_y = min(y+h//2 + obj_mask.shape[0]//2, H) # center+h
|
60 |
-
|
61 |
-
start_point_x_obj = max(0,obj_mask.shape[1]//2-(x+w//2))
|
62 |
-
start_point_y_obj = max(0, obj_mask.shape[0]//2-(y+h//2))
|
63 |
-
mask[:] = 0
|
64 |
-
mask[start_point_y:end_point_y, start_point_x:end_point_x] = obj_mask[start_point_y_obj:start_point_y_obj+(end_point_y-start_point_y), start_point_x_obj:start_point_x_obj+(end_point_x-start_point_x)]
|
65 |
-
|
66 |
-
obj_img = img[y:y+h, x:x+w, :].copy()
|
67 |
-
# obj_img = cv2.resize(obj_img, (int(w*scale_step),int(h*scale_step)) if operation == 'upsample' else (int(w/scale_step), int(h/scale_step)))
|
68 |
-
obj_img = cv2.resize(obj_img, (int(w*scale_step),int(h*scale_step)))
|
69 |
-
img = cv2.GaussianBlur(img, (49, 49), 0)
|
70 |
-
img[start_point_y:end_point_y, start_point_x:end_point_x] = obj_img[start_point_y_obj:start_point_y_obj+(end_point_y-start_point_y), start_point_x_obj:start_point_x_obj+(end_point_x-start_point_x)]
|
71 |
-
|
72 |
-
return img, mask
|
73 |
-
|
74 |
-
def resize_around_the_center_padding(img, mask, bbox, scale_step=1.2):
|
75 |
-
x,y,w,h = bbox
|
76 |
-
H,W,C = mask.shape
|
77 |
-
mask_new = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8')
|
78 |
-
mask_new_full = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8')
|
79 |
-
# img_new = np.zeros((int(H/scale_step), int(W/scale_step), 3), dtype='uint8')
|
80 |
-
img_new = cv2.resize(img, (int(W/scale_step), int(H/scale_step)))
|
81 |
-
|
82 |
-
if scale_step < 1:
|
83 |
-
mask_new[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = mask
|
84 |
-
mask_new_full[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = mask.max()*np.ones(mask.shape, dtype='uint8')
|
85 |
-
|
86 |
-
img_new[int((y+h/2)*(1/scale_step-1)):int((y+h/2)*(1/scale_step-1)+H), int((x+w/2)*(1/scale_step-1)):int((x+w/2)*(1/scale_step-1)+W)] = img
|
87 |
-
|
88 |
-
else:
|
89 |
-
mask_new = mask[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)]
|
90 |
-
mask_new_full = mask[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)]
|
91 |
-
img_new = img[int((y+h/2)*(1-1/scale_step)):int((y+h/2)*(1-1/scale_step))+int(H/scale_step), int((x+w/2)*(1-1/scale_step)):int((x+w/2)*(1-1/scale_step))+int(W/scale_step)]
|
92 |
-
|
93 |
-
img_new = cv2.resize(img_new, (W,H))
|
94 |
-
mask_new = cv2.resize(mask_new, (W,H))
|
95 |
-
mask_new_full = cv2.resize(mask_new_full, (W,H))
|
96 |
-
|
97 |
-
return img_new, mask_new, mask_new_full
|
98 |
-
|
99 |
-
def rescale(img, mask, scale=None, max_steps=50):
|
100 |
-
bbox, rate = get_bbox_and_rate(mask)
|
101 |
-
if bbox is None:
|
102 |
-
return None, None, None
|
103 |
-
num_steps = 0
|
104 |
-
mask_full = mask.copy()
|
105 |
-
while np.floor(rate*100) != scale*100. and abs(rate-scale) > 0.015:
|
106 |
-
# while not (abs(bbox[0]-0)<10 or abs(bbox[1]-0)<10 or abs(bbox[0]+bbox[2]-img.shape[1])<10 or abs(bbox[1]+bbox[3]-img.shape[0])<10):
|
107 |
-
operation = 'upsample' if np.floor(rate*100) < scale*100. else 'downsample'
|
108 |
-
scale_step = np.sqrt(scale/rate)
|
109 |
-
# img, mask = resize_around_the_center(img, mask, bbox, operation, scale_step=scale_step)
|
110 |
-
img, mask, mask_full = resize_around_the_center_padding(img, mask, bbox, scale_step=scale_step)
|
111 |
-
bbox, rate_ = get_bbox_and_rate(mask)
|
112 |
-
if (operation == 'upsample' and rate_ < rate) or (operation == 'downsample' and rate_ > rate):
|
113 |
-
return None, None, None
|
114 |
-
num_steps += 1
|
115 |
-
rate = rate_
|
116 |
-
print(rate)
|
117 |
-
if num_steps > max_steps:
|
118 |
-
return None, None, None
|
119 |
-
return img, mask_full, mask
|
120 |
-
|
121 |
-
|
122 |
-
def rescale_maximum(img, mask, scale=None, max_steps=50):
|
123 |
-
bbox, rate = get_bbox_and_rate(mask)
|
124 |
-
if bbox is None:
|
125 |
-
return None, None, None
|
126 |
-
x,y,w,h = bbox
|
127 |
-
H,W,C = img.shape
|
128 |
-
if H/h < W/w:
|
129 |
-
y_start, y_end = y, y+h
|
130 |
-
new_w = w/H*h
|
131 |
-
c_x = x + w//2
|
132 |
-
c_x_new = new_w*c_x/W
|
133 |
-
x_start = c_x - c_x_new
|
134 |
-
x_end = x_start + new_w
|
135 |
-
else:
|
136 |
-
x_start, x_end = x, x+w
|
137 |
-
new_h = h/W*w
|
138 |
-
c_y = y+h//2
|
139 |
-
c_y_new = new_h*c_y/H
|
140 |
-
y_start = c_y - c_y_new
|
141 |
-
y_end = y_start + new_h
|
142 |
-
img_new = img[min(y, int(y_start)):max(int(y_end), y+h), min(x, int(x_start)):max(int(x_end),x+w), :]
|
143 |
-
mask_new = mask[min(y, int(y_start)):max(int(y_end),y+h),min(x, int(x_start)):max(int(x_end),x+w),:]
|
144 |
-
|
145 |
-
img_new = cv2.resize(img_new, (W,H))
|
146 |
-
mask_new = cv2.resize(mask_new, (W,H))
|
147 |
-
|
148 |
-
return img_new, mask_new, mask_new
|
149 |
-
|
150 |
-
|
151 |
-
if __name__ == '__main__':
|
152 |
-
args = parser.parse_args()
|
153 |
-
scale = args.scale
|
154 |
-
img_path_save = 'results/img_rescaled.png'
|
155 |
-
mask_path_save = 'results/mask_rescaled.png'
|
156 |
-
if scale == None:
|
157 |
-
shutil.copy(args.img_path, img_path_save)
|
158 |
-
shutil.copy(args.mask_path, mask_path_save)
|
159 |
-
else:
|
160 |
-
try:
|
161 |
-
finals = []
|
162 |
-
img = cv2.imread(args.img_path)
|
163 |
-
mask = cv2.imread(args.mask_path)
|
164 |
-
|
165 |
-
img_rescale, mask_rescale, mask_obj = rescale_maximum(img.copy(), mask.copy(), scale=scale)
|
166 |
-
bbox, max_rate = get_bbox_and_rate(mask_obj)
|
167 |
-
if scale < max_rate:
|
168 |
-
img_rescale, mask_rescale, mask_obj = rescale(img.copy(), mask.copy(), scale=scale)
|
169 |
-
if img_rescale is None:
|
170 |
-
print('Invalid size')
|
171 |
-
shutil.copy(args.img_path, img_path_save)
|
172 |
-
shutil.copy(args.mask_path, mask_path_save)
|
173 |
-
sys.exit()
|
174 |
-
final = [img, img_rescale, mask, mask_rescale, mask_obj]
|
175 |
-
# cv2.imwrite('tmp.png', cv2.hconcat(final))
|
176 |
-
|
177 |
-
cv2.imwrite(img_path_save, img_rescale)
|
178 |
-
cv2.imwrite(mask_path_save, mask_obj)
|
179 |
-
# cv2.imwrite(mask_path_save_full, mask_rescale)
|
180 |
-
except:
|
181 |
-
print('Invalid size, using the original one')
|
182 |
-
shutil.copy(args.img_path, img_path_save)
|
183 |
-
shutil.copy(args.mask_path, mask_path_save)
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Apex-X/GODROOP/roop/ui.py
DELETED
@@ -1,232 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import webbrowser
|
3 |
-
import customtkinter as ctk
|
4 |
-
from typing import Callable, Tuple
|
5 |
-
import cv2
|
6 |
-
from PIL import Image, ImageOps
|
7 |
-
|
8 |
-
import roop.globals
|
9 |
-
import roop.metadata
|
10 |
-
from roop.face_analyser import get_one_face
|
11 |
-
from roop.capturer import get_video_frame, get_video_frame_total
|
12 |
-
from roop.predictor import predict_frame
|
13 |
-
from roop.processors.frame.core import get_frame_processors_modules
|
14 |
-
from roop.utilities import is_image, is_video, resolve_relative_path
|
15 |
-
|
16 |
-
ROOT = None
|
17 |
-
ROOT_HEIGHT = 700
|
18 |
-
ROOT_WIDTH = 600
|
19 |
-
|
20 |
-
PREVIEW = None
|
21 |
-
PREVIEW_MAX_HEIGHT = 700
|
22 |
-
PREVIEW_MAX_WIDTH = 1200
|
23 |
-
|
24 |
-
RECENT_DIRECTORY_SOURCE = None
|
25 |
-
RECENT_DIRECTORY_TARGET = None
|
26 |
-
RECENT_DIRECTORY_OUTPUT = None
|
27 |
-
|
28 |
-
preview_label = None
|
29 |
-
preview_slider = None
|
30 |
-
source_label = None
|
31 |
-
target_label = None
|
32 |
-
status_label = None
|
33 |
-
|
34 |
-
|
35 |
-
def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
|
36 |
-
global ROOT, PREVIEW
|
37 |
-
|
38 |
-
ROOT = create_root(start, destroy)
|
39 |
-
PREVIEW = create_preview(ROOT)
|
40 |
-
|
41 |
-
return ROOT
|
42 |
-
|
43 |
-
|
44 |
-
def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
|
45 |
-
global source_label, target_label, status_label
|
46 |
-
|
47 |
-
ctk.deactivate_automatic_dpi_awareness()
|
48 |
-
ctk.set_appearance_mode('system')
|
49 |
-
ctk.set_default_color_theme(resolve_relative_path('ui.json'))
|
50 |
-
|
51 |
-
root = ctk.CTk()
|
52 |
-
root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
|
53 |
-
root.title(f'{roop.metadata.name} {roop.metadata.version}')
|
54 |
-
root.configure()
|
55 |
-
root.protocol('WM_DELETE_WINDOW', lambda: destroy())
|
56 |
-
|
57 |
-
source_label = ctk.CTkLabel(root, text=None)
|
58 |
-
source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
|
59 |
-
|
60 |
-
target_label = ctk.CTkLabel(root, text=None)
|
61 |
-
target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
|
62 |
-
|
63 |
-
source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
|
64 |
-
source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
|
65 |
-
|
66 |
-
target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
|
67 |
-
target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
|
68 |
-
|
69 |
-
keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
|
70 |
-
keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
|
71 |
-
keep_fps_checkbox.place(relx=0.1, rely=0.6)
|
72 |
-
|
73 |
-
keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
|
74 |
-
keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
|
75 |
-
keep_frames_switch.place(relx=0.1, rely=0.65)
|
76 |
-
|
77 |
-
keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
|
78 |
-
keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
|
79 |
-
keep_audio_switch.place(relx=0.6, rely=0.6)
|
80 |
-
|
81 |
-
many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
|
82 |
-
many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
|
83 |
-
many_faces_switch.place(relx=0.6, rely=0.65)
|
84 |
-
|
85 |
-
start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
|
86 |
-
start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
|
87 |
-
|
88 |
-
stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
|
89 |
-
stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
|
90 |
-
|
91 |
-
preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
|
92 |
-
preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
|
93 |
-
|
94 |
-
status_label = ctk.CTkLabel(root, text=None, justify='center')
|
95 |
-
status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
|
96 |
-
|
97 |
-
donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2')
|
98 |
-
donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
|
99 |
-
donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
|
100 |
-
donate_label.bind('<Button>', lambda event: webbrowser.open('https://github.com/sponsors/s0md3v'))
|
101 |
-
|
102 |
-
return root
|
103 |
-
|
104 |
-
|
105 |
-
def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
|
106 |
-
global preview_label, preview_slider
|
107 |
-
|
108 |
-
preview = ctk.CTkToplevel(parent)
|
109 |
-
preview.withdraw()
|
110 |
-
preview.title('Preview')
|
111 |
-
preview.configure()
|
112 |
-
preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview())
|
113 |
-
preview.resizable(width=False, height=False)
|
114 |
-
|
115 |
-
preview_label = ctk.CTkLabel(preview, text=None)
|
116 |
-
preview_label.pack(fill='both', expand=True)
|
117 |
-
|
118 |
-
preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value))
|
119 |
-
|
120 |
-
return preview
|
121 |
-
|
122 |
-
|
123 |
-
def update_status(text: str) -> None:
|
124 |
-
status_label.configure(text=text)
|
125 |
-
ROOT.update()
|
126 |
-
|
127 |
-
|
128 |
-
def select_source_path() -> None:
|
129 |
-
global RECENT_DIRECTORY_SOURCE
|
130 |
-
|
131 |
-
PREVIEW.withdraw()
|
132 |
-
source_path = ctk.filedialog.askopenfilename(title='select an source image', initialdir=RECENT_DIRECTORY_SOURCE)
|
133 |
-
if is_image(source_path):
|
134 |
-
roop.globals.source_path = source_path
|
135 |
-
RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
|
136 |
-
image = render_image_preview(roop.globals.source_path, (200, 200))
|
137 |
-
source_label.configure(image=image)
|
138 |
-
else:
|
139 |
-
roop.globals.source_path = None
|
140 |
-
source_label.configure(image=None)
|
141 |
-
|
142 |
-
|
143 |
-
def select_target_path() -> None:
|
144 |
-
global RECENT_DIRECTORY_TARGET
|
145 |
-
|
146 |
-
PREVIEW.withdraw()
|
147 |
-
target_path = ctk.filedialog.askopenfilename(title='select an target image or video', initialdir=RECENT_DIRECTORY_TARGET)
|
148 |
-
if is_image(target_path):
|
149 |
-
roop.globals.target_path = target_path
|
150 |
-
RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
|
151 |
-
image = render_image_preview(roop.globals.target_path, (200, 200))
|
152 |
-
target_label.configure(image=image)
|
153 |
-
elif is_video(target_path):
|
154 |
-
roop.globals.target_path = target_path
|
155 |
-
RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
|
156 |
-
video_frame = render_video_preview(target_path, (200, 200))
|
157 |
-
target_label.configure(image=video_frame)
|
158 |
-
else:
|
159 |
-
roop.globals.target_path = None
|
160 |
-
target_label.configure(image=None)
|
161 |
-
|
162 |
-
|
163 |
-
def select_output_path(start: Callable[[], None]) -> None:
|
164 |
-
global RECENT_DIRECTORY_OUTPUT
|
165 |
-
|
166 |
-
if is_image(roop.globals.target_path):
|
167 |
-
output_path = ctk.filedialog.asksaveasfilename(title='save image output file', defaultextension='.png', initialfile='output.png', initialdir=RECENT_DIRECTORY_OUTPUT)
|
168 |
-
elif is_video(roop.globals.target_path):
|
169 |
-
output_path = ctk.filedialog.asksaveasfilename(title='save video output file', defaultextension='.mp4', initialfile='output.mp4', initialdir=RECENT_DIRECTORY_OUTPUT)
|
170 |
-
else:
|
171 |
-
output_path = None
|
172 |
-
if output_path:
|
173 |
-
roop.globals.output_path = output_path
|
174 |
-
RECENT_DIRECTORY_OUTPUT = os.path.dirname(roop.globals.output_path)
|
175 |
-
start()
|
176 |
-
|
177 |
-
|
178 |
-
def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
|
179 |
-
image = Image.open(image_path)
|
180 |
-
if size:
|
181 |
-
image = ImageOps.fit(image, size, Image.LANCZOS)
|
182 |
-
return ctk.CTkImage(image, size=image.size)
|
183 |
-
|
184 |
-
|
185 |
-
def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: int = 0) -> ctk.CTkImage:
|
186 |
-
capture = cv2.VideoCapture(video_path)
|
187 |
-
if frame_number:
|
188 |
-
capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
|
189 |
-
has_frame, frame = capture.read()
|
190 |
-
if has_frame:
|
191 |
-
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
|
192 |
-
if size:
|
193 |
-
image = ImageOps.fit(image, size, Image.LANCZOS)
|
194 |
-
return ctk.CTkImage(image, size=image.size)
|
195 |
-
capture.release()
|
196 |
-
cv2.destroyAllWindows()
|
197 |
-
|
198 |
-
|
199 |
-
def toggle_preview() -> None:
|
200 |
-
if PREVIEW.state() == 'normal':
|
201 |
-
PREVIEW.withdraw()
|
202 |
-
elif roop.globals.source_path and roop.globals.target_path:
|
203 |
-
init_preview()
|
204 |
-
update_preview()
|
205 |
-
PREVIEW.deiconify()
|
206 |
-
|
207 |
-
|
208 |
-
def init_preview() -> None:
|
209 |
-
if is_image(roop.globals.target_path):
|
210 |
-
preview_slider.pack_forget()
|
211 |
-
if is_video(roop.globals.target_path):
|
212 |
-
video_frame_total = get_video_frame_total(roop.globals.target_path)
|
213 |
-
preview_slider.configure(to=video_frame_total)
|
214 |
-
preview_slider.pack(fill='x')
|
215 |
-
preview_slider.set(0)
|
216 |
-
|
217 |
-
|
218 |
-
def update_preview(frame_number: int = 0) -> None:
|
219 |
-
if roop.globals.source_path and roop.globals.target_path:
|
220 |
-
temp_frame = get_video_frame(roop.globals.target_path, frame_number)
|
221 |
-
if predict_frame(temp_frame):
|
222 |
-
quit()
|
223 |
-
for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
|
224 |
-
temp_frame = frame_processor.process_frame(
|
225 |
-
get_one_face(cv2.imread(roop.globals.source_path)),
|
226 |
-
temp_frame
|
227 |
-
)
|
228 |
-
image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
|
229 |
-
image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS)
|
230 |
-
image = ctk.CTkImage(image, size=image.size)
|
231 |
-
preview_label.configure(image=image)
|
232 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/bert_vits2/text/english_bert_mock.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
|
3 |
-
|
4 |
-
def get_bert_feature(norm_text, word2ph):
|
5 |
-
return torch.zeros(1024, sum(word2ph))
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/tests/utils.py
DELETED
@@ -1,49 +0,0 @@
|
|
1 |
-
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
|
2 |
-
from contextlib import contextmanager
|
3 |
-
from io import StringIO
|
4 |
-
import sys
|
5 |
-
import os
|
6 |
-
|
7 |
-
|
8 |
-
class StreamTTY(StringIO):
|
9 |
-
def isatty(self):
|
10 |
-
return True
|
11 |
-
|
12 |
-
class StreamNonTTY(StringIO):
|
13 |
-
def isatty(self):
|
14 |
-
return False
|
15 |
-
|
16 |
-
@contextmanager
|
17 |
-
def osname(name):
|
18 |
-
orig = os.name
|
19 |
-
os.name = name
|
20 |
-
yield
|
21 |
-
os.name = orig
|
22 |
-
|
23 |
-
@contextmanager
|
24 |
-
def replace_by(stream):
|
25 |
-
orig_stdout = sys.stdout
|
26 |
-
orig_stderr = sys.stderr
|
27 |
-
sys.stdout = stream
|
28 |
-
sys.stderr = stream
|
29 |
-
yield
|
30 |
-
sys.stdout = orig_stdout
|
31 |
-
sys.stderr = orig_stderr
|
32 |
-
|
33 |
-
@contextmanager
|
34 |
-
def replace_original_by(stream):
|
35 |
-
orig_stdout = sys.__stdout__
|
36 |
-
orig_stderr = sys.__stderr__
|
37 |
-
sys.__stdout__ = stream
|
38 |
-
sys.__stderr__ = stream
|
39 |
-
yield
|
40 |
-
sys.__stdout__ = orig_stdout
|
41 |
-
sys.__stderr__ = orig_stderr
|
42 |
-
|
43 |
-
@contextmanager
|
44 |
-
def pycharm():
|
45 |
-
os.environ["PYCHARM_HOSTED"] = "1"
|
46 |
-
non_tty = StreamNonTTY()
|
47 |
-
with replace_by(non_tty), replace_original_by(non_tty):
|
48 |
-
yield
|
49 |
-
del os.environ["PYCHARM_HOSTED"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Bart92/RVC_HF/Applio-RVC-Fork/utils/clonerepo_experimental.py
DELETED
@@ -1,253 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import subprocess
|
3 |
-
import shutil
|
4 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
5 |
-
from tqdm.notebook import tqdm
|
6 |
-
from pathlib import Path
|
7 |
-
import requests
|
8 |
-
|
9 |
-
def run_script():
|
10 |
-
def run_cmd(cmd):
|
11 |
-
process = subprocess.run(cmd, shell=True, check=True, text=True)
|
12 |
-
return process.stdout
|
13 |
-
|
14 |
-
# Change the current directory to /content/
|
15 |
-
os.chdir('/content/')
|
16 |
-
print("Changing dir to /content/")
|
17 |
-
|
18 |
-
# Your function to edit the file
|
19 |
-
def edit_file(file_path):
|
20 |
-
temp_file_path = "/tmp/temp_file.py"
|
21 |
-
changes_made = False
|
22 |
-
with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
|
23 |
-
previous_line = ""
|
24 |
-
second_previous_line = ""
|
25 |
-
for line in file:
|
26 |
-
new_line = line.replace("value=160", "value=128")
|
27 |
-
if new_line != line:
|
28 |
-
print("Replaced 'value=160' with 'value=128'")
|
29 |
-
changes_made = True
|
30 |
-
line = new_line
|
31 |
-
|
32 |
-
new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
|
33 |
-
if new_line != line:
|
34 |
-
print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
|
35 |
-
changes_made = True
|
36 |
-
line = new_line
|
37 |
-
|
38 |
-
new_line = line.replace("value=0.88", "value=0.75")
|
39 |
-
if new_line != line:
|
40 |
-
print("Replaced 'value=0.88' with 'value=0.75'")
|
41 |
-
changes_made = True
|
42 |
-
line = new_line
|
43 |
-
|
44 |
-
if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
|
45 |
-
new_line = line.replace("value=1,", "value=0.25,")
|
46 |
-
if new_line != line:
|
47 |
-
print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
|
48 |
-
changes_made = True
|
49 |
-
line = new_line
|
50 |
-
|
51 |
-
if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
|
52 |
-
new_line = line.replace("value=20,", "value=500,")
|
53 |
-
if new_line != line:
|
54 |
-
print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
|
55 |
-
changes_made = True
|
56 |
-
line = new_line
|
57 |
-
|
58 |
-
if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
|
59 |
-
if 'value="pm",' in line:
|
60 |
-
new_line = line.replace('value="pm",', 'value="mangio-crepe",')
|
61 |
-
if new_line != line:
|
62 |
-
print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
|
63 |
-
changes_made = True
|
64 |
-
line = new_line
|
65 |
-
|
66 |
-
new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
|
67 |
-
if new_line != line:
|
68 |
-
print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
|
69 |
-
changes_made = True
|
70 |
-
line = new_line
|
71 |
-
|
72 |
-
if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
|
73 |
-
if 'value=i18n("否"),' in line:
|
74 |
-
new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
|
75 |
-
if new_line != line:
|
76 |
-
print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
|
77 |
-
changes_made = True
|
78 |
-
line = new_line
|
79 |
-
|
80 |
-
if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
|
81 |
-
if 'value=i18n("否"),' in line:
|
82 |
-
new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
|
83 |
-
if new_line != line:
|
84 |
-
print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
|
85 |
-
changes_made = True
|
86 |
-
line = new_line
|
87 |
-
|
88 |
-
temp_file.write(line)
|
89 |
-
second_previous_line = previous_line
|
90 |
-
previous_line = line
|
91 |
-
|
92 |
-
# After finished, we replace the original file with the temp one
|
93 |
-
import shutil
|
94 |
-
shutil.move(temp_file_path, file_path)
|
95 |
-
|
96 |
-
if changes_made:
|
97 |
-
print("Changes made and file saved successfully.")
|
98 |
-
else:
|
99 |
-
print("No changes were needed.")
|
100 |
-
|
101 |
-
# Define the repo path
|
102 |
-
repo_path = '/content/Applio-RVC-Fork'
|
103 |
-
|
104 |
-
def copy_all_files_in_directory(src_dir, dest_dir):
|
105 |
-
# Iterate over all files in source directory
|
106 |
-
for item in Path(src_dir).glob('*'):
|
107 |
-
if item.is_file():
|
108 |
-
# Copy each file to destination directory
|
109 |
-
shutil.copy(item, dest_dir)
|
110 |
-
else:
|
111 |
-
# If it's a directory, make a new directory in the destination and copy the files recursively
|
112 |
-
new_dest = Path(dest_dir) / item.name
|
113 |
-
new_dest.mkdir(exist_ok=True)
|
114 |
-
copy_all_files_in_directory(str(item), str(new_dest))
|
115 |
-
|
116 |
-
def clone_and_copy_repo(repo_path):
|
117 |
-
# New repository link
|
118 |
-
new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
|
119 |
-
# Temporary path to clone the repository
|
120 |
-
temp_repo_path = "/content/temp_Applio-RVC-Fork"
|
121 |
-
# New folder name
|
122 |
-
new_folder_name = "Applio-RVC-Fork"
|
123 |
-
|
124 |
-
# Clone the latest code from the new repository to a temporary location
|
125 |
-
run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
|
126 |
-
os.chdir(temp_repo_path)
|
127 |
-
|
128 |
-
run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
|
129 |
-
run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
|
130 |
-
run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
|
131 |
-
run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
|
132 |
-
run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
|
133 |
-
run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
|
134 |
-
run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
|
135 |
-
run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
|
136 |
-
run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
|
137 |
-
run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
|
138 |
-
run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
|
139 |
-
run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
|
140 |
-
run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
|
141 |
-
run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
|
142 |
-
run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
|
143 |
-
run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
|
144 |
-
run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
|
145 |
-
run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
|
146 |
-
run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
|
147 |
-
run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
|
148 |
-
run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
|
149 |
-
run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
|
150 |
-
run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
|
151 |
-
run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
|
152 |
-
run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
|
153 |
-
run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
|
154 |
-
|
155 |
-
# Edit the file here, before copying
|
156 |
-
#edit_file(f"{temp_repo_path}/infer-web.py")
|
157 |
-
|
158 |
-
# Copy all files from the cloned repository to the existing path
|
159 |
-
copy_all_files_in_directory(temp_repo_path, repo_path)
|
160 |
-
print(f"Copying all {new_folder_name} files from GitHub.")
|
161 |
-
|
162 |
-
# Change working directory back to /content/
|
163 |
-
os.chdir('/content/')
|
164 |
-
print("Changed path back to /content/")
|
165 |
-
|
166 |
-
# Remove the temporary cloned repository
|
167 |
-
shutil.rmtree(temp_repo_path)
|
168 |
-
|
169 |
-
# Call the function
|
170 |
-
clone_and_copy_repo(repo_path)
|
171 |
-
|
172 |
-
# Download the credentials file for RVC archive sheet
|
173 |
-
os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
|
174 |
-
run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
|
175 |
-
|
176 |
-
# Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
|
177 |
-
shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
|
178 |
-
shutil.rmtree('/content/torchcrepe', ignore_errors=True)
|
179 |
-
|
180 |
-
# Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
|
181 |
-
run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
|
182 |
-
shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
|
183 |
-
shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
|
184 |
-
|
185 |
-
# Change the current directory to /content/Applio-RVC-Fork
|
186 |
-
os.chdir('/content/Applio-RVC-Fork')
|
187 |
-
os.makedirs('pretrained', exist_ok=True)
|
188 |
-
os.makedirs('uvr5_weights', exist_ok=True)
|
189 |
-
|
190 |
-
def download_file(url, filepath):
|
191 |
-
response = requests.get(url, stream=True)
|
192 |
-
response.raise_for_status()
|
193 |
-
|
194 |
-
with open(filepath, "wb") as file:
|
195 |
-
for chunk in response.iter_content(chunk_size=8192):
|
196 |
-
if chunk:
|
197 |
-
file.write(chunk)
|
198 |
-
|
199 |
-
def download_pretrained_models():
|
200 |
-
pretrained_models = {
|
201 |
-
"pretrained": [
|
202 |
-
"D40k.pth",
|
203 |
-
"G40k.pth",
|
204 |
-
"f0D40k.pth",
|
205 |
-
"f0G40k.pth"
|
206 |
-
],
|
207 |
-
"pretrained_v2": [
|
208 |
-
"D40k.pth",
|
209 |
-
"G40k.pth",
|
210 |
-
"f0D40k.pth",
|
211 |
-
"f0G40k.pth",
|
212 |
-
"f0G48k.pth",
|
213 |
-
"f0D48k.pth"
|
214 |
-
],
|
215 |
-
"uvr5_weights": [
|
216 |
-
"HP2-人声vocals+非人声instrumentals.pth",
|
217 |
-
"HP5-主旋律人声vocals+其他instrumentals.pth",
|
218 |
-
"VR-DeEchoNormal.pth",
|
219 |
-
"VR-DeEchoDeReverb.pth",
|
220 |
-
"VR-DeEchoAggressive.pth",
|
221 |
-
"HP5_only_main_vocal.pth",
|
222 |
-
"HP3_all_vocals.pth",
|
223 |
-
"HP2_all_vocals.pth"
|
224 |
-
]
|
225 |
-
}
|
226 |
-
part2 = "I"
|
227 |
-
base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
|
228 |
-
base_path = "/content/Applio-RVC-Fork/"
|
229 |
-
base_pathm = base_path
|
230 |
-
|
231 |
-
# Calculate total number of files to download
|
232 |
-
total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
|
233 |
-
|
234 |
-
with tqdm(total=total_files, desc="Downloading files") as pbar:
|
235 |
-
for folder, models in pretrained_models.items():
|
236 |
-
folder_path = os.path.join(base_path, folder)
|
237 |
-
os.makedirs(folder_path, exist_ok=True)
|
238 |
-
for model in models:
|
239 |
-
url = base_url + folder + "/" + model
|
240 |
-
filepath = os.path.join(folder_path, model)
|
241 |
-
download_file(url, filepath)
|
242 |
-
pbar.update()
|
243 |
-
|
244 |
-
# Download hubert_base.pt to the base path
|
245 |
-
hubert_url = base_url + "hubert_base.pt"
|
246 |
-
hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
|
247 |
-
download_file(hubert_url, hubert_filepath)
|
248 |
-
pbar.update()
|
249 |
-
def clone_repository(run_download):
|
250 |
-
with ThreadPoolExecutor(max_workers=2) as executor:
|
251 |
-
executor.submit(run_script)
|
252 |
-
if run_download:
|
253 |
-
executor.submit(download_pretrained_models)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Asfalto 8 Mod Apk Dinero Ilimitado Y Fichas ltima Versin 2023.md
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Asfalto 8 Mod APK: El último juego de carreras para Android</h1>
|
3 |
-
<p>Si eres un fan de los juegos de carreras, probablemente hayas oído hablar de Asphalt 8, uno de los juegos más populares y emocionantes del género. Asphalt 8 es un juego que te permite experimentar la adrenalina de conducir algunos de los coches más increíbles del mundo, desde Lamborghini hasta Ferrari, en pistas impresionantes de todo el mundo. Puedes realizar acrobacias increíbles, como barriles y saltos, mientras corres contra otros jugadores o contra la IA.</p>
|
4 |
-
<p>Sin embargo, por divertido que sea Asphalt 8, también tiene algunas limitaciones que pueden afectar tu disfrute. Por ejemplo, necesita gastar dinero real o moler durante horas para desbloquear nuevos coches, actualizarlos o acceder a funciones premium. También necesitas una conexión a Internet estable para jugar online, lo que puede ser un problema si tienes una red lenta o poco fiable. </p>
|
5 |
-
<h2>asfalto 8 mod apk dinero ilimitado y fichas última versión 2023</h2><br /><p><b><b>Download</b> ✅ <a href="https://bltlly.com/2v6L33">https://bltlly.com/2v6L33</a></b></p><br /><br />
|
6 |
-
<p>Es por eso que le recomendamos descargar la versión apk mod de asfalto 8, que le da dinero ilimitado y fichas, todos los coches desbloqueados, y muchos otros beneficios. Con este mod apk, se puede disfrutar del juego sin restricciones o molestias. Puedes descargarlo gratis desde nuestro sitio web e instalarlo en tu dispositivo Android en unos sencillos pasos. </p>
|
7 |
-
<h2>Características del asfalto 8 Mod APK</h2>
|
8 |
-
<h3>Dinero ilimitado y fichas</h3>
|
9 |
-
<p>Una de las mejores características de asfalto 8 mod apk es que le da dinero ilimitado y fichas, que son las principales monedas en el juego. Puede utilizarlos para comprar coches nuevos, actualizarlos, personalizarlos o acceder a funciones premium. No tienes que preocuparte por quedarte sin dinero o fichas nunca más. Puedes disfrutar del juego sin limitaciones o interrupciones. </p>
|
10 |
-
<h3>Todos los coches desbloqueados</h3>
|
11 |
-
|
12 |
-
<h3>Gráficos y sonido de alta calidad</h3>
|
13 |
-
<p>Asfalto 8 mod apk también conserva los gráficos de alta calidad y el sonido del juego original. Puedes disfrutar de las impresionantes imágenes y la física realista del juego en tu dispositivo Android. También puedes experimentar los efectos de sonido inmersivos y la música que te hacen sentir como si estuvieras en una carrera real. Puede ajustar los gráficos y los ajustes de sonido según sus preferencias y capacidades del dispositivo. </p>
|
14 |
-
<h3>Modo multijugador y eventos</h3>
|
15 |
-
<p>Asfalto 8 mod apk también le permite jugar en línea con otros jugadores de todo el mundo. Puedes unirte o crear salas con hasta ocho jugadores y competir en diferentes modos, como clásico, eliminación, infectado o derribo. También puedes participar en varios eventos y desafíos que ofrecen recompensas y premios. Puedes mostrar tus habilidades y posicionarte en las tablas de clasificación. </p>
|
16 |
-
<h2>Cómo descargar e instalar asfalto 8 Mod APK</h2>
|
17 |
-
<h3>Requisitos y permisos</h3>
|
18 |
-
<p>Para descargar e instalar Asphalt 8 mod apk en su dispositivo Android, es necesario cumplir con algunos requisitos y permisos <p>Aquí está la continuación del artículo:</p>
|
19 |
-
<p>- Necesitas tener un dispositivo Android con al menos 4.4 versión y 2 GB de RAM.</p>
|
20 |
-
<p></p>
|
21 |
-
<p>- Necesitas tener al menos 2 GB de espacio de almacenamiento gratuito en tu dispositivo o tarjeta SD. </p>
|
22 |
-
<p>- Es necesario habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. </p>
|
23 |
-
<p>- Es necesario conceder algunos permisos a la aplicación, como el acceso al almacenamiento, la red y la información del dispositivo. </p>
|
24 |
-
<h3>Pasos para descargar e instalar</h3>
|
25 |
-
<p>Para descargar e instalar Asphalt 8 mod apk en su dispositivo Android, es necesario seguir estos pasos:</p>
|
26 |
-
<ol>
|
27 |
-
<li>Haga clic en el botón de descarga a continuación para descargar el archivo apk mod de nuestro sitio web. Es seguro y libre de virus. </li>
|
28 |
-
<li>Una vez que se complete la descarga, busque el archivo en el administrador de archivos de su dispositivo y toque en él para iniciar el proceso de instalación. </li>
|
29 |
-
|
30 |
-
<li>Inicie la aplicación y disfrutar del juego con dinero ilimitado y fichas, todos los coches desbloqueados, y más. </li>
|
31 |
-
</ol>
|
32 |
-
<h2>Cómo jugar asfalto 8 Mod APK</h2>
|
33 |
-
<h3>Elija su coche y pista</h3>
|
34 |
-
<p>Al iniciar el juego, puede elegir entre una variedad de modos, como carrera, juego rápido, multijugador o eventos. También puede seleccionar su coche de más de 300 opciones, que van desde clásico a futurista. Puede personalizar su automóvil con diferentes colores, calcomanías, ruedas y más. También puede mejorar el rendimiento de su automóvil, como la velocidad, la aceleración, el manejo y el nitro. También puedes elegir tu pista entre más de 50 lugares, como Venecia, Tokio, Nevada o Islandia. Cada pista tiene sus propios retos y características, como rampas, túneles, atajos u obstáculos. </p>
|
35 |
-
<h3>Personaliza tus controles y ajustes</h3>
|
36 |
-
<p>Tambi��n puedes personalizar tus controles y ajustes de acuerdo a tus preferencias. Puede elegir entre cuatro opciones de control diferentes: inclinación, toque, toque para dirigir o botones en pantalla. También puede ajustar la sensibilidad y la calibración de los controles. También puedes cambiar la configuración del juego, como la calidad gráfica, el volumen de sonido, el idioma o el ángulo de la cámara. También puede activar o desactivar algunas funciones, como aceleración automática, frenado automático o asistencia con la dirección. </p>
|
37 |
-
<h3>Realizar acrobacias y trucos</h3>
|
38 |
-
<p>Uno de los aspectos más divertidos de asfalto 8 mod apk es que se puede realizar acrobacias y trucos increíbles durante las carreras. Puede utilizar las rampas, bucles, barriles o puentes para lanzar su coche en el aire y realizar volteretas, rollos, giros o giros. También puede utilizar el impulso nitro para acelerar su coche y aplastar a través de obstáculos o oponentes. También puede desplazarse alrededor de las esquinas o realizar casi errores para ganar puntos extra y bonos. Realizar acrobacias y trucos llenará tu barra de nitro y aumentará tu multiplicador de puntuación. </p>
|
39 |
-
<h3>Compite con otros jugadores</h3>
|
40 |
-
|
41 |
-
<h2>Pros y contras del asfalto 8 Mod APK</h2>
|
42 |
-
<h3>Pros</h3>
|
43 |
-
<ul>
|
44 |
-
<li>Puedes disfrutar de dinero ilimitado y fichas que te permiten comprar coches nuevos, actualizarlos, personalizarlos o acceder a funciones premium. </li>
|
45 |
-
<li> Puede desbloquear todos los coches en el juego de forma gratuita sin tener que gastar dinero real o moler durante horas. </li>
|
46 |
-
<li> Puedes disfrutar de gráficos y sonidos de alta calidad que te hacen sentir como si estuvieras en una carrera real. </li>
|
47 |
-
<li>Puedes jugar online con otros jugadores de todo el mundo y competir en diferentes modos y eventos. </li>
|
48 |
-
</ul>
|
49 |
-
<h3>Contras</h3>
|
50 |
-
<ul>
|
51 |
-
<li> Puede encontrar algunos errores o fallos que afectan el juego o el rendimiento del juego. </li>
|
52 |
-
<li>Es posible que tenga problemas de compatibilidad con algunos dispositivos o versiones de Android. </li>
|
53 |
-
<li> Usted puede obtener prohibido del juego si utiliza el apk mod de una manera injusta o violar los términos del servicio. </li>
|
54 |
-
<li>Puedes perder tu progreso o datos si desinstalas el juego o lo actualizas sin hacer una copia de seguridad. </li>
|
55 |
-
</ul>
|
56 |
-
<h2>Conclusión</h2>
|
57 |
-
<p>Asfalto 8 mod apk es una gran manera de disfrutar de uno de los mejores juegos de carreras en Android sin limitaciones ni problemas. Puede descargarlo de forma gratuita desde nuestro sitio web e instalarlo en su dispositivo en <p>unos sencillos pasos. Puede disfrutar de dinero y fichas ilimitadas, todos los coches desbloqueados, gráficos y sonido de alta calidad, modo multijugador y eventos, y más. También puede realizar acrobacias y trucos increíbles, personalizar sus controles y configuraciones, y competir con otros jugadores en línea. Sin embargo, también debe ser consciente de algunos de los inconvenientes de usar el apk mod, tales como errores, problemas de compatibilidad, riesgo de prohibición, o pérdida de datos. También debe utilizar el apk mod de forma responsable y no abusar de él o engañar en el juego. Asfalto 8 mod apk es una gran manera de divertirse y experimentar la emoción de las carreras en su dispositivo Android. </p>
|
58 |
-
<h2>Preguntas frecuentes</h2>
|
59 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre asfalto 8 mod apk:</p>
|
60 |
-
<ol>
|
61 |
-
<li>Q: ¿Es el asfalto 8 mod apk seguro para descargar y usar? </li>
|
62 |
-
|
63 |
-
<li>Q: ¿Cómo puedo actualizar Asphalt 8 mod apk? </li>
|
64 |
-
<li>A: Para actualizar Asphalt 8 mod apk, es necesario descargar la última versión del archivo mod apk de nuestro sitio web e instalarlo sobre el existente. Sin embargo, siempre debe hacer una copia de seguridad de sus datos antes de actualizar para evitar perder su progreso o datos. </li>
|
65 |
-
<li>Q: ¿Puedo jugar Asphalt 8 mod apk offline? </li>
|
66 |
-
<li>A: Sí, se puede jugar asfalto 8 mod apk offline. Se puede disfrutar del modo carrera, modo de juego rápido, o eventos sin conexión a Internet. Sin embargo, necesita una conexión a Internet para jugar en línea con otros jugadores o acceder a algunas de las funciones en línea. </li>
|
67 |
-
<li>Q: ¿Puedo usar Asphalt 8 mod apk en PC o dispositivos iOS? </li>
|
68 |
-
<li>A: No, no se puede utilizar Asphalt 8 mod apk en PC o dispositivos iOS. Solo es compatible con dispositivos Android. Sin embargo, puede utilizar un emulador de Android en su PC para ejecutar el apk mod en su ordenador. </li>
|
69 |
-
<li>Q: ¿Cuáles son algunos de los mejores coches en asfalto 8 mod apk? </li>
|
70 |
-
<li>A: Algunos de los mejores coches en asfalto 8 mod apk son:</li>
|
71 |
-
<ul>
|
72 |
-
<li>Lamborghini Centenario LP 770-4</li>
|
73 |
-
<li>Bugatti Chiron</li>
|
74 |
-
<li>Ferrari FXX K</li>
|
75 |
-
<li>Koenigsegg uno:1</li>
|
76 |
-
<li>Aston Martin Vulcan</li>
|
77 |
-
</ul>
|
78 |
-
</ol></p> 64aa2da5cf<br />
|
79 |
-
<br />
|
80 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Bgmi 2.0 90 Fps Archivo De Configuracin.md
DELETED
@@ -1,105 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>BGMI 2.0 90 Archivo de configuración de FPS Descargar: Cómo aumentar su rendimiento de juego</h1>
|
3 |
-
<p>Si usted es un fan de Battlegrounds Mobile India (BGMI), es posible que se pregunte cómo obtener la mejor experiencia de juego posible. Una de las formas de hacerlo es habilitar el modo de 90 fotogramas por segundo (FPS), que puede hacer que su juego sea más suave, más rápido y más realista. Sin embargo, no todos los dispositivos admiten esta función, e incluso si lo hacen, es posible que no pueda acceder a ella en la configuración del juego. Es por eso que algunos reproductores utilizan un archivo de configuración personalizado que puede desbloquear la opción 90 FPS para BGMI 2.0. En este artículo, le mostraremos cómo descargar e instalar el archivo de configuración de 90 FPS para BGMI 2.0, así como los beneficios y desventajas de usarlo. También sugeriremos algunas alternativas al archivo de configuración que pueden ayudarle a mejorar su rendimiento de juego. </p>
|
4 |
-
<h2>bgmi 2.0 90 fps archivo de configuración</h2><br /><p><b><b>Download File</b> ––– <a href="https://bltlly.com/2v6K9B">https://bltlly.com/2v6K9B</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es BGMI y por qué necesitas 90 FPS? </h2>
|
6 |
-
<h3>BGMI es la versión india de PUBG Mobile</h3>
|
7 |
-
<p>BGMI significa Battlegrounds Mobile India, que es un juego de batalla real móvil desarrollado por Krafton para el mercado indio. Se basa en PUBG Mobile, que fue prohibido en la India en 2020 debido a problemas de privacidad y seguridad. BGMI fue lanzado en julio de 2021 como una forma de traer de vuelta el juego popular a los jugadores indios, con algunos cambios y características adaptadas a la cultura y preferencias locales. BGMI tiene la misma mecánica de juego, mapas, modos, armas y vehículos que PUBG Mobile, pero con algunas diferencias en gráficos, sonidos, personajes, eventos y recompensas. </p>
|
8 |
-
<h3>90 FPS puede mejorar su jugabilidad y gráficos</h3>
|
9 |
-
|
10 |
-
<p>Sin embargo, no todos los dispositivos pueden soportar modos FPS altos, ya que requieren más potencia de procesamiento y duración de la batería. Es por eso que la mayoría de los dispositivos tienen un límite predeterminado de FPS de 30 o 60, que es suficiente para la mayoría de los juegos y aplicaciones casuales. Para habilitar modos FPS más altos como 90 o incluso 120, necesita un dispositivo que tenga un procesador de alta gama, GPU, RAM, pantalla y batería. Incluso si tiene un dispositivo de este tipo, es posible que no pueda acceder a la opción FPS más alta en la configuración de BGMI, ya que podría estar restringido por los desarrolladores de juegos o el fabricante del dispositivo. </p>
|
11 |
-
<h2>Cómo descargar e instalar el archivo de configuración de 90 FPS para BGMI 2.0</h2>
|
12 |
-
<h3>Descargar el archivo de configuración de una fuente de confianza</h3>
|
13 |
-
<p>Un archivo de configuración es un archivo que contiene varias configuraciones y parámetros que afectan la forma en que un juego se ejecuta en su dispositivo. Al modificar o reemplazar el archivo de configuración original de BGMI con uno personalizado que tenga diferentes valores para el modo 90 FPS, puede desbloquear esta función y disfrutar de un juego más fluido. Sin embargo, debe tener cuidado al descargar e instalar el archivo de configuración, ya que podría contener malware, virus u otros elementos dañinos que pueden dañar su dispositivo o comprometer su cuenta. Por lo tanto, solo debe descargar el archivo de configuración de una fuente confiable, como un sitio web de buena reputación, un foro o un canal de YouTube. También debe escanear el archivo con una aplicación antivirus antes de abrirlo. </p>
|
14 |
-
<p></p>
|
15 |
-
<h3>Copia de seguridad de su archivo de configuración original y reemplazarlo con el nuevo</h3>
|
16 |
-
|
17 |
-
<p>Entonces, necesita reemplazar el archivo de configuración original con el nuevo que descargó. Para ello, debe eliminar el archivo UserCustom.ini original y pegar el nuevo en la misma carpeta. Debe asegurarse de que el nuevo archivo tenga el mismo nombre y extensión que el original. </p>
|
18 |
-
<h3>Habilitar la opción 90 FPS en la configuración del juego</h3>
|
19 |
-
<p>Después de haber instalado el archivo de configuración de 90 FPS, debe habilitar la opción de 90 FPS en la configuración del juego. Para ello, debe iniciar BGMI e ir a Configuración > Gráficos. Allí, debería ver una nueva opción para la velocidad de fotogramas que dice 90 FPS. Debe seleccionar esta opción y aplicar los cambios. También debes revisar tus otros ajustes gráficos y ajustarlos según el rendimiento y las preferencias de tu dispositivo. </p>
|
20 |
-
<p>Ahora, puedes disfrutar jugando BGMI a 90 FPS y experimentar un juego más suave y realista. </p>
|
21 |
-
<h2>Beneficios y desventajas de usar el archivo de configuración de 90 FPS</h2>
|
22 |
-
<h3>Beneficios: juego más suave, respuesta más rápida, mejores efectos visuales</h3>
|
23 |
-
<p>Usar el archivo de configuración de 90 FPS puede tener algunos beneficios para el rendimiento de tu juego, como:</p>
|
24 |
-
<ul>
|
25 |
-
<li>Juego más fluido: puedes ver más fotogramas por segundo en tu pantalla, lo que significa menos tartamudeo, retraso o congelación. Esto puede hacer que tu juego sea más fluido y agradable. </li>
|
26 |
-
<li>Respuesta más rápida: Puedes reaccionar más rápido a los movimientos y acciones de tus enemigos, ya que puedes verlos con mayor claridad y precisión. Esto puede darte una ventaja en combate y aumentar tus posibilidades de supervivencia. </li>
|
27 |
-
<li>Mejores imágenes: Puedes disfrutar de más detalles y efectos en tu pantalla, como sombras, reflejos, texturas y partículas. Esto puede hacer que tu juego se vea más hermoso e inmersivo. </li>
|
28 |
-
</ul>
|
29 |
-
<h3>Inconvenientes: mayor consumo de batería, calentamiento del dispositivo, posible riesgo de prohibición</h3>
|
30 |
-
<p>Sin embargo, usar el archivo de configuración de 90 FPS también puede tener algunos inconvenientes para su dispositivo y cuenta, como:</p>
|
31 |
-
<ul>
|
32 |
-
|
33 |
-
<li>Calentamiento de dispositivos: Ejecutar BGMI a 90 FPS también puede generar más calor de su dispositivo, ya que pone más estrés en su procesador y GPU. Esto puede causar que el dispositivo se sobrecaliente y afectar su rendimiento y durabilidad. </li>
|
34 |
-
<li>Posible riesgo de prohibición: El uso del archivo de configuración 90 FPS puede ser considerado como una forma de trampa o piratería por los desarrolladores del juego o el sistema anti-cheat. Esto puede resultar en que su cuenta sea prohibida o suspendida de jugar BGMI.</li>
|
35 |
-
</ul>
|
36 |
-
<h2>Alternativas al archivo de configuración de 90 FPS</h2>
|
37 |
-
<h3>Utilice un dispositivo de gama alta que soporte 90 FPS de forma nativa</h3>
|
38 |
-
<p>La mejor manera de jugar BGMI a 90 FPS es utilizar un dispositivo de gama alta que soporta esta característica de forma nativa, sin necesidad de ningún archivo de configuración personalizado o modificación. Algunos de los dispositivos que pueden ejecutar BGMI a 90 FPS son:</p>
|
39 |
-
<table border="1">
|
40 |
-
<tr><th>Nombre del dispositivo</th><th>Procesador</th><th>GPU</th><th>RAM</th><th>Visualización</th></tr>
|
41 |
-
<tr><td>OnePlus 9 Pro</td><td>Snapdragon 888</td><td>Adreno 660</td><td>8/12 GB</td><td>6.7 pulgadas AMOLED (120 Hz)</td></tr>
|
42 |
-
<tr><td>Samsung Galaxy S21 Ultra</td><td>Exynos 2100</td><td>Mali-G78 MP14</td><td>12/16 GB</td><td>6.8 pulgadas AMOLED (120 Hz)</td></tr>
|
43 |
-
<tr><td>Asus ROG Phone 5</td><td>Snapdragon 888</td><td>Adreno 660</td><td>8/12/16/18 GB</td><td>6.78 pulgadas AMOLED (144 Hz)</td></tr>
|
44 |
-
<tr><td>Xiaomi Mi 11 Ultra</td><td>Snapdragon 888</td><td>Adreno 660</td><td>8/12 GB</td><td>6.81 pulgadas AMOLED (120 Hz)</td></tr>
|
45 |
-
<tr><td>iQOO 7 Leyenda</td><td>Snapdragon 888</td><td>Adreno 660</td><td>8/12 GB</td><td>6.62 pulgadas AMOLED (120 Hz)</td></tr>
|
46 |
-
<tr><td>Realme GT Master Edition</td><td>Snapdragon 778G</td><td>Adreno 642L</td><td>6/8 GB</td><td>6.43 pulgadas AMOLED (120 Hz)</td></tr>
|
47 |
-
</table>
|
48 |
-
<p>Si tienes uno de estos dispositivos, puedes simplemente habilitar la opción 90 FPS en la configuración de BGMI y disfrutar del juego sin problemas. </p>
|
49 |
-
<h3>Usa una aplicación o herramienta de terceros que pueda optimizar el rendimiento de tu juego</h3>
|
50 |
-
|
51 |
-
<ul>
|
52 |
-
<li>GFX Tool: Esta es una aplicación popular que puede personalizar la configuración de gráficos para BGMI y otros juegos. Puedes usarlo para cambiar tu resolución, anti-aliasing, sombras, texturas y FPS. También puede usarlo para desbloquear la opción 90 FPS para BGMI, pero necesita tener un dispositivo rooteado para eso. </li>
|
53 |
-
<li>BGMI Booster: Esta es una aplicación que puede optimizar la memoria RAM, CPU y GPU de su dispositivo para BGMI y otros juegos. Puedes usarlo para limpiar tu caché, aplicaciones en segundo plano y archivos basura, así como para mejorar el rendimiento y la duración de la batería de tu dispositivo. </li>
|
54 |
-
<li>BGMI Configurator: Esta es una herramienta que puede generar un archivo de configuración personalizado para BGMI basado en las especificaciones y preferencias de su dispositivo. Puede usarlo para ajustar la configuración de gráficos y FPS para BGMI, así como para habilitar la opción 90 FPS. </li>
|
55 |
-
</ul>
|
56 |
-
<p>Sin embargo, debe tener cuidado al usar estas aplicaciones o herramientas, ya que podrían no ser compatibles con su dispositivo o versión del juego. También pueden causar algunos problemas o errores en tu juego o dispositivo, como fallos, fallos o prohibiciones. Por lo tanto, siempre debe realizar copias de seguridad de sus datos y el archivo de configuración antes de usarlos, y utilizarlos bajo su propio riesgo. </p>
|
57 |
-
<h3>Ajuste la configuración del juego para adaptarse a las capacidades de su dispositivo</h3>
|
58 |
-
<p>Si no desea utilizar ningún archivo de configuración personalizado o aplicación o herramienta de terceros, aún puede intentar mejorar el rendimiento de su juego ajustando la configuración del juego para adaptarse a las capacidades de su dispositivo. Puedes hacer esto siguiendo estos pasos:</p>
|
59 |
-
<ol>
|
60 |
-
<li>Inicie BGMI y vaya a Configuración > Gráficos.</li>
|
61 |
-
<li>Seleccione la calidad gráfica que coincida con el rendimiento de su dispositivo. Por ejemplo, si tiene un dispositivo de gama baja, puede elegir Suave o Equilibrado. Si tiene un dispositivo de gama media, puede elegir HD o HDR. Si tiene un dispositivo de gama alta, puede elegir Ultra HD o UHD.</li>
|
62 |
-
|
63 |
-
<li>Seleccione el estilo que coincida con su preferencia. Por ejemplo, si desea colores más realistas, puede elegir Realista o Suave. Si quieres colores más vibrantes, puedes elegir Colorido o Película.</li>
|
64 |
-
<li>Seleccione la opción anti-aliasing que coincida con la GPU de su dispositivo. Por ejemplo, si tiene una GPU potente, puede elegir Habilitar o Ultra. Si tiene una GPU débil, puede elegir Desactivar o Bajo.</li>
|
65 |
-
<li>Seleccione la opción de sombras que coincida con la CPU de su dispositivo. Por ejemplo, si tiene una CPU potente, puede elegir Habilitar o Alta. Si tiene una CPU débil, puede elegir Desactivar o Bajo.</li>
|
66 |
-
<li>Seleccione la opción de brillo que coincida con la batería del dispositivo. Por ejemplo, si tiene una capacidad de batería alta, puede elegir Alta o Máxima. Si tiene una capacidad de batería baja, puede elegir Baja o Media.</li>
|
67 |
-
<li>Seleccione la opción de pantalla no estándar que coincida con la relación de visualización de su dispositivo. Por ejemplo, si tiene una pantalla 16:9, puede elegir Con muescas. Si tiene una pantalla de 18:9, puede elegir Esquinas redondeadas. Si tiene una pantalla de 19:9, puede elegir Gota de agua.</li>
|
68 |
-
<li>Aplicar los cambios y reiniciar el juego. </li>
|
69 |
-
</ol>
|
70 |
-
<p>Al ajustar la configuración del juego para adaptarse a las capacidades de su dispositivo, puede optimizar su rendimiento de juego y evitar cualquier retraso innecesario o tartamudeo. </p>
|
71 |
-
<h2>Conclusión</h2>
|
72 |
-
<p>BGMI es un popular juego de batalla móvil royale que puede ofrecer una experiencia de juego emocionante e inmersiva. Sin embargo, para disfrutar del juego en su mejor momento, es posible que desee activar el modo de 90 FPS, que puede hacer que su juego sea más suave, más rápido y más realista. Sin embargo, no todos los dispositivos admiten esta función, e incluso si lo hacen, es posible que no pueda acceder a ella en la configuración del juego. Es por eso que algunos reproductores utilizan un archivo de configuración personalizado que puede desbloquear la opción 90 FPS para BGMI 2.0. </p>
|
73 |
-
|
74 |
-
<h2>Preguntas frecuentes</h2>
|
75 |
-
<h3>Q: ¿Es seguro usar el archivo de configuración de 90 FPS para BGMI 2.0? </h3>
|
76 |
-
<p>A: No hay una respuesta definitiva a esta pregunta, ya que depende de la fuente y la calidad del archivo de configuración, así como de los términos de servicio del juego y del sistema anti-cheat. Sin embargo, en general, usar el archivo de configuración de 90 FPS puede ser arriesgado, ya que podría contener malware, virus u otros elementos dañinos que pueden dañar su dispositivo o comprometer su cuenta. También puede ser considerado como una forma de trampa o piratería por los desarrolladores de juegos o sistema anti-cheat, que puede resultar en su cuenta está prohibido o suspendido de jugar BGMI. Por lo tanto, solo debe usar el archivo de configuración de una fuente confiable, escanearlo con una aplicación antivirus antes de abrirlo, hacer una copia de seguridad de su archivo de configuración original antes de reemplazarlo y usarlo bajo su propio riesgo. </p>
|
77 |
-
<h3>Q: ¿Cómo puedo comprobar mi FPS en BGMI? </h3>
|
78 |
-
<p>A: Hay varias formas de comprobar tu FPS en BGMI, como:</p>
|
79 |
-
<ul>
|
80 |
-
<li>Usando una opción en el juego: BGMI tiene una opción para mostrar tu FPS en la pantalla mientras juegas. Para habilitar esta opción, vaya a Configuración > Básico > Mostrar medidor de FPS y enciéndalo. Verá un pequeño número en la esquina superior izquierda de la pantalla que indica su FPS actual.</li>
|
81 |
-
<li>Usando una aplicación de terceros: Hay muchas aplicaciones que pueden mostrar su FPS en la pantalla mientras juega cualquier juego. Algunas de las aplicaciones populares son Game Booster, Game Tuner, FPS Meter y GameBench. Puede descargar estas aplicaciones desde Google Play Store u otras fuentes y usarlas para monitorear su FPS en BGMI.</li>
|
82 |
-
<li>Usando un dispositivo de hardware: Algunos dispositivos tienen una función incorporada que puede mostrar su FPS en la pantalla mientras juega cualquier juego. Por ejemplo, algunos teléfonos Asus ROG tienen una función AirTrigger que puede mostrar su FPS en el lado de su teléfono. Puede habilitar esta función yendo a Configuración > Avanzado > AirTrigger y activando Mostrar FPS.</li>
|
83 |
-
</ul>
|
84 |
-
<h3>Q: ¿Cuál es la diferencia entre 60 FPS y 90 FPS? </h3>
|
85 |
-
|
86 |
-
<h3>Q: ¿Cuáles son algunos consejos para mejorar mi rendimiento de juego en BGMI? </h3>
|
87 |
-
<p>A: Además de usar el archivo de configuración de 90 FPS o ajustar la configuración de tu juego, hay algunos otros consejos que pueden ayudarte a mejorar tu rendimiento de juego en BGMI, como:</p>
|
88 |
-
<ul>
|
89 |
-
<li>Cerrar todas las aplicaciones y procesos en segundo plano que no están relacionados con BGMI. Esto puede liberar sus recursos de RAM, CPU y GPU y evitar cualquier interferencia o retraso. </li>
|
90 |
-
<li>Actualice el software y los controladores de su dispositivo a la última versión. Esto puede solucionar cualquier error o problema que pueda afectar su rendimiento de juego. </li>
|
91 |
-
<li>Utilice una conexión a Internet estable y rápida, preferiblemente Wi-Fi o 4G. Esto puede reducir el ping, la latencia y la pérdida de paquetes, lo que puede afectar la calidad y la velocidad del juego. </li>
|
92 |
-
<li>Utilice un buen par de auriculares o auriculares para escuchar el juego suena con claridad y precisión. Esto puede ayudarte a localizar a tus enemigos, vehículos y armas, así como a comunicarte con tus compañeros de equipo. </li>
|
93 |
-
<li> Utilice un agarre cómodo y ergonómico y postura para jugar BGMI. Esto puede prevenir cualquier tensión o fatiga en las manos, los dedos, los ojos y el cuello, que puede afectar su rendimiento de juego. </li>
|
94 |
-
</ul>
|
95 |
-
<h3>Q: ¿Cómo puedo actualizar BGMI a la última versión? </h3>
|
96 |
-
<p>A: Para actualizar BGMI a la última versión, puede seguir estos pasos:</p>
|
97 |
-
<ol>
|
98 |
-
<li>Ir a Google Play Store o App Store y buscar BGMI.</li>
|
99 |
-
<li> Si hay una actualización disponible, verá un botón de actualización junto a la aplicación. Toque en ella y espere a que la actualización se descargue e instale. </li>
|
100 |
-
<li>Si no hay actualización disponible, verá un botón Abrir junto a la aplicación. Toque en él y ejecute BGMI.</li>
|
101 |
-
<li>Si hay una actualización en el juego disponible, verás un mensaje emergente en la pantalla principal del juego. Toque en Aceptar y espere a que la actualización se descargue e instale. </li>
|
102 |
-
<li>Después de la actualización se ha completado, se puede disfrutar de jugar BGMI con las últimas características y mejoras. </li>
|
103 |
-
</ol></p> 64aa2da5cf<br />
|
104 |
-
<br />
|
105 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Cmo Puedo Descargar Candy Crush Saga En Facebook.md
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar Candy Crush Saga en Facebook</h1>
|
3 |
-
<h2>Introducción</h2>
|
4 |
-
<p>Si te gusta jugar juegos de puzzle de partido 3, es posible que haya oído hablar de Candy Crush Saga. Es uno de los juegos más populares del mundo, con más de mil millones de descargas y millones de jugadores. ¿Pero sabías que también puedes jugar a Candy Crush Saga en Facebook? En este artículo, te mostraremos cómo descargar Candy Crush Saga en Facebook y disfrutar de este dulce juego con tus amigos. </p>
|
5 |
-
<h2>¿Cómo puedo descargar Candy Crush Saga en Facebook</h2><br /><p><b><b>DOWNLOAD</b> ⇒ <a href="https://bltlly.com/2v6LBA">https://bltlly.com/2v6LBA</a></b></p><br /><br />
|
6 |
-
<h3>¿Qué es Candy Crush Saga? </h3>
|
7 |
-
<p>Candy Crush Saga es un juego de puzzle match-3 desarrollado por King. El juego consiste en combinar caramelos de diferentes colores y formas para eliminarlos del tablero y completar varios objetivos. El juego tiene miles de niveles, cada uno con diferentes desafíos y recompensas. También puedes desbloquear caramelos y potenciadores especiales que pueden ayudarte a superar niveles difíciles. </p>
|
8 |
-
<h3>¿Por qué jugar Candy Crush Saga en Facebook? </h3>
|
9 |
-
<p>Jugar a Candy Crush Saga en Facebook tiene muchos beneficios. Por un lado, puedes sincronizar tu progreso a través de diferentes dispositivos y plataformas. Esto significa que puede cambiar de su teléfono a su computadora o tableta sin perder su nivel o vidas. <p>Otra razón para jugar Candy Crush Saga en Facebook es que puedes interactuar con tus amigos y otros jugadores. Puede enviar y recibir vidas, regalos y mensajes. También puede competir con ellos en las tablas de clasificación y ver quién puede obtener la puntuación más alta. Jugar con amigos puede hacer el juego más divertido y social. </p>
|
10 |
-
<h2>Cómo descargar Candy Crush Saga en Facebook</h2>
|
11 |
-
<p>Hay diferentes formas de descargar Candy Crush Saga en Facebook, dependiendo de qué dispositivo o plataforma esté utilizando. Aquí hay tres métodos que puedes probar:</p>
|
12 |
-
<h3>Método 1: Utilice la aplicación de Facebook en su teléfono o tableta</h3>
|
13 |
-
|
14 |
-
<h4>Paso 1: Eliminar la aplicación de Facebook desde su dispositivo</h4>
|
15 |
-
<p>Esto puede sonar contradictorio, pero eliminar la aplicación de Facebook de tu dispositivo puede ayudarte a evitar algunos problemas con el juego. Algunos usuarios han informado que el juego no se carga o se bloquea cuando intentan jugar a través de la aplicación de Facebook. Eliminar la aplicación puede resolver este problema. </p>
|
16 |
-
<p></p>
|
17 |
-
<h4>Paso 2: Borrar la caché en la configuración de su dispositivo</h4>
|
18 |
-
<p>Después de eliminar la aplicación de Facebook, también debe borrar la caché en la configuración de su dispositivo. Esto puede ayudarle a liberar algo de espacio y mejorar el rendimiento de su dispositivo. Para borrar la caché, ve a la configuración del dispositivo y busca la opción de borrar la caché o el almacenamiento. Toca en ella y confirma. </p>
|
19 |
-
<h4>Paso 3: Abra Candy Crush Saga y toque Conectar</h4>
|
20 |
-
<p>Ahora, puedes abrir Candy Crush Saga en tu dispositivo. Si aún no lo tienes, puedes descargarlo desde [1](https://play.google.com/store/apps/detailss?id=id=com.king.candycrushsaga) para dispositivos Android o [2](https:/apps.apple.com/us/appy-crush-saga/id3834731) para dispositivos iOS. Una vez que abra el juego, toque en el botón Conectar en la parte inferior de la pantalla. Esto le pedirá que inicie sesión con su cuenta de Facebook. </p>
|
21 |
-
<h4>Paso 4: Inicia sesión en Facebook a través de un navegador móvil</h4>
|
22 |
-
<p>Cuando toque en Conectar, será redirigido a un navegador móvil donde puede iniciar sesión en Facebook. Introduzca su dirección de correo electrónico o número de teléfono y contraseña y toque en Iniciar sesión. También es posible que tengas que permitir que Candy Crush Saga acceda a parte de tu información, como tu nombre, foto de perfil y lista de amigos. Toca Continuar para confirmar. </p>
|
23 |
-
<p>Felicidades! Usted ha descargado con éxito Candy Crush Saga en Facebook usando su teléfono o tableta. Ahora puedes jugar el juego y sincronizar tu progreso con tu cuenta de Facebook. </p> <h3>Método 2: Utilice el sitio web de Facebook en su computadora o navegador</h3>
|
24 |
-
|
25 |
-
<h4>Paso 1: Vaya a [5](https://www.facebook.com/candycrushsaga/) o [6](https://apps.facebook.com/candycrush/) en el navegador de su computadora</h4>
|
26 |
-
<p>Abra el navegador de su computadora y vaya a [5](https://www.facebook.com/candycrushsaga/) o [6](https://apps.facebook.com/candycrush/). Estas son las páginas oficiales de Candy Crush Saga en Facebook. También puedes buscar Candy Crush Saga en Google y hacer clic en el primer resultado. </p>
|
27 |
-
<h4>Paso 2: Haga clic en Jugar ahora o Iniciar sesión</h4>
|
28 |
-
<p>Si ya has iniciado sesión en Facebook, puedes hacer clic en el botón Jugar ahora para comenzar a jugar. Si no ha iniciado sesión, deberá hacer clic en el botón Iniciar sesión e ingresar su dirección de correo electrónico o número de teléfono y contraseña. También es posible que tengas que permitir que Candy Crush Saga acceda a parte de tu información, como tu nombre, foto de perfil y lista de amigos. Haga clic en Continuar para confirmar. </p>
|
29 |
-
<h4>Paso 3: Buscar Candy Crush Saga juego en la barra de búsqueda de Facebook</h4>
|
30 |
-
<p>Si no ves el juego en la página, puedes buscarlo en la barra de búsqueda de Facebook en la parte superior de la pantalla. Escribe Candy Crush Saga y pulsa Enter. Deberías ver el juego como el primer resultado. Haz clic en él para abrirlo. </p>
|
31 |
-
<h4>Paso 4: Haga clic en el juego para jugar</h4>
|
32 |
-
<p>Una vez que abra el juego, verá una pantalla de carga con un bastón de caramelo. Espere unos segundos hasta que el juego se cargue. A continuación, verá un mapa con diferentes episodios y niveles. Haga clic en el nivel que desea jugar y disfrutar! </p>
|
33 |
-
<p>Felicidades! Usted ha descargado con éxito Candy Crush Saga en Facebook utilizando su ordenador o navegador. Ahora puedes jugar el juego y sincronizar tu progreso con tu cuenta de Facebook. </p> <h3>Método 3: Use king.com/games o descargue la aplicación windows 10 desde la tienda de Microsoft</h3>
|
34 |
-
<p>Si no quieres usar Facebook para jugar a Candy Crush Saga, también puedes usar el sitio web oficial de King o descargar la aplicación windows 10 desde la tienda de Microsoft. Estos métodos también son fáciles y convenientes. Aquí están los pasos:</p>
|
35 |
-
|
36 |
-
<p>Abra su navegador y vaya a [2](https://www.king.com/game/candycrush) o [3](https://www.microsoft.com/en-us/p/candy-crush-saga/9nblggh18846). Estas son las páginas oficiales de Candy Crush Saga en King y Microsoft. También puedes buscar Candy Crush Saga en Google y hacer clic en el segundo o tercer resultado. </p>
|
37 |
-
<h4>Paso 2: Haga clic en Jugar ahora o Obtener</h4>
|
38 |
-
<p>Si vas a King, puedes hacer clic en el botón Play Now para comenzar a jugar el juego. Si vas a Microsoft, puedes hacer clic en el botón Obtener para descargar la aplicación. Es posible que necesite iniciar sesión con su cuenta de Microsoft o crear una si no tiene una. </p>
|
39 |
-
<h4>Paso 3: Inicia sesión con tu cuenta King o crea una si no tienes una</h4>
|
40 |
-
<p>Si juegas en King, necesitarás iniciar sesión con tu cuenta de King o crear una si no tienes una. Una cuenta King es una cuenta gratuita que te permite jugar juegos en King y sincronizar tu progreso en diferentes dispositivos y plataformas. Para iniciar sesión o crear una cuenta King, haga clic en el botón Iniciar sesión en la esquina superior derecha de la pantalla e introduzca su dirección de correo electrónico y contraseña. También puedes iniciar sesión con tu cuenta de Facebook si tienes una. </p>
|
41 |
-
<h4>Paso 4: Conecta tu cuenta de King con tu cuenta de Facebook</h4>
|
42 |
-
<p>Si quieres jugar Candy Crush Saga con tus amigos de Facebook, puedes conectar tu cuenta de King con tu cuenta de Facebook. Esto te permitirá ver las puntuaciones de tus amigos y enviar y recibir vidas, regalos y mensajes. Para conectar sus cuentas, haga clic en el botón Conectar en la parte inferior de la pantalla e inicie sesión con su cuenta de Facebook. También es posible que tengas que permitir que Candy Crush Saga acceda a parte de tu información, como tu nombre, foto de perfil y lista de amigos. Haga clic en Continuar para confirmar. </p>
|
43 |
-
<p>¡Felicidades! Has descargado exitosamente Candy Crush Saga en Facebook usando King o Microsoft. Ahora puedes jugar el juego y sincronizar tu progreso con tu cuenta de Facebook. </p>
|
44 |
-
<h2>Conclusión</h2>
|
45 |
-
|
46 |
-
<h2>Preguntas frecuentes</h2>
|
47 |
-
<p>Aquí hay algunas preguntas frecuentes sobre la descarga de Candy Crush Saga en Facebook:</p>
|
48 |
-
<ul>
|
49 |
-
<li><b>Q: ¿Cómo puedo actualizar Candy Crush Saga en Facebook? </b></li>
|
50 |
-
<li>A: Para actualizar Candy Crush Saga en Facebook, es necesario comprobar si hay alguna actualización disponible para el juego. Puedes hacer esto yendo a la página del juego en Facebook y buscando una notificación que diga "Actualización disponible". Si hay una, haz clic en ella y sigue las instrucciones. Alternativamente, también puedes eliminar y reinstalar el juego como se explica en el método 1 anterior. </li>
|
51 |
-
<li><b>Q: ¿Cómo puedo restaurar mi progreso en Candy Crush Saga en Facebook? </b></li>
|
52 |
-
<li>A: Para restaurar su progreso en Candy Crush Saga en Facebook, debe asegurarse de que está conectado a su cuenta de Facebook. Puedes hacer esto tocando o haciendo clic en el botón Conectar en la parte inferior de la pantalla e iniciando sesión con tu cuenta de Facebook. Esto sincronizará tu progreso con tu cuenta de Facebook y te permitirá continuar desde donde lo dejaste. </li>
|
53 |
-
<li><b>Q: ¿Cómo puedo eliminar Candy Crush Saga de Facebook? </b></li>
|
54 |
-
<li>A: Para eliminar Candy Crush Saga de Facebook, es necesario ir a la configuración de Facebook y buscar la opción de administrar aplicaciones y sitios web. Allí, verá una lista de aplicaciones y sitios web que ha conectado con su cuenta de Facebook. Encuentra Candy Crush Saga y haz clic en el botón Eliminar al lado. Esto eliminará el juego de tu cuenta de Facebook y eliminará todos sus datos. </li <li><b>Q: ¿Cómo puedo obtener más vidas en Candy Crush Saga en Facebook? </b></li>
|
55 |
-
|
56 |
-
<li><b>Q: ¿Cómo puedo jugar Candy Crush Saga offline? </b></li>
|
57 |
-
<li>A: Para jugar Candy Crush Saga fuera de línea, es necesario descargar el juego en su dispositivo y jugarlo sin conectarse a Internet. Sin embargo, no podrás sincronizar tu progreso con tu cuenta de Facebook ni interactuar con tus amigos. Tampoco podrás acceder a algunas características del juego, como el bono diario, los eventos o las misiones. </li>
|
58 |
-
</ul></p> 64aa2da5cf<br />
|
59 |
-
<br />
|
60 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descarga 22h2 Windows 10 Actualizacin.md
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar 22H2 Windows 10 Actualización: Todo lo que necesita saber</h1>
|
3 |
-
<p>Windows 10 es el sistema operativo más popular del mundo, alimentando millones de dispositivos en todo el mundo. Pero ¿sabía que Windows 10 está en constante evolución y mejora con nuevas actualizaciones y características? Una de las últimas actualizaciones es la versión de Windows 10 22H2, también conocida como la actualización de Windows 10 2022. En este artículo, le diremos todo lo que necesita saber sobre esta actualización, incluyendo lo que es, cómo descargarlo e instalarlo, y por qué debe hacerlo. ¡Vamos a empezar! </p>
|
4 |
-
<h2>descarga 22h2 windows 10 actualización</h2><br /><p><b><b>Download</b> ⚙ <a href="https://bltlly.com/2v6IAw">https://bltlly.com/2v6IAw</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es la versión de Windows 10 22H2? </h2>
|
6 |
-
<p>Windows 10 versión 22H2 es la última actualización del sistema operativo para clientes de Windows 10, que comenzó a implementarse en octubre de 2022. Al igual que la versión de Windows 10 21H2, que fue lanzado en 2021, la versión 22H2 para Windows 10 es una versión muy pequeña que se centra en un conjunto de mejoras bajo el capó más beneficioso para los clientes empresariales de Microsoft. </p>
|
7 |
-
<h3>La última actualización del sistema operativo para clientes de Windows 10</h3>
|
8 |
-
<p>Windows 10 versión 22H2 es la segunda actualización semestral para Windows 10 en 2022, después de Windows 10 versión 21H1, que fue lanzado en mayo. Estas actualizaciones son parte del ciclo de lanzamiento semestral de Microsoft, que tiene como objetivo ofrecer nuevas características y mejoras a Windows 10 dos veces al año. Sin embargo, a diferencia de las principales actualizaciones anteriores, como Windows 10 versión 20H2 o Windows 10 versión 2004, que introdujo cambios significativos y adiciones al sistema operativo, Windows 10 versión 22H2 es una actualización relativamente pequeña que no trae ningún cambio visual o funcional notable a la experiencia del usuario. </p>
|
9 |
-
<h3>Una versión menor con mejoras bajo el capó</h3>
|
10 |
-
|
11 |
-
<ul>
|
12 |
-
<li>Soporte WPA3 H2E para una seguridad Wi-Fi mejorada</li>
|
13 |
-
<li>Compatibilidad con GPU en el subsistema de Windows para Linux (WSL) y Azure IoT Edge para Linux en implementaciones de Windows (EFLOW) </li>
|
14 |
-
<li>Experiencia de audio Bluetooth mejorada</li>
|
15 |
-
<li>Capacidades mejoradas de administración de dispositivos</li>
|
16 |
-
<li>Varias correcciones de errores y parches de seguridad</li>
|
17 |
-
</ul>
|
18 |
-
<h3>Las principales características y cambios de Windows 10 versión 22H2</h3>
|
19 |
-
<p>Si bien la versión 22H2 de Windows 10 no trae cambios importantes al sistema operativo, todavía hay algunas características notables y cambios que debe tener en cuenta. Estos son algunos de ellos:</p>
|
20 |
-
<p></p>
|
21 |
-
<ul>
|
22 |
-
<li>El menú Inicio tiene un nuevo diseño que coincide con el tema de Windows 11, con esquinas redondeadas e iconos centrados. </li>
|
23 |
-
<li>La barra de tareas tiene un nuevo icono para el widget de Noticias e Intereses, que muestra noticias personalizadas, clima, deportes y más. </li>
|
24 |
-
<li> La aplicación Configuración tiene un nuevo banner que muestra el nombre del dispositivo, el estado y las acciones rápidas. </li>
|
25 |
-
<li>El Explorador de archivos tiene una nueva barra de comandos que reemplaza la interfaz de cinta y ofrece más opciones contextuales. </li <li>El navegador de Microsoft Edge tiene una nueva función de pestañas verticales que le permite administrar sus pestañas más fácilmente. </li>
|
26 |
-
<li>El sistema de autenticación biométrica de Windows Hello tiene una nueva opción para configurar una copia de seguridad de PIN o contraseña en caso de que su dispositivo no reconozca su cara o huella digital. </li>
|
27 |
-
<li>La aplicación Windows Security tiene una nueva función de protección contra ransomware que le ayuda a proteger sus archivos contra el cifrado malicioso. </li>
|
28 |
-
</ul>
|
29 |
-
<h2>¿Cómo descargar e instalar Windows 10 versión 22H2? </h2>
|
30 |
-
<p>Ahora que sabe lo que es la versión de Windows 10 22H2 y lo que ofrece, es posible que se pregunte cómo descargarlo e instalarlo en su dispositivo. Hay varias maneras de hacer esto, dependiendo de su preferencia y situación. Estos son algunos de los métodos más comunes:</p>
|
31 |
-
<h3>Buscar actualizaciones en la configuración de Windows Update</h3>
|
32 |
-
|
33 |
-
<ol>
|
34 |
-
<li>Abra la aplicación Configuración haciendo clic en el menú Inicio y seleccionando el icono de engranaje, o presionando Windows + I en el teclado. </li>
|
35 |
-
<li>Haga clic en Actualizar & Seguridad, y luego en Windows Update.</li>
|
36 |
-
<li>Haga clic en Buscar actualizaciones, y espere a que Windows busque las actualizaciones disponibles. </li>
|
37 |
-
<li>Si ve la opción Actualizar características a Windows 10, versión 22H2, haga clic en Descargar e instalar. Si no lo ves, significa que la actualización aún no está disponible para tu dispositivo, o que ya lo tienes instalado. </li>
|
38 |
-
<li>Siga las instrucciones en pantalla para completar el proceso de instalación. Es posible que necesite reiniciar el dispositivo varias veces durante el proceso. </li>
|
39 |
-
</ol>
|
40 |
-
<h3>Utilice la herramienta de creación de medios o el asistente de actualización</h3>
|
41 |
-
<p>Si desea descargar e instalar manualmente Windows 10 versión 22H2, o si tiene problemas con el método Windows Update, puede usar la herramienta de creación de medios o el asistente de actualización. Estas son herramientas oficiales de Microsoft que le permiten crear una unidad USB o DVD de arranque con la última versión de Windows 10, o actualizar su versión actual de Windows 10 a la versión 22H2. Para usar estas herramientas, siga estos pasos:</p>
|
42 |
-
<ol>
|
43 |
-
<li>Ir a la página [Descargar Windows 10] en el sitio web de Microsoft. </li>
|
44 |
-
<li>Desplácese hacia abajo a la sección Crear medios de instalación de Windows 10, y haga clic en Descargar herramienta ahora para obtener la herramienta de creación de medios, o haga clic en Actualizar ahora para obtener el asistente de actualización. </li>
|
45 |
-
<li>Ejecute la herramienta que descargó y acepte los términos de la licencia. </li>
|
46 |
-
|
47 |
-
<li>Siga las instrucciones en pantalla para completar el proceso de instalación. Es posible que necesite reiniciar el dispositivo varias veces durante el proceso. </li>
|
48 |
-
</ol>
|
49 |
-
<h3>Solucionar problemas y problemas comunes</h3>
|
50 |
-
<p>A veces, puede encontrar algunos problemas o problemas al descargar o instalar Windows 10 versión 22H2. Estos pueden ser causados por varios factores, como hardware o software incompatible, poco espacio en disco, archivos de sistema dañados o errores de red. Estos son algunos de los problemas y problemas más comunes que los usuarios han informado, y cómo solucionarlos:</p>
|
51 |
-
|
52 |
-
<p>Ahora que sabe cómo descargar e instalar Windows 10 versión 22H2, es posible que se pregunte por qué debe hacerlo en primer lugar. Después de todo, esta actualización no parece ofrecer mayores beneficios o mejoras al sistema operativo. Sin embargo, todavía hay algunas buenas razones por las que debe descargar Windows 10 versión 22H2, como:</p>
|
53 |
-
<h3>Disfruta de un mejor rendimiento y estabilidad</h3>
|
54 |
-
<p>Windows 10 versión 22H2 está diseñado para mejorar el rendimiento y la estabilidad de su dispositivo, mediante la fijación de varios errores y problemas que podrían afectar a su experiencia de usuario. Por ejemplo, esta actualización mejora la experiencia de audio Bluetooth, la compatibilidad con GPU en implementaciones WSL y EFLOW y las capacidades de administración de dispositivos. Estas mejoras pueden ayudarlo a disfrutar de un funcionamiento más suave y rápido de su dispositivo, especialmente si lo usa para fines de trabajo o entretenimiento. </p>
|
55 |
-
<h3>Obtenga las últimas actualizaciones de seguridad y calidad</h3>
|
56 |
-
<p>La versión 22H2 de Windows 10 también incluye las últimas actualizaciones de seguridad y calidad para su dispositivo, que pueden ayudarlo a proteger sus datos y privacidad de amenazas y ataques potenciales. Por ejemplo, esta actualización incluye soporte WPA3 H2E para una seguridad Wi-Fi mejorada, así como varios parches de seguridad para diferentes componentes del sistema operativo. Estas actualizaciones pueden ayudarlo a prevenir infecciones de malware, violaciones de datos, robo de identidad y otros delitos cibernéticos que podrían dañarlo a usted o a su dispositivo. </p>
|
57 |
-
<h3>Prepárese para el futuro de Windows 10</h3>
|
58 |
-
|
59 |
-
<h2>Conclusión</h2>
|
60 |
-
<p>En conclusión, la versión 22H2 de Windows 10 es la última actualización del sistema operativo para clientes de Windows 10, que comenzó a implementarse en octubre de 2022. Es una versión menor que no trae nuevas características o capacidades al sistema operativo, sino que se centra en mejorar su rendimiento, estabilidad, seguridad y calidad. También introduce algunos cambios de diseño que coinciden con el tema de Windows 11, que es la próxima actualización importante para Windows 10. Puede descargar e instalar Windows 10 versión 22H2 mediante la función Windows Update, o mediante la herramienta de creación de medios o el asistente de actualización. También puede solucionar cualquier problema o problema común que pueda encontrar durante o después del proceso de instalación. Al descargar Windows 10 versión 22H2, puede disfrutar de una mejor experiencia de usuario en su dispositivo, obtener las últimas actualizaciones de seguridad y calidad, y prepararse para el futuro de Windows 10. </p>
|
61 |
-
<p>Esperamos que este artículo te haya ayudado a entender todo lo que necesitas saber sobre Windows 10 versión 22H2. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
|
62 |
-
<h3>Preguntas frecuentes</h3>
|
63 |
-
<ul>
|
64 |
-
<li><b>Q: ¿Cuánto tiempo se tarda en descargar e instalar Windows 10 versión 22H2? </b></li>
|
65 |
-
<li>A: El tiempo que se tarda en descargar e instalar Windows 10 versión 22H2 depende de varios factores, como la velocidad de Internet, las especificaciones del dispositivo, la disponibilidad de espacio en disco y el método de actualización. En términos generales, debe tomar entre 15 minutos y una hora para completar el proceso. </li>
|
66 |
-
<li><b>Q: ¿Cuánto espacio en disco necesito para descargar e instalar Windows 10 versión 22H2? </b></li>
|
67 |
-
<li>A: El requisito de espacio en disco para descargar e instalar Windows 10 versión 22H2 varía dependiendo de la versión actual de Windows 10. Si está actualizando desde la versión de Windows 10 21H1, 21H2 o 20H2, necesitará unos 500 MB de espacio en disco. Si está actualizando desde una versión anterior de Windows 10, necesitará aproximadamente 4 GB de espacio en disco. </li>
|
68 |
-
|
69 |
-
<li>A: Puede comprobar si tiene la versión de Windows 10 22H2 instalada en su dispositivo yendo a Configuración > Sistema > Acerca, y mirando los campos Versión y OS Build. Si ves 22H2 y 19044.xxx, respectivamente, entonces tienes Windows 10 versión 22H2 instalado en tu dispositivo. </li>
|
70 |
-
<li><b>Q: ¿Cómo puedo desinstalar Windows 10 versión 22H2 si no me gusta o si causa problemas? </b></li>
|
71 |
-
<li>A: Puede desinstalar Windows 10 versión 22H2 si no te gusta o si causa problemas al ir a Configuración > Actualización y seguridad > Recuperación > Volver a la versión anterior de Windows 10. Sin embargo, solo puede hacer esto dentro de los 10 días de instalar la actualización, y solo si no ha eliminado la carpeta Windows.old de su disco. </li>
|
72 |
-
<li><b>Q: ¿Cuál es la diferencia entre la versión de Windows 10 22H2 y Windows 11? </b></li>
|
73 |
-
<li>A: Windows 10 versión 22H2 y Windows 11 son dos versiones diferentes del mismo sistema operativo, con diferentes características y requisitos. Windows 10 versión 22H2 es una actualización menor para los clientes de Windows 10, mientras que Windows 11 es una actualización importante que introduce una nueva interfaz de usuario, nuevas características y nuevos requisitos de hardware. Se espera que Windows 11 esté disponible para los usuarios de Windows 10 a finales de 2023 o principios de 2024, dependiendo de su compatibilidad con el dispositivo. </li>
|
74 |
-
</ul></p> 64aa2da5cf<br />
|
75 |
-
<br />
|
76 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Coche Deportivo 3 Apk.md
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Descargar Sport Car 3 APK: Un juego de simulador de conducción gratis para Android</h1>
|
3 |
-
<p>Si usted es un fan de los coches y un amante de la conducción, entonces usted debe comprobar hacia fuera <fuerte>Sport Car 3 APK</strong>, una aplicación gratuita para Android que le permite experimentar la emoción de conducir diferentes coches deportivos en varios escenarios. En este artículo, le diremos qué es Sport Car 3 APK, cómo descargarlo e instalarlo en su dispositivo Android, por qué debería jugarlo y cuáles son los mejores autos deportivos en 2023 que puede conducir en el juego. </p>
|
4 |
-
<h2>descargar coche deportivo 3 apk</h2><br /><p><b><b>Download Zip</b> –––––>>> <a href="https://bltlly.com/2v6Ksd">https://bltlly.com/2v6Ksd</a></b></p><br /><br />
|
5 |
-
<h2>¿Qué es el coche deportivo 3 APK? </h2>
|
6 |
-
<h3>Una breve introducción al juego y sus características</h3>
|
7 |
-
<p>Sport Car 3 APK es un juego de simulador de conducción desarrollado por SportCarGames. Le permite elegir entre más de 50 coches deportivos y conducirlos en diferentes modos, como taxi, policía, viaje libre, deriva, arrastre y carrera. También puede personalizar sus coches con diferentes colores, ruedas, spoilers y pegatinas. El juego cuenta con gráficos realistas, física y sonidos que te hacen sentir como si estuvieras al volante de un coche deportivo real. También puede explorar diferentes entornos, como ciudad, desierto, montaña, aeropuerto y autopista. </p>
|
8 |
-
<h3> Cómo descargar e instalar Sport Car 3 APK en su dispositivo Android</h3>
|
9 |
-
<p>Para descargar e instalar Sport Car 3 APK en su dispositivo Android, necesita una conexión a Internet y un navegador. Estos son los pasos a seguir:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Ir a <a href="( 1 )">Coche deportivo 3 : Taxi Police - simulador de unidad APK</a> en su navegador y toque en el <strong>Descargar APK</strong> botón. </li>
|
12 |
-
<li>Espere a que el archivo APK se descargue en su dispositivo. </li>
|
13 |
-
<li>Abra la aplicación de administrador de archivos en su dispositivo y busque el archivo APK descargado. </li>
|
14 |
-
<li>Toque en el archivo y permita la instalación desde fuentes desconocidas si se le solicita. </li>
|
15 |
-
<li>Siga las instrucciones en la pantalla para completar la instalación. </li>
|
16 |
-
<li>Iniciar el juego y disfrutar de la conducción de sus coches deportivos favoritos. </li>
|
17 |
-
</ol>
|
18 |
-
<h2> ¿Por qué usted debe jugar Sport Car 3 APK</h2>
|
19 |
-
|
20 |
-
<p>Jugar Sport Car 3 APK puede ofrecerle muchos beneficios, tales como:</p>
|
21 |
-
<p></p>
|
22 |
-
<ul>
|
23 |
-
<li>Usted puede divertirse y relajarse conduciendo diferentes coches deportivos en varios modos. </li>
|
24 |
-
<li>Puedes mejorar tus habilidades y reflejos al enfrentar diferentes desafíos y obstáculos. </li>
|
25 |
-
<li>Puedes dar rienda suelta a tu creatividad y personalidad personalizando tus coches con diferentes opciones. </li>
|
26 |
-
<li>Puedes aprender más sobre los diferentes coches deportivos y sus características leyendo sus descripciones. </li>
|
27 |
-
</ul>
|
28 |
-
<h3>Los inconvenientes de jugar Sport Car 3 APK</h3>
|
29 |
-
<p>Jugando Sport Car 3 APK también puede tener <p>algunos inconvenientes, tales como:</p>
|
30 |
-
<ul>
|
31 |
-
<li>Puedes encontrar anuncios y compras en la aplicación que pueden interrumpir tu juego o tentarte a gastar dinero. </li>
|
32 |
-
<li>Puedes enfrentar problemas de compatibilidad con algunos dispositivos o versiones de Android que pueden afectar el rendimiento o la funcionalidad del juego. </li>
|
33 |
-
<li>Es posible que necesite mucho espacio de almacenamiento en su dispositivo para descargar e instalar el juego y sus actualizaciones. </li>
|
34 |
-
</ul>
|
35 |
-
<p>Sin embargo, estos inconvenientes no son demasiado graves y se pueden superar siguiendo algunos consejos, como:</p>
|
36 |
-
<ul>
|
37 |
-
<li>Puedes desactivar la conexión a Internet o usar un bloqueador de anuncios para evitar anuncios mientras juegas. </li>
|
38 |
-
<li> Puede comprobar los requisitos mínimos y las revisiones del juego antes de descargarlo e instalarlo en su dispositivo. </li>
|
39 |
-
<li>Puedes despejar algo de espacio en tu dispositivo o usar una tarjeta de memoria externa para almacenar el juego y sus actualizaciones. </li>
|
40 |
-
</ul>
|
41 |
-
<h2>Los mejores coches deportivos en 2023 para conducir en coche deportivo 3 APK</h2>
|
42 |
-
<p>Si se está preguntando cuáles son los mejores coches deportivos en 2023 que se puede conducir en Sport Car 3 APK, aquí hay una lista basada en los resultados de búsqueda web:</p>
|
43 |
-
<tabla>
|
44 |
-
<tr>
|
45 |
-
<th>Coche</th>
|
46 |
-
<th>Descripción</th>
|
47 |
-
</tr>
|
48 |
-
<tr>
|
49 |
-
<td><h4>Chevrolet Corvette</h4></td>
|
50 |
-
|
51 |
-
</tr>
|
52 |
-
<tr>
|
53 |
-
<td><h4>Porsche 911 Turbo S</h4></td>
|
54 |
-
<td><p>El Porsche 911 Turbo S es una versión de alto rendimiento del icónico coche deportivo alemán que ha estado en producción desde 1963. El último modelo, el 992, es el 911 más potente y rápido jamás hecho. Tiene un 3,8 litros de doble turbocompresor plana de seis motores que produce 640 caballos de fuerza y 590 libras-pie de par. Puede acelerar de 0 a 60 mph en 2.6 segundos y alcanzar una velocidad máxima de 205 mph. También tiene un diseño sofisticado, un interior refinado y un alerón trasero que se ajusta a diferentes modos de conducción. </p></td>
|
55 |
-
</tr>
|
56 |
-
<tr>
|
57 |
-
<td><h4>BMW M2</h4></td>
|
58 |
-
<td><p>El BMW M2 es un automóvil deportivo compacto que forma parte de la división M del fabricante de automóviles alemán. El último modelo, el M2 CS, es la versión más hardcore y centrada en la pista del M2. Tiene un 3.0 litros de doble turbocompresor en línea de seis motores que produce 444 caballos de fuerza y 406 libras-pie de par. Puede acelerar de 0 a 60 mph en 3.8 segundos y alcanzar una velocidad máxima de 174 mph. También tiene un diseño muscular, un interior deportivo y un techo y una capucha de fibra de carbono. </p></td>
|
59 |
-
</tr>
|
60 |
-
<tr>
|
61 |
-
<td><h4>Nissan Z</h4></td>
|
62 |
-
<td><p>El Nissan Z es un clásico deportivo japonés que ha estado en producción desde 1969. Se espera que el último modelo, el Z35, debute a finales de 2023 como sucesor del Z34 (370Z). Se rumorea que tiene un 3.0 litros doble turboalimentado motor V6 que produce alrededor de 400 caballos de fuerza y 350 libras de par. También se especula que tiene un diseño de inspiración retro, un interior moderno y una opción de transmisión manual. </p></td>
|
63 |
-
</tr>
|
64 |
-
<tr>
|
65 |
-
<td><h4>Toyota GR-Supra</h4></td>
|
66 |
-
|
67 |
-
</tr>
|
68 |
-
</tabla>
|
69 |
-
<h2>Conclusión</h2>
|
70 |
-
<p>Sport Car 3 APK es un juego de simulador de conducción gratuito para Android que le permite conducir diferentes coches deportivos en varios modos y entornos. Tiene gráficos realistas, física y sonidos que te hacen sentir como si estuvieras al volante de un coche deportivo real. También puede personalizar sus coches con diferentes colores, ruedas, spoilers y pegatinas. También puede explorar diferentes entornos, como ciudad, desierto, montaña, aeropuerto y autopista. </p>
|
71 |
-
<p>Si usted está buscando un divertido y realista juego de simulador de conducción para Android, usted debe descargar Sport Car 3 APK y disfrutar de la conducción de sus coches deportivos favoritos. Puedes descargar el juego desde el enlace de abajo e iniciar tu motor. </p>
|
72 |
-
<p><a href=">Descargar Sport Car 3 APK</a></p>
|
73 |
-
<h2>Preguntas frecuentes</h2>
|
74 |
-
<h3> ¿Cuál es la última versión de Sport Car 3 APK? </h3>
|
75 |
-
<p>La última versión de Sport Car 3 APK es 1.0.5, que fue lanzado el 15 de junio de 2023. Se corrigieron algunos errores y mejoró el rendimiento del juego. </p>
|
76 |
-
<h3> ¿Cuánto espacio requiere Sport Car 3 APK en su dispositivo? </h3>
|
77 |
-
<p>Sport Car 3 APK requiere unos 300 MB de espacio libre en su dispositivo para descargar e instalar. Es posible que necesite más espacio para las actualizaciones y los archivos de datos del juego. </p>
|
78 |
-
<h3> ¿Es seguro descargar y jugar Sport Car 3 APK? </h3>
|
79 |
-
<p>Sí, Sport Car 3 APK es seguro para descargar y jugar. No contiene ningún virus o malware que pueda dañar su dispositivo o su privacidad. Sin embargo, siempre debes descargar el juego desde una fuente de confianza y escanearlo con una aplicación antivirus antes de instalarlo. </p>
|
80 |
-
<h3>¿Puedes jugar Sport Car 3 APK fuera de línea? </h3>
|
81 |
-
<p>Sí, puede jugar Sport Car 3 APK sin conexión a Internet. Sin embargo, algunas características del juego, como anuncios y compras en la aplicación, pueden no funcionar correctamente sin conexión. </p>
|
82 |
-
<h3>¿Puedes jugar Sport Car 3 APK con amigos? </h3> 64aa2da5cf<br />
|
83 |
-
<br />
|
84 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/unpacking.py
DELETED
@@ -1,257 +0,0 @@
|
|
1 |
-
"""Utilities related archives.
|
2 |
-
"""
|
3 |
-
|
4 |
-
import logging
|
5 |
-
import os
|
6 |
-
import shutil
|
7 |
-
import stat
|
8 |
-
import tarfile
|
9 |
-
import zipfile
|
10 |
-
from typing import Iterable, List, Optional
|
11 |
-
from zipfile import ZipInfo
|
12 |
-
|
13 |
-
from pip._internal.exceptions import InstallationError
|
14 |
-
from pip._internal.utils.filetypes import (
|
15 |
-
BZ2_EXTENSIONS,
|
16 |
-
TAR_EXTENSIONS,
|
17 |
-
XZ_EXTENSIONS,
|
18 |
-
ZIP_EXTENSIONS,
|
19 |
-
)
|
20 |
-
from pip._internal.utils.misc import ensure_dir
|
21 |
-
|
22 |
-
logger = logging.getLogger(__name__)
|
23 |
-
|
24 |
-
|
25 |
-
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
|
26 |
-
|
27 |
-
try:
|
28 |
-
import bz2 # noqa
|
29 |
-
|
30 |
-
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
|
31 |
-
except ImportError:
|
32 |
-
logger.debug("bz2 module is not available")
|
33 |
-
|
34 |
-
try:
|
35 |
-
# Only for Python 3.3+
|
36 |
-
import lzma # noqa
|
37 |
-
|
38 |
-
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
|
39 |
-
except ImportError:
|
40 |
-
logger.debug("lzma module is not available")
|
41 |
-
|
42 |
-
|
43 |
-
def current_umask() -> int:
|
44 |
-
"""Get the current umask which involves having to set it temporarily."""
|
45 |
-
mask = os.umask(0)
|
46 |
-
os.umask(mask)
|
47 |
-
return mask
|
48 |
-
|
49 |
-
|
50 |
-
def split_leading_dir(path: str) -> List[str]:
|
51 |
-
path = path.lstrip("/").lstrip("\\")
|
52 |
-
if "/" in path and (
|
53 |
-
("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
|
54 |
-
):
|
55 |
-
return path.split("/", 1)
|
56 |
-
elif "\\" in path:
|
57 |
-
return path.split("\\", 1)
|
58 |
-
else:
|
59 |
-
return [path, ""]
|
60 |
-
|
61 |
-
|
62 |
-
def has_leading_dir(paths: Iterable[str]) -> bool:
|
63 |
-
"""Returns true if all the paths have the same leading path name
|
64 |
-
(i.e., everything is in one subdirectory in an archive)"""
|
65 |
-
common_prefix = None
|
66 |
-
for path in paths:
|
67 |
-
prefix, rest = split_leading_dir(path)
|
68 |
-
if not prefix:
|
69 |
-
return False
|
70 |
-
elif common_prefix is None:
|
71 |
-
common_prefix = prefix
|
72 |
-
elif prefix != common_prefix:
|
73 |
-
return False
|
74 |
-
return True
|
75 |
-
|
76 |
-
|
77 |
-
def is_within_directory(directory: str, target: str) -> bool:
|
78 |
-
"""
|
79 |
-
Return true if the absolute path of target is within the directory
|
80 |
-
"""
|
81 |
-
abs_directory = os.path.abspath(directory)
|
82 |
-
abs_target = os.path.abspath(target)
|
83 |
-
|
84 |
-
prefix = os.path.commonprefix([abs_directory, abs_target])
|
85 |
-
return prefix == abs_directory
|
86 |
-
|
87 |
-
|
88 |
-
def set_extracted_file_to_default_mode_plus_executable(path: str) -> None:
|
89 |
-
"""
|
90 |
-
Make file present at path have execute for user/group/world
|
91 |
-
(chmod +x) is no-op on windows per python docs
|
92 |
-
"""
|
93 |
-
os.chmod(path, (0o777 & ~current_umask() | 0o111))
|
94 |
-
|
95 |
-
|
96 |
-
def zip_item_is_executable(info: ZipInfo) -> bool:
|
97 |
-
mode = info.external_attr >> 16
|
98 |
-
# if mode and regular file and any execute permissions for
|
99 |
-
# user/group/world?
|
100 |
-
return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
|
101 |
-
|
102 |
-
|
103 |
-
def unzip_file(filename: str, location: str, flatten: bool = True) -> None:
|
104 |
-
"""
|
105 |
-
Unzip the file (with path `filename`) to the destination `location`. All
|
106 |
-
files are written based on system defaults and umask (i.e. permissions are
|
107 |
-
not preserved), except that regular file members with any execute
|
108 |
-
permissions (user, group, or world) have "chmod +x" applied after being
|
109 |
-
written. Note that for windows, any execute changes using os.chmod are
|
110 |
-
no-ops per the python docs.
|
111 |
-
"""
|
112 |
-
ensure_dir(location)
|
113 |
-
zipfp = open(filename, "rb")
|
114 |
-
try:
|
115 |
-
zip = zipfile.ZipFile(zipfp, allowZip64=True)
|
116 |
-
leading = has_leading_dir(zip.namelist()) and flatten
|
117 |
-
for info in zip.infolist():
|
118 |
-
name = info.filename
|
119 |
-
fn = name
|
120 |
-
if leading:
|
121 |
-
fn = split_leading_dir(name)[1]
|
122 |
-
fn = os.path.join(location, fn)
|
123 |
-
dir = os.path.dirname(fn)
|
124 |
-
if not is_within_directory(location, fn):
|
125 |
-
message = (
|
126 |
-
"The zip file ({}) has a file ({}) trying to install "
|
127 |
-
"outside target directory ({})"
|
128 |
-
)
|
129 |
-
raise InstallationError(message.format(filename, fn, location))
|
130 |
-
if fn.endswith("/") or fn.endswith("\\"):
|
131 |
-
# A directory
|
132 |
-
ensure_dir(fn)
|
133 |
-
else:
|
134 |
-
ensure_dir(dir)
|
135 |
-
# Don't use read() to avoid allocating an arbitrarily large
|
136 |
-
# chunk of memory for the file's content
|
137 |
-
fp = zip.open(name)
|
138 |
-
try:
|
139 |
-
with open(fn, "wb") as destfp:
|
140 |
-
shutil.copyfileobj(fp, destfp)
|
141 |
-
finally:
|
142 |
-
fp.close()
|
143 |
-
if zip_item_is_executable(info):
|
144 |
-
set_extracted_file_to_default_mode_plus_executable(fn)
|
145 |
-
finally:
|
146 |
-
zipfp.close()
|
147 |
-
|
148 |
-
|
149 |
-
def untar_file(filename: str, location: str) -> None:
|
150 |
-
"""
|
151 |
-
Untar the file (with path `filename`) to the destination `location`.
|
152 |
-
All files are written based on system defaults and umask (i.e. permissions
|
153 |
-
are not preserved), except that regular file members with any execute
|
154 |
-
permissions (user, group, or world) have "chmod +x" applied after being
|
155 |
-
written. Note that for windows, any execute changes using os.chmod are
|
156 |
-
no-ops per the python docs.
|
157 |
-
"""
|
158 |
-
ensure_dir(location)
|
159 |
-
if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
|
160 |
-
mode = "r:gz"
|
161 |
-
elif filename.lower().endswith(BZ2_EXTENSIONS):
|
162 |
-
mode = "r:bz2"
|
163 |
-
elif filename.lower().endswith(XZ_EXTENSIONS):
|
164 |
-
mode = "r:xz"
|
165 |
-
elif filename.lower().endswith(".tar"):
|
166 |
-
mode = "r"
|
167 |
-
else:
|
168 |
-
logger.warning(
|
169 |
-
"Cannot determine compression type for file %s",
|
170 |
-
filename,
|
171 |
-
)
|
172 |
-
mode = "r:*"
|
173 |
-
tar = tarfile.open(filename, mode, encoding="utf-8")
|
174 |
-
try:
|
175 |
-
leading = has_leading_dir([member.name for member in tar.getmembers()])
|
176 |
-
for member in tar.getmembers():
|
177 |
-
fn = member.name
|
178 |
-
if leading:
|
179 |
-
fn = split_leading_dir(fn)[1]
|
180 |
-
path = os.path.join(location, fn)
|
181 |
-
if not is_within_directory(location, path):
|
182 |
-
message = (
|
183 |
-
"The tar file ({}) has a file ({}) trying to install "
|
184 |
-
"outside target directory ({})"
|
185 |
-
)
|
186 |
-
raise InstallationError(message.format(filename, path, location))
|
187 |
-
if member.isdir():
|
188 |
-
ensure_dir(path)
|
189 |
-
elif member.issym():
|
190 |
-
try:
|
191 |
-
tar._extract_member(member, path)
|
192 |
-
except Exception as exc:
|
193 |
-
# Some corrupt tar files seem to produce this
|
194 |
-
# (specifically bad symlinks)
|
195 |
-
logger.warning(
|
196 |
-
"In the tar file %s the member %s is invalid: %s",
|
197 |
-
filename,
|
198 |
-
member.name,
|
199 |
-
exc,
|
200 |
-
)
|
201 |
-
continue
|
202 |
-
else:
|
203 |
-
try:
|
204 |
-
fp = tar.extractfile(member)
|
205 |
-
except (KeyError, AttributeError) as exc:
|
206 |
-
# Some corrupt tar files seem to produce this
|
207 |
-
# (specifically bad symlinks)
|
208 |
-
logger.warning(
|
209 |
-
"In the tar file %s the member %s is invalid: %s",
|
210 |
-
filename,
|
211 |
-
member.name,
|
212 |
-
exc,
|
213 |
-
)
|
214 |
-
continue
|
215 |
-
ensure_dir(os.path.dirname(path))
|
216 |
-
assert fp is not None
|
217 |
-
with open(path, "wb") as destfp:
|
218 |
-
shutil.copyfileobj(fp, destfp)
|
219 |
-
fp.close()
|
220 |
-
# Update the timestamp (useful for cython compiled files)
|
221 |
-
tar.utime(member, path)
|
222 |
-
# member have any execute permissions for user/group/world?
|
223 |
-
if member.mode & 0o111:
|
224 |
-
set_extracted_file_to_default_mode_plus_executable(path)
|
225 |
-
finally:
|
226 |
-
tar.close()
|
227 |
-
|
228 |
-
|
229 |
-
def unpack_file(
|
230 |
-
filename: str,
|
231 |
-
location: str,
|
232 |
-
content_type: Optional[str] = None,
|
233 |
-
) -> None:
|
234 |
-
filename = os.path.realpath(filename)
|
235 |
-
if (
|
236 |
-
content_type == "application/zip"
|
237 |
-
or filename.lower().endswith(ZIP_EXTENSIONS)
|
238 |
-
or zipfile.is_zipfile(filename)
|
239 |
-
):
|
240 |
-
unzip_file(filename, location, flatten=not filename.endswith(".whl"))
|
241 |
-
elif (
|
242 |
-
content_type == "application/x-gzip"
|
243 |
-
or tarfile.is_tarfile(filename)
|
244 |
-
or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
|
245 |
-
):
|
246 |
-
untar_file(filename, location)
|
247 |
-
else:
|
248 |
-
# FIXME: handle?
|
249 |
-
# FIXME: magic signatures?
|
250 |
-
logger.critical(
|
251 |
-
"Cannot unpack file %s (downloaded from %s, content-type: %s); "
|
252 |
-
"cannot detect archive format",
|
253 |
-
filename,
|
254 |
-
location,
|
255 |
-
content_type,
|
256 |
-
)
|
257 |
-
raise InstallationError(f"Cannot determine archive format of {location}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/certifi/__main__.py
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
|
3 |
-
from pip._vendor.certifi import contents, where
|
4 |
-
|
5 |
-
parser = argparse.ArgumentParser()
|
6 |
-
parser.add_argument("-c", "--contents", action="store_true")
|
7 |
-
args = parser.parse_args()
|
8 |
-
|
9 |
-
if args.contents:
|
10 |
-
print(contents())
|
11 |
-
else:
|
12 |
-
print(where())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BlinkDL/RWKV-World-7B/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Raven RWKV 7B
|
3 |
-
emoji: 🚀
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.23.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CCOM/README/README.md
DELETED
@@ -1,11 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: README
|
3 |
-
emoji: 🔥
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
<p>
|
10 |
-
The Central Conservatory of Music (CCOM) is a magnet for musical talents from all over the world. During its over 70 years of development, it has proudly maintained a strong team of faculty and administrative staff, including a number of outstanding specialists and scholars in music education, composition, performance and research. Many aspiring young musicians have been attracted to further their professional training at CCOM. Dozens of thousands of talented music students, including hundreds of international students, have been successfully trained. Among them, many have become internationally renowned composers, musicologists, music educators, performing artists, as well as leaders and important members in specialized art and cultural institutions.
|
11 |
-
</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/train.py
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import time
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
import utils
|
6 |
-
from torch.autograd import Variable
|
7 |
-
|
8 |
-
|
9 |
-
def instance_bce_with_logits(logits, labels):
|
10 |
-
assert logits.dim() == 2
|
11 |
-
|
12 |
-
loss = nn.functional.binary_cross_entropy_with_logits(logits, labels)
|
13 |
-
loss *= labels.size(1)
|
14 |
-
return loss
|
15 |
-
|
16 |
-
|
17 |
-
def compute_score_with_logits(logits, labels):
|
18 |
-
logits = torch.max(logits, 1)[1].data # argmax
|
19 |
-
one_hots = torch.zeros(*labels.size()).cuda()
|
20 |
-
one_hots.scatter_(1, logits.view(-1, 1), 1)
|
21 |
-
scores = (one_hots * labels)
|
22 |
-
return scores
|
23 |
-
|
24 |
-
|
25 |
-
def train(model, train_loader, eval_loader, num_epochs, output, dis_eval=False, save_last=False):
|
26 |
-
utils.create_dir(output)
|
27 |
-
optim = torch.optim.Adamax(model.parameters())
|
28 |
-
logger = utils.Logger(os.path.join(output, 'log.txt'))
|
29 |
-
best_eval_score = 0
|
30 |
-
|
31 |
-
for epoch in range(num_epochs):
|
32 |
-
total_loss = 0
|
33 |
-
train_score = 0
|
34 |
-
t = time.time()
|
35 |
-
|
36 |
-
for i, (v, b, q, a) in enumerate(train_loader):
|
37 |
-
v = Variable(v).cuda()
|
38 |
-
b = Variable(b).cuda()
|
39 |
-
q = Variable(q).cuda()
|
40 |
-
a = Variable(a).cuda()
|
41 |
-
|
42 |
-
pred = model(v, b, q, a)
|
43 |
-
loss = instance_bce_with_logits(pred, a)
|
44 |
-
loss.backward()
|
45 |
-
nn.utils.clip_grad_norm(model.parameters(), 0.25)
|
46 |
-
optim.step()
|
47 |
-
optim.zero_grad()
|
48 |
-
|
49 |
-
batch_score = compute_score_with_logits(pred, a.data).sum()
|
50 |
-
# total_loss += loss.data[0] * v.size(0)
|
51 |
-
total_loss += loss.data * v.size(0)
|
52 |
-
train_score += batch_score
|
53 |
-
|
54 |
-
total_loss /= len(train_loader.dataset)
|
55 |
-
train_score = 100 * train_score / len(train_loader.dataset)
|
56 |
-
if not dis_eval:
|
57 |
-
model.train(False)
|
58 |
-
eval_score, bound = evaluate(model, eval_loader)
|
59 |
-
model.train(True)
|
60 |
-
|
61 |
-
logger.write('epoch %d, time: %.2f' % (epoch, time.time()-t))
|
62 |
-
logger.write('\ttrain_loss: %.2f, score: %.2f' % (total_loss, train_score))
|
63 |
-
if not dis_eval:
|
64 |
-
logger.write('\teval score: %.2f (%.2f)' % (100 * eval_score, 100 * bound))
|
65 |
-
|
66 |
-
# if eval_score > best_eval_score:
|
67 |
-
# model_path = os.path.join(output, 'model.pth')
|
68 |
-
# torch.save(model.state_dict(), model_path)
|
69 |
-
# best_eval_score = eval_score
|
70 |
-
|
71 |
-
# Modified to save after every epoch with stamp
|
72 |
-
if not save_last or epoch == (num_epochs - 1):
|
73 |
-
model_path = os.path.join(output, 'model_%i.pth'%epoch)
|
74 |
-
torch.save(model.state_dict(), model_path)
|
75 |
-
|
76 |
-
|
77 |
-
def evaluate(model, dataloader):
|
78 |
-
score = 0
|
79 |
-
upper_bound = 0
|
80 |
-
num_data = 0
|
81 |
-
for v, b, q, a in iter(dataloader):
|
82 |
-
v = Variable(v).cuda()
|
83 |
-
b = Variable(b).cuda()
|
84 |
-
q = Variable(q).cuda()
|
85 |
-
pred = model(v, b, q, None)
|
86 |
-
batch_score = compute_score_with_logits(pred, a.cuda()).sum()
|
87 |
-
score += batch_score
|
88 |
-
upper_bound += (a.max(1)[0]).sum()
|
89 |
-
num_data += pred.size(0)
|
90 |
-
|
91 |
-
score = score / len(dataloader.dataset)
|
92 |
-
upper_bound = upper_bound / len(dataloader.dataset)
|
93 |
-
return score, upper_bound
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/proposal_generator/rpn_outputs.py
DELETED
@@ -1,453 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import itertools
|
3 |
-
import logging
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import torch.nn.functional as F
|
7 |
-
from fvcore.nn import smooth_l1_loss
|
8 |
-
|
9 |
-
from detectron2.layers import batched_nms, cat
|
10 |
-
from detectron2.structures import Boxes, Instances, pairwise_iou
|
11 |
-
from detectron2.utils.events import get_event_storage
|
12 |
-
from detectron2.utils.memory import retry_if_cuda_oom
|
13 |
-
|
14 |
-
from ..sampling import subsample_labels
|
15 |
-
|
16 |
-
logger = logging.getLogger(__name__)
|
17 |
-
|
18 |
-
# TODO: comments for future refactoring of this module
|
19 |
-
#
|
20 |
-
# From @rbg:
|
21 |
-
# This code involves a significant amount of tensor reshaping and permuting. Look for
|
22 |
-
# ways to simplify this.
|
23 |
-
|
24 |
-
"""
|
25 |
-
Shape shorthand in this module:
|
26 |
-
|
27 |
-
N: number of images in the minibatch
|
28 |
-
L: number of feature maps per image on which RPN is run
|
29 |
-
A: number of cell anchors (must be the same for all feature maps)
|
30 |
-
Hi, Wi: height and width of the i-th feature map
|
31 |
-
4: size of the box parameterization
|
32 |
-
|
33 |
-
Naming convention:
|
34 |
-
|
35 |
-
objectness: refers to the binary classification of an anchor as object vs. not
|
36 |
-
object.
|
37 |
-
|
38 |
-
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
|
39 |
-
transform (see :class:`box_regression.Box2BoxTransform`).
|
40 |
-
|
41 |
-
pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use
|
42 |
-
sigmoid(pred_objectness_logits) to estimate P(object).
|
43 |
-
|
44 |
-
gt_objectness_logits: ground-truth binary classification labels for objectness
|
45 |
-
|
46 |
-
pred_anchor_deltas: predicted box2box transform deltas
|
47 |
-
|
48 |
-
gt_anchor_deltas: ground-truth box2box transform deltas
|
49 |
-
"""
|
50 |
-
|
51 |
-
|
52 |
-
def find_top_rpn_proposals(
|
53 |
-
proposals,
|
54 |
-
pred_objectness_logits,
|
55 |
-
images,
|
56 |
-
nms_thresh,
|
57 |
-
pre_nms_topk,
|
58 |
-
post_nms_topk,
|
59 |
-
min_box_side_len,
|
60 |
-
training,
|
61 |
-
):
|
62 |
-
"""
|
63 |
-
For each feature map, select the `pre_nms_topk` highest scoring proposals,
|
64 |
-
apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
|
65 |
-
highest scoring proposals among all the feature maps if `training` is True,
|
66 |
-
otherwise, returns the highest `post_nms_topk` scoring proposals for each
|
67 |
-
feature map.
|
68 |
-
|
69 |
-
Args:
|
70 |
-
proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
|
71 |
-
All proposal predictions on the feature maps.
|
72 |
-
pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
|
73 |
-
images (ImageList): Input images as an :class:`ImageList`.
|
74 |
-
nms_thresh (float): IoU threshold to use for NMS
|
75 |
-
pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
|
76 |
-
When RPN is run on multiple feature maps (as in FPN) this number is per
|
77 |
-
feature map.
|
78 |
-
post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
|
79 |
-
When RPN is run on multiple feature maps (as in FPN) this number is total,
|
80 |
-
over all feature maps.
|
81 |
-
min_box_side_len (float): minimum proposal box side length in pixels (absolute units
|
82 |
-
wrt input images).
|
83 |
-
training (bool): True if proposals are to be used in training, otherwise False.
|
84 |
-
This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
|
85 |
-
comment.
|
86 |
-
|
87 |
-
Returns:
|
88 |
-
proposals (list[Instances]): list of N Instances. The i-th Instances
|
89 |
-
stores post_nms_topk object proposals for image i, sorted by their
|
90 |
-
objectness score in descending order.
|
91 |
-
"""
|
92 |
-
image_sizes = images.image_sizes # in (h, w) order
|
93 |
-
num_images = len(image_sizes)
|
94 |
-
device = proposals[0].device
|
95 |
-
|
96 |
-
# 1. Select top-k anchor for every level and every image
|
97 |
-
topk_scores = [] # #lvl Tensor, each of shape N x topk
|
98 |
-
topk_proposals = []
|
99 |
-
level_ids = [] # #lvl Tensor, each of shape (topk,)
|
100 |
-
batch_idx = torch.arange(num_images, device=device)
|
101 |
-
for level_id, proposals_i, logits_i in zip(
|
102 |
-
itertools.count(), proposals, pred_objectness_logits
|
103 |
-
):
|
104 |
-
Hi_Wi_A = logits_i.shape[1]
|
105 |
-
num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
|
106 |
-
|
107 |
-
# sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
|
108 |
-
# topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
|
109 |
-
logits_i, idx = logits_i.sort(descending=True, dim=1)
|
110 |
-
topk_scores_i = logits_i[batch_idx, :num_proposals_i]
|
111 |
-
topk_idx = idx[batch_idx, :num_proposals_i]
|
112 |
-
|
113 |
-
# each is N x topk
|
114 |
-
topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4
|
115 |
-
|
116 |
-
topk_proposals.append(topk_proposals_i)
|
117 |
-
topk_scores.append(topk_scores_i)
|
118 |
-
level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
|
119 |
-
|
120 |
-
# 2. Concat all levels together
|
121 |
-
topk_scores = cat(topk_scores, dim=1)
|
122 |
-
topk_proposals = cat(topk_proposals, dim=1)
|
123 |
-
level_ids = cat(level_ids, dim=0)
|
124 |
-
|
125 |
-
# 3. For each image, run a per-level NMS, and choose topk results.
|
126 |
-
results = []
|
127 |
-
for n, image_size in enumerate(image_sizes):
|
128 |
-
boxes = Boxes(topk_proposals[n])
|
129 |
-
scores_per_img = topk_scores[n]
|
130 |
-
lvl = level_ids
|
131 |
-
|
132 |
-
valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
|
133 |
-
if not valid_mask.all():
|
134 |
-
if training:
|
135 |
-
raise FloatingPointError(
|
136 |
-
"Predicted boxes or scores contain Inf/NaN. Training has diverged."
|
137 |
-
)
|
138 |
-
boxes = boxes[valid_mask]
|
139 |
-
scores_per_img = scores_per_img[valid_mask]
|
140 |
-
lvl = lvl[valid_mask]
|
141 |
-
boxes.clip(image_size)
|
142 |
-
|
143 |
-
# filter empty boxes
|
144 |
-
keep = boxes.nonempty(threshold=min_box_side_len)
|
145 |
-
if keep.sum().item() != len(boxes):
|
146 |
-
boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
|
147 |
-
|
148 |
-
keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
|
149 |
-
# In Detectron1, there was different behavior during training vs. testing.
|
150 |
-
# (https://github.com/facebookresearch/Detectron/issues/459)
|
151 |
-
# During training, topk is over the proposals from *all* images in the training batch.
|
152 |
-
# During testing, it is over the proposals for each image separately.
|
153 |
-
# As a result, the training behavior becomes batch-dependent,
|
154 |
-
# and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
|
155 |
-
# This bug is addressed in Detectron2 to make the behavior independent of batch size.
|
156 |
-
keep = keep[:post_nms_topk] # keep is already sorted
|
157 |
-
|
158 |
-
res = Instances(image_size)
|
159 |
-
res.proposal_boxes = boxes[keep]
|
160 |
-
res.objectness_logits = scores_per_img[keep]
|
161 |
-
results.append(res)
|
162 |
-
return results
|
163 |
-
|
164 |
-
|
165 |
-
def rpn_losses(
|
166 |
-
gt_objectness_logits,
|
167 |
-
gt_anchor_deltas,
|
168 |
-
pred_objectness_logits,
|
169 |
-
pred_anchor_deltas,
|
170 |
-
smooth_l1_beta,
|
171 |
-
):
|
172 |
-
"""
|
173 |
-
Args:
|
174 |
-
gt_objectness_logits (Tensor): shape (N,), each element in {-1, 0, 1} representing
|
175 |
-
ground-truth objectness labels with: -1 = ignore; 0 = not object; 1 = object.
|
176 |
-
gt_anchor_deltas (Tensor): shape (N, box_dim), row i represents ground-truth
|
177 |
-
box2box transform targets (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map anchor i to
|
178 |
-
its matched ground-truth box.
|
179 |
-
pred_objectness_logits (Tensor): shape (N,), each element is a predicted objectness
|
180 |
-
logit.
|
181 |
-
pred_anchor_deltas (Tensor): shape (N, box_dim), each row is a predicted box2box
|
182 |
-
transform (dx, dy, dw, dh) or (dx, dy, dw, dh, da)
|
183 |
-
smooth_l1_beta (float): The transition point between L1 and L2 loss in
|
184 |
-
the smooth L1 loss function. When set to 0, the loss becomes L1. When
|
185 |
-
set to +inf, the loss becomes constant 0.
|
186 |
-
|
187 |
-
Returns:
|
188 |
-
objectness_loss, localization_loss, both unnormalized (summed over samples).
|
189 |
-
"""
|
190 |
-
pos_masks = gt_objectness_logits == 1
|
191 |
-
localization_loss = smooth_l1_loss(
|
192 |
-
pred_anchor_deltas[pos_masks], gt_anchor_deltas[pos_masks], smooth_l1_beta, reduction="sum"
|
193 |
-
)
|
194 |
-
|
195 |
-
valid_masks = gt_objectness_logits >= 0
|
196 |
-
objectness_loss = F.binary_cross_entropy_with_logits(
|
197 |
-
pred_objectness_logits[valid_masks],
|
198 |
-
gt_objectness_logits[valid_masks].to(torch.float32),
|
199 |
-
reduction="sum",
|
200 |
-
)
|
201 |
-
return objectness_loss, localization_loss
|
202 |
-
|
203 |
-
|
204 |
-
class RPNOutputs(object):
|
205 |
-
def __init__(
|
206 |
-
self,
|
207 |
-
box2box_transform,
|
208 |
-
anchor_matcher,
|
209 |
-
batch_size_per_image,
|
210 |
-
positive_fraction,
|
211 |
-
images,
|
212 |
-
pred_objectness_logits,
|
213 |
-
pred_anchor_deltas,
|
214 |
-
anchors,
|
215 |
-
boundary_threshold=0,
|
216 |
-
gt_boxes=None,
|
217 |
-
smooth_l1_beta=0.0,
|
218 |
-
):
|
219 |
-
"""
|
220 |
-
Args:
|
221 |
-
box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for
|
222 |
-
anchor-proposal transformations.
|
223 |
-
anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to
|
224 |
-
ground-truth boxes; used to determine training labels.
|
225 |
-
batch_size_per_image (int): number of proposals to sample when training
|
226 |
-
positive_fraction (float): target fraction of sampled proposals that should be positive
|
227 |
-
images (ImageList): :class:`ImageList` instance representing N input images
|
228 |
-
pred_objectness_logits (list[Tensor]): A list of L elements.
|
229 |
-
Element i is a tensor of shape (N, A, Hi, Wi) representing
|
230 |
-
the predicted objectness logits for anchors.
|
231 |
-
pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
|
232 |
-
(N, A*4, Hi, Wi) representing the predicted "deltas" used to transform anchors
|
233 |
-
to proposals.
|
234 |
-
anchors (list[list[Boxes]]): A list of N elements. Each element is a list of L
|
235 |
-
Boxes. The Boxes at (n, l) stores the entire anchor array for feature map l in image
|
236 |
-
n (i.e. the cell anchors repeated over all locations in feature map (n, l)).
|
237 |
-
boundary_threshold (int): if >= 0, then anchors that extend beyond the image
|
238 |
-
boundary by more than boundary_thresh are not used in training. Set to a very large
|
239 |
-
number or < 0 to disable this behavior. Only needed in training.
|
240 |
-
gt_boxes (list[Boxes], optional): A list of N elements. Element i a Boxes storing
|
241 |
-
the ground-truth ("gt") boxes for image i.
|
242 |
-
smooth_l1_beta (float): The transition point between L1 and L2 loss in
|
243 |
-
the smooth L1 loss function. When set to 0, the loss becomes L1. When
|
244 |
-
set to +inf, the loss becomes constant 0.
|
245 |
-
"""
|
246 |
-
self.box2box_transform = box2box_transform
|
247 |
-
self.anchor_matcher = anchor_matcher
|
248 |
-
self.batch_size_per_image = batch_size_per_image
|
249 |
-
self.positive_fraction = positive_fraction
|
250 |
-
self.pred_objectness_logits = pred_objectness_logits
|
251 |
-
self.pred_anchor_deltas = pred_anchor_deltas
|
252 |
-
|
253 |
-
self.anchors = anchors
|
254 |
-
self.gt_boxes = gt_boxes
|
255 |
-
self.num_feature_maps = len(pred_objectness_logits)
|
256 |
-
self.num_images = len(images)
|
257 |
-
self.image_sizes = images.image_sizes
|
258 |
-
self.boundary_threshold = boundary_threshold
|
259 |
-
self.smooth_l1_beta = smooth_l1_beta
|
260 |
-
|
261 |
-
def _get_ground_truth(self):
|
262 |
-
"""
|
263 |
-
Returns:
|
264 |
-
gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
|
265 |
-
total number of anchors in image i (i.e., len(anchors[i])). Label values are
|
266 |
-
in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
|
267 |
-
gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 4).
|
268 |
-
"""
|
269 |
-
gt_objectness_logits = []
|
270 |
-
gt_anchor_deltas = []
|
271 |
-
# Concatenate anchors from all feature maps into a single Boxes per image
|
272 |
-
anchors = [Boxes.cat(anchors_i) for anchors_i in self.anchors]
|
273 |
-
for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes):
|
274 |
-
"""
|
275 |
-
image_size_i: (h, w) for the i-th image
|
276 |
-
anchors_i: anchors for i-th image
|
277 |
-
gt_boxes_i: ground-truth boxes for i-th image
|
278 |
-
"""
|
279 |
-
match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors_i)
|
280 |
-
matched_idxs, gt_objectness_logits_i = retry_if_cuda_oom(self.anchor_matcher)(
|
281 |
-
match_quality_matrix
|
282 |
-
)
|
283 |
-
# Matching is memory-expensive and may result in CPU tensors. But the result is small
|
284 |
-
gt_objectness_logits_i = gt_objectness_logits_i.to(device=gt_boxes_i.device)
|
285 |
-
del match_quality_matrix
|
286 |
-
|
287 |
-
if self.boundary_threshold >= 0:
|
288 |
-
# Discard anchors that go out of the boundaries of the image
|
289 |
-
# NOTE: This is legacy functionality that is turned off by default in Detectron2
|
290 |
-
anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold)
|
291 |
-
gt_objectness_logits_i[~anchors_inside_image] = -1
|
292 |
-
|
293 |
-
if len(gt_boxes_i) == 0:
|
294 |
-
# These values won't be used anyway since the anchor is labeled as background
|
295 |
-
gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor)
|
296 |
-
else:
|
297 |
-
# TODO wasted computation for ignored boxes
|
298 |
-
matched_gt_boxes = gt_boxes_i[matched_idxs]
|
299 |
-
gt_anchor_deltas_i = self.box2box_transform.get_deltas(
|
300 |
-
anchors_i.tensor, matched_gt_boxes.tensor
|
301 |
-
)
|
302 |
-
|
303 |
-
gt_objectness_logits.append(gt_objectness_logits_i)
|
304 |
-
gt_anchor_deltas.append(gt_anchor_deltas_i)
|
305 |
-
|
306 |
-
return gt_objectness_logits, gt_anchor_deltas
|
307 |
-
|
308 |
-
def losses(self):
|
309 |
-
"""
|
310 |
-
Return the losses from a set of RPN predictions and their associated ground-truth.
|
311 |
-
|
312 |
-
Returns:
|
313 |
-
dict[loss name -> loss value]: A dict mapping from loss name to loss value.
|
314 |
-
Loss names are: `loss_rpn_cls` for objectness classification and
|
315 |
-
`loss_rpn_loc` for proposal localization.
|
316 |
-
"""
|
317 |
-
|
318 |
-
def resample(label):
|
319 |
-
"""
|
320 |
-
Randomly sample a subset of positive and negative examples by overwriting
|
321 |
-
the label vector to the ignore value (-1) for all elements that are not
|
322 |
-
included in the sample.
|
323 |
-
"""
|
324 |
-
pos_idx, neg_idx = subsample_labels(
|
325 |
-
label, self.batch_size_per_image, self.positive_fraction, 0
|
326 |
-
)
|
327 |
-
# Fill with the ignore label (-1), then set positive and negative labels
|
328 |
-
label.fill_(-1)
|
329 |
-
label.scatter_(0, pos_idx, 1)
|
330 |
-
label.scatter_(0, neg_idx, 0)
|
331 |
-
return label
|
332 |
-
|
333 |
-
gt_objectness_logits, gt_anchor_deltas = self._get_ground_truth()
|
334 |
-
"""
|
335 |
-
gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
|
336 |
-
total number of anchors in image i (i.e., len(anchors[i]))
|
337 |
-
gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), B),
|
338 |
-
where B is the box dimension
|
339 |
-
"""
|
340 |
-
# Collect all objectness labels and delta targets over feature maps and images
|
341 |
-
# The final ordering is L, N, H, W, A from slowest to fastest axis.
|
342 |
-
num_anchors_per_map = [np.prod(x.shape[1:]) for x in self.pred_objectness_logits]
|
343 |
-
num_anchors_per_image = sum(num_anchors_per_map)
|
344 |
-
|
345 |
-
# Stack to: (N, num_anchors_per_image)
|
346 |
-
gt_objectness_logits = torch.stack(
|
347 |
-
[resample(label) for label in gt_objectness_logits], dim=0
|
348 |
-
)
|
349 |
-
|
350 |
-
# Log the number of positive/negative anchors per-image that's used in training
|
351 |
-
num_pos_anchors = (gt_objectness_logits == 1).sum().item()
|
352 |
-
num_neg_anchors = (gt_objectness_logits == 0).sum().item()
|
353 |
-
storage = get_event_storage()
|
354 |
-
storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images)
|
355 |
-
storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images)
|
356 |
-
|
357 |
-
assert gt_objectness_logits.shape[1] == num_anchors_per_image
|
358 |
-
# Split to tuple of L tensors, each with shape (N, num_anchors_per_map)
|
359 |
-
gt_objectness_logits = torch.split(gt_objectness_logits, num_anchors_per_map, dim=1)
|
360 |
-
# Concat from all feature maps
|
361 |
-
gt_objectness_logits = cat([x.flatten() for x in gt_objectness_logits], dim=0)
|
362 |
-
|
363 |
-
# Stack to: (N, num_anchors_per_image, B)
|
364 |
-
gt_anchor_deltas = torch.stack(gt_anchor_deltas, dim=0)
|
365 |
-
assert gt_anchor_deltas.shape[1] == num_anchors_per_image
|
366 |
-
B = gt_anchor_deltas.shape[2] # box dimension (4 or 5)
|
367 |
-
|
368 |
-
# Split to tuple of L tensors, each with shape (N, num_anchors_per_image)
|
369 |
-
gt_anchor_deltas = torch.split(gt_anchor_deltas, num_anchors_per_map, dim=1)
|
370 |
-
# Concat from all feature maps
|
371 |
-
gt_anchor_deltas = cat([x.reshape(-1, B) for x in gt_anchor_deltas], dim=0)
|
372 |
-
|
373 |
-
# Collect all objectness logits and delta predictions over feature maps
|
374 |
-
# and images to arrive at the same shape as the labels and targets
|
375 |
-
# The final ordering is L, N, H, W, A from slowest to fastest axis.
|
376 |
-
pred_objectness_logits = cat(
|
377 |
-
[
|
378 |
-
# Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N*Hi*Wi*A, )
|
379 |
-
x.permute(0, 2, 3, 1).flatten()
|
380 |
-
for x in self.pred_objectness_logits
|
381 |
-
],
|
382 |
-
dim=0,
|
383 |
-
)
|
384 |
-
pred_anchor_deltas = cat(
|
385 |
-
[
|
386 |
-
# Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B)
|
387 |
-
# -> (N*Hi*Wi*A, B)
|
388 |
-
x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1])
|
389 |
-
.permute(0, 3, 4, 1, 2)
|
390 |
-
.reshape(-1, B)
|
391 |
-
for x in self.pred_anchor_deltas
|
392 |
-
],
|
393 |
-
dim=0,
|
394 |
-
)
|
395 |
-
|
396 |
-
objectness_loss, localization_loss = rpn_losses(
|
397 |
-
gt_objectness_logits,
|
398 |
-
gt_anchor_deltas,
|
399 |
-
pred_objectness_logits,
|
400 |
-
pred_anchor_deltas,
|
401 |
-
self.smooth_l1_beta,
|
402 |
-
)
|
403 |
-
normalizer = 1.0 / (self.batch_size_per_image * self.num_images)
|
404 |
-
loss_cls = objectness_loss * normalizer # cls: classification loss
|
405 |
-
loss_loc = localization_loss * normalizer # loc: localization loss
|
406 |
-
losses = {"loss_rpn_cls": loss_cls, "loss_rpn_loc": loss_loc}
|
407 |
-
|
408 |
-
return losses
|
409 |
-
|
410 |
-
def predict_proposals(self):
|
411 |
-
"""
|
412 |
-
Transform anchors into proposals by applying the predicted anchor deltas.
|
413 |
-
|
414 |
-
Returns:
|
415 |
-
proposals (list[Tensor]): A list of L tensors. Tensor i has shape
|
416 |
-
(N, Hi*Wi*A, B), where B is box dimension (4 or 5).
|
417 |
-
"""
|
418 |
-
proposals = []
|
419 |
-
# Transpose anchors from images-by-feature-maps (N, L) to feature-maps-by-images (L, N)
|
420 |
-
anchors = list(zip(*self.anchors))
|
421 |
-
# For each feature map
|
422 |
-
for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
|
423 |
-
B = anchors_i[0].tensor.size(1)
|
424 |
-
N, _, Hi, Wi = pred_anchor_deltas_i.shape
|
425 |
-
# Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N*Hi*Wi*A, B)
|
426 |
-
pred_anchor_deltas_i = (
|
427 |
-
pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B)
|
428 |
-
)
|
429 |
-
# Concatenate all anchors to shape (N*Hi*Wi*A, B)
|
430 |
-
# type(anchors_i[0]) is Boxes (B = 4) or RotatedBoxes (B = 5)
|
431 |
-
anchors_i = type(anchors_i[0]).cat(anchors_i)
|
432 |
-
proposals_i = self.box2box_transform.apply_deltas(
|
433 |
-
pred_anchor_deltas_i, anchors_i.tensor
|
434 |
-
)
|
435 |
-
# Append feature map proposals with shape (N, Hi*Wi*A, B)
|
436 |
-
proposals.append(proposals_i.view(N, -1, B))
|
437 |
-
return proposals
|
438 |
-
|
439 |
-
def predict_objectness_logits(self):
|
440 |
-
"""
|
441 |
-
Return objectness logits in the same format as the proposals returned by
|
442 |
-
:meth:`predict_proposals`.
|
443 |
-
|
444 |
-
Returns:
|
445 |
-
pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape
|
446 |
-
(N, Hi*Wi*A).
|
447 |
-
"""
|
448 |
-
pred_objectness_logits = [
|
449 |
-
# Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
|
450 |
-
score.permute(0, 2, 3, 1).reshape(self.num_images, -1)
|
451 |
-
for score in self.pred_objectness_logits
|
452 |
-
]
|
453 |
-
return pred_objectness_logits
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/find.h
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a fill of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
// the purpose of this header is to #include the find.h header
|
22 |
-
// of the sequential, host, and device systems. It should be #included in any
|
23 |
-
// code which uses adl to dispatch find
|
24 |
-
|
25 |
-
#include <thrust/system/detail/sequential/find.h>
|
26 |
-
|
27 |
-
// SCons can't see through the #defines below to figure out what this header
|
28 |
-
// includes, so we fake it out by specifying all possible files we might end up
|
29 |
-
// including inside an #if 0.
|
30 |
-
#if 0
|
31 |
-
#include <thrust/system/cpp/detail/find.h>
|
32 |
-
#include <thrust/system/cuda/detail/find.h>
|
33 |
-
#include <thrust/system/omp/detail/find.h>
|
34 |
-
#include <thrust/system/tbb/detail/find.h>
|
35 |
-
#endif
|
36 |
-
|
37 |
-
#define __THRUST_HOST_SYSTEM_FIND_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/find.h>
|
38 |
-
#include __THRUST_HOST_SYSTEM_FIND_HEADER
|
39 |
-
#undef __THRUST_HOST_SYSTEM_FIND_HEADER
|
40 |
-
|
41 |
-
#define __THRUST_DEVICE_SYSTEM_FIND_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/find.h>
|
42 |
-
#include __THRUST_DEVICE_SYSTEM_FIND_HEADER
|
43 |
-
#undef __THRUST_DEVICE_SYSTEM_FIND_HEADER
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/tag.h
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
|
18 |
-
/*! \file generic/tag.h
|
19 |
-
* \brief Implementation of the generic backend's tag.
|
20 |
-
*/
|
21 |
-
|
22 |
-
#pragma once
|
23 |
-
|
24 |
-
#include <thrust/detail/config.h>
|
25 |
-
|
26 |
-
namespace thrust
|
27 |
-
{
|
28 |
-
namespace system
|
29 |
-
{
|
30 |
-
namespace detail
|
31 |
-
{
|
32 |
-
namespace generic
|
33 |
-
{
|
34 |
-
|
35 |
-
// tag exists only to make the generic entry points the least priority match
|
36 |
-
// during ADL. tag should not be derived from and is constructible from anything
|
37 |
-
struct tag
|
38 |
-
{
|
39 |
-
template<typename T>
|
40 |
-
__host__ __device__ inline
|
41 |
-
tag(const T &) {}
|
42 |
-
};
|
43 |
-
|
44 |
-
} // end generic
|
45 |
-
} // end detail
|
46 |
-
} // end system
|
47 |
-
} // end thrust
|
48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CarlDennis/HYTTS/models.py
DELETED
@@ -1,498 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import Conv1d, ConvTranspose1d, Conv2d
|
6 |
-
from torch.nn import functional as F
|
7 |
-
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
|
8 |
-
|
9 |
-
import attentions
|
10 |
-
import commons
|
11 |
-
import modules
|
12 |
-
from commons import init_weights, get_padding
|
13 |
-
|
14 |
-
|
15 |
-
class StochasticDurationPredictor(nn.Module):
|
16 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
|
17 |
-
super().__init__()
|
18 |
-
filter_channels = in_channels # it needs to be removed from future version.
|
19 |
-
self.in_channels = in_channels
|
20 |
-
self.filter_channels = filter_channels
|
21 |
-
self.kernel_size = kernel_size
|
22 |
-
self.p_dropout = p_dropout
|
23 |
-
self.n_flows = n_flows
|
24 |
-
self.gin_channels = gin_channels
|
25 |
-
|
26 |
-
self.log_flow = modules.Log()
|
27 |
-
self.flows = nn.ModuleList()
|
28 |
-
self.flows.append(modules.ElementwiseAffine(2))
|
29 |
-
for i in range(n_flows):
|
30 |
-
self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
31 |
-
self.flows.append(modules.Flip())
|
32 |
-
|
33 |
-
self.post_pre = nn.Conv1d(1, filter_channels, 1)
|
34 |
-
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
35 |
-
self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
36 |
-
self.post_flows = nn.ModuleList()
|
37 |
-
self.post_flows.append(modules.ElementwiseAffine(2))
|
38 |
-
for i in range(4):
|
39 |
-
self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
|
40 |
-
self.post_flows.append(modules.Flip())
|
41 |
-
|
42 |
-
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
|
43 |
-
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
|
44 |
-
self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
|
45 |
-
if gin_channels != 0:
|
46 |
-
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
|
47 |
-
|
48 |
-
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
|
49 |
-
x = torch.detach(x)
|
50 |
-
x = self.pre(x)
|
51 |
-
if g is not None:
|
52 |
-
g = torch.detach(g)
|
53 |
-
x = x + self.cond(g)
|
54 |
-
x = self.convs(x, x_mask)
|
55 |
-
x = self.proj(x) * x_mask
|
56 |
-
|
57 |
-
if not reverse:
|
58 |
-
flows = self.flows
|
59 |
-
assert w is not None
|
60 |
-
|
61 |
-
logdet_tot_q = 0
|
62 |
-
h_w = self.post_pre(w)
|
63 |
-
h_w = self.post_convs(h_w, x_mask)
|
64 |
-
h_w = self.post_proj(h_w) * x_mask
|
65 |
-
e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
|
66 |
-
z_q = e_q
|
67 |
-
for flow in self.post_flows:
|
68 |
-
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
|
69 |
-
logdet_tot_q += logdet_q
|
70 |
-
z_u, z1 = torch.split(z_q, [1, 1], 1)
|
71 |
-
u = torch.sigmoid(z_u) * x_mask
|
72 |
-
z0 = (w - u) * x_mask
|
73 |
-
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
|
74 |
-
logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
|
75 |
-
|
76 |
-
logdet_tot = 0
|
77 |
-
z0, logdet = self.log_flow(z0, x_mask)
|
78 |
-
logdet_tot += logdet
|
79 |
-
z = torch.cat([z0, z1], 1)
|
80 |
-
for flow in flows:
|
81 |
-
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
|
82 |
-
logdet_tot = logdet_tot + logdet
|
83 |
-
nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
|
84 |
-
return nll + logq # [b]
|
85 |
-
else:
|
86 |
-
flows = list(reversed(self.flows))
|
87 |
-
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
|
88 |
-
z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
|
89 |
-
for flow in flows:
|
90 |
-
z = flow(z, x_mask, g=x, reverse=reverse)
|
91 |
-
z0, z1 = torch.split(z, [1, 1], 1)
|
92 |
-
logw = z0
|
93 |
-
return logw
|
94 |
-
|
95 |
-
|
96 |
-
class DurationPredictor(nn.Module):
|
97 |
-
def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
|
98 |
-
super().__init__()
|
99 |
-
|
100 |
-
self.in_channels = in_channels
|
101 |
-
self.filter_channels = filter_channels
|
102 |
-
self.kernel_size = kernel_size
|
103 |
-
self.p_dropout = p_dropout
|
104 |
-
self.gin_channels = gin_channels
|
105 |
-
|
106 |
-
self.drop = nn.Dropout(p_dropout)
|
107 |
-
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
108 |
-
self.norm_1 = modules.LayerNorm(filter_channels)
|
109 |
-
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
|
110 |
-
self.norm_2 = modules.LayerNorm(filter_channels)
|
111 |
-
self.proj = nn.Conv1d(filter_channels, 1, 1)
|
112 |
-
|
113 |
-
if gin_channels != 0:
|
114 |
-
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
|
115 |
-
|
116 |
-
def forward(self, x, x_mask, g=None):
|
117 |
-
x = torch.detach(x)
|
118 |
-
if g is not None:
|
119 |
-
g = torch.detach(g)
|
120 |
-
x = x + self.cond(g)
|
121 |
-
x = self.conv_1(x * x_mask)
|
122 |
-
x = torch.relu(x)
|
123 |
-
x = self.norm_1(x)
|
124 |
-
x = self.drop(x)
|
125 |
-
x = self.conv_2(x * x_mask)
|
126 |
-
x = torch.relu(x)
|
127 |
-
x = self.norm_2(x)
|
128 |
-
x = self.drop(x)
|
129 |
-
x = self.proj(x * x_mask)
|
130 |
-
return x * x_mask
|
131 |
-
|
132 |
-
|
133 |
-
class TextEncoder(nn.Module):
|
134 |
-
def __init__(self,
|
135 |
-
n_vocab,
|
136 |
-
out_channels,
|
137 |
-
hidden_channels,
|
138 |
-
filter_channels,
|
139 |
-
n_heads,
|
140 |
-
n_layers,
|
141 |
-
kernel_size,
|
142 |
-
p_dropout):
|
143 |
-
super().__init__()
|
144 |
-
self.n_vocab = n_vocab
|
145 |
-
self.out_channels = out_channels
|
146 |
-
self.hidden_channels = hidden_channels
|
147 |
-
self.filter_channels = filter_channels
|
148 |
-
self.n_heads = n_heads
|
149 |
-
self.n_layers = n_layers
|
150 |
-
self.kernel_size = kernel_size
|
151 |
-
self.p_dropout = p_dropout
|
152 |
-
|
153 |
-
if self.n_vocab != 0:
|
154 |
-
self.emb = nn.Embedding(n_vocab, hidden_channels)
|
155 |
-
nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
|
156 |
-
|
157 |
-
self.encoder = attentions.Encoder(
|
158 |
-
hidden_channels,
|
159 |
-
filter_channels,
|
160 |
-
n_heads,
|
161 |
-
n_layers,
|
162 |
-
kernel_size,
|
163 |
-
p_dropout)
|
164 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
165 |
-
|
166 |
-
def forward(self, x, x_lengths):
|
167 |
-
if self.n_vocab != 0:
|
168 |
-
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
|
169 |
-
x = torch.transpose(x, 1, -1) # [b, h, t]
|
170 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
171 |
-
|
172 |
-
x = self.encoder(x * x_mask, x_mask)
|
173 |
-
stats = self.proj(x) * x_mask
|
174 |
-
|
175 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
176 |
-
return x, m, logs, x_mask
|
177 |
-
|
178 |
-
|
179 |
-
class ResidualCouplingBlock(nn.Module):
|
180 |
-
def __init__(self,
|
181 |
-
channels,
|
182 |
-
hidden_channels,
|
183 |
-
kernel_size,
|
184 |
-
dilation_rate,
|
185 |
-
n_layers,
|
186 |
-
n_flows=4,
|
187 |
-
gin_channels=0):
|
188 |
-
super().__init__()
|
189 |
-
self.channels = channels
|
190 |
-
self.hidden_channels = hidden_channels
|
191 |
-
self.kernel_size = kernel_size
|
192 |
-
self.dilation_rate = dilation_rate
|
193 |
-
self.n_layers = n_layers
|
194 |
-
self.n_flows = n_flows
|
195 |
-
self.gin_channels = gin_channels
|
196 |
-
|
197 |
-
self.flows = nn.ModuleList()
|
198 |
-
for i in range(n_flows):
|
199 |
-
self.flows.append(
|
200 |
-
modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
|
201 |
-
gin_channels=gin_channels, mean_only=True))
|
202 |
-
self.flows.append(modules.Flip())
|
203 |
-
|
204 |
-
def forward(self, x, x_mask, g=None, reverse=False):
|
205 |
-
if not reverse:
|
206 |
-
for flow in self.flows:
|
207 |
-
x, _ = flow(x, x_mask, g=g, reverse=reverse)
|
208 |
-
else:
|
209 |
-
for flow in reversed(self.flows):
|
210 |
-
x = flow(x, x_mask, g=g, reverse=reverse)
|
211 |
-
return x
|
212 |
-
|
213 |
-
|
214 |
-
class PosteriorEncoder(nn.Module):
|
215 |
-
def __init__(self,
|
216 |
-
in_channels,
|
217 |
-
out_channels,
|
218 |
-
hidden_channels,
|
219 |
-
kernel_size,
|
220 |
-
dilation_rate,
|
221 |
-
n_layers,
|
222 |
-
gin_channels=0):
|
223 |
-
super().__init__()
|
224 |
-
self.in_channels = in_channels
|
225 |
-
self.out_channels = out_channels
|
226 |
-
self.hidden_channels = hidden_channels
|
227 |
-
self.kernel_size = kernel_size
|
228 |
-
self.dilation_rate = dilation_rate
|
229 |
-
self.n_layers = n_layers
|
230 |
-
self.gin_channels = gin_channels
|
231 |
-
|
232 |
-
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
|
233 |
-
self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
|
234 |
-
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
|
235 |
-
|
236 |
-
def forward(self, x, x_lengths, g=None):
|
237 |
-
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
|
238 |
-
x = self.pre(x) * x_mask
|
239 |
-
x = self.enc(x, x_mask, g=g)
|
240 |
-
stats = self.proj(x) * x_mask
|
241 |
-
m, logs = torch.split(stats, self.out_channels, dim=1)
|
242 |
-
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
|
243 |
-
return z, m, logs, x_mask
|
244 |
-
|
245 |
-
|
246 |
-
class Generator(torch.nn.Module):
|
247 |
-
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
|
248 |
-
upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
|
249 |
-
super(Generator, self).__init__()
|
250 |
-
self.num_kernels = len(resblock_kernel_sizes)
|
251 |
-
self.num_upsamples = len(upsample_rates)
|
252 |
-
self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
|
253 |
-
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
|
254 |
-
|
255 |
-
self.ups = nn.ModuleList()
|
256 |
-
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
257 |
-
self.ups.append(weight_norm(
|
258 |
-
ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
|
259 |
-
k, u, padding=(k - u) // 2)))
|
260 |
-
|
261 |
-
self.resblocks = nn.ModuleList()
|
262 |
-
for i in range(len(self.ups)):
|
263 |
-
ch = upsample_initial_channel // (2 ** (i + 1))
|
264 |
-
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
|
265 |
-
self.resblocks.append(resblock(ch, k, d))
|
266 |
-
|
267 |
-
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
268 |
-
self.ups.apply(init_weights)
|
269 |
-
|
270 |
-
if gin_channels != 0:
|
271 |
-
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
|
272 |
-
|
273 |
-
def forward(self, x, g=None):
|
274 |
-
x = self.conv_pre(x)
|
275 |
-
if g is not None:
|
276 |
-
x = x + self.cond(g)
|
277 |
-
|
278 |
-
for i in range(self.num_upsamples):
|
279 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
280 |
-
x = self.ups[i](x)
|
281 |
-
xs = None
|
282 |
-
for j in range(self.num_kernels):
|
283 |
-
if xs is None:
|
284 |
-
xs = self.resblocks[i * self.num_kernels + j](x)
|
285 |
-
else:
|
286 |
-
xs += self.resblocks[i * self.num_kernels + j](x)
|
287 |
-
x = xs / self.num_kernels
|
288 |
-
x = F.leaky_relu(x)
|
289 |
-
x = self.conv_post(x)
|
290 |
-
x = torch.tanh(x)
|
291 |
-
|
292 |
-
return x
|
293 |
-
|
294 |
-
def remove_weight_norm(self):
|
295 |
-
print('Removing weight norm...')
|
296 |
-
for l in self.ups:
|
297 |
-
remove_weight_norm(l)
|
298 |
-
for l in self.resblocks:
|
299 |
-
l.remove_weight_norm()
|
300 |
-
|
301 |
-
|
302 |
-
class DiscriminatorP(torch.nn.Module):
|
303 |
-
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
|
304 |
-
super(DiscriminatorP, self).__init__()
|
305 |
-
self.period = period
|
306 |
-
self.use_spectral_norm = use_spectral_norm
|
307 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
308 |
-
self.convs = nn.ModuleList([
|
309 |
-
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
310 |
-
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
311 |
-
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
312 |
-
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
|
313 |
-
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
|
314 |
-
])
|
315 |
-
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
|
316 |
-
|
317 |
-
def forward(self, x):
|
318 |
-
fmap = []
|
319 |
-
|
320 |
-
# 1d to 2d
|
321 |
-
b, c, t = x.shape
|
322 |
-
if t % self.period != 0: # pad first
|
323 |
-
n_pad = self.period - (t % self.period)
|
324 |
-
x = F.pad(x, (0, n_pad), "reflect")
|
325 |
-
t = t + n_pad
|
326 |
-
x = x.view(b, c, t // self.period, self.period)
|
327 |
-
|
328 |
-
for l in self.convs:
|
329 |
-
x = l(x)
|
330 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
331 |
-
fmap.append(x)
|
332 |
-
x = self.conv_post(x)
|
333 |
-
fmap.append(x)
|
334 |
-
x = torch.flatten(x, 1, -1)
|
335 |
-
|
336 |
-
return x, fmap
|
337 |
-
|
338 |
-
|
339 |
-
class DiscriminatorS(torch.nn.Module):
|
340 |
-
def __init__(self, use_spectral_norm=False):
|
341 |
-
super(DiscriminatorS, self).__init__()
|
342 |
-
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
|
343 |
-
self.convs = nn.ModuleList([
|
344 |
-
norm_f(Conv1d(1, 16, 15, 1, padding=7)),
|
345 |
-
norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
|
346 |
-
norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
|
347 |
-
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
|
348 |
-
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
|
349 |
-
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
|
350 |
-
])
|
351 |
-
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
|
352 |
-
|
353 |
-
def forward(self, x):
|
354 |
-
fmap = []
|
355 |
-
|
356 |
-
for l in self.convs:
|
357 |
-
x = l(x)
|
358 |
-
x = F.leaky_relu(x, modules.LRELU_SLOPE)
|
359 |
-
fmap.append(x)
|
360 |
-
x = self.conv_post(x)
|
361 |
-
fmap.append(x)
|
362 |
-
x = torch.flatten(x, 1, -1)
|
363 |
-
|
364 |
-
return x, fmap
|
365 |
-
|
366 |
-
|
367 |
-
class MultiPeriodDiscriminator(torch.nn.Module):
|
368 |
-
def __init__(self, use_spectral_norm=False):
|
369 |
-
super(MultiPeriodDiscriminator, self).__init__()
|
370 |
-
periods = [2, 3, 5, 7, 11]
|
371 |
-
|
372 |
-
discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
|
373 |
-
discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
|
374 |
-
self.discriminators = nn.ModuleList(discs)
|
375 |
-
|
376 |
-
def forward(self, y, y_hat):
|
377 |
-
y_d_rs = []
|
378 |
-
y_d_gs = []
|
379 |
-
fmap_rs = []
|
380 |
-
fmap_gs = []
|
381 |
-
for i, d in enumerate(self.discriminators):
|
382 |
-
y_d_r, fmap_r = d(y)
|
383 |
-
y_d_g, fmap_g = d(y_hat)
|
384 |
-
y_d_rs.append(y_d_r)
|
385 |
-
y_d_gs.append(y_d_g)
|
386 |
-
fmap_rs.append(fmap_r)
|
387 |
-
fmap_gs.append(fmap_g)
|
388 |
-
|
389 |
-
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
|
390 |
-
|
391 |
-
|
392 |
-
class SynthesizerTrn(nn.Module):
|
393 |
-
"""
|
394 |
-
Synthesizer for Training
|
395 |
-
"""
|
396 |
-
|
397 |
-
def __init__(self,
|
398 |
-
n_vocab,
|
399 |
-
spec_channels,
|
400 |
-
segment_size,
|
401 |
-
inter_channels,
|
402 |
-
hidden_channels,
|
403 |
-
filter_channels,
|
404 |
-
n_heads,
|
405 |
-
n_layers,
|
406 |
-
kernel_size,
|
407 |
-
p_dropout,
|
408 |
-
resblock,
|
409 |
-
resblock_kernel_sizes,
|
410 |
-
resblock_dilation_sizes,
|
411 |
-
upsample_rates,
|
412 |
-
upsample_initial_channel,
|
413 |
-
upsample_kernel_sizes,
|
414 |
-
n_speakers=0,
|
415 |
-
gin_channels=0,
|
416 |
-
use_sdp=True,
|
417 |
-
**kwargs):
|
418 |
-
|
419 |
-
super().__init__()
|
420 |
-
self.n_vocab = n_vocab
|
421 |
-
self.spec_channels = spec_channels
|
422 |
-
self.inter_channels = inter_channels
|
423 |
-
self.hidden_channels = hidden_channels
|
424 |
-
self.filter_channels = filter_channels
|
425 |
-
self.n_heads = n_heads
|
426 |
-
self.n_layers = n_layers
|
427 |
-
self.kernel_size = kernel_size
|
428 |
-
self.p_dropout = p_dropout
|
429 |
-
self.resblock = resblock
|
430 |
-
self.resblock_kernel_sizes = resblock_kernel_sizes
|
431 |
-
self.resblock_dilation_sizes = resblock_dilation_sizes
|
432 |
-
self.upsample_rates = upsample_rates
|
433 |
-
self.upsample_initial_channel = upsample_initial_channel
|
434 |
-
self.upsample_kernel_sizes = upsample_kernel_sizes
|
435 |
-
self.segment_size = segment_size
|
436 |
-
self.n_speakers = n_speakers
|
437 |
-
self.gin_channels = gin_channels
|
438 |
-
|
439 |
-
self.use_sdp = use_sdp
|
440 |
-
|
441 |
-
self.enc_p = TextEncoder(n_vocab,
|
442 |
-
inter_channels,
|
443 |
-
hidden_channels,
|
444 |
-
filter_channels,
|
445 |
-
n_heads,
|
446 |
-
n_layers,
|
447 |
-
kernel_size,
|
448 |
-
p_dropout)
|
449 |
-
self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
|
450 |
-
upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
|
451 |
-
self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
|
452 |
-
gin_channels=gin_channels)
|
453 |
-
self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
|
454 |
-
|
455 |
-
if use_sdp:
|
456 |
-
self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
|
457 |
-
else:
|
458 |
-
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
|
459 |
-
|
460 |
-
if n_speakers > 1:
|
461 |
-
self.emb_g = nn.Embedding(n_speakers, gin_channels)
|
462 |
-
|
463 |
-
def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
|
464 |
-
x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
|
465 |
-
if self.n_speakers > 0:
|
466 |
-
g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
|
467 |
-
else:
|
468 |
-
g = None
|
469 |
-
|
470 |
-
if self.use_sdp:
|
471 |
-
logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
|
472 |
-
else:
|
473 |
-
logw = self.dp(x, x_mask, g=g)
|
474 |
-
w = torch.exp(logw) * x_mask * length_scale
|
475 |
-
w_ceil = torch.ceil(w)
|
476 |
-
y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
|
477 |
-
y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
|
478 |
-
attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
|
479 |
-
attn = commons.generate_path(w_ceil, attn_mask)
|
480 |
-
|
481 |
-
m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
|
482 |
-
logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
|
483 |
-
2) # [b, t', t], [b, t, d] -> [b, d, t']
|
484 |
-
|
485 |
-
z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
|
486 |
-
z = self.flow(z_p, y_mask, g=g, reverse=True)
|
487 |
-
o = self.dec((z * y_mask)[:, :, :max_len], g=g)
|
488 |
-
return o, attn, y_mask, (z, z_p, m_p, logs_p)
|
489 |
-
|
490 |
-
def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
|
491 |
-
assert self.n_speakers > 0, "n_speakers have to be larger than 0."
|
492 |
-
g_src = self.emb_g(sid_src).unsqueeze(-1)
|
493 |
-
g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
|
494 |
-
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
|
495 |
-
z_p = self.flow(z, y_mask, g=g_src)
|
496 |
-
z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
|
497 |
-
o_hat = self.dec(z_hat * y_mask, g=g_tgt)
|
498 |
-
return o_hat, y_mask, (z, z_p, z_hat)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|