Commit 187401b
Parent(s): b1478bd
Update parquet files (step 46 of 296)
This view is limited to 50 files because it contains too many changes.
- spaces/1368565466ki/Satdia/app.py +0 -290
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md +0 -153
- spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md +0 -44
- spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md +0 -61
- spaces/1phancelerku/anime-remove-background/ .md +0 -130
- spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md +0 -94
- spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md +0 -101
- spaces/232labs/VToonify/vtoonify/model/encoder/encoders/helpers.py +0 -119
- spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/index.html +0 -39
- spaces/AIFILMS/image-to-sound-fx/app.py +0 -125
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py +0 -8
- spaces/AIGText/GlyphControl/ldm/modules/ema.py +0 -80
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts +0 -15
- spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh +0 -26
- spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile +0 -25
- spaces/AgentVerse/agentVerse/ui/README.md +0 -1
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts +0 -8
- spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py +0 -57
- spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py +0 -889
- spaces/Altinas/vits-uma-genshin-honkais/utils.py +0 -225
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py +0 -772
- spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md +0 -22
- spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py +0 -39
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md +0 -16
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py +0 -83
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py +0 -217
- spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py +0 -59
- spaces/Artrajz/vits-simple-api/voice.py +0 -325
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py +0 -47
- spaces/AzinZ/vitscn/commons.py +0 -161
- spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md +0 -124
- spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md +0 -26
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py +0 -176
- spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py +0 -46
- spaces/CVPR/TokenCut/README.md +0 -12
- spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py +0 -130
- spaces/Cvandi/remake/app.py +0 -68
- spaces/DJQmUKV/rvc-inference/infer_pack/models_onnx_moess.py +0 -849
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageGrab.py +0 -169
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_fileresponse.py +0 -288
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_winconsole.py +0 -279
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_e_a_d.py +0 -124
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/ranged_response.py +0 -185
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Copy-9f1657c4.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/index.html +0 -84
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/__init__.py +0 -62
- spaces/Datasculptor/AIart_sources_of_inspiration/README.md +0 -13
spaces/1368565466ki/Satdia/app.py
DELETED
@@ -1,290 +0,0 @@
# coding=utf-8
import os
import re
import argparse
import utils
import commons
import json
import torch
import gradio as gr
from models import SynthesizerTrn
from text import text_to_sequence, _clean_text
from torch import no_grad, LongTensor
import gradio.processing_utils as gr_processing_utils
import logging
logging.getLogger('numba').setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces"  # limit text and audio length in huggingface spaces

hps_ms = utils.get_hparams_from_file(r'config/config.json')

audio_postprocess_ori = gr.Audio.postprocess

def audio_postprocess(self, y):
    data = audio_postprocess_ori(self, y)
    if data is None:
        return None
    return gr_processing_utils.encode_url_or_file_to_base64(data["name"])


gr.Audio.postprocess = audio_postprocess

def get_text(text, hps, is_symbol):
    text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = LongTensor(text_norm)
    return text_norm, clean_text

def create_tts_fn(net_g_ms, speaker_id):
    def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
        text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
        if limitation:
            text_len = len(re.sub("\[([A-Z]{2})\]", "", text))
            max_len = 100
            if is_symbol:
                max_len *= 3
            if text_len > max_len:
                return "Error: Text is too long", None
        if not is_symbol:
            if language == 0:
                text = f"[ZH]{text}[ZH]"
            elif language == 1:
                text = f"[JA]{text}[JA]"
            else:
                text = f"{text}"
        stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
        with no_grad():
            x_tst = stn_tst.unsqueeze(0).to(device)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
            sid = LongTensor([speaker_id]).to(device)
            audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
                                   length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()

        return "Success", (22050, audio)
    return tts_fn

def create_to_symbol_fn(hps):
    def to_symbol_fn(is_symbol_input, input_text, temp_lang):
        if temp_lang == 0:
            clean_text = f'[ZH]{input_text}[ZH]'
        elif temp_lang == 1:
            clean_text = f'[JA]{input_text}[JA]'
        else:
            clean_text = input_text
        return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else ''

    return to_symbol_fn

def change_lang(language):
    if language == 0:
        return 0.6, 0.668, 1.2
    elif language == 1:
        return 0.6, 0.668, 1
    else:
        return 0.6, 0.668, 1

download_audio_js = """
() =>{{
    let root = document.querySelector("body > gradio-app");
    if (root.shadowRoot != null)
        root = root.shadowRoot;
    let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
    let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
    if (audio == undefined)
        return;
    text = text.value;
    if (text == undefined)
        text = Math.floor(Math.random()*100000000);
    audio = audio.src;
    let oA = document.createElement("a");
    oA.download = text.substr(0, 20)+'.wav';
    oA.href = audio;
    document.body.appendChild(oA);
    oA.click();
    oA.remove();
}}
"""

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--api', action="store_true", default=False)
    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
    parser.add_argument("--all", action="store_true", default=False, help="enable all models")
    args = parser.parse_args()
    device = torch.device(args.device)
    categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil"]
    others = {
        "Princess Connect! Re:Dive": "https://huggingface.co/spaces/sayashi/vits-models-pcr",
        "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
        "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
        "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2",
    }
    if args.all:
        categories = ["Honkai: Star Rail", "Blue Archive", "Lycoris Recoil", "Princess Connect! Re:Dive", "Genshin Impact", "Honkai Impact 3rd", "Overwatch 2"]
        others = {}
    models = []
    with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
        models_info = json.load(f)
    for i, info in models_info.items():
        if info['title'].split("-")[0] not in categories or not info['enable']:
            continue
        sid = info['sid']
        name_en = info['name_en']
        name_zh = info['name_zh']
        title = info['title']
        cover = f"pretrained_models/{i}/{info['cover']}"
        example = info['example']
        language = info['language']
        net_g_ms = SynthesizerTrn(
            len(hps_ms.symbols),
            hps_ms.data.filter_length // 2 + 1,
            hps_ms.train.segment_size // hps_ms.data.hop_length,
            n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
            **hps_ms.model)
        utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
        _ = net_g_ms.eval().to(device)
        models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> vits-models\n"
            "## <center> Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
            "## <center> 请不要生成会对个人以及组织造成侵害的内容\n\n"
            "[](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n"
            "[](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n"
            "[](https://github.com/SayaSS/vits-finetuning)"
        )

        with gr.Tabs():
            for category in categories:
                with gr.TabItem(category):
                    with gr.TabItem("EN"):
                        for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
                            if title.split("-")[0] != category:
                                continue
                            with gr.TabItem(name_en):
                                with gr.Row():
                                    gr.Markdown(
                                        '<div align="center">'
                                        f'<a><strong>{title}</strong></a>'
                                        f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
                                        '</div>'
                                    )
                                with gr.Row():
                                    with gr.Column():
                                        input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
                                        lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
                                                           type="index", value=language)
                                        with gr.Accordion(label="Advanced Options", open=False):
                                            symbol_input = gr.Checkbox(value=False, label="Symbol input")
                                            symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
                                                                     samples=[[x] for x in hps_ms.symbols])
                                            symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
                                        btn = gr.Button(value="Generate", variant="primary")
                                        with gr.Row():
                                            ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
                                            nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
                                            ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
                                    with gr.Column():
                                        o1 = gr.Textbox(label="Output Message")
                                        o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
                                        download = gr.Button("Download Audio")
                                    btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
                                    download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
                                    lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
                                    symbol_input.change(
                                        to_symbol_fn,
                                        [symbol_input, input_text, lang],
                                        [input_text]
                                    )
                                    symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
                                                      _js=f"""
                                    (i,symbols) => {{
                                        let root = document.querySelector("body > gradio-app");
                                        if (root.shadowRoot != null)
                                            root = root.shadowRoot;
                                        let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
                                        let startPos = text_input.selectionStart;
                                        let endPos = text_input.selectionEnd;
                                        let oldTxt = text_input.value;
                                        let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
                                        text_input.value = result;
                                        let x = window.scrollX, y = window.scrollY;
                                        text_input.focus();
                                        text_input.selectionStart = startPos + symbols[i].length;
                                        text_input.selectionEnd = startPos + symbols[i].length;
                                        text_input.blur();
                                        window.scrollTo(x, y);
                                        return text_input.value;
                                    }}""")
                    with gr.TabItem("中文"):
                        for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
                            if title.split("-")[0] != category:
                                continue
                            with gr.TabItem(name_zh):
                                with gr.Row():
                                    gr.Markdown(
                                        '<div align="center">'
                                        f'<a><strong>{title}</strong></a>'
                                        f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
                                        '</div>'
                                    )
                                with gr.Row():
                                    with gr.Column():
                                        input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
                                        lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
                                                           type="index", value="中文" if language == "Chinese" else "日语")
                                        with gr.Accordion(label="高级选项", open=False):
                                            symbol_input = gr.Checkbox(value=False, label="符号输入")
                                            symbol_list = gr.Dataset(label="符号列表", components=[input_text],
                                                                     samples=[[x] for x in hps_ms.symbols])
                                            symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
                                        btn = gr.Button(value="生成", variant="primary")
                                        with gr.Row():
                                            ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
                                            nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
                                            ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
                                    with gr.Column():
                                        o1 = gr.Textbox(label="输出信息")
                                        o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
                                        download = gr.Button("下载音频")
                                    btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
                                    download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
                                    lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
                                    symbol_input.change(
                                        to_symbol_fn,
                                        [symbol_input, input_text, lang],
                                        [input_text]
                                    )
                                    symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
                                                      _js=f"""
                                    (i,symbols) => {{
                                        let root = document.querySelector("body > gradio-app");
                                        if (root.shadowRoot != null)
                                            root = root.shadowRoot;
                                        let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
                                        let startPos = text_input.selectionStart;
                                        let endPos = text_input.selectionEnd;
                                        let oldTxt = text_input.value;
                                        let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
                                        text_input.value = result;
                                        let x = window.scrollX, y = window.scrollY;
                                        text_input.focus();
                                        text_input.selectionStart = startPos + symbols[i].length;
                                        text_input.selectionEnd = startPos + symbols[i].length;
                                        text_input.blur();
                                        window.scrollTo(x, y);
                                        return text_input.value;
                                    }}""")
            for category, link in others.items():
                with gr.TabItem(category):
                    gr.Markdown(
                        f'''
                        <center>
                        <h2>Click to Go</h2>
                        <a href="{link}">
                        <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-xl-dark.svg"
                        </a>
                        </center>
                        '''
                    )
    app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
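One subtlety in the deleted app.py above: `download_audio_js` is a JavaScript template rendered with Python's `str.format`, so every brace the browser should actually see is doubled (`{{`/`}}`) and only the `{audio_id}` placeholder is substituted per tab. A minimal standalone sketch of that pattern (not part of the app; the element ID here is hypothetical):

# Minimal sketch of the str.format pattern used by download_audio_js:
# doubled braces stay literal JS braces, single-brace placeholders are filled.
js_template = """
() => {{
    let audio = document.querySelector("#tts-audio-{audio_id}");
    if (audio == null) {{ return; }}
}}
"""
rendered = js_template.format(audio_id="en-Demo")  # "en-Demo" is a made-up ID
assert "#tts-audio-en-Demo" in rendered
assert "{{" not in rendered  # doubled braces collapsed to single braces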
spaces/1acneusushi/gradio-2dmoleculeeditor/data/GameGeniePs3USBrar Learn How to Use the Game Genie Software on Your PC and PS3.md
DELETED
@@ -1,153 +0,0 @@
# GameGeniePs3USBrar: How to Use Game Genie Save Editor for PS3

Do you want to unlock all levels, get maximum money, ammo and experience, and have more fun with your PS3 games? If so, you need GameGeniePs3USBrar. In this article, I will show you what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. Let's get started!

## GameGeniePs3USBrar

**Download File** ⭐ https://byltly.com/2uKxxF

## What is GameGeniePs3USBrar?

GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3. Game Genie Save Editor for PS3 is a program that lets you access and edit your PS3 game saves on your PC, with cheats that take effect once you load your game on your PS3. It works by copying your save from your PS3 to a USB drive, inserting the drive into your PC, choosing and applying cheats in Game Genie Save Editor for PS3, and copying the save back from the USB drive to your PS3.

### A brief introduction to Game Genie Save Editor for PS3

Game Genie Save Editor for PS3 is a product developed by Hyperkin, a company that specializes in video game accessories and software. It was released in 2012 as a successor to the original Game Genie device that was popular in the 1990s. It works with European and American PS3 games and does not require any illegal modification or jailbreaking of your PS3. It is compatible with Windows XP, Vista, 7, 8, and 10.

### The benefits of using Game Genie Save Editor for PS3

There are many benefits to using Game Genie Save Editor for PS3. Some of them are:

- You can enjoy more freedom and creativity by modifying your games to your preferences.
- You can save time and effort by skipping difficult or tedious parts of games.
- You can enhance your gaming experience by unlocking new features, items, modes, characters, and so on.
- You can discover secrets and easter eggs you might otherwise have missed.
- You can have more fun by trying out different combinations of cheats.

## How to download and install GameGeniePs3USBrar?

To use Game Genie Save Editor for PS3, you need to download and install GameGeniePs3USBrar on your PC. Here's how.

### The requirements for using Game Genie Save Editor for PS3

Before you download and install GameGeniePs3USBrar, make sure you have the following:

- A PC with Windows XP, Vista, 7, 8, or 10.
- A USB drive with at least 1 GB of free space.
- A PS3 with a USB port.
- A copy of Game Genie Save Editor for PS3. You can purchase it from www.thegamegenie.com or www.gamegenie.eu, depending on your region, either as a physical product that comes with a USB drive or as a direct-download version you can download from the website after purchase.

### The steps to download and install GameGeniePs3USBrar

Once you have the requirements ready, follow these steps:

1. Go to http://download.gamegenie.eu/ps3/ if you purchased the product from www.gamegenie.eu, or to http://www.thegamegenie.com/ps4/download.php if you purchased it from www.thegamegenie.com.
2. Click the link that says "Download Setup Here" under the appropriate section, depending on whether you bought the physical product or the direct-download version.
3. Save the downloaded GameGeniePs3USBrar file on your PC.
4. Extract the file using a program like WinRAR or 7-Zip.
5. Run the setup program inside the archive.
6. Follow the on-screen instructions to complete the installation.
7. Launch the program from its desktop or Start-menu icon.

## How to use GameGeniePs3USBrar to modify your PS3 saves?

Now that you have downloaded and installed GameGeniePs3USBrar on your PC, you can use it to modify your PS3 saves. Here's how.

### The features of Game Genie Save Editor for PS3

Game Genie Save Editor for PS3 has several features that make it easy and convenient to use:

- Browse hundreds of games and thousands of cheats in its database.
- Search for games by name or by genre.
- Sort games by popularity or alphabetically.
- View detailed information about each game and cheat: description, screenshots, video tutorials, and more.
- Customize each cheat by changing its value or enabling/disabling it.
- Create multiple profiles for different users or games.
- Back up and restore your saves in case something goes wrong.
- Update the program and its database automatically or manually.

### The process of modifying your PS3 saves with Game Genie Save Editor for PS3

Modifying your PS3 saves involves three main steps: copying your save from your PS3 to a USB drive, choosing and applying cheats in Game Genie Save Editor for PS3 on your PC, and copying the save back from the USB drive to your PS3 and loading your game. Here's how.

#### How to copy your save from your PS3 to a USB drive

1. Turn on your PC and insert your USB drive into an available port.
2. Create a folder named "PS3" in the root directory of your USB drive.
3. Create another folder named "SAVEDATA" inside the "PS3" folder.
4. Create another folder named "BLESXXXXX" inside the "SAVEDATA" folder, replacing XXXXX with the five-digit code that corresponds to the region of your game. For example, the European version of The Elder Scrolls V: Skyrim uses BLES01329. (A scripted version of this folder setup appears right after this list.)
5. Copy your save file from your PS3 to the "BLESXXXXX" folder on the USB drive: go to the Game menu on your PS3, select Saved Data Utility (PS3), find the game you want to copy, press the Triangle button, and choose Copy. Select your USB device as the destination and confirm.
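For readers comfortable with a script, the folder layout from the steps above can be created in a few lines of Python. This is only an illustrative sketch: the mount point "E:\\" is an assumption, and BLES01329 is the Skyrim (EU) example from the article.

# Illustrative sketch: create the PS3/SAVEDATA/<region code> layout on a USB drive.
import os

usb_root = "E:\\"          # assumption: wherever the USB drive is mounted
region_code = "BLES01329"  # the Skyrim (EU) example from the article
save_dir = os.path.join(usb_root, "PS3", "SAVEDATA", region_code)
os.makedirs(save_dir, exist_ok=True)  # creates PS3/SAVEDATA/BLES01329
print("Copy your PS3 save into:", save_dir)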
#### How to choose and apply cheats using Game Genie Save Editor for PS3

1. Insert your USB drive into your PC and launch Game Genie Save Editor for PS3.
2. Select the profile you want to use, or create a new one with the Profile button.
3. Click Open, browse to your USB drive, select the save file you want to modify, and click Open.
4. Wait for the program to load the game information and the available cheats. You can also click Refresh to update the cheats database.
5. Browse the cheats with the arrows or the search box; you can also sort them by name or category.
6. Check the box next to each cheat you want to apply. For some cheats you can also change the value by clicking on them and typing a new number.
7. Click Apply to confirm your changes. You can also click Backup to save a copy of your original save file.

#### How to copy your save back from the USB drive to your PS3 and load your game

1. Eject the USB drive from your PC and insert it into your PS3.
2. Go to the Game menu on your PS3, select Saved Data Utility (PS3), find your USB device, press the Triangle button, and choose Copy.
3. Select the save file you want to copy and confirm. If you have a backup of your original save file, you can overwrite it or keep both versions.
4. Load your game and enjoy your modified save!

## What games and cheats are available with GameGeniePs3USBrar?

GameGeniePs3USBrar gives you access to hundreds of games and thousands of cheats in its database. There are games from many genres, such as action, adventure, role-playing, sports, racing, and fighting, and cheats for different aspects of a game, such as health, money, ammo, items, stats, skills, and levels.

### The list of games and cheats included in Game Genie Save Editor for PS3

To see the list of games and cheats included in Game Genie Save Editor for PS3, go to www.gamegenie.eu or www.thegamegenie.com, depending on your region; you can also view it in the program by clicking the List button. The list is updated regularly, with new games and cheats added every week. As of November 2016, it covers 471 games and 23,257 cheats in total.

### The updates and support for Game Genie Save Editor for PS3

Game Genie Save Editor for PS3 is updated with new games and cheats every week. You can update the program and its database automatically or manually by clicking the Update button, or check for updates under Help > Check for Updates. If you have any questions or problems, contact the support team via Help > Contact Support or by emailing [email protected] or [email protected].

## Conclusion

In conclusion, GameGeniePs3USBrar is a file name that contains the setup program for Game Genie Save Editor for PS3, a program that lets you access and edit your PS3 game saves on your PC with cheats that take effect once you load your game on your PS3. It works by copying your save from your PS3 to a USB drive, applying cheats on your PC, and copying the save back to your PS3. It gives you access to hundreds of games and thousands of cheats, is compatible with European and American PS3 games, and does not require any illegal modification or jailbreaking of your PS3. It is a fun and convenient way to enhance your gaming experience with more freedom and creativity.

I hope this article has helped you understand what GameGeniePs3USBrar is, how to download and install it, how to use it to modify your PS3 saves, and what games and cheats are available with it. If you have any questions or feedback, please leave a comment below. Thank you for reading!

## Frequently Asked Questions

1. **Is Game Genie Save Editor for PS3 legal?** Yes, as long as you use it for personal use only. It does not modify or hack your PS3 system or firmware; it only modifies your own game saves stored on a USB drive.
2. **Does Game Genie Save Editor for PS3 work with all PS3 games?** No, only with games supported by its database. You can check whether a game is supported at www.gamegenie.eu or www.thegamegenie.com, depending on your region.
3. **Can I use Game Genie Save Editor for PS3 online?** No, it is intended for offline use only. Using it online may result in a ban or suspension from online services or multiplayer modes.
4. **Can I share my modified saves with other users?** No. Each save file is encrypted with a unique code tied to your profile and console; sharing it may cause corruption or errors.
5. **Can I undo the changes made by Game Genie Save Editor for PS3?** Yes, by restoring your original save file, provided you created a backup before applying any cheats. Copy it back from the USB drive to your PS3 using the same method as before.
spaces/1gistliPinn/ChatGPT4/Examples/Autodata 3.40 Crack Windows 7 _VERIFIED_.md
DELETED
@@ -1,44 +0,0 @@
# autodata 3.40 crack windows 7

**Download File** → https://imgfil.com/2uxZvq

Details: AutoData 3.45, the most important version of AutoData for the Mac, is a must-have backup solution for Mac users. You don't need a clean installation of OS X or a backup server to run it on your Mac.

Learn how to download and install AutoData 3.45 free on your Mac with the step-by-step guides in this tutorial. It will walk you through opening your Mac, installing AutoData, and downloading and using AutoData 3.45.

## 1. How to download AutoData 3.45 free

1. Click the Download button to download the AutoData.dmg file.
2. Save it to your desktop by choosing "Save As" in the file browser.
3. Double-click the AutoData.dmg file to install AutoData 3.45. Note: the AutoData 3.45 free download may ask you to activate by entering a serial number, but you don't need to enter one.
4. Choose "Upgrade from existing installation" if the version of the application you are currently running is not the same as the version you downloaded.
5. You are now ready to use AutoData 3.45 free.

## 2. How to use AutoData 3.45 on your Mac

1. Launch the AutoData application from the desktop.
2. Press "Backup", "Restore", "Make a backup of my data", "Create a new backup", "Delete" or "Revert".
3. Press "Backup" to back up your applications.
4. Press "Restore" to restore your applications.
5. You can also use the Backup Manager to back up your applications.

## 3. How to upgrade AutoData 3.45 Free

2. You will see the following window. (Note: if you don't see this window, download AutoData 3.45 and update it manually.)
3. Choose "Upgrade from existing installation".
4. You
spaces/1gistliPinn/ChatGPT4/Examples/Condenados A Fugarse Audio Latino.md
DELETED
@@ -1,6 +0,0 @@
# Condenados a fugarse audio latino

**Download Zip** > https://imgfil.com/2uy1sM

Condenados a Fugarse — watch online. 1 Latin Spanish Netu; 2 Latin Spanish Fembed; 3 Latin Spanish MegaVIPS; 4 Latin Spanish Mystream; 5 Latin Spanish ...
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Apk Gta 5 REPACK Download Official Gta 5 For Android Amp Ios.md
DELETED
@@ -1,61 +0,0 @@
# How to Download GTA 5 on Android and iOS Devices

GTA 5 is one of the most popular and acclaimed video games of all time. It is an action-adventure game that lets you experience the life of a criminal in the fictional city of Los Santos. You can play as one of three protagonists, each with their own story, personality, and skills, and switch between them at any time, creating a dynamic and immersive gameplay experience.

If you are a fan of GTA 5, you might be wondering whether you can play it on your mobile devices, such as Android or iOS phones and tablets. The answer is yes, you can! In this article, we will show you how to download GTA 5 on Android and iOS devices, along with some tips and tricks for playing it on the go.

## apk gta 5 download: official gta 5 for android & ios

**Download** ✏ https://urlin.us/2uT0sS

## What is GTA 5 and Why You Should Play It

GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar Games. It was released in 2013 for PlayStation 3 and Xbox 360, and later for PlayStation 4, Xbox One, and PC. It is considered one of the best-selling and most critically acclaimed games of all time, with over 140 million copies sold worldwide.

### GTA 5 Features and Gameplay

GTA 5 offers a vast and diverse open world that you can explore by foot, by car, by bike, by boat, by plane, or by helicopter. You can also interact with various characters, objects, and activities in the world, such as robbing stores, playing golf, racing cars, parachuting, hunting animals, or watching TV.

The game also features a compelling story mode that follows the lives of three protagonists: Michael, a retired bank robber who is unhappy with his family; Franklin, a young street hustler who wants to make it big; and Trevor, a psychotic drug dealer who lives in a trailer park. You can switch between them at any time during the game, creating different perspectives and outcomes.

Additionally, the game has an online mode called GTA Online, where you can create your own character and join other players in various missions, races, heists, deathmatches, or freemode events. You can also customize your character's appearance, skills, vehicles, weapons, properties, and businesses.

### GTA 5 Requirements and Compatibility

To play GTA 5 on your mobile devices, you will need a compatible device that meets the minimum requirements. According to Rockstar Games, these are:

| Device  | OS             | RAM            | Storage        |
|---------|----------------|----------------|----------------|
| Android | 8.0 or higher  | 4 GB or higher | 8 GB or higher |
| iOS     | 12.0 or higher | 4 GB or higher | 8 GB or higher |

You will also need a stable internet connection to download and play the game.

## How to Download GTA 5 on Android Devices

There are three main ways to download GTA 5 on your Android devices:

### Download from the Official Rockstar Games Website

The easiest way is to visit the official Rockstar Games website and follow the instructions. You will need to create a Rockstar Games Social Club account or log in with your existing one. Then purchase the game for $19.99 and download the GTA 5 apk file on your device, along with the GTA 5 data file, which is about 3 GB in size. After that, install the apk file and launch the game.

### Download from the Epic Games Store

Another way is to use the Epic Games Store app, which is available on the Google Play Store. Create an Epic Games account or log in with your existing one, purchase the game for $19.99, and download it on your device together with the roughly 3 GB data file. After that, launch the game from the app.

### Download from the BlueStacks App Player

The third way is to use the BlueStacks App Player, software that allows you to run Android apps on your PC. Download and install BlueStacks on your PC from its official website, then download the GTA 5 apk file and data file from the Rockstar Games website or the Epic Games Store app. Transfer the files to your device using a USB cable or a cloud service, install the apk file, and launch the game.

## How to Download GTA 5 on iOS Devices

There are two main ways to download GTA 5 on your iOS devices:

### Download from the App Store

Visit the App Store and search for GTA 5. You will need an Apple ID, or you can create one if you don't have one. Purchase the game for $19.99 and download it on your device, along with the roughly 3 GB data file. After that, launch the game from your home screen.

### Download from the Cloud Gaming Services

Alternatively, use a cloud gaming service such as Google Stadia, NVIDIA GeForce Now, or Microsoft xCloud. These platforms stream games from the cloud to your device without downloading them. You will need a subscription or membership (prices and features vary), a compatible device, and a stable internet connection. Then access GTA 5 from the service's app or website and play it on your device.

## Tips and Tricks for Playing GTA 5 on Mobile Devices

Playing GTA 5 on mobile devices can be challenging and fun at the same time. Here are some tips and tricks for playing it on the go:

### Adjust the Settings and Controls

GTA 5 has many settings and controls that you can customize to your preference and your device's performance. You can adjust the graphics quality, sound volume, camera angle, brightness, subtitles, and more, and change the control layout, sensitivity, vibration, and feedback. You can find these options in the pause menu under Settings.

### Use the Online Mode and Social Club Features

GTA 5's online mode, GTA Online, lets you join other players in missions, races, heists, deathmatches, or freemode events, and customize your character's appearance, skills, vehicles, weapons, properties, and businesses. To access GTA Online, you will need a Rockstar Games Social Club account and an internet connection; the option is in the pause menu under Online.

The Social Club also offers other features that enhance your gaming experience, such as leaderboards, stats, achievements, crews, friends, messages, screenshots, and videos. You can access them from the pause menu under Social Club, or from the Rockstar Games website or app.

### Explore the Open World and Complete the Missions

Beyond the open-world activities and three-protagonist story described above, progressing in the story mode means completing missions that involve driving, shooting, stealth, planning, and teamwork. You can choose how to approach each mission, such as being loud or quiet, aggressive or passive, or using different vehicles or weapons. You can find missions on the map or by contacting the characters.

## Conclusion

GTA 5 is an amazing game that you can enjoy on your mobile devices. You can download it from the official Rockstar Games website, the Epic Games Store app, the BlueStacks App Player, the App Store, or the cloud gaming services. You can also customize the settings and controls, use the online mode and Social Club features, and explore the open world and complete the missions. GTA 5 is a game that will keep you entertained for hours and hours.

## FAQs

Here are some frequently asked questions about GTA 5 on mobile devices:

**Q: How much space does GTA 5 take on my device?**
A: GTA 5 takes about 8 GB of space on your device, plus another 3 GB for the data file. Make sure you have enough free space before downloading and installing the game.

**Q: Can I play GTA 5 offline on my device?**
A: Yes, although you will need an internet connection to download and install the game, and to access some features such as GTA Online and the Social Club.

**Q: Can I play GTA 5 with my friends on my device?**
A: Yes. You can join them in GTA Online or invite them to your game session. You will need a Rockstar Games Social Club account and an internet connection to do so.

**Q: Can I transfer my GTA 5 progress from my PC or console to my device?**
A: Yes. Link your Rockstar Games Social Club account to your PC or console account, then log in with the same account on your device and choose to sync your progress.

**Q: Can I use cheats or mods on GTA 5 on my device?**
A: No. Cheats and mods are not supported by Rockstar Games and may cause errors or bans on your account. You should only play GTA 5 on your device as intended by the developers.
spaces/1phancelerku/anime-remove-background/ .md
DELETED
@@ -1,130 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Скачать сканворд фан: как развлечься и поднять свой уровень знаний</h1>
|
3 |
-
<p>Вы любите решать сканворды, но не хотите тратить деньги на печатные издания или скачивать разные приложения? Тогда вам понравится игра <strong>сканворд фан</strong>, которая предлагает вам бесконечное количество бесплатных сканвордов на любой вкус и уровень сложности. В этой статье мы расскажем, что такое сканворд фан, как его скачать и как решать сканворды в нём, а также какие преимущества он даёт для вашего развития и отдыха.</p>
|
4 |
-
<h2>скачать сканворд фан</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://jinyurl.com/2uNUuh">https://jinyurl.com/2uNUuh</a></b></p><br /><br />
|
5 |
-
<h2>Что такое сканворд фан и почему он так популярен?</h2>
|
6 |
-
<p><strong>Сканворд фан</strong> - это приложение для Android и iOS, которое позволяет вам решать сканворды на своём телефоне или планшете. Сканворд - это вид кроссворда, в котором вопросы расположены внутри сетки, а ответы записываются по горизонтали или вертикали. Сканворды могут быть по разным темам, например, по истории, географии, культуре, спорту, науке и т.д.</p>
|
7 |
-
<p>Сканворд фан - это не просто коллекция сканвордов, а целая игра, которая имеет множество особенностей и возможностей для пользователей. Давайте рассмотрим их подробнее.</p>
|
8 |
-
<h3>Особенности игры сканворд фан</h3>
|
9 |
-
<h4>Бесплатные сканворды каждый день <h4>Бесплатные сканворды каждый день</h4>
|
10 |
-
<p>В игре сканворд фан вы не ограничены в количестве сканвордов, которые можете решать. Каждый день вам доступны новые сканворды разной сложности и тематики. Вы можете выбирать те, которые вам интересны, и пропускать те, которые вам не нравятся. Вы также можете вернуться к решению пропущенных или незавершённых сканвордов в любое время.</p>
|
11 |
-
<p>скачать сканворд фан бесплатно<br />
|
12 |
-
скачать сканворд фан на андроид<br />
|
13 |
-
скачать сканворд фан мод<br />
|
14 |
-
скачать сканворд фан полная версия<br />
|
15 |
-
скачать сканворд фан без интернета<br />
|
16 |
-
скачать сканворд фан взлом<br />
|
17 |
-
скачать сканворд фан игра<br />
|
18 |
-
скачать сканворд фан апк<br />
|
19 |
-
скачать сканворд фан новая версия<br />
|
20 |
-
скачать сканворд фан онлайн<br />
|
21 |
-
скачать сканворд фан отзывы<br />
|
22 |
-
скачать сканворд фан для пк<br />
|
23 |
-
скачать сканворд фан на русском<br />
|
24 |
-
скачать сканворд фан обновление<br />
|
25 |
-
скачать сканворд фан прохождение<br />
|
26 |
-
скачать сканворд фан green ball studio<br />
|
27 |
-
скачать сканворд фан установить<br />
|
28 |
-
скачать сканворд фан решение<br />
|
29 |
-
скачать сканворд фан лучшая игра<br />
|
30 |
-
скачать сканворд фан головоломки<br />
|
31 |
-
скачать сканворд фан кроссворды<br />
|
32 |
-
скачать сканворд фан словарь<br />
|
33 |
-
скачать сканворд фан подсказки<br />
|
34 |
-
скачать сканворд фан темы<br />
|
35 |
-
скачать сканворд фан настройки<br />
|
36 |
-
скачать сканворд фан бонусы<br />
|
37 |
-
скачать сканворд фан мини игры<br />
|
38 |
-
скачать сканворд фан полиглот<br />
|
39 |
-
скачать сканворд фан судоку<br />
|
40 |
-
скачать сканворд фан пятнашки<br />
|
41 |
-
скачать сканворд фан эрудиция<br />
|
42 |
-
скачать сканворд фан быстрый ум<br />
|
43 |
-
скачать ска</p>
|
44 |
-
-<h4>Three themes to choose from</h4>
-<p>In сканворд фан you can choose the theme of the scanwords you want to solve. There are three to pick from: general, sports and cinema. The general theme offers scanwords across many fields of knowledge, the sports theme covers different sports and famous athletes, and the cinema theme covers films and actors. You can switch themes at any time or solve scanwords from all three.</p>
-<h4>Plenty of settings</h4>
-<p>In сканворд фан you can tune the game to your own preferences. You can change the font size, background colour, interface language, sound and music, and toggle automatic letter filling, error highlighting, hints and statistics. Your progress is saved and can be synced across devices through the cloud.</p>
-<h4>Bonuses for solving a group of scanwords</h4>
-<p>In сканворд фан you not only enjoy solving scanwords but also earn bonuses for your progress. Every solved scanword earns coins, which you can spend on hints or mini-games. And if you solve a group of five scanwords in one theme, you get an extra bonus: a gold coin that unlocks a special scanword with a larger coin reward.</p>
-<h4>Mini-games for variety</h4>
-<p>In сканворд фан you can also play several mini-games that stretch your brain and give you a break from the scanwords. There are four to choose from: anagram, words-from-a-word, sudoku and the fifteen puzzle. In the anagram you build a word from a given set of letters; in words-from-a-word you find every word hidden inside one long word; in sudoku you fill the grid with digits so that none repeats in a row, column or box; and in the fifteen puzzle you slide tiles until they run in order from 1 to 15. Each mini-game also earns coins you can use in сканворд фан. The mini-games are available at any time and do not depend on the scanword theme.</p>
-<h2>How do I download сканворд фан to my phone or tablet?</h2>
-<p>Downloading сканворд фан to your device is very simple. The steps differ slightly depending on your operating system.</p>
-<h3>For Android devices</h3>
-<p>If you have an Android phone or tablet, do the following:</p>
-<ol>
-<li>Open the Google Play app on your device.</li>
-<li>Type "сканворд фан" or "scanword fun" into the search bar.</li>
-<li>Find сканворд фан in the search results and tap it.</li>
-<li>Tap "Install" and wait for the download and installation to finish.</li>
-<li>Tap "Open", or find the game's icon on your home screen and launch it.</li>
-</ol>
-<p>Congratulations, you have successfully installed сканворд фан on your Android device!</p>
-<h3>For iOS devices</h3>
-<p>If you have an iPhone or iPad, do the following:</p>
-<ol>
-<li>Open the App Store app on your device.</li>
-<li>Type "сканворд фан" or "scanword fun" into the search bar.</li>
-<li>Find сканворд фан in the search results and tap it.</li>
-<li>Tap "Get" and wait for the download and installation to finish.</li>
-<li>Tap "Open", or find the game's icon on your home screen and launch it.</li>
-</ol>
-<p>Congratulations, you have successfully installed сканворд фан on your iOS device!</p>
-<h2>How do I solve scanwords in сканворд фан?</h2>
-<p>Solving scanwords in сканворд фан is simple and absorbing. Just follow a few steps:</p>
-<h3>Pick a difficulty level</h3>
-<p>You can choose the difficulty that suits you. There are three levels: easy, medium and hard. Easy suits beginners or anyone who just wants to relax; medium suits players who want to think a little and test their knowledge; hard suits those who enjoy tough puzzles and want a challenge. You can change the level at any time or mix scanwords of different levels.</p>
-<h3>Enter letters into the cells</h3>
-<p>You can enter letters into the grid with the keyboard or your finger. The button in the bottom-right corner of the screen switches between horizontal and vertical input, and you can move around the grid with the arrows or by swiping. A correct letter stays in its cell; a wrong one disappears.</p>
-<h3>Use hints</h3>
-<p>If you get stuck on a clue or a word, use a hint. There are three kinds: reveal a letter, reveal a word, and reveal the scanword. Reveal a letter opens one letter in any word; reveal a word opens a whole word across or down; reveal the scanword opens every word in the puzzle. Hints cost coins, which you earn by solving scanwords and mini-games.</p>
-<h2>What are the benefits of сканворд фан?</h2>
-<p>The game does more than entertain; it also helps you develop and unwind. Let's look at some of the benefits.</p>
-<h3>Builds erudition and quick thinking</h3>
-<p>Playing сканворд фан trains your erudition and speed of thought. You learn new and interesting facts across many topics, test your knowledge and memory, and work out words from letters and definitions. This expands your vocabulary, improves your spelling and grammar, and sharpens your concentration and logic.</p>
-<h3>Broadens your horizons and logical thinking</h3>
-<p>Playing сканворд фан broadens your horizons and strengthens logical thinking. You meet facts and events from history, geography, culture, sport, science and more, and you learn to analyse and compare information, draw conclusions, form hypotheses, and spot connections and patterns. This makes you better informed and improves your problem-solving and decision-making skills.</p>
-<h3>Helps you relax and rest</h3>
-<p>Playing сканворд фан also helps you relax. You can play anywhere, any time you need to de-stress or pass the time, enjoying the game's attractive design, pleasant music and sounds, and the mini-games. Your achievements, bonuses and rewards add their own satisfaction. Сканворд фан is a great way to have fun and lift your mood.</p>
-<h2>Conclusion</h2>
-<p>Сканворд фан is a distinctive app for scanword lovers and beyond. It offers an endless supply of free scanwords of varying difficulty and themes, plus plenty of features for learning and relaxing. Download сканворд фан to your phone or tablet from the links below and start playing right away. Don't miss the chance to have fun and raise your level of knowledge with сканворд фан!</p>
-<p><a href="https://play.google.com/store/apps/details?id=com.scanword.fun&hl=ru&gl=US">Download сканворд фан for Android</a></p>
-<p><a href="https://apps.apple.com/ru/app/%D1%81%D0%BA%D0%B0%D0%BD%D0%B2%D0%BE%D1%80%D0%B4-%D1%84%D0%B0%D0%BD/id1545758949">Download сканворд фан for iOS</a></p>
-<h2>FAQ</h2>
-<p>This section answers some frequently asked questions about сканворд фан.</p>
-<h4>Can I play сканворд фан without the internet?</h4>
-<p>Yes. The game does not require a constant internet connection: you can play offline once the scanwords or mini-games you need have been downloaded. A connection is needed to sync your progress, fetch new scanwords or access special offers.</p>
-<h4>How do I get more coins in сканворд фан?</h4>
-<p>There are several ways. First, you earn coins by solving scanwords and mini-games. Second, you receive bonuses for finishing a themed group of scanwords or a special scanword. Third, you can watch ads or take part in promotions for extra coins. Fourth, you can buy coins with real money if you run short for hints or mini-games.</p>
-<h4>How do I reset my progress in сканворд фан?</h4>
-<p>If you want to reset your progress and start the game over, you can do so in the settings:</p>
-<ol>
-<li>Open the game menu by tapping the three bars in the top-left corner of the screen.</li>
-<li>Select "Settings".</li>
-<li>Scroll down to "Reset progress".</li>
-<li>Tap "Reset" and confirm.</li>
-</ol>
-<p>Note that resetting deletes all your solved scanwords, coins, bonuses and settings, and they cannot be restored. Only reset your progress if you are sure.</p>
-<h4>How do I contact the developers of сканворд фан?</h4>
-<p>If you have questions, suggestions or problems with сканворд фан, you can reach the developers by email or on social media:</p>
-<ul>
-<li>Email: [email protected]</li>
-<li>Facebook: https://www.facebook.com/scanwordfun</li>
-<li>VK: https://vk.com/scanwordfun</li>
-<li>Instagram: https://www.instagram.com/scanwordfun</li>
-</ul>
-<p>The developers will be glad to hear your feedback and to help if needed.</p>
-<h4>How do I leave a review of сканворд фан?</h4>
-<p>If you like сканворд фан and want to share your impressions with other players, you can leave a review on Google Play or the App Store:</p>
-<ol>
-<li>Open the Google Play or App Store app on your device.</li>
-<li>Find сканворд фан among your installed apps and tap it.</li>
-<li>Scroll down to "Ratings and reviews".</li>
-<li>Tap "Write a review" or "Rate".</li>
-<li>Choose a star rating and write your review in the text field.</li>
-<li>Tap "Submit" or "Done".</li>
-</ol>
-<p>Your review will be published on Google Play or the App Store for other users to see. The developers of сканворд фан will also be glad to read it and take your opinion into account.</p>
-<p>Thank you for choosing сканворд фан! We hope you enjoyed this article and learned something useful. If you have more questions, write to us at [email protected] or on social media. Happy playing and good luck with your scanwords!</p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Download and Watch National Treasure 2 Book of Secrets in Hindi Dubbed 480p Filmyzilla - High Definition and Low Size.md
DELETED
@@ -1,94 +0,0 @@
-
-<h1>National Treasure 2 Full Movie in Hindi Download 480p Filmyzilla</h1>
-<p>If you are a fan of action-adventure movies with historical mysteries and puzzles, you might have enjoyed watching National Treasure (2004) and its sequel National Treasure: Book of Secrets (2007). The movies star Nicolas Cage as Benjamin Franklin Gates, a treasure hunter who follows clues hidden in historical artifacts and documents to uncover secrets and conspiracies. In this article, we will review the second movie in the franchise, National Treasure: Book of Secrets, and tell you how you can download it in Hindi for free from Filmyzilla, a popular website that offers pirated movies.</p>
-<h2>Movie Review</h2>
-<p>National Treasure: Book of Secrets is a 2007 American action-adventure film directed by Jon Turteltaub and produced by Jerry Bruckheimer. It is a sequel to the 2004 film National Treasure and is the second film of the National Treasure franchise. The film stars Nicolas Cage in the lead role, Jon Voight, Harvey Keitel, Ed Harris, Diane Kruger, Justin Bartha, Bruce Greenwood and Helen Mirren.</p>
-<h2>national treasure 2 full movie in hindi download 480p filmyzilla</h2><br /><p><b><b>Download File</b> ••• <a href="https://jinyurl.com/2uNJTn">https://jinyurl.com/2uNJTn</a></b></p><br /><br />
-<p>The plot follows Ben Gates as he tries to prove the innocence of his great-great-grandfather Thomas Gates, who is accused of being involved in the assassination of Abraham Lincoln by a rival treasure hunter Mitch Wilkinson (Ed Harris). To do so, he has to find a lost city of gold that is linked to a secret book that contains the history of the United States. Along the way, he has to deal with his estranged parents (Jon Voight and Helen Mirren), his ex-girlfriend (Diane Kruger), his best friend (Justin Bartha), and an FBI agent (Harvey Keitel).</p>
-<p>The movie is a fast-paced and fun-filled adventure that takes the viewers to various locations such as Paris, London, Washington D.C., Mount Rushmore, and the Black Hills. The movie has some impressive action sequences, such as a car chase in London, a kidnapping at Buckingham Palace, a break-in at the White House, and a cave exploration at Mount Rushmore. The movie also has some humorous moments, such as Ben's awkward interactions with his parents and his ex-girlfriend, Riley's sarcastic comments, and Ben's encounter with the President of the United States (Bruce Greenwood).</p>
-<p>However, the movie also has some flaws that might affect its appeal to some viewers. The movie is very similar to its predecessor in terms of its plot structure, characters, and themes. The movie relies heavily on historical inaccuracies, coincidences, and conveniences to move the story forward. The movie also has some logical inconsistencies and plot holes that might raise some questions among the viewers. For example, how did Mitch Wilkinson get access to John Wilkes Booth's diary? How did Ben manage to sneak into Buckingham Palace and the White House? How did Ben know where to find the entrance to the lost city of gold?</p>
-<p>national treasure book of secrets hindi dubbed 480p download<br />
-national treasure 2 full movie in hindi watch online filmyzilla<br />
-national treasure 2 hindi 480p free download<br />
-national treasure book of secrets full movie download in hindi 480p<br />
-national treasure 2 dual audio 480p filmyzilla<br />
-national treasure book of secrets hindi 480p filmywap<br />
-national treasure 2 full movie in hindi download 720p filmyzilla<br />
-national treasure book of secrets full movie in hindi watch online<br />
-national treasure 2 hindi dubbed movie download filmyzilla<br />
-national treasure book of secrets dual audio 480p download<br />
-national treasure 2 full movie in hindi download 300mb filmyzilla<br />
-national treasure book of secrets full movie online free in hindi<br />
-national treasure 2 hindi dubbed 480p download filmywap<br />
-national treasure book of secrets full movie download in hindi hd<br />
-national treasure 2 full movie in hindi download mp4 filmyzilla<br />
-national treasure book of secrets full movie in hindi free download<br />
-national treasure 2 full movie in hindi online filmyzilla<br />
-national treasure book of secrets full movie in hindi 480p worldfree4u<br />
-national treasure 2 full movie download in hindi filmyzilla<br />
-national treasure book of secrets full movie in hindi dubbed download<br />
-national treasure 2 full movie in hindi hd filmyzilla<br />
-national treasure book of secrets full movie in hindi filmyzilla<br />
-national treasure 2 full movie in hindi free download filmyzilla<br />
-national treasure book of secrets full movie in hindi hd online<br />
-national treasure 2 full movie in hindi watch online free filmyzilla<br />
-national treasure book of secrets full movie in hindi dailymotion<br />
-national treasure 2 full movie in hindi download filmywap<br />
-national treasure book of secrets full movie in hindi youtube<br />
-national treasure 2 full movie in hindi dubbed watch online filmyzilla<br />
-national treasure book of secrets full movie download filmyzilla<br />
-national treasure 2 full movie in hindi download hd filmyzilla<br />
-national treasure book of secrets full movie watch online free hd<br />
-national treasure 2 full movie in hindi download mkv filmyzilla<br />
-national treasure book of secrets full movie online with english subtitles<br />
-national treasure 2 full movie in hindi download khatrimaza filmyzilla<br />
-national treasure book of secrets full movie free download mp4<br />
-national treasure 2 full movie in hindi download pagalworld filmyzilla<br />
-national treasure book of secrets full movie online free no sign up<br />
-national treasure 2 full movie in hindi download moviesflix filmyzilla<br />
-national treasure book of secrets full movie online free putlockers<br />
-national treasure 2 full movie in hindi download coolmoviez filmyzilla<br />
-national treasure book of secrets full movie online free dailymotion<br />
-national treasure 2 full movie in hindi download worldfree4u filmyzilla<br />
-national treasure book of secrets full movie online free youtube<br />
-national treasure 2 full movie in hindi download bolly4u filmyzilla<br />
-national treasure book of secrets full movie online free reddit<br />
-national treasure 2 full movie in hindi download skymovieshd filmyzilla</p>
-<p>The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of history, mystery, and adventure. The movie does not have any deep or profound messages or themes. It is simply a popcorn flick that delivers what it promises: action, humor, romance, and treasure.</p>
-<h2>Movie Trivia</h2>
-<p>Here are some interesting facts and behind-the-scenes stories about National Treasure: Book of Secrets that you might not know:</p>
-<ul>
-<li>The movie was originally titled National Treasure 2: The Book of Secrets.</li>
-<li>The movie was filmed in various locations such as France, England, South Dakota, Maryland, Virginia, Washington D.C., California, New York City.</li>
-<li>The movie features several historical figures and events such as Abraham Lincoln, John Wilkes Booth, Mary Surratt, Samuel Mudd, Edwin Stanton, the Civil War, the Knights of the Golden Circle, the Resolute desks, the Statue of Liberty, the Mount Vernon Ladies' Association, and Cibola.</li>
-<li>The movie also references several fictional works such as The Da Vinci Code, The Wizard of Oz, and The Adventures of Tom Sawyer.</li>
-<li>The movie features several real-life artifacts and documents such as the Booth diary, the missing pages of the diary, the cipher wheel, the playbill, the twin Resolute desks, the Book of Secrets, and the President's seal.</li>
-<li>The movie also features several fictional artifacts and documents such as the plank, the pipe, the scale model of Paris, the letter from Queen Victoria, the clue on the Statue of Liberty, and the map on the Resolute desk.</li>
-<li>The movie had a budget of $130 million and grossed $457 million worldwide, making it a commercial success. It received mixed reviews from critics and audiences, with some praising its entertainment value and others criticizing its historical inaccuracies and implausibilities.</li>
-</ul>
-<h2>Filmyzilla Website</h2>
-<p>If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a popular website that offers free downloads of movies in various languages and formats. Filmyzilla is one of the many websites that provide pirated content to users who want to avoid paying for subscriptions or tickets. Filmyzilla has a large collection of movies from Hollywood, Bollywood, Tollywood, and other industries. You can find movies in genres such as action, comedy, drama, horror, thriller, romance, sci-fi, fantasy, animation, and more. You can also find movies in different resolutions such as 480p, 720p, 1080p, and 4K. You can download movies in formats such as MP4, MKV, AVI, and WMV.</p>
-<p>Filmyzilla claims to provide high-quality and fast downloads of movies to its users. It also claims to update its library regularly with new releases and old classics. It has a user-friendly interface that allows you to search for movies by name, genre, year, or language. It also has a section for trending movies and a request option for users who want to request a specific movie.</p>
-<p>However, before you visit Filmyzilla or any other similar website, you should be aware of some important facts and risks. First of all, downloading or streaming pirated content is illegal and unethical. It violates the intellectual property rights of the creators and distributors of the movies. It also harms the film industry by reducing its revenue and profits. By using Filmyzilla or any other pirated website, you are supporting piracy and contributing to its negative impact on the entertainment sector.</p>
-<p>Secondly, using Filmyzilla or any other pirated website is unsafe and risky for your device and data. These websites often contain malware, viruses, spyware, adware, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further. These websites also require you to disable your antivirus or firewall software or allow unknown sources to access your device, which can expose you to more dangers.</p>
-<p>Therefore, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream National Treasure: Book of Secrets in Hindi or any other movie. Instead, we recommend you to use legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube to watch National Treasure: Book of Secrets in Hindi or any other language. These platforms are legal and safe to use and they offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies. You might have to pay a subscription fee or a rental fee to use these platforms, but it is worth it for the quality and security they provide.</p>
-<h2>Conclusion</h2>
-<p>National Treasure: Book of Secrets is a movie that can be enjoyed by anyone who likes history, mystery, and adventure. It is a sequel to the 2004 movie National Treasure and it follows the same formula of clues, puzzles, and treasure hunting. The movie has some exciting action scenes, some funny moments, and some interesting historical references. The movie also has some flaws, such as its historical inaccuracies, its implausibilities, and its similarities to its predecessor. The movie is not meant to be taken seriously or realistically. It is meant to be an entertaining and escapist fantasy that appeals to the fans of the genre.</p>
-<p>If you want to watch National Treasure: Book of Secrets in Hindi for free, you might be tempted to visit Filmyzilla, a website that offers free downloads of pirated movies. However, we strongly advise you to avoid using Filmyzilla or any other pirated website to download or stream movies. These websites are illegal and unethical and they harm the film industry by violating the intellectual property rights of the creators and distributors of the movies. These websites are also unsafe and risky for your device and data as they contain malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. These websites also display annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.</p>
-<p>Therefore, we recommend you to use legal and safe platforms such as Netflix, Amazon Prime Video, Disney Plus, or YouTube to watch National Treasure: Book of Secrets in Hindi or any other language. These platforms are legal and safe to use and they offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies. You might have to pay a subscription fee or a rental fee to use these platforms, but it is worth it for the quality and security they provide.</p>
-<h2>FAQs</h2>
-<p>Here are some frequently asked questions and answers about National Treasure: Book of Secrets and Filmyzilla:</p>
-<ol>
-<li>Q: Is National Treasure: Book of Secrets based on a true story?<br>
-A: No, National Treasure: Book of Secrets is not based on a true story. It is a fictional story that uses some historical figures and events as inspiration.</li>
-<li>Q: Is there a third movie in the National Treasure franchise?<br>
-A: Yes, there is a third movie in the National Treasure franchise in development. It was announced in January 2020 that Chris Bremner was hired to write the script for National Treasure 3. However, there is no official release date or cast information yet.</li>
-<li>Q: Is Filmyzilla legal?<br>
-A: No, Filmyzilla is not legal. It is a website that offers free downloads of pirated movies that violate the intellectual property rights of the creators and distributors of the movies.</li>
-<li>Q: Is Filmyzilla safe?<br>
-A: No, Filmyzilla is not safe. It is a website that contains malware, viruses, and other harmful software that can infect your device and compromise your security and privacy. It also displays annoying and intrusive ads that can redirect you to malicious or inappropriate websites that can harm you further.</li>
-<li>Q: What are some alternatives to Filmyzilla?<br>
-A: Some alternatives to Filmyzilla are Netflix, Amazon Prime Video, Disney Plus, or YouTube. These are legal and safe platforms that offer high-quality and fast streaming of movies. They also have a variety of movies and shows to choose from and they respect the rights of the creators and distributors of the movies.</li>
-</ol></p> 401be4b1e0<br />
-<br />
-<br />
spaces/1phancelerku/anime-remove-background/Dragon Ball Z Kakarot APK - Download and Play the Amazing DBZ Game on Android.md
DELETED
@@ -1,101 +0,0 @@
-
-<br />
-<h1>Dragon Ball Z Kakarot: How to Download and Play on Android</h1>
-<p>If you are a fan of the Dragon Ball anime and manga series, you might have heard of Dragon Ball Z Kakarot, a role-playing game that lets you relive the epic saga of Goku and his friends. In this game, you can explore the vast open world of Dragon Ball, fight against powerful enemies, and experience the story from different perspectives. But did you know that you can also play this game on your Android device? In this article, we will show you how to download and play Dragon Ball Z Kakarot on Android using APKPure, a reliable and safe source for Android games.</p>
-<h2>What is Dragon Ball Z Kakarot?</h2>
-<p>Dragon Ball Z Kakarot is a game developed by CyberConnect2 and published by Bandai Namco Entertainment in 2020. It is based on the Dragon Ball Z anime series, which follows the adventures of Goku, a Saiyan warrior who protects Earth from various threats. The game covers the four main sagas of the series: Saiyan Saga, Frieza Saga, Cell Saga, and Buu Saga. You can play as Goku and other characters, such as Vegeta, Gohan, Piccolo, and Trunks.</p>
-<h2>dragon ball z kakarot download apkpure</h2><br /><p><b><b>Download</b> 🆗 <a href="https://jinyurl.com/2uNSMb">https://jinyurl.com/2uNSMb</a></b></p><br /><br />
-<p>The game features a large open world that you can explore by flying, driving, fishing, eating, and more. You can also interact with various characters from the series, complete side quests, collect items, and upgrade your skills. The game also has a dynamic combat system that allows you to unleash powerful attacks and transformations. You can also use support characters to assist you in battle.</p>
-<h2>Why download Dragon Ball Z Kakarot from APKPure?</h2>
-<p>APKPure is a website that offers free and safe downloads of Android games and apps. You can find thousands of games from different genres and categories on APKPure, including popular titles like PUBG Mobile, Genshin Impact, Among Us, and more. You can also discover new and trending games that are not available on Google Play Store.</p>
-<p>One of the advantages of using APKPure is that it provides fast and secure downloads. You don't need to worry about viruses or malware when downloading from APKPure. You also don't need to register or sign up to use APKPure. You can simply search for the game you want and download it with one click.</p>
-<p>Another benefit of using APKPure is that it supports multiple languages and regions. You can choose the language and region that suits you best when browsing APKPure. You can also find games that are compatible with your device's specifications and preferences.</p>
-<h3>How to download and install Dragon Ball Z Kakarot from APKPure</h3>
-<p>To download and install Dragon Ball Z Kakarot from APKPure, you need to follow these simple steps:</p>
-<ol>
-<li>Go to <a href="(^1^)">APKCombo</a> website on your browser.</li>
-<li>Type "dragon ball z kakarot" in the search box and press enter.</li>
-<li>Select the game from the list of results.</li>
-<li>Click on the "Download" button and wait for the download to finish.</li>
-<li>Once the download is complete, open the file manager app on your device and locate the downloaded file.</li>
-<li>Tap on the file and allow the installation of unknown sources if prompted.</li>
-<li>Follow the instructions on the screen to install the game.</li>
-<li>Launch the game and enjoy!</li>
-</ol>
-<h2>How to play Dragon Ball Z Kakarot on Android</h2>
-<p>To play Dragon Ball Z Kakarot on Android, you need to have a compatible device and a stable internet connection. The game requires at least 4 GB of RAM and 5 GB of free storage space. You also need to have Android 7.0 or higher as your operating system. The game may not run smoothly on low-end devices or devices with insufficient memory.</p>
-<p>The game is easy to play with touch controls. You can move your character with the virtual joystick on the left side of the screen, and use the buttons on the right side to attack, dodge, charge, and use items. You can switch between characters by tapping their icons in the top-left corner of the screen, and open the menu by tapping the three dots in the top-right corner.</p>
-<p>The game follows the story of Dragon Ball Z, so you can expect to encounter many familiar scenes and characters. You can also explore the world and find hidden secrets and collectibles. You can level up your characters by completing quests, fighting enemies, and training, and customize them by equipping skills, items, and costumes.</p>
-<h3>Gameplay tips and tricks for beginners and advanced players</h3>
-<p>Here are some gameplay tips and tricks that can help you enjoy Dragon Ball Z Kakarot more:</p>
-<ul>
-<li>Use the map to find your objectives and waypoints. You can also use the map to fast travel to different locations.</li>
-<li>Collect Z orbs and D medals as you explore. Z orbs are used to upgrade your skills, while D medals are used to unlock new skills.</li>
-<li>Interact with NPCs and complete side quests. They can give you rewards such as items, money, and experience.</li>
-<li>Use the community board to activate bonuses and perks. You can place different characters in different communities and increase their friendship levels.</li>
-<li>Use the training grounds to learn new skills and techniques. You can also fight against past enemies and bosses to test your skills.</li>
-<li>Use the cooking system to prepare meals that boost your stats and health. You can also eat at restaurants or campsites for temporary buffs.</li>
-<li>Use the transformation system to gain an edge in battle. You can transform into different forms such as Super Saiyan, Super Saiyan 2, Super Saiyan 3, and more.</li>
-<li>Use the support system to get help from your allies. You can call them to assist you in combat or switch with them if you are low on health.</li>
-<li>Use the combo system to deal more damage and stun your enemies. You can chain different attacks and skills together for devastating effects.</li>
-<li>Use the ki blast system to attack from a distance or break your enemy's guard. You can also charge your ki by holding down the attack button.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Dragon Ball Z Kakarot is a game that every Dragon Ball fan should try. It is a game that lets you experience the story of Dragon Ball Z in a new and immersive way. You can download and play this game on your Android device by using APKPure, a website that offers free and safe downloads of Android games and apps. APKPure has many advantages such as fast and secure downloads, multiple languages and regions support, and compatibility with various devices. To play Dragon Ball Z Kakarot on Android, you just need to follow the steps we mentioned above and enjoy the game.</p>
-<p>If you have any questions or feedback about Dragon Ball Z Kakarot or APKPure, feel free to leave a comment below. We would love to hear from you!</p>
-<p>dragon ball z kakarot mobile apk download<br />
-dragon ball z kakarot android apk free download<br />
-dragon ball z kakarot apk obb download for android<br />
-dragon ball z kakarot apk mod download<br />
-dragon ball z kakarot apk ios download<br />
-dragon ball z kakarot game download apkpure<br />
-dragon ball z kakarot apk data download<br />
-dragon ball z kakarot apk offline download<br />
-dragon ball z kakarot apk latest version download<br />
-dragon ball z kakarot apk full game download<br />
-dragon ball z kakarot apkpure free download<br />
-dragon ball z kakarot apk+obb free download<br />
-dragon ball z kakarot apk unlimited money download<br />
-dragon ball z kakarot apk rexdl download<br />
-dragon ball z kakarot apk revdl download<br />
-dragon ball z kakarot apkpure mod download<br />
-dragon ball z kakarot apkpure hack download<br />
-dragon ball z kakarot apkpure cheats download<br />
-dragon ball z kakarot apkpure unlocked download<br />
-dragon ball z kakarot apkpure update download<br />
-dragon ball z kakarot apkpure new version download<br />
-dragon ball z kakarot apkpure online download<br />
-dragon ball z kakarot apkpure offline mode download<br />
-dragon ball z kakarot apkpure english version download<br />
-dragon ball z kakarot apkpure no verification download<br />
-how to download dragon ball z kakarot apk on android<br />
-how to install dragon ball z kakarot apk on android<br />
-how to play dragon ball z kakarot apk on android<br />
-how to get dragon ball z kakarot apk for free on android<br />
-how to update dragon ball z kakarot apk on android<br />
-how to run dragon ball z kakarot apk on android<br />
-how to fix dragon ball z kakarot apk on android<br />
-how to hack dragon ball z kakarot apk on android<br />
-how to mod dragon ball z kakarot apk on android<br />
-how to cheat in dragon ball z kakarot apk on android<br />
-how to unlock all characters in dragon ball z kakarot apk on android<br />
-how to save game in dragon ball z kakarot apk on android<br />
-how to change language in dragon ball z kakarot apk on android<br />
-how to connect controller in dragon ball z kakarot apk on android<br />
-how to play online in dragon ball z kakarot apk on android<br />
-is there a dragon ball z kakarot apk for android<br />
-is dragon ball z kakarot apk real or fake for android<br />
-is dragon ball z kakarot apk safe or virus for android<br />
-is dragon ball z kakarot apk worth it for android<br />
-is dragon ball z kakarot apk compatible with my device for android<br />
-is dragon ball z kakarot apk legal or illegal for android<br />
-is dragon ball z kakarot apk official or unofficial for android<br />
-is dragon ball z kakarot apk original or modded for android<br />
-is dragon ball z kakarot apk working or not for android</p>
-<h3>Frequently Asked Questions</h3>
-<p>Here are some frequently asked questions about Dragon Ball Z Kakarot and APKPure:</p>
-<ol>
-<li><b>Is Dragon Ball Z Kakarot free to play?</b><br>No, Dragon Ball Z Kakarot is not a free-to-play game. It is a paid game that costs $59.99 on Steam and $39.99 on PlayStation 4 and Xbox One. However, you can download it for free from APKPure if you have an Android device.</li>
-<li><b>Is Dragon Ball Z Kakarot online or offline?</b><br>Dragon Ball Z Kakarot is mainly an offline game that does not require an internet connection to play. However, some features such as online events, leaderboards, achievements, and updates may require an internet connection.</li>
-<li><b>Is Dragon Ball Z Kakarot multiplayer or single-player?</b><br>Dragon Ball Z Kakarot is a single-player game that does not have a multiplayer mode. However, you can play with other players online in some events such as raids, boss battles, and tournaments.</li>
-<li><b>Is APKPure safe to use?</b><br>Yes, APKPure is safe to use as it does not contain any viruses or malware. APKPure also verifies the authenticity and integrity of the files it provides. You can trust APKPure to download and install Android games and apps without any worries.</li>
-<li><b>How to update Dragon Ball Z Kakarot on Android?</b><br>To update Dragon Ball Z Kakarot on Android, you need to visit APKPure website again and check if there is a new version available. If there is, you can download and install it over the existing one. You can also enable the auto-update feature on APKPure app to get the latest updates automatically.</li>
-</ol></p> 197e85843d<br />
-<br />
-<br />
spaces/232labs/VToonify/vtoonify/model/encoder/encoders/helpers.py
DELETED
@@ -1,119 +0,0 @@
-from collections import namedtuple
-import torch
-from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
-
-"""
-ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
-"""
-
-
-class Flatten(Module):
-    def forward(self, input):
-        return input.view(input.size(0), -1)
-
-
-def l2_norm(input, axis=1):
-    norm = torch.norm(input, 2, axis, True)
-    output = torch.div(input, norm)
-    return output
-
-
-class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
-    """ A named tuple describing a ResNet block. """
-
-
-def get_block(in_channel, depth, num_units, stride=2):
-    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
-
-
-def get_blocks(num_layers):
-    if num_layers == 50:
-        blocks = [
-            get_block(in_channel=64, depth=64, num_units=3),
-            get_block(in_channel=64, depth=128, num_units=4),
-            get_block(in_channel=128, depth=256, num_units=14),
-            get_block(in_channel=256, depth=512, num_units=3)
-        ]
-    elif num_layers == 100:
-        blocks = [
-            get_block(in_channel=64, depth=64, num_units=3),
-            get_block(in_channel=64, depth=128, num_units=13),
-            get_block(in_channel=128, depth=256, num_units=30),
-            get_block(in_channel=256, depth=512, num_units=3)
-        ]
-    elif num_layers == 152:
-        blocks = [
-            get_block(in_channel=64, depth=64, num_units=3),
-            get_block(in_channel=64, depth=128, num_units=8),
-            get_block(in_channel=128, depth=256, num_units=36),
-            get_block(in_channel=256, depth=512, num_units=3)
-        ]
-    else:
-        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
-    return blocks
-
-
-class SEModule(Module):
-    def __init__(self, channels, reduction):
-        super(SEModule, self).__init__()
-        self.avg_pool = AdaptiveAvgPool2d(1)
-        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
-        self.relu = ReLU(inplace=True)
-        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
-        self.sigmoid = Sigmoid()
-
-    def forward(self, x):
-        module_input = x
-        x = self.avg_pool(x)
-        x = self.fc1(x)
-        x = self.relu(x)
-        x = self.fc2(x)
-        x = self.sigmoid(x)
-        return module_input * x
-
-
-class bottleneck_IR(Module):
-    def __init__(self, in_channel, depth, stride):
-        super(bottleneck_IR, self).__init__()
-        if in_channel == depth:
-            self.shortcut_layer = MaxPool2d(1, stride)
-        else:
-            self.shortcut_layer = Sequential(
-                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
-                BatchNorm2d(depth)
-            )
-        self.res_layer = Sequential(
-            BatchNorm2d(in_channel),
-            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
-            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
-        )
-
-    def forward(self, x):
-        shortcut = self.shortcut_layer(x)
-        res = self.res_layer(x)
-        return res + shortcut
-
-
-class bottleneck_IR_SE(Module):
-    def __init__(self, in_channel, depth, stride):
-        super(bottleneck_IR_SE, self).__init__()
-        if in_channel == depth:
-            self.shortcut_layer = MaxPool2d(1, stride)
-        else:
-            self.shortcut_layer = Sequential(
-                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
-                BatchNorm2d(depth)
-            )
-        self.res_layer = Sequential(
-            BatchNorm2d(in_channel),
-            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
-            PReLU(depth),
-            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
-            BatchNorm2d(depth),
-            SEModule(depth, 16)
-        )
-
-    def forward(self, x):
-        shortcut = self.shortcut_layer(x)
-        res = self.res_layer(x)
-        return res + shortcut
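The helpers above are the standard building blocks of the IR-SE ResNet used by pSp-style encoders. A minimal sketch of how they are typically assembled, assuming the file is importable as `helpers` (the wrapper code below is illustrative, not part of this repository):

import torch
from torch.nn import Sequential
from helpers import get_blocks, bottleneck_IR_SE  # assumed import path

# Expand the block specs for a 50-layer network into concrete modules.
modules = []
for block in get_blocks(num_layers=50):
    for spec in block:
        modules.append(bottleneck_IR_SE(spec.in_channel, spec.depth, spec.stride))
body = Sequential(*modules)

# A 64-channel feature map at 112x112, as the usual input stem produces.
x = torch.randn(1, 64, 112, 112)
print(body(x).shape)  # torch.Size([1, 512, 7, 7]) after four stride-2 stages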
spaces/AFischer1985/wizardlm-13b-v1-2-q4-0-gguf/index.html
DELETED
@@ -1,39 +0,0 @@
-<!DOCTYPE html>
-<html>
-  <head>
-    <title>Wizardlm-13b-v1.2.Q4_0.gguf</title>
-  </head>
-  <body>
-    <h1>Wizardlm-13b-v1.2.Q4_0.gguf</h1>
-    <p>
-      With the utilization of the
-      <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>
-      package, we are excited to introduce the GGUF model hosted in the Hugging
-      Face Docker Spaces, made accessible through an OpenAI-compatible API. This
-      space includes comprehensive API documentation to facilitate seamless
-      integration.
-    </p>
-    <ul>
-      <li>
-        The API endpoint:
-        <a
-          href="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/models"
-          >https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1
-        </a>
-      </li>
-      <li>
-        The API doc:
-        <a
-          href="https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/docs"
-          >https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/docs
-        </a>
-      </li>
-    </ul>
-    <p>
-      If you find this resource valuable, your support in the form of starring
-      the space would be greatly appreciated. Your engagement plays a vital role
-      in furthering the application for a community GPU grant, ultimately
-      enhancing the capabilities and accessibility of this space.
-    </p>
-  </body>
-</html>
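Since the page advertises an OpenAI-compatible API served by llama-cpp-python, the endpoint can be exercised with a plain HTTP call. A minimal sketch using only the Python standard library; the `/v1/completions` route and the request fields follow the generic OpenAI completions schema, which is an assumption about this server's configuration rather than something the page documents:

import json
import urllib.request

url = "https://afischer1985-wizardlm-13b-v1-2-q4-0-gguf.hf.space/v1/completions"
payload = {"prompt": "Q: What is a GGUF file? A:", "max_tokens": 64}  # assumed schema
req = urllib.request.Request(
    url,
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
# POST the completion request and print the first returned choice.
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["choices"][0]["text"])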
spaces/AIFILMS/image-to-sound-fx/app.py
DELETED
@@ -1,125 +0,0 @@
-import gradio as gr
-import os
-import time
-from moviepy.editor import *
-from share_btn import community_icon_html, loading_icon_html, share_js
-
-#token = os.environ.get('HF_TOKEN')
-caption = gr.Blocks.load(name="spaces/laion/CoCa")
-audio_gen = gr.Blocks.load(name="spaces/haoheliu/audioldm-text-to-audio-generation")
-
-ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
-
-def input_changes(input_img):
-
-    if input_img == None:
-        return manual_cap.update(value="",placeholder=ph_message), caption_output.update(value=None), sound_output.update(value=None)
-    else:
-        cap = caption(input_img, fn_index=0)
-        print("CoCa caption: '" + cap + "' • ")
-        ph_update = "CoCa caption: '" + cap + "' • "
-
-        return manual_cap.update(value="",placeholder=f"{ph_update}{ph_message}"), caption_output.update(value=cap), sound_output.update(value=None)
-
-def infer(image_input, manual_caption, duration_in, seed, caption_output):
-
-    print(duration_in)
-    if manual_caption == "":
-        cap = caption_output
-        #cap = caption(image_input, fn_index=0)
-        #print("CoCa caption: '" + cap + "' • ")
-        #ph_update = "CoCa caption: '" + cap + "' • "
-    else:
-        cap = manual_caption
-        print("manual caption: " + cap)
-        ph_update=""
-
-    sound = audio_gen(cap, duration_in, 2.5, seed, 3, fn_index=0)
-
-    #return cap, sound[1], gr.Textbox.update(placeholder=f"{ph_update}{ph_message}"), gr.Group.update(visible=True)
-    return cap, sound[1], gr.Group.update(visible=True)
-
-title = """
-<div style="text-align: center; max-width: 700px; margin: 0 auto;">
-  <div
-    style="
-      display: inline-flex;
-      align-items: center;
-      gap: 0.8rem;
-      font-size: 1.75rem;
-    "
-  >
-    <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
-      Image to Sound Effect
-    </h1>
-  </div>
-  <p style="margin-bottom: 10px; font-size: 94%">
-    Convert an image to a corresponding sound effect generated through CoCa Image Captioning & AudioLDM
-  </p>
-</div>
-"""
-
-article = """
-
-<div class="footer">
-  <p>
-
-    Follow <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a> for future updates 🤗
-  </p>
-</div>
-
-<div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;margin-bottom: 30px;">
-  <p>You may also like: </p>
-
-  <div id="may-like-content" style="display:flex;flex-wrap: wrap;align-items:center;height:20px;">
-
-    <svg height="20" width="208" style="margin-left:4px;margin-bottom: 6px;">
-      <a href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation" target="_blank">
-        <image href="https://img.shields.io/badge/🤗 Spaces-AudioLDM_Text_to_Audio-blue" src="https://img.shields.io/badge/🤗 Spaces-AudioLDM_Text_to_Audio-blue.png" height="20"/>
-      </a>
-    </svg>
-
-    <svg height="20" width="122" style="margin-left:4px;margin-bottom: 6px;">
-      <a href="https://huggingface.co/spaces/fffiloni/spectrogram-to-music" target="_blank">
-        <image href="https://img.shields.io/badge/🤗 Spaces-Riffusion-blue" src="https://img.shields.io/badge/🤗 Spaces-Riffusion-blue.png" height="20"/>
-      </a>
-    </svg>
-
-  </div>
-</div>
-"""
-
-with gr.Blocks(css="style.css") as demo:
-    with gr.Column(elem_id="col-container"):
-
-        gr.HTML(title)
-
-        input_img = gr.Image(type="filepath", elem_id="input-img")
-
-        with gr.Column():
-            manual_cap = gr.Textbox(label="Manual Image description (optional)", lines=3, placeholder=ph_message)
-            with gr.Row():
-                duration_in = gr.Slider(minimum=5, maximum=10, step=5, value=5, label="Duration")
-                seed_in = gr.Slider(label="Seed", value=440, minimum=45, maximum=10000, step=1)
-
-        caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
-        sound_output = gr.Audio(label="Result", elem_id="sound-output")
-
-        generate = gr.Button("Generate SFX from Image")
-
-        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
-            community_icon = gr.HTML(community_icon_html)
-            loading_icon = gr.HTML(loading_icon_html)
-            share_button = gr.Button("Share to community", elem_id="share-btn")
-
-        gr.HTML(article)
-
-    change_out = [manual_cap, caption_output, sound_output]
-    input_img.change(input_changes, input_img, change_out, queue=False)
-
-
-
-    generate.click(infer, inputs=[input_img, manual_cap, duration_in, seed_in, caption_output], outputs=[caption_output, sound_output, share_group], api_name="i2fx")
-    share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=32).launch(debug=True)
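The pattern worth noting in this app is remote chaining: one Gradio Space calls two other hosted Spaces as if they were local functions, via `gr.Blocks.load` plus an `fn_index` that selects the remote endpoint. A minimal sketch of the same idea under the gradio 3.x API this file targets (the wrapper interface below is illustrative):

import gradio as gr

# Load a hosted Space as a callable client, as app.py does above.
remote_caption = gr.Blocks.load(name="spaces/laion/CoCa")

def describe(image_path):
    # fn_index picks which function of the remote app to invoke.
    return remote_caption(image_path, fn_index=0)

gr.Interface(fn=describe,
             inputs=gr.Image(type="filepath"),
             outputs="text").launch()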
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/__init__.py
DELETED
@@ -1,8 +0,0 @@
-from .factory import list_models, create_model, create_model_and_transforms, add_model_config
-from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
-from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model
-from .openai import load_openai_model, list_openai_models
-from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\
-    get_pretrained_url, download_pretrained
-from .tokenizer import SimpleTokenizer, tokenize
-from .transform import image_transform
spaces/AIGText/GlyphControl/ldm/modules/ema.py
DELETED
@@ -1,80 +0,0 @@
-import torch
-from torch import nn
-
-
-class LitEma(nn.Module):
-    def __init__(self, model, decay=0.9999, init_num_updates = 0, use_num_upates=True):
-        super().__init__()
-        if decay < 0.0 or decay > 1.0:
-            raise ValueError('Decay must be between 0 and 1')
-
-        self.m_name2s_name = {}
-        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-        self.register_buffer('num_updates', torch.tensor(init_num_updates, dtype=torch.int) if use_num_upates
-                             else torch.tensor(-1, dtype=torch.int))  # 0
-
-        for name, p in model.named_parameters():
-            if p.requires_grad:
-                # remove as '.'-character is not allowed in buffers
-                s_name = name.replace('.', '')
-                self.m_name2s_name.update({name: s_name})
-                self.register_buffer(s_name, p.clone().detach().data)
-
-        self.collected_params = []
-
-    def reset_num_updates(self):
-        del self.num_updates
-        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
-
-    def forward(self, model):
-        decay = self.decay
-
-        if self.num_updates >= 0:
-            self.num_updates += 1
-            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
-
-        one_minus_decay = 1.0 - decay
-
-        with torch.no_grad():
-            m_param = dict(model.named_parameters())
-            shadow_params = dict(self.named_buffers())
-
-            for key in m_param:
-                if m_param[key].requires_grad:
-                    sname = self.m_name2s_name[key]
-                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
-                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
-                else:
-                    assert not key in self.m_name2s_name
-
-    def copy_to(self, model):
-        m_param = dict(model.named_parameters())
-        shadow_params = dict(self.named_buffers())
-        for key in m_param:
-            if m_param[key].requires_grad:
-                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
-            else:
-                assert not key in self.m_name2s_name
-
-    def store(self, parameters):
-        """
-        Save the current parameters for restoring later.
-        Args:
-            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                temporarily stored.
-        """
-        self.collected_params = [param.clone() for param in parameters]
-
-    def restore(self, parameters):
-        """
-        Restore the parameters stored with the `store` method.
-        Useful to validate the model with EMA parameters without affecting the
-        original optimization process. Store the parameters before the
-        `copy_to` method. After validation (or model saving), use this to
-        restore the former parameters.
-        Args:
-            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                updated with the stored parameters.
-        """
-        for c_param, param in zip(self.collected_params, parameters):
-            param.data.copy_(c_param.data)
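LitEma keeps an exponential moving average of a model's trainable weights in registered buffers, warming the decay up through the num_updates counter. A minimal sketch of the usual workflow around it; the model and optimizer are stand-ins, and only the LitEma calls mirror the class above:

import torch
from torch import nn

model = nn.Linear(4, 2)              # stand-in for the real network
ema = LitEma(model, decay=0.9999)    # registers one shadow buffer per trainable param

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
for step in range(100):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema(model)                       # update the shadow weights after each step

ema.store(model.parameters())        # stash the raw training weights
ema.copy_to(model)                   # swap in the EMA weights for evaluation
# ... validate or save the model here ...
ema.restore(model.parameters())      # put the training weights back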
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/streamToAsyncIterable.ts
DELETED
@@ -1,15 +0,0 @@
-// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for-await...of#iterating_over_async_generators
-export async function* streamToAsyncIterable(
-  stream: ReadableStream<Uint8Array>
-): AsyncIterableIterator<Uint8Array> {
-  const reader = stream.getReader();
-  try {
-    while (true) {
-      const { done, value } = await reader.read();
-      if (done) return;
-      yield value;
-    }
-  } finally {
-    reader.releaseLock();
-  }
-}
spaces/Aditya9790/yolo7-object-tracking/utils/aws/mime.sh
DELETED
@@ -1,26 +0,0 @@
-# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
-# This script will run on every instance restart, not only on first start
-# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
-
-Content-Type: multipart/mixed; boundary="//"
-MIME-Version: 1.0
-
---//
-Content-Type: text/cloud-config; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="cloud-config.txt"
-
-#cloud-config
-cloud_final_modules:
-- [scripts-user, always]
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Content-Disposition: attachment; filename="userdata.txt"
-
-#!/bin/bash
-# --- paste contents of userdata.sh here ---
---//
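
Aside: the deleted file is a cloud-init multipart user-data document: a cloud-config part re-enables scripts-user on every boot, and a shell-script part carries the actual startup commands. A minimal sketch (not from the repo) of generating an equivalent document with Python's standard email library; the script body is a placeholder you would fill in:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

cloud_config = "#cloud-config\ncloud_final_modules:\n- [scripts-user, always]\n"
user_script = "#!/bin/bash\n# --- paste contents of userdata.sh here ---\n"

msg = MIMEMultipart(boundary="//")
part1 = MIMEText(cloud_config, "cloud-config")   # Content-Type: text/cloud-config
part1.add_header("Content-Disposition", "attachment", filename="cloud-config.txt")
part2 = MIMEText(user_script, "x-shellscript")   # Content-Type: text/x-shellscript
part2.add_header("Content-Disposition", "attachment", filename="userdata.txt")
msg.attach(part1)
msg.attach(part2)

print(msg.as_string())  # paste the output into the EC2 user-data field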
spaces/Aditya9790/yolo7-object-tracking/utils/google_app_engine/Dockerfile
DELETED
@@ -1,25 +0,0 @@
-FROM gcr.io/google-appengine/python
-
-# Create a virtualenv for dependencies. This isolates these packages from
-# system-level packages.
-# Use -p python3 or -p python3.7 to select python version. Default is version 2.
-RUN virtualenv /env -p python3
-
-# Setting these environment variables are the same as running
-# source /env/bin/activate.
-ENV VIRTUAL_ENV /env
-ENV PATH /env/bin:$PATH
-
-RUN apt-get update && apt-get install -y python-opencv
-
-# Copy the application's requirements.txt and run pip to install all
-# dependencies into the virtualenv.
-ADD requirements.txt /app/requirements.txt
-RUN pip install -r /app/requirements.txt
-
-# Add the application source code.
-ADD . /app
-
-# Run a WSGI server to serve the application. gunicorn must be declared as
-# a dependency in requirements.txt.
-CMD gunicorn -b :$PORT main:app
spaces/AgentVerse/agentVerse/ui/README.md
DELETED
@@ -1 +0,0 @@
-# Work in progress
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/clock/Clock.d.ts
DELETED
@@ -1,2 +0,0 @@
-import Base from '../base/Base';
-export default class Clock extends Base { }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/container/Factory.d.ts
DELETED
@@ -1,8 +0,0 @@
-// import * as Phaser from 'phaser';
-import Container from "./Container";
-
-export default function (
-    x?: number, y?: number,
-    width?: number, height?: number,
-    children?: Phaser.GameObjects.GameObject[]
-): Container;
spaces/Agusbs98/automatic-ecg-diagnosis/nets/backbones.py
DELETED
@@ -1,57 +0,0 @@
-
-import os, sys
-from libs import *
-from .layers import *
-from .modules import *
-from .bblocks import *
-
-class LightSEResNet18(nn.Module):
-    def __init__(self,
-        base_channels = 64,
-    ):
-        super(LightSEResNet18, self).__init__()
-        self.bblock = LightSEResBlock
-        self.stem = nn.Sequential(
-            nn.Conv1d(
-                1, base_channels,
-                kernel_size = 15, padding = 7, stride = 2,
-            ),
-            nn.BatchNorm1d(base_channels),
-            nn.ReLU(),
-            nn.MaxPool1d(
-                kernel_size = 3, padding = 1, stride = 2,
-            ),
-        )
-        self.stage_0 = nn.Sequential(
-            self.bblock(base_channels),
-            self.bblock(base_channels),
-        )
-
-        self.stage_1 = nn.Sequential(
-            self.bblock(base_channels*1, downsample = True),
-            self.bblock(base_channels*2),
-        )
-        self.stage_2 = nn.Sequential(
-            self.bblock(base_channels*2, downsample = True),
-            self.bblock(base_channels*4),
-        )
-        self.stage_3 = nn.Sequential(
-            self.bblock(base_channels*4, downsample = True),
-            self.bblock(base_channels*8),
-        )
-
-        self.pool = nn.AdaptiveAvgPool1d(1)
-
-    def forward(self,
-        input,
-    ):
-        output = self.stem(input)
-        output = self.stage_0(output)
-
-        output = self.stage_1(output)
-        output = self.stage_2(output)
-        output = self.stage_3(output)
-
-        output = self.pool(output)
-
-        return output
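
Aside: the deleted backbone takes a single-lead 1-D signal, per the Conv1d(1, base_channels, ...) stem. A hypothetical smoke test, assuming the repo's LightSEResBlock (defined in .bblocks, not shown) doubles channels when downsample=True so the final width is base_channels*8:

import torch

# Batch of 8 single-lead ECG traces, 4096 samples each.
model = LightSEResNet18(base_channels=64)
x = torch.randn(8, 1, 4096)    # (batch, channels, samples)
features = model(x)            # stem + 4 stages + AdaptiveAvgPool1d(1)
print(features.shape)          # expected: torch.Size([8, 512, 1])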
spaces/AlekseyCalvin/dreambooth-training3/train_dreambooth.py
DELETED
@@ -1,889 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-from pathlib import Path
-from typing import Optional
-import subprocess
-import sys
-import gc
-import random
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch.utils.data import Dataset
-
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.optimization import get_scheduler
-from huggingface_hub import HfFolder, Repository, whoami
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-
-logger = get_logger(__name__)
-
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Simple example of a training script.")
-    parser.add_argument(
-        "--pretrained_model_name_or_path",
-        type=str,
-        default=None,
-        #required=True,
-        help="Path to pretrained model or model identifier from huggingface.co/models.",
-    )
-    parser.add_argument(
-        "--tokenizer_name",
-        type=str,
-        default=None,
-        help="Pretrained tokenizer name or path if not the same as model_name",
-    )
-    parser.add_argument(
-        "--instance_data_dir",
-        type=str,
-        default=None,
-        #required=True,
-        help="A folder containing the training data of instance images.",
-    )
-    parser.add_argument(
-        "--class_data_dir",
-        type=str,
-        default=None,
-        #required=False,
-        help="A folder containing the training data of class images.",
-    )
-    parser.add_argument(
-        "--instance_prompt",
-        type=str,
-        default=None,
-        help="The prompt with identifier specifying the instance",
-    )
-    parser.add_argument(
-        "--class_prompt",
-        type=str,
-        default="",
-        help="The prompt to specify images in the same class as provided instance images.",
-    )
-    parser.add_argument(
-        "--with_prior_preservation",
-        default=False,
-        action="store_true",
-        help="Flag to add prior preservation loss.",
-    )
-    parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
-    parser.add_argument(
-        "--num_class_images",
-        type=int,
-        default=100,
-        help=(
-            "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
-            " sampled with class_prompt."
-        ),
-    )
-    parser.add_argument(
-        "--output_dir",
-        type=str,
-        default="",
-        help="The output directory where the model predictions and checkpoints will be written.",
-    )
-    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
-    parser.add_argument(
-        "--resolution",
-        type=int,
-        default=512,
-        help=(
-            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
-            " resolution"
-        ),
-    )
-    parser.add_argument(
-        "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
-    )
-    parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
-    parser.add_argument(
-        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
-    )
-    parser.add_argument(
-        "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
-    )
-    parser.add_argument("--num_train_epochs", type=int, default=1)
-    parser.add_argument(
-        "--max_train_steps",
-        type=int,
-        default=None,
-        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
-    )
-    parser.add_argument(
-        "--gradient_accumulation_steps",
-        type=int,
-        default=1,
-        help="Number of updates steps to accumulate before performing a backward/update pass.",
-    )
-    parser.add_argument(
-        "--gradient_checkpointing",
-        action="store_true",
-        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
-    )
-    parser.add_argument(
-        "--learning_rate",
-        type=float,
-        default=5e-6,
-        help="Initial learning rate (after the potential warmup period) to use.",
-    )
-    parser.add_argument(
-        "--scale_lr",
-        action="store_true",
-        default=False,
-        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
-    )
-    parser.add_argument(
-        "--lr_scheduler",
-        type=str,
-        default="constant",
-        help=(
-            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
-            ' "constant", "constant_with_warmup"]'
-        ),
-    )
-    parser.add_argument(
-        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
-    )
-    parser.add_argument(
-        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
-    )
-    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
-    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
-    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
-    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
-    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
-    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
-    parser.add_argument(
-        "--hub_model_id",
-        type=str,
-        default=None,
-        help="The name of the repository to keep in sync with the local `output_dir`.",
-    )
-    parser.add_argument(
-        "--logging_dir",
-        type=str,
-        default="logs",
-        help=(
-            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
-            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
-        ),
-    )
-    parser.add_argument(
-        "--mixed_precision",
-        type=str,
-        default="no",
-        choices=["no", "fp16", "bf16"],
-        help=(
-            "Whether to use mixed precision. Choose"
-            "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
-            "and an Nvidia Ampere GPU."
-        ),
-    )
-
-    parser.add_argument(
-        "--save_n_steps",
-        type=int,
-        default=1,
-        help=("Save the model every n global_steps"),
-    )
-
-
-    parser.add_argument(
-        "--save_starting_step",
-        type=int,
-        default=1,
-        help=("The step from which it starts saving intermediary checkpoints"),
-    )
-
-    parser.add_argument(
-        "--stop_text_encoder_training",
-        type=int,
-        default=1000000,
-        help=("The step at which the text_encoder is no longer trained"),
-    )
-
-
-    parser.add_argument(
-        "--image_captions_filename",
-        action="store_true",
-        help="Get captions from filename",
-    )
-
-
-    parser.add_argument(
-        "--dump_only_text_encoder",
-        action="store_true",
-        default=False,
-        help="Dump only text encoder",
-    )
-
-    parser.add_argument(
-        "--train_only_unet",
-        action="store_true",
-        default=False,
-        help="Train only the unet",
-    )
-
-    parser.add_argument(
-        "--cache_latents",
-        action="store_true",
-        default=False,
-        help="Train only the unet",
-    )
-
-    parser.add_argument(
-        "--Session_dir",
-        type=str,
-        default="",
-        help="Current session directory",
-    )
-
-
-
-
-    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
-    args = parser.parse_args()
-    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
-    if env_local_rank != -1 and env_local_rank != args.local_rank:
-        args.local_rank = env_local_rank
-
-    #if args.instance_data_dir is None:
-    #    raise ValueError("You must specify a train data directory.")
-
-    #if args.with_prior_preservation:
-    #    if args.class_data_dir is None:
-    #        raise ValueError("You must specify a data directory for class images.")
-    #    if args.class_prompt is None:
-    #        raise ValueError("You must specify prompt for class images.")
-
-    return args
-
-
-class DreamBoothDataset(Dataset):
-    """
-    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
-    It pre-processes the images and the tokenizes prompts.
-    """
-
-    def __init__(
-        self,
-        instance_data_root,
-        instance_prompt,
-        tokenizer,
-        args,
-        class_data_root=None,
-        class_prompt=None,
-        size=512,
-        center_crop=False,
-    ):
-        self.size = size
-        self.center_crop = center_crop
-        self.tokenizer = tokenizer
-        self.image_captions_filename = None
-
-        self.instance_data_root = Path(instance_data_root)
-        if not self.instance_data_root.exists():
-            raise ValueError("Instance images root doesn't exists.")
-
-        self.instance_images_path = list(Path(instance_data_root).iterdir())
-        self.num_instance_images = len(self.instance_images_path)
-        self.instance_prompt = instance_prompt
-        self._length = self.num_instance_images
-
-        if args.image_captions_filename:
-            self.image_captions_filename = True
-
-        if class_data_root is not None:
-            self.class_data_root = Path(class_data_root)
-            self.class_data_root.mkdir(parents=True, exist_ok=True)
-            self.class_images_path = list(self.class_data_root.iterdir())
-            random.shuffle(self.class_images_path)
-            self.num_class_images = len(self.class_images_path)
-            self._length = max(self.num_class_images, self.num_instance_images)
-            self.class_prompt = class_prompt
-        else:
-            self.class_data_root = None
-
-        self.image_transforms = transforms.Compose(
-            [
-                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
-                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
-                transforms.ToTensor(),
-                transforms.Normalize([0.5], [0.5]),
-            ]
-        )
-
-    def __len__(self):
-        return self._length
-
-    def __getitem__(self, index):
-        example = {}
-        path = self.instance_images_path[index % self.num_instance_images]
-        instance_image = Image.open(path)
-        if not instance_image.mode == "RGB":
-            instance_image = instance_image.convert("RGB")
-
-        instance_prompt = self.instance_prompt
-
-        if self.image_captions_filename:
-            filename = Path(path).stem
-            pt=''.join([i for i in filename if not i.isdigit()])
-            pt=pt.replace("_"," ")
-            pt=pt.replace("(","")
-            pt=pt.replace(")","")
-            pt=pt.replace("-","")
-            instance_prompt = pt
-            sys.stdout.write("\033[0;32m" +instance_prompt+"\033[0m")
-            sys.stdout.flush()
-
-
-        example["instance_images"] = self.image_transforms(instance_image)
-        example["instance_prompt_ids"] = self.tokenizer(
-            instance_prompt,
-            padding="do_not_pad",
-            truncation=True,
-            max_length=self.tokenizer.model_max_length,
-        ).input_ids
-
-        if self.class_data_root:
-            class_image = Image.open(self.class_images_path[index % self.num_class_images])
-            if not class_image.mode == "RGB":
-                class_image = class_image.convert("RGB")
-            example["class_images"] = self.image_transforms(class_image)
-            example["class_prompt_ids"] = self.tokenizer(
-                self.class_prompt,
-                padding="do_not_pad",
-                truncation=True,
-                max_length=self.tokenizer.model_max_length,
-            ).input_ids
-
-        return example
-
-
-
-class PromptDataset(Dataset):
-    "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
-    def __init__(self, prompt, num_samples):
-        self.prompt = prompt
-        self.num_samples = num_samples
-
-    def __len__(self):
-        return self.num_samples
-
-    def __getitem__(self, index):
-        example = {}
-        example["prompt"] = self.prompt
-        example["index"] = index
-        return example
-
-class LatentsDataset(Dataset):
-    def __init__(self, latents_cache, text_encoder_cache):
-        self.latents_cache = latents_cache
-        self.text_encoder_cache = text_encoder_cache
-
-    def __len__(self):
-        return len(self.latents_cache)
-
-    def __getitem__(self, index):
-        return self.latents_cache[index], self.text_encoder_cache[index]
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
-    if token is None:
-        token = HfFolder.get_token()
-    if organization is None:
-        username = whoami(token)["name"]
-        return f"{username}/{model_id}"
-    else:
-        return f"{organization}/{model_id}"
-
-def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
-    """
-    Starts from base starting dict and then adds the remaining key values from updater replacing the values from
-    the first starting/base dict with the second updater dict.
-
-    For later: how does d = {**d1, **d2} replace collision?
-
-    :param starting_dict:
-    :param updater_dict:
-    :return:
-    """
-    new_dict: dict = starting_dict.copy()  # start with keys and values of starting_dict
-    new_dict.update(updater_dict)  # modifies starting_dict with keys and values of updater_dict
-    return new_dict
-
-def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
-    """
-
-    ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
-    :param args1:
-    :param args2:
-    :return:
-    """
-    # - the merged args
-    # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
-    merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
-    args = argparse.Namespace(**merged_key_values_for_namespace)
-    return args
-
-def run_training(args_imported):
-    args_default = parse_args()
-    args = merge_args(args_default, args_imported)
-    print(args)
-    logging_dir = Path(args.output_dir, args.logging_dir)
-    i=args.save_starting_step
-    accelerator = Accelerator(
-        gradient_accumulation_steps=args.gradient_accumulation_steps,
-        mixed_precision=args.mixed_precision,
-        log_with="tensorboard",
-        logging_dir=logging_dir,
-    )
-
-    # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
-    # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
-    # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
-    if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
-        raise ValueError(
-            "Gradient accumulation is not supported when training the text encoder in distributed training. "
-            "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
-        )
-
-    if args.seed is not None:
-        set_seed(args.seed)
-
-    if args.with_prior_preservation:
-        class_images_dir = Path(args.class_data_dir)
-        if not class_images_dir.exists():
-            class_images_dir.mkdir(parents=True)
-        cur_class_images = len(list(class_images_dir.iterdir()))
-
-        if cur_class_images < args.num_class_images:
-            torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
-            pipeline = StableDiffusionPipeline.from_pretrained(
-                args.pretrained_model_name_or_path, torch_dtype=torch_dtype
-            )
-            pipeline.set_progress_bar_config(disable=True)
-
-            num_new_images = args.num_class_images - cur_class_images
-            logger.info(f"Number of class images to sample: {num_new_images}.")
-
-            sample_dataset = PromptDataset(args.class_prompt, num_new_images)
-            sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
-            sample_dataloader = accelerator.prepare(sample_dataloader)
-            pipeline.to(accelerator.device)
-
-            for example in tqdm(
-                sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
-            ):
-                with torch.autocast("cuda"):
-                    images = pipeline(example["prompt"]).images
-
-                for i, image in enumerate(images):
-                    image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
-
-            del pipeline
-            if torch.cuda.is_available():
-                torch.cuda.empty_cache()
-
-    # Handle the repository creation
-    if accelerator.is_main_process:
-        if args.push_to_hub:
-            if args.hub_model_id is None:
-                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
-            else:
-                repo_name = args.hub_model_id
-            repo = Repository(args.output_dir, clone_from=repo_name)
-
-            with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
-                if "step_*" not in gitignore:
-                    gitignore.write("step_*\n")
-                if "epoch_*" not in gitignore:
-                    gitignore.write("epoch_*\n")
-        elif args.output_dir is not None:
-            os.makedirs(args.output_dir, exist_ok=True)
-
-    # Load the tokenizer
-    if args.tokenizer_name:
-        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
-    elif args.pretrained_model_name_or_path:
-        tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
-    # Load models and create wrapper for stable diffusion
-    if args.train_only_unet:
-        if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
-            text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
-        elif os.path.exists(str(args.output_dir+"/text_encoder")):
-            text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
-        else:
-            text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-    else:
-        text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
-    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
-    unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-    if is_xformers_available():
-        try:
-            print("Enabling memory efficient attention with xformers...")
-            unet.enable_xformers_memory_efficient_attention()
-        except Exception as e:
-            logger.warning(
-                f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
-            )
-    vae.requires_grad_(False)
-    if not args.train_text_encoder:
-        text_encoder.requires_grad_(False)
-
-    if args.gradient_checkpointing:
-        unet.enable_gradient_checkpointing()
-        if args.train_text_encoder:
-            text_encoder.gradient_checkpointing_enable()
-
-    if args.scale_lr:
-        args.learning_rate = (
-            args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
-        )
-
-    # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
-    if args.use_8bit_adam:
-        try:
-            import bitsandbytes as bnb
-        except ImportError:
-            raise ImportError(
-                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
-            )
-
-        optimizer_class = bnb.optim.AdamW8bit
-    else:
-        optimizer_class = torch.optim.AdamW
-
-    params_to_optimize = (
-        itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
-    )
-    optimizer = optimizer_class(
-        params_to_optimize,
-        lr=args.learning_rate,
-        betas=(args.adam_beta1, args.adam_beta2),
-        weight_decay=args.adam_weight_decay,
-        eps=args.adam_epsilon,
-    )
-
-    noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
-
-    train_dataset = DreamBoothDataset(
-        instance_data_root=args.instance_data_dir,
-        instance_prompt=args.instance_prompt,
-        class_data_root=args.class_data_dir if args.with_prior_preservation else None,
-        class_prompt=args.class_prompt,
-        tokenizer=tokenizer,
-        size=args.resolution,
-        center_crop=args.center_crop,
-        args=args,
-    )
-
-    def collate_fn(examples):
-        input_ids = [example["instance_prompt_ids"] for example in examples]
-        pixel_values = [example["instance_images"] for example in examples]
-
-        # Concat class and instance examples for prior preservation.
-        # We do this to avoid doing two forward passes.
-        if args.with_prior_preservation:
-            input_ids += [example["class_prompt_ids"] for example in examples]
-            pixel_values += [example["class_images"] for example in examples]
-
-        pixel_values = torch.stack(pixel_values)
-        pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
-        input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
-        batch = {
-            "input_ids": input_ids,
-            "pixel_values": pixel_values,
-        }
-        return batch
-
-    train_dataloader = torch.utils.data.DataLoader(
-        train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
-    )
-
-    # Scheduler and math around the number of training steps.
-    overrode_max_train_steps = False
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-    if args.max_train_steps is None:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-        overrode_max_train_steps = True
-
-    lr_scheduler = get_scheduler(
-        args.lr_scheduler,
-        optimizer=optimizer,
-        num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
-        num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-    )
-
-    if args.train_text_encoder:
-        unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-            unet, text_encoder, optimizer, train_dataloader, lr_scheduler
-        )
-    else:
-        unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-            unet, optimizer, train_dataloader, lr_scheduler
-        )
-
-    weight_dtype = torch.float32
-    if args.mixed_precision == "fp16":
-        weight_dtype = torch.float16
-    elif args.mixed_precision == "bf16":
-        weight_dtype = torch.bfloat16
-
-    # Move text_encode and vae to gpu.
-    # For mixed precision training we cast the text_encoder and vae weights to half-precision
-    # as these models are only used for inference, keeping weights in full precision is not required.
-    vae.to(accelerator.device, dtype=weight_dtype)
-    if not args.train_text_encoder:
-        text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-
-    if args.cache_latents:
-        latents_cache = []
-        text_encoder_cache = []
-        for batch in tqdm(train_dataloader, desc="Caching latents"):
-            with torch.no_grad():
-                batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
-                batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
-                latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
-                if args.train_text_encoder:
-                    text_encoder_cache.append(batch["input_ids"])
-                else:
-                    text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
-        train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
-        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
-
-        del vae
-        #if not args.train_text_encoder:
-        #    del text_encoder
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-
-    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
-    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-    if overrode_max_train_steps:
-        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-    # Afterwards we recalculate our number of training epochs
-    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-    # We need to initialize the trackers we use, and also store our configuration.
-    # The trackers initializes automatically on the main process.
-    if accelerator.is_main_process:
-        accelerator.init_trackers("dreambooth", config=vars(args))
-
-    def bar(prg):
-        br='|'+'█' * prg + ' ' * (25-prg)+'|'
-        return br
-
-    # Train!
-    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
-    logger.info("***** Running training *****")
-    logger.info(f"  Num examples = {len(train_dataset)}")
-    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
-    logger.info(f"  Num Epochs = {args.num_train_epochs}")
-    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
-    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
-    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
-    logger.info(f"  Total optimization steps = {args.max_train_steps}")
-    # Only show the progress bar once on each machine.
-    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
-    global_step = 0
-
-    for epoch in range(args.num_train_epochs):
-        unet.train()
-        if args.train_text_encoder:
-            text_encoder.train()
-        for step, batch in enumerate(train_dataloader):
-            with accelerator.accumulate(unet):
-                # Convert images to latent space
-                with torch.no_grad():
-                    if args.cache_latents:
-                        latents_dist = batch[0][0]
-                    else:
-                        latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
-                    latents = latents_dist.sample() * 0.18215
-
-                # Sample noise that we'll add to the latents
-                noise = torch.randn_like(latents)
-                bsz = latents.shape[0]
-                # Sample a random timestep for each image
-                timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-                timesteps = timesteps.long()
-
-                # Add noise to the latents according to the noise magnitude at each timestep
-                # (this is the forward diffusion process)
-                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
-                # Get the text embedding for conditioning
-                if(args.cache_latents):
-                    if args.train_text_encoder:
-                        encoder_hidden_states = text_encoder(batch[0][1])[0]
-                    else:
-                        encoder_hidden_states = batch[0][1]
-                else:
-                    encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
-                # Predict the noise residual
-                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
-                # Get the target for loss depending on the prediction type
-                if noise_scheduler.config.prediction_type == "epsilon":
-                    target = noise
-                elif noise_scheduler.config.prediction_type == "v_prediction":
-                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
-                else:
-                    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-                if args.with_prior_preservation:
-                    # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
-                    model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
-                    target, target_prior = torch.chunk(target, 2, dim=0)
-
-                    # Compute instance loss
-                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
-                    # Compute prior loss
-                    prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
-                    # Add the prior loss to the instance loss.
-                    loss = loss + args.prior_loss_weight * prior_loss
-                else:
-                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
-                accelerator.backward(loss)
-                if accelerator.sync_gradients:
-                    params_to_clip = (
-                        itertools.chain(unet.parameters(), text_encoder.parameters())
-                        if args.train_text_encoder
-                        else unet.parameters()
-                    )
-                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
-                optimizer.step()
-                lr_scheduler.step()
-                optimizer.zero_grad()
-
-            # Checks if the accelerator has performed an optimization step behind the scenes
-            if accelerator.sync_gradients:
-                progress_bar.update(1)
-                global_step += 1
-
-                fll=round((global_step*100)/args.max_train_steps)
-                fll=round(fll/4)
-                pr=bar(fll)
-
-                logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
-                progress_bar.set_postfix(**logs)
-                progress_bar.set_description_str("Progress:"+pr)
-                accelerator.log(logs, step=global_step)
-
-            if global_step >= args.max_train_steps:
-                break
-
-            if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
-                if accelerator.is_main_process:
-                    print("\033[0;32m" +" Freezing the text_encoder ..."+"\033[0m")
-                    frz_dir=args.output_dir + "/text_encoder_frozen"
-                    if os.path.exists(frz_dir):
-                        subprocess.call('rm -r '+ frz_dir, shell=True)
-                    os.mkdir(frz_dir)
-                    pipeline = StableDiffusionPipeline.from_pretrained(
-                        args.pretrained_model_name_or_path,
-                        unet=accelerator.unwrap_model(unet),
-                        text_encoder=accelerator.unwrap_model(text_encoder),
-                    )
-                    pipeline.text_encoder.save_pretrained(frz_dir)
-
-            if args.save_n_steps >= 200:
-                if global_step < args.max_train_steps and global_step+1==i:
-                    ckpt_name = "_step_" + str(global_step+1)
-                    save_dir = Path(args.output_dir+ckpt_name)
-                    save_dir=str(save_dir)
-                    save_dir=save_dir.replace(" ", "_")
-                    if not os.path.exists(save_dir):
-                        os.mkdir(save_dir)
-                    inst=save_dir[16:]
-                    inst=inst.replace(" ", "_")
-                    print("\033[1;32mSAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
-                    # Create the pipeline using the trained modules and save it.
-                    if accelerator.is_main_process:
-                        pipeline = StableDiffusionPipeline.from_pretrained(
-                            args.pretrained_model_name_or_path,
-                            unet=accelerator.unwrap_model(unet),
-                            text_encoder=accelerator.unwrap_model(text_encoder),
-                        )
-                        pipeline.save_pretrained(save_dir)
-                        frz_dir=args.output_dir + "/text_encoder_frozen"
-                        if args.train_text_encoder and os.path.exists(frz_dir):
-                            subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
-                            subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
-                        chkpth=args.Session_dir+"/"+inst+".ckpt"
-                        subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
-                        subprocess.call('rm -r '+ save_dir, shell=True)
-                    i=i+args.save_n_steps
-
-    accelerator.wait_for_everyone()
-
-    # Create the pipeline using using the trained modules and save it.
-    if accelerator.is_main_process:
-        if args.dump_only_text_encoder:
-            txt_dir=args.output_dir + "/text_encoder_trained"
-            if not os.path.exists(txt_dir):
-                os.mkdir(txt_dir)
-            pipeline = StableDiffusionPipeline.from_pretrained(
-                args.pretrained_model_name_or_path,
-                unet=accelerator.unwrap_model(unet),
-                text_encoder=accelerator.unwrap_model(text_encoder),
-            )
-            pipeline.text_encoder.save_pretrained(txt_dir)
-
-        elif args.train_only_unet:
-            pipeline = StableDiffusionPipeline.from_pretrained(
-                args.pretrained_model_name_or_path,
-                unet=accelerator.unwrap_model(unet),
-                text_encoder=accelerator.unwrap_model(text_encoder),
-            )
-            pipeline.save_pretrained(args.output_dir)
-            txt_dir=args.output_dir + "/text_encoder_trained"
-            subprocess.call('rm -r '+txt_dir, shell=True)
-
-        else:
-            pipeline = StableDiffusionPipeline.from_pretrained(
-                args.pretrained_model_name_or_path,
-                unet=accelerator.unwrap_model(unet),
-                text_encoder=accelerator.unwrap_model(text_encoder),
-            )
-            frz_dir=args.output_dir + "/text_encoder_frozen"
-            pipeline.save_pretrained(args.output_dir)
-            if args.train_text_encoder and os.path.exists(frz_dir):
-                subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
-                subprocess.call('rm -r '+ frz_dir, shell=True)
-
-        if args.push_to_hub:
-            repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
-    accelerator.end_training()
-    del pipeline
-    torch.cuda.empty_cache()
-    gc.collect()
-if __name__ == "__main__":
-    pass
-    #main()
-
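
Aside: the prior-preservation path in the training loop above relies on collate_fn concatenating instance and class examples in one batch, then splitting the UNet prediction in half with torch.chunk. A minimal standalone sketch of that loss computation, with arbitrary tensor shapes chosen for illustration (not taken verbatim from the script):

import torch
import torch.nn.functional as F

# Hypothetical shapes: 2 instance + 2 class latents, 4x64x64 each,
# stacked along the batch dimension as [instance; class].
model_pred = torch.randn(4, 4, 64, 64)   # unet output for the combined batch
target = torch.randn(4, 4, 64, 64)       # noise target (epsilon prediction)
prior_loss_weight = 1.0

pred_inst, pred_prior = torch.chunk(model_pred, 2, dim=0)
tgt_inst, tgt_prior = torch.chunk(target, 2, dim=0)

# Per-image MSE for the instance half, plain mean MSE for the prior half.
loss = F.mse_loss(pred_inst.float(), tgt_inst.float(), reduction="none").mean([1, 2, 3]).mean()
prior_loss = F.mse_loss(pred_prior.float(), tgt_prior.float(), reduction="mean")
loss = loss + prior_loss_weight * prior_loss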
spaces/Altinas/vits-uma-genshin-honkais/utils.py
DELETED
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict= {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})" .format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10,2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                    interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                    interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
-    audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
-    return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams =HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams =HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
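
Aside: the HParams class above recursively converts nested dicts into nested HParams, exposing a JSON config as attribute access. A small illustration, assuming only the class definition shown; the config keys are made up:

# Nested dicts become nested HParams via __init__/__setitem__.
config = {"train": {"batch_size": 16, "lr": 2e-4}, "model": {"hidden": 192}}
hps = HParams(**config)
print(hps.train.batch_size)    # 16 -- attribute access through __getitem__/getattr
print("model" in hps)          # True -- __contains__ checks __dict__
hps.model_dir = "./logs/demo"  # extra attributes can be attached later, as get_hparams does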
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py
DELETED
@@ -1,772 +0,0 @@
# Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL
import torch
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from ...image_processor import VaeImageProcessor
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, MultiAdapter, T2IAdapter, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    PIL_INTERPOLATION,
    BaseOutput,
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker


@dataclass
class StableDiffusionAdapterPipelineOutput(BaseOutput):
    """
    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
        nsfw_content_detected (`List[bool]`)
            List of flags denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, or `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
    ```py
    >>> from PIL import Image
    >>> from diffusers.utils import load_image
    >>> import torch
    >>> from diffusers import StableDiffusionAdapterPipeline, T2IAdapter

    >>> image = load_image(
    ...     "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/t2i-adapter/color_ref.png"
    ... )

    >>> color_palette = image.resize((8, 8))
    >>> color_palette = color_palette.resize((512, 512), resample=Image.Resampling.NEAREST)

    >>> adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_color_sd14v1", torch_dtype=torch.float16)
    >>> pipe = StableDiffusionAdapterPipeline.from_pretrained(
    ...     "CompVis/stable-diffusion-v1-4",
    ...     adapter=adapter,
    ...     torch_dtype=torch.float16,
    ... )

    >>> pipe.to("cuda")

    >>> out_image = pipe(
    ...     "At night, glowing cubes in front of the beach",
    ...     image=color_palette,
    ... ).images[0]
    ```
"""


def _preprocess_adapter_image(image, height, width):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
        image = [
            i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
        ]  # expand [h, w] or [h, w, c] to [b, h, w, c]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        if image[0].ndim == 3:
            image = torch.stack(image, dim=0)
        elif image[0].ndim == 4:
            image = torch.cat(image, dim=0)
        else:
            raise ValueError(
                f"Invalid image tensor! Expecting image tensor with 3 or 4 dimensions, but received: {image[0].ndim}"
            )
    return image


class StableDiffusionAdapterPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter
    https://arxiv.org/abs/2302.08453

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
            Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as
            a list, the outputs from each Adapter are added together to create one combined additional conditioning.
        adapter_weights (`List[float]`, *optional*, defaults to None):
            List of floats representing the weight which will be multiplied with each adapter's output before adding
            them together.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        adapter_weights: Optional[List[float]] = None,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        if isinstance(adapter, (list, tuple)):
            adapter = MultiAdapter(adapter, adapter_weights=adapter_weights)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            adapter=adapter,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.adapter, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        warnings.warn(
            "The decode_latents method is deprecated and will be removed in a future version. Please"
            " use VaeImageProcessor instead",
            FutureWarning,
        )
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def _default_height_width(self, height, width, image):
        # NOTE: It is possible that a list of images have different
        # dimensions for each image, so just checking the first image
        # is not _exactly_ correct, but it is simple.
        while isinstance(image, list):
            image = image[0]

        if height is None:
            if isinstance(image, PIL.Image.Image):
                height = image.height
            elif isinstance(image, torch.Tensor):
                height = image.shape[-2]

            # round down to nearest multiple of `self.adapter.total_downscale_factor`
            height = (height // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor

        if width is None:
            if isinstance(image, PIL.Image.Image):
                width = image.width
            elif isinstance(image, torch.Tensor):
                width = image.shape[-1]

            # round down to nearest multiple of `self.adapter.total_downscale_factor`
            width = (width // self.adapter.total_downscale_factor) * self.adapter.total_downscale_factor

        return height, width

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[torch.Tensor, PIL.Image.Image, List[PIL.Image.Image]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        adapter_conditioning_scale: Union[float, List[float]] = 1.0,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
                The Adapter input condition. Adapter uses this input condition to generate guidance for the UNet. If
                the type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can
                also be accepted as an image. The control image is automatically resized to fit the output image.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] instead
                of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
                residual in the original unet. If multiple adapters are specified in init, you can set the
                corresponding scale as a list.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height, width = self._default_height_width(height, width, image)
        device = self._execution_device

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
        )

        is_multi_adapter = isinstance(self.adapter, MultiAdapter)
        if is_multi_adapter:
            adapter_input = [_preprocess_adapter_image(img, height, width).to(device) for img in image]
            n, c, h, w = adapter_input[0].shape
            adapter_input = torch.stack([x.reshape([n * c, h, w]) for x in adapter_input])
        else:
            adapter_input = _preprocess_adapter_image(image, height, width).to(device)
        adapter_input = adapter_input.to(self.adapter.dtype)

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        adapter_state = self.adapter(adapter_input)
        for k, v in enumerate(adapter_state):
            adapter_state[k] = v * adapter_conditioning_scale
        if num_images_per_prompt > 1:
            for k, v in enumerate(adapter_state):
                adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
        if do_classifier_free_guidance:
            for k, v in enumerate(adapter_state):
                adapter_state[k] = torch.cat([v] * 2, dim=0)

        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    down_block_additional_residuals=[state.clone() for state in adapter_state],
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

            # 10. Convert to PIL
            image = self.numpy_to_pil(image)
        else:
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionAdapterPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
spaces/Andy1621/uniformer_image_detection/configs/fp16/README.md
DELETED
@@ -1,22 +0,0 @@
# Mixed Precision Training

## Introduction

[OTHERS]

```latex
@article{micikevicius2017mixed,
  title={Mixed precision training},
  author={Micikevicius, Paulius and Narang, Sharan and Alben, Jonah and Diamos, Gregory and Elsen, Erich and Garcia, David and Ginsburg, Boris and Houston, Michael and Kuchaiev, Oleksii and Venkatesh, Ganesh and others},
  journal={arXiv preprint arXiv:1710.03740},
  year={2017}
}
```

## Results and Models

| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|:------------:|:--------:|:-----:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
| Faster R-CNN | R-50 | pytorch | 1x | 3.4 | 28.8 | 37.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) |
| Mask R-CNN | R-50 | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) |
| Retinanet | R-50 | pytorch | 1x | 2.8 | 31.6 | 36.4 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) \| [log](http://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) |
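
For reference, a minimal sketch of how these fp16 configs typically enable mixed precision in mmdetection 2.x; the base-config path and loss-scale value mirror the usual layout and should be treated as assumptions:

```python
# Hedged sketch of an fp16 config: inherit the fp32 baseline, add an fp16 dict.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'

# A static loss scale guards fp16 gradients against underflow.
fp16 = dict(loss_scale=512.)
```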
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py
DELETED
@@ -1,39 +0,0 @@
_base_ = './ocrnet_hr18_512x512_80k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=[
        dict(
            type='FCNHead',
            in_channels=[48, 96, 192, 384],
            channels=sum([48, 96, 192, 384]),
            input_transform='resize_concat',
            in_index=(0, 1, 2, 3),
            kernel_size=1,
            num_convs=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            dropout_ratio=-1,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='OCRHead',
            in_channels=[48, 96, 192, 384],
            channels=512,
            ocr_channels=256,
            input_transform='resize_concat',
            in_index=(0, 1, 2, 3),
            norm_cfg=norm_cfg,
            dropout_ratio=-1,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    ])
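
Schematically, the two entries in `decode_head` contribute weighted losses that mmseg sums during training; a sketch of that combination (function and argument names here are illustrative, not mmseg API):

```python
# Illustrative sketch: FCNHead acts as an auxiliary head (loss_weight=0.4),
# OCRHead is the primary head (loss_weight=1.0), mirroring loss_decode above.
def total_segmentation_loss(loss_fcn_aux: float, loss_ocr_main: float) -> float:
    return 0.4 * loss_fcn_aux + 1.0 * loss_ocr_main
```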
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/ISSUE_TEMPLATE/feature_request.md
DELETED
@@ -1,16 +0,0 @@
---
name: Feature request
about: Suggest an improvement or new feature for the web UI
title: ''
labels: 'enhancement'
assignees: ''

---

**Description**

A clear and concise description of what you want to be implemented.

**Additional Context**

If applicable, please provide any extra information, external links, or screenshots that could be useful.
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/character_bias/script.py
DELETED
@@ -1,83 +0,0 @@
import os

import gradio as gr

# get the current directory of the script
current_dir = os.path.dirname(os.path.abspath(__file__))

# check if the bias_options.txt file exists, if not, create it
bias_file = os.path.join(current_dir, "bias_options.txt")
if not os.path.isfile(bias_file):
    with open(bias_file, "w") as f:
        f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")

# read bias options from the text file
with open(bias_file, "r") as f:
    bias_options = [line.strip() for line in f.readlines()]

params = {
    "activate": True,
    "bias string": " *I am so happy*",
    "custom string": "",  # default for the custom textbox, read by bot_prefix_modifier
    "use custom string": False,
}


def input_modifier(string):
    """
    This function is applied to your text inputs before
    they are fed into the model.
    """
    return string


def output_modifier(string):
    """
    This function is applied to the model outputs.
    """
    return string


def bot_prefix_modifier(string):
    """
    This function is only applied in chat mode. It modifies
    the prefix text for the Bot and can be used to bias its
    behavior.
    """
    if params['activate']:
        if params['use custom string']:
            return f'{string} {params["custom string"].strip()} '
        else:
            return f'{string} {params["bias string"].strip()} '
    else:
        return string


def ui():
    # Gradio elements
    activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
    dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
    use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown')
    custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above')

    # Event functions to update the parameters in the backend
    def update_bias_string(x):
        if x:
            params.update({"bias string": x})
        else:
            # fall back to the dropdown's current value (components expose .value, not .get())
            params.update({"bias string": dropdown_string.value})
        return x

    def update_custom_string(x):
        params.update({"custom string": x})

    dropdown_string.change(update_bias_string, dropdown_string, None)
    custom_string.change(update_custom_string, custom_string, None)
    activate.change(lambda x: params.update({"activate": x}), activate, None)
    use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None)

    # Group elements together depending on the selected option
    def bias_string_group():
        if use_custom_string.value:
            return gr.Group([use_custom_string, custom_string])
        else:
            return dropdown_string
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/cityscapes.py
DELETED
@@ -1,217 +0,0 @@
import os.path as osp
import tempfile

import annotator.uniformer.mmcv as mmcv
import numpy as np
from annotator.uniformer.mmcv.utils import print_log
from PIL import Image

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
    """

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')

    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, **kwargs):
        super(CityscapesDataset, self).__init__(
            img_suffix='_leftImg8bit.png',
            seg_map_suffix='_gtFine_labelTrainIds.png',
            **kwargs)

    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes."""
        if isinstance(result, str):
            result = np.load(result)
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id

        return result_copy

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission

        Returns:
            list[str]: result files which contain the corresponding
                semantic segmentation images.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]

            png_filename = osp.join(imgfile_prefix, f'{basename}.png')

            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            for label_id, label in CSLabels.id2label.items():
                palette[label_id] = label.color

            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()

        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of image files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: True

        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporary directory created
                for saving json/png files when img_prefix is not specified.
        """

        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')

        if imgfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None,
                 efficient_test=False):
        """Evaluation in Cityscapes/default protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.

        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """

        eval_results = dict()
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger, efficient_test))

        return eval_results

    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file

        Returns:
            dict[str, float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, imgfile_prefix)

        if tmp_dir is None:
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name

        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False

        seg_map_list = []
        pred_list = []

        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))

        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))

        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
|
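For reference, the temporary-directory handling in format_results above can be reduced to a few lines. This is a minimal, self-contained sketch of the same idiom; the function name is ours, not mmseg's:

import tempfile

def resolve_output_dir(imgfile_prefix=None):
    # Create a TemporaryDirectory only when no prefix is given; return the
    # directory to write into plus the handle the caller must .cleanup().
    if imgfile_prefix is None:
        tmp_dir = tempfile.TemporaryDirectory()
        return tmp_dir.name, tmp_dir
    return imgfile_prefix, None

out_dir, tmp = resolve_output_dir()
print(out_dir)      # e.g. /tmp/tmpab12cd34
if tmp is not None:
    tmp.cleanup()   # mirrors the cleanup at the end of _evaluate_cityscapes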
spaces/Anonymous-sub/Rerender/ControlNet/tool_transfer_control.py
DELETED
@@ -1,59 +0,0 @@
path_sd15 = './models/v1-5-pruned.ckpt'
path_sd15_with_control = './models/control_sd15_openpose.pth'
path_input = './models/anything-v3-full.safetensors'
path_output = './models/control_any3_openpose.pth'


import os


assert os.path.exists(path_sd15), 'Input path_sd15 does not exist!'
assert os.path.exists(path_sd15_with_control), 'Input path_sd15_with_control does not exist!'
assert os.path.exists(path_input), 'Input path_input does not exist!'
assert os.path.exists(os.path.dirname(path_output)), 'Output folder does not exist!'


import torch
from share import *
from cldm.model import load_state_dict


sd15_state_dict = load_state_dict(path_sd15)
sd15_with_control_state_dict = load_state_dict(path_sd15_with_control)
input_state_dict = load_state_dict(path_input)


def get_node_name(name, parent_name):
    if len(name) <= len(parent_name):
        return False, ''
    p = name[:len(parent_name)]
    if p != parent_name:
        return False, ''
    return True, name[len(parent_name):]


keys = sd15_with_control_state_dict.keys()

final_state_dict = {}
for key in keys:
    is_first_stage, _ = get_node_name(key, 'first_stage_model')
    is_cond_stage, _ = get_node_name(key, 'cond_stage_model')
    if is_first_stage or is_cond_stage:
        final_state_dict[key] = input_state_dict[key]
        continue
    p = sd15_with_control_state_dict[key]
    is_control, node_name = get_node_name(key, 'control_')
    if is_control:
        sd15_key_name = 'model.diffusion_' + node_name
    else:
        sd15_key_name = key
    if sd15_key_name in input_state_dict:
        p_new = p + input_state_dict[sd15_key_name] - sd15_state_dict[sd15_key_name]
        # print(f'Offset clone from [{sd15_key_name}] to [{key}]')
    else:
        p_new = p
        # print(f'Direct clone to [{key}]')
    final_state_dict[key] = p_new

torch.save(final_state_dict, path_output)
print('Transferred model saved at ' + path_output)
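The core of the script above is a weight-offset trick: the ControlNet checkpoint's weights are rebased onto a new base model by adding the delta between the two base checkpoints. A toy demonstration on a single tensor (the values are illustrative, not from any checkpoint):

import torch

sd15_w = torch.randn(4, 4)       # weight from the original SD 1.5 checkpoint
control_w = sd15_w + 0.1         # corresponding weight in the ControlNet checkpoint
new_base_w = torch.randn(4, 4)   # same weight in the replacement base model

# p_new = p + input - sd15, exactly as in the loop above
transferred = control_w + new_base_w - sd15_w

# The ControlNet-specific offset is preserved on top of the new base weights.
assert torch.allclose(transferred - new_base_w, control_w - sd15_w)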
spaces/Artrajz/vits-simple-api/voice.py
DELETED
@@ -1,325 +0,0 @@
import os
import librosa
import re
import numpy as np
import torch
import xml.etree.ElementTree as ET
import config
import soundfile as sf
from io import BytesIO
from graiax import silkcoder
from logger import logger
from contants import ModelType
from scipy.signal import resample_poly


# torch.set_num_threads(1)  # limit torch to a single thread


class TTS:
    def __init__(self, voice_obj, voice_speakers, **kwargs):
        self._voice_obj = voice_obj
        self._voice_speakers = voice_speakers
        self._strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25}
        self._speakers_count = sum([len(self._voice_speakers[i]) for i in self._voice_speakers])
        self._vits_speakers_count = len(self._voice_speakers[ModelType.VITS.value])
        self._hubert_speakers_count = len(self._voice_speakers[ModelType.HUBERT_VITS.value])
        self._w2v2_speakers_count = len(self._voice_speakers[ModelType.W2V2_VITS.value])
        self._w2v2_emotion_count = kwargs.get("w2v2_emotion_count", 0)
        self._bert_vits2_speakers_count = len(self._voice_speakers[ModelType.BERT_VITS2.value])
        self.dem = None

        # Initialization information
        self.logger = logger
        self.logger.info(f"torch:{torch.__version__} cuda_available:{torch.cuda.is_available()}")
        self.logger.info(f'device:{kwargs.get("device")} device.type:{kwargs.get("device").type}')

        if getattr(config, "DIMENSIONAL_EMOTION_MODEL", None) is not None:
            try:
                import audonnx
                root = os.path.dirname(config.DIMENSIONAL_EMOTION_MODEL)
                model_file = config.DIMENSIONAL_EMOTION_MODEL
                self.dem = audonnx.load(root=root, model_file=model_file)
            except Exception as e:
                self.logger.warning(f"Load DIMENSIONAL_EMOTION_MODEL failed {e}")

        if self._vits_speakers_count != 0:
            self.logger.info(f"[{ModelType.VITS.value}] {self._vits_speakers_count} speakers")
        if self._hubert_speakers_count != 0:
            self.logger.info(f"[{ModelType.HUBERT_VITS.value}] {self._hubert_speakers_count} speakers")
        if self._w2v2_speakers_count != 0:
            self.logger.info(f"[{ModelType.W2V2_VITS.value}] {self._w2v2_speakers_count} speakers")
        if self._bert_vits2_speakers_count != 0:
            self.logger.info(
                f"[{ModelType.BERT_VITS2.value}] {self._bert_vits2_speakers_count} speakers")
        self.logger.info(f"{self._speakers_count} speakers in total.")
        if self._speakers_count == 0:
            self.logger.warning("No model was loaded.")

    @property
    def voice_speakers(self):
        return self._voice_speakers

    @property
    def speakers_count(self):
        return self._speakers_count

    @property
    def vits_speakers_count(self):
        return self._vits_speakers_count

    @property
    def hubert_speakers_count(self):
        return self._hubert_speakers_count

    @property
    def w2v2_speakers_count(self):
        return self._w2v2_speakers_count

    @property
    def w2v2_emotion_count(self):
        return self._w2v2_emotion_count

    @property
    def bert_vits2_speakers_count(self):
        return self._bert_vits2_speakers_count

    def encode(self, sampling_rate, audio, format):
        with BytesIO() as f:
            if format.upper() == 'OGG':
                sf.write(f, audio, sampling_rate, format="ogg")
                return BytesIO(f.getvalue())
            elif format.upper() == 'SILK':
                sf.write(f, audio, sampling_rate, format="wav")
                return BytesIO(silkcoder.encode(f))
            elif format.upper() == 'MP3':
                sf.write(f, audio, sampling_rate, format="mp3")
                return BytesIO(f.getvalue())
            elif format.upper() == 'WAV':
                sf.write(f, audio, sampling_rate, format="wav")
                return BytesIO(f.getvalue())
            elif format.upper() == 'FLAC':
                sf.write(f, audio, sampling_rate, format="flac")
                return BytesIO(f.getvalue())
            else:
                raise ValueError(f"Unsupported format:{format}")

    def convert_time_string(self, time_string):
        time_value = float(re.findall(r'\d+\.?\d*', time_string)[0])
        time_unit = re.findall(r'[a-zA-Z]+', time_string)[0].lower()

        if time_unit.upper() == 'MS':
            return time_value / 1000
        elif time_unit.upper() == 'S':
            return time_value
        elif time_unit.upper() == 'MIN':
            return time_value * 60
        elif time_unit.upper() == 'H':
            return time_value * 3600
        elif time_unit.upper() == 'D':
            return time_value * 24 * 3600  # days -- surely nobody actually writes D?
        else:
            raise ValueError("Unsupported time unit: {}".format(time_unit))

    def generate_audio_chunks(self, audio):
        chunk_size = 4096
        while True:
            chunk = audio.read(chunk_size)
            if not chunk:
                break
            yield chunk

    def resample_audio(self, audio, orig_sr, target_sr):
        if orig_sr == target_sr:
            return audio

        gcd = np.gcd(orig_sr, target_sr)
        audio = resample_poly(audio, target_sr // gcd, orig_sr // gcd)

        return audio

    def parse_ssml(self, ssml):
        root = ET.fromstring(ssml)
        format = root.attrib.get("format", "wav")
        voice_tasks = []
        brk_count = 0
        strength_dict = {"x-weak": 0.25, "weak": 0.5, "Medium": 0.75, "Strong": 1, "x-strong": 1.25}

        for element in root.iter():
            if element.tag == "voice":
                id = int(element.attrib.get("id", root.attrib.get("id", config.ID)))
                lang = element.attrib.get("lang", root.attrib.get("lang", config.LANG))
                length = float(element.attrib.get("length", root.attrib.get("length", config.LENGTH)))
                noise = float(element.attrib.get("noise", root.attrib.get("noise", config.NOISE)))
                noisew = float(element.attrib.get("noisew", root.attrib.get("noisew", config.NOISEW)))
                max = int(element.attrib.get("max", root.attrib.get("max", "0")))
                # defaults to vits when not specified
                model_type = element.attrib.get("model_type", root.attrib.get("model_type", "vits"))
                # only w2v2-vits/emotion-vits use emotion
                emotion = int(element.attrib.get("emotion", root.attrib.get("emotion", 0)))
                # Bert-VITS2 parameter
                sdp_ratio = int(element.attrib.get("sdp_ratio", root.attrib.get("sdp_ratio", config.SDP_RATIO)))

                voice_element = ET.tostring(element, encoding='unicode')

                pattern_voice = r'<voice.*?>(.*?)</voice>'
                pattern_break = r'<break\s*?(.*?)\s*?/>'

                matches_voice = re.findall(pattern_voice, voice_element)[0]
                matches_break = re.split(pattern_break, matches_voice)
                for match in matches_break:
                    strength = re.search(r'\s*strength\s*=\s*[\'\"](.*?)[\'\"]', match)
                    time = re.search(r'\s*time\s*=\s*[\'\"](.*?)[\'\"]', match)
                    # break tag with a strength attribute
                    if strength:
                        brk = strength_dict[strength.group(1)]
                        voice_tasks.append({"break": brk})
                        brk_count += 1
                    # break tag with a time attribute
                    elif time:
                        brk = self.convert_time_string(time.group(1))
                        voice_tasks.append({"break": brk})
                        brk_count += 1
                    # bare break tag: default pause of 0.75 s
                    elif match == "":
                        voice_tasks.append({"break": 0.75})
                        brk_count += 1
                    # everything inside the voice tag other than break is text
                    else:
                        voice_tasks.append({"id": id,
                                            "text": match,
                                            "lang": lang,
                                            "length": length,
                                            "noise": noise,
                                            "noisew": noisew,
                                            "max": max,
                                            "model_type": model_type,
                                            "emotion": emotion,
                                            "sdp_ratio": sdp_ratio
                                            })

                # 0.75 s pause at the end of each segment
                voice_tasks.append({"break": 0.75})
            elif element.tag == "break":
                # brk_count > 0 means the break was already handled inside a voice tag
                if brk_count > 0:
                    brk_count -= 1
                    continue
                brk = strength_dict.get(element.attrib.get("strength"),
                                        self.convert_time_string(element.attrib.get("time", "750ms")))
                voice_tasks.append({"break": brk})

        for i in voice_tasks:
            self.logger.debug(i)

        return voice_tasks, format

    def process_ssml_infer_task(self, tasks, format):
        audios = []
        sampling_rates = []
        last_sampling_rate = 22050
        for task in tasks:
            if task.get("break"):
                audios.append(np.zeros(int(task.get("break") * 22050), dtype=np.int16))
                sampling_rates.append(last_sampling_rate)
            else:
                model_type_str = task.get("model_type").upper()
                if model_type_str not in [ModelType.VITS.value, ModelType.W2V2_VITS.value, ModelType.BERT_VITS2.value]:
                    raise ValueError(f"Unsupported model type: {task.get('model_type')}")
                model_type = ModelType(model_type_str)
                voice_obj = self._voice_obj[model_type][task.get("id")][1]
                real_id = self._voice_obj[model_type][task.get("id")][0]
                task["id"] = real_id
                sampling_rates.append(voice_obj.sampling_rate)
                last_sampling_rate = voice_obj.sampling_rate
                audio = voice_obj.get_audio(task)
                audios.append(audio)
        # take the highest sampling rate
        target_sr = max(sampling_rates)
        # resample all audio segments to the highest sampling rate
        resampled_audios = [self.resample_audio(audio, sr, target_sr) for audio, sr in zip(audios, sampling_rates)]
        audio = np.concatenate(resampled_audios, axis=0)
        encoded_audio = self.encode(target_sr, audio, format)
        return encoded_audio

    def vits_infer(self, task):
        format = task.get("format", "wav")
        voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1]
        real_id = self._voice_obj[ModelType.VITS][task.get("id")][0]
        task["id"] = real_id  # Change to real id
        sampling_rate = voice_obj.sampling_rate
        audio = voice_obj.get_audio(task, auto_break=True)
        encoded_audio = self.encode(sampling_rate, audio, format)
        return encoded_audio

    def stream_vits_infer(self, task, fname=None):
        format = task.get("format", "wav")
        voice_obj = self._voice_obj[ModelType.VITS][task.get("id")][1]
        task["id"] = self._voice_obj[ModelType.VITS][task.get("id")][0]
        sampling_rate = voice_obj.sampling_rate
        generator = voice_obj.get_stream_audio(task, auto_break=True)
        # audio = BytesIO()
        for chunk in generator:
            encoded_audio = self.encode(sampling_rate, chunk, format)
            for encoded_audio_chunk in self.generate_audio_chunks(encoded_audio):
                yield encoded_audio_chunk
            # if getattr(config, "SAVE_AUDIO", False):
            #     audio.write(encoded_audio.getvalue())
        # if getattr(config, "SAVE_AUDIO", False):
        #     path = f"{config.CACHE_PATH}/{fname}"
        #     utils.save_audio(audio.getvalue(), path)

    def hubert_vits_infer(self, task):
        format = task.get("format", "wav")
        voice_obj = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][1]
        task["id"] = self._voice_obj[ModelType.HUBERT_VITS][task.get("id")][0]
        sampling_rate = voice_obj.sampling_rate
        audio = voice_obj.get_audio(task)
        encoded_audio = self.encode(sampling_rate, audio, format)
        return encoded_audio

    def w2v2_vits_infer(self, task):
        format = task.get("format", "wav")
        voice_obj = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][1]
        task["id"] = self._voice_obj[ModelType.W2V2_VITS][task.get("id")][0]
        sampling_rate = voice_obj.sampling_rate
        audio = voice_obj.get_audio(task, auto_break=True)
        encoded_audio = self.encode(sampling_rate, audio, format)
        return encoded_audio

    def vits_voice_conversion(self, task):
        original_id = task.get("original_id")
        target_id = task.get("target_id")
        format = task.get("format")

        original_id_obj = int(self._voice_obj[ModelType.VITS][original_id][2])
        target_id_obj = int(self._voice_obj[ModelType.VITS][target_id][2])

        if original_id_obj != target_id_obj:
            raise ValueError("speakers are in different VITS models")

        task["original_id"] = int(self._voice_obj[ModelType.VITS][original_id][0])
        task["target_id"] = int(self._voice_obj[ModelType.VITS][target_id][0])

        voice_obj = self._voice_obj[ModelType.VITS][original_id][1]
        sampling_rate = voice_obj.sampling_rate

        audio = voice_obj.voice_conversion(task)
        encoded_audio = self.encode(sampling_rate, audio, format)
        return encoded_audio

    def get_dimensional_emotion_npy(self, audio):
        if self.dem is None:
            raise ValueError("Please configure DIMENSIONAL_EMOTION_MODEL path in config.py")
        audio16000, sampling_rate = librosa.load(audio, sr=16000, mono=True)
        emotion = self.dem(audio16000, sampling_rate)['hidden_states']
        emotion_npy = BytesIO()
        np.save(emotion_npy, emotion.squeeze(0))
        emotion_npy.seek(0)

        return emotion_npy

    def bert_vits2_infer(self, task):
        format = task.get("format", "wav")
        voice_obj = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][1]
        task["id"] = self._voice_obj[ModelType.BERT_VITS2][task.get("id")][0]
        sampling_rate = voice_obj.sampling_rate
        audio = voice_obj.get_audio(task, auto_break=True)
        encoded_audio = self.encode(sampling_rate, audio, format)
        return encoded_audio
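For orientation, here is a hypothetical SSML input that parse_ssml above would accept. The attribute names (id, lang, length, noise, model_type, emotion, and the break strength/time forms) come from the parser; the root element name and the speaker ids are assumptions:

ssml = """
<speak id="0" lang="auto" length="1.0" format="wav">
    <voice>Hello there.</voice>
    <voice id="2" model_type="w2v2-vits" emotion="3">
        A second speaker<break time="500ms"/>with a half-second pause.
    </voice>
    <break strength="x-strong"/>
</speak>
"""
# tasks, fmt = tts.parse_ssml(ssml)  # tts: a constructed TTS instance (assumed)
# Each <voice> text chunk becomes a task dict; each <break> becomes
# {"break": seconds}, with "x-strong" mapping to 1.25 via the strength table.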
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__init__.py
DELETED
The diff for this file is too large to render.
See raw diff
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_importlib.py
DELETED
@@ -1,47 +0,0 @@
import sys


def disable_importlib_metadata_finder(metadata):
    """
    Ensure importlib_metadata doesn't provide older, incompatible
    Distributions.

    Workaround for #3102.
    """
    try:
        import importlib_metadata
    except ImportError:
        return
    except AttributeError:
        import warnings

        msg = (
            "`importlib-metadata` version is incompatible with `setuptools`.\n"
            "This problem is likely to be solved by installing an updated version of "
            "`importlib-metadata`."
        )
        warnings.warn(msg)  # Ensure a descriptive message is shown.
        raise  # This exception can be suppressed by _distutils_hack

    if importlib_metadata is metadata:
        return
    to_remove = [
        ob
        for ob in sys.meta_path
        if isinstance(ob, importlib_metadata.MetadataPathFinder)
    ]
    for item in to_remove:
        sys.meta_path.remove(item)


if sys.version_info < (3, 10):
    from setuptools.extern import importlib_metadata as metadata
    disable_importlib_metadata_finder(metadata)
else:
    import importlib.metadata as metadata  # noqa: F401


if sys.version_info < (3, 9):
    from setuptools.extern import importlib_resources as resources
else:
    import importlib.resources as resources  # noqa: F401
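A short usage sketch of the shim above: consumers import `metadata` (or `resources`) from this module and get either the stdlib implementation or the vendored backport, with the incompatible finder already removed. The same API works either way; this assumes setuptools is installed in the environment:

from setuptools._importlib import metadata

print(metadata.version('setuptools'))  # e.g. '65.5.0'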
spaces/AzinZ/vitscn/commons.py
DELETED
@@ -1,161 +0,0 @@
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def intersperse(lst, item):
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q)"""
    kl = (logs_q - logs_p) - 0.5
    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2. * logs_q)
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protect from overflows."""
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
    return g


def slice_segments(x, ids_str, segment_size=4):
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (num_timescales - 1))
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    signal = signal.view(1, channels, length)
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
    return mask


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    acts = t_act * s_act
    return acts


def convert_pad_shape(pad_shape):
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


def shift_1d(x):
    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
    return x


def sequence_mask(length, max_length=None):
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """
    duration: [b, 1, t_x]
    mask: [b, 1, t_y, t_x]
    """
    device = duration.device

    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    path = path.unsqueeze(1).transpose(2, 3) * mask
    return path


def clip_grad_value_(parameters, clip_value, norm_type=2):
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    total_norm = total_norm ** (1. / norm_type)
    return total_norm
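A small runnable sketch of two of the helpers above, assuming this file is importable as `commons`:

import torch
from commons import sequence_mask, rand_slice_segments  # import path assumed

lengths = torch.tensor([2, 4])
print(sequence_mask(lengths))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])

x = torch.arange(24.).view(2, 3, 4)   # [batch, channels, time]
seg, ids = rand_slice_segments(x, x_lengths=lengths, segment_size=2)
print(seg.shape)                      # torch.Size([2, 3, 2]): one random window per batch item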
spaces/Benson/text-generation/Examples/Assoluto Racing Mod Apk 1.9.1.md
DELETED
@@ -1,124 +0,0 @@
# Plague Inc 1.18 5 Mod Apk Việt Hóa: How to Download and Play the Game

Plague Inc is a popular simulation game that lets you create and evolve a pathogen to wipe out humanity with a deadly pandemic. But what if you want to play with more features, more languages, and more fun? In this article, we show you how to download and play Plague Inc 1.18 5 Mod Apk Việt Hóa, a modified version of the game that offers many advantages over the original.

## What is Plague Inc?

### A brief introduction to the game and its features

Plague Inc is a real-time strategy simulation game developed by Ndemic Creations. It was inspired by the 2011 film Contagion and the 2008 Flash game Pandemic 2, and it has been downloaded more than 160 million times as of May 2021.

## assoluto racing mod apk 1.9.1

**Download File ===> https://bltlly.com/2v6JQZ**

The game lets you choose among different game modes and pathogens, such as bacteria, virus, fungus, parasite, prion, nano-virus, bio-weapon, Neurax Worm, Necroa Virus, Simian Flu, and Shadow Plague. Each pathogen has its own traits and strategies to master.

Your goal is to infect and kill the world's population with your plague while adapting to various environments and overcoming human defenses. You evolve your plague by spending DNA points on transmission, symptoms, and abilities. You can also trigger random events and world events that affect the spread and severity of your plague.

### The difference between the original and the modded version

Plague Inc 1.18 5 Mod Apk Việt Hóa is a modified version of Plague Inc that offers some advantages over the original:

- It unlocks all premium content for free, such as genes, scenarios, cheats, and special plagues.
- It gives you unlimited DNA points to evolve your plague faster.
- It supports Vietnamese (việt hóa) as well as English and other languages.
- It has improved graphics and sound effects for a better gameplay experience.

## How to download and install Plague Inc 1.18 5 Mod Apk Việt Hóa

### The requirements and steps to download and install the mod apk

To download and install Plague Inc 1.18 5 Mod Apk Việt Hóa, you need an Android device that meets the following requirements:

- Android version 4.1 or higher.
- At least 100 MB of free storage space.
- A stable Internet connection.

Once you have checked your device's compatibility, follow these steps to download and install the mod apk:

1. Go to the link provided below to download the mod apk file.
2. Allow your device to install apps from unknown sources. You can do this by going to Settings > Security > Unknown Sources and enabling the option.
3. Find the downloaded mod apk file in your device's file manager and tap on it to start the installation.
4. Follow the on-screen instructions to complete the installation.
5. Launch the game and enjoy playing Plague Inc 1.18 5 Mod Apk Việt Hóa.

### The link to download the mod apk

You can download Plague Inc 1.18 5 Mod Apk Việt Hóa from this link: [Plague Inc 1.18 5 Mod Apk Việt Hóa]

## How to play Plague Inc 1.18 5 Mod Apk Việt Hóa

### The game modes and pathogens available in the mod apk

You can choose among the following game modes:

- Main Game: the standard mode where you create and evolve your own plague and try to infect and kill the world.
- Speed Run: a timed mode where you have to infect and kill the world as fast as possible.
- Co-Op Mode: a multiplayer mode where you team up with another player and work together to infect and kill the world.
- Versus Mode: a multiplayer mode where you compete with another player and try to infect and kill more people than they do.

You can also choose among the following pathogens:

| Pathogen | Description |
| --- | --- |
| Bacteria | The most common, well-rounded pathogen. It has no special abilities but can evolve quickly. |
| Virus | A fast-mutating pathogen that can become hard to cure. It has a high chance of developing random symptoms, but it can also turn lethal too quickly. |
| Fungus | A slow-spreading pathogen that relies on spores to infect new countries. It has a low chance of being detected, but it can struggle in hot climates. |
| Parasite | A stealthy pathogen that can avoid being noticed by humans. It has low severity, but it can also reduce the DNA points from red biohazard bubbles. |
| Prion | A complex pathogen that can manipulate human behavior. It has a slow infection rate, but it can trigger neural atrophy that makes it harder to cure. |
| Nano-Virus | A synthetic pathogen that is detected from the start of the game. It has high infectivity, but kill switches can make it easier to cure. |
| Bio-Weapon | A lethal pathogen that can kill humans quickly. It has high severity, but it can be unstable and hard to control. |
| Neurax Worm | |
| Necroa Virus | A zombie-making virus that can reanimate dead humans. It has a unique symptom tree and can trigger a global military response. |
| Simian Flu | A genetically engineered virus that can infect both humans and apes. It has a unique ability tree and can trigger an ape uprising. |
| Shadow Plague | A vampiric pathogen that can create vampires and infect humans. It has a unique bloodlust system and can trigger a Templar response. |

### Gameplay tips and tricks for creating and spreading a deadly plague

Plague Inc 1.18 5 Mod Apk Việt Hóa is a challenging game that requires strategic and creative thinking to wipe out humanity. Here are some general tips and tricks that can help you improve your game:

- Choose your pathogen and game mode wisely. Different pathogens and game modes have different strengths and weaknesses, so pick the one that suits your playstyle and strategy.
- Start your plague in a populous, poor country. This gives you more DNA points and more opportunities to spread your plague to other countries.
- Balance your transmission, symptoms, and abilities. You need to evolve your plague to make it more infectious, more severe, and more resistant to factors such as climate, the cure, and the human response.
- Watch the news and world events. These give you hints about what is happening in the world and how humans are reacting to your plague, so you can adjust your strategy accordingly.

## Conclusion

### A summary of the main points and a recommendation for the game

Plague Inc 1.18 5 Mod Apk Việt Hóa is a fun and engaging game that lets you unleash your inner evil genius and create a global pandemic. It offers many features, options, and challenges that make it more enjoyable and realistic than the original version, and you can download and install the mod apk easily from the link provided above. If you are looking for a game that tests your creativity, intelligence, and strategy skills, Plague Inc 1.18 5 Mod Apk Việt Hóa is the game for you.

## FAQ

### Five unique questions and answers about the game and the mod apk

1. Q: Is Plague Inc 1.18 5 Mod Apk Việt Hóa safe to download and play?
   A: Yes. The mod apk file has been scanned for viruses and malware and has no harmful effects on your device or data.
2. Q: What are the benefits of playing Plague Inc 1.18 5 Mod Apk Việt Hóa over the original version?
   A: It offers many benefits over the original, such as unlocking all premium content for free, giving you unlimited DNA points, supporting Vietnamese, and improving the graphics and sound effects.
3. Q: How can I update Plague Inc 1.18 5 Mod Apk Việt Hóa to the latest version?
   A: Uninstall the current version from your device, download the new version from the same link provided above, and install it following the same steps as before.
4. Q: How can I contact the developer of Plague Inc 1.18 5 Mod Apk Việt Hóa if I have questions or feedback?
5. Q: How can I support the developer of Plague Inc 1.18 5 Mod Apk Việt Hóa if I like their work?
   A: You can support them by sharing their work with friends and family, leaving positive feedback and ratings, or donating to them if they offer a donation option.
spaces/Benson/text-generation/Examples/Chicken Gun Apk Latest Version.md
DELETED
@@ -1,26 +0,0 @@
# Chicken Gun APK Latest Version: A Fun and Crazy Online Shooter Game

If you are looking for a fun and crazy online shooter, you should try the latest version of Chicken Gun APK. This is a game where you play as armed chickens that shoot and fight each other. You can choose between two modes: 5 vs 5 teams or free-for-all. You can also customize your rooster, weapon, beak, sneakers, and caps. Throw explosive eggs and stage a massacre. Join the chicken shootout and have fun!

## What is Chicken Gun APK?

Chicken Gun APK is an Android game developed by ChaloApps. It combines action, humor, and multiplayer features. Here are some of the things you can do in this game:

### A game where you play as armed chickens

In Chicken Gun APK, you are not a human soldier but a chicken warrior. You can choose among different chicken breeds, such as white, black, brown, or red. Each chicken has its own stats and abilities. You can also equip your chicken with various weapons, such as pistols, shotguns, rifles, or grenades.

### A game with two modes: 5 vs 5 and free-for-all

Chicken Gun APK offers two game modes: 5 vs 5 teams or free-for-all. In team mode, you join a team of five chickens and compete against another team of five; the team with the most kills wins. In free-for-all mode, you play against nine other chickens in a chaotic battle royale; the last chicken standing wins.

### A game where you can customize your rooster, weapon, beak, sneakers, and caps

Chicken Gun APK lets you customize your rooster in many ways. You can change its weapon, beak, sneakers, and caps, and unlock new items by playing the game or buying them with coins. You can make your rooster look cool, funny, or scary.

## How to download and install Chicken Gun APK?

### Download the APK file from a trusted source

You can download the APK file from a trusted source, such as [APKCombo], [APKLeon], or [APKBloch]. These are websites that offer free, safe downloads of Android games and apps. Search for Chicken Gun APK on these sites and download the latest version.

### Enable unknown sources on your device

Before you can install the APK file, you need to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the Google Play Store. To enable it, go to Settings > Security > Unknown Sources and turn it on.

### Install the APK file and enjoy the game

After downloading the APK file and enabling unknown sources, you can install it. Locate the file in your downloads folder and tap on it, then follow the on-screen instructions. Once the installation is complete, open the app and start playing.

## What are the features of Chicken Gun APK?

Chicken Gun APK offers many features that make it fun and exciting. Here are some of them:

### High-quality graphics and sound effects

Chicken Gun APK has high-quality graphics and sound effects that create a realistic, immersive experience. The game has 3D models of chickens, weapons, and environments, with realistic physics and animations. The sound effects are loud and clear: you can hear the gunshots, explosions, and chicken noises.

### Various weapons and items to use

Chicken Gun APK has various weapons and items you can use to shoot and fight other chickens. You can choose among pistols, shotguns, rifles, grenades, rocket launchers, flamethrowers, and more. You can also use explosive eggs, health kits, armor, and other items to help you in battle.

### Different maps and scenarios to explore

### Online multiplayer mode

Chicken Gun APK has an online multiplayer mode that lets you play with players from around the world. You can join a public or private room, or create your own, and chat and voice-chat with other players using the built-in features. Make friends or enemies, cooperate or compete, and have a lot of fun.

## What are the tips and tricks for playing Chicken Gun APK?

Chicken Gun APK requires skill, strategy, and luck. Here are some tips and tricks that can help you improve your game:

### Aim for the head to do more damage

One of the most important skills in Chicken Gun APK is aiming. Aim for your enemies' heads to do more damage and kill them faster. You can use the crosshair or the scope to aim better, and you can adjust the control sensitivity to suit your preferences.

### Use explosive eggs to cause chaos

One of the most fun and effective items in Chicken Gun APK is the explosive egg. Throw these eggs at your enemies or their surroundings to cause explosions and damage. You can use them to distract, confuse, or eliminate enemies, and also to destroy walls, doors, or vehicles.

### Hide behind cover and keep moving to avoid being shot

One of the most important strategies in Chicken Gun APK is hiding behind cover and moving to avoid being shot. Find a good spot where you can hide from your enemies' sight and shoot safely, and move often so you are not an easy target. Use the crouch, jump, or sprint buttons to move faster or more stealthily.

### Team up with your friends and communicate with them

## Conclusion

Chicken Gun APK latest version is a fun and crazy online shooter you should try if you like action, humor, and multiplayer games. You play as armed chickens that shoot and fight each other across different modes, maps, and scenarios, and you can customize your rooster, weapon, beak, sneakers, and caps. Download and install Chicken Gun APK now and join the chicken shootout!

## FAQ

Here are some frequently asked questions about Chicken Gun APK:

- Q: Is Chicken Gun APK free? A: Yes, it is free to download and play. However, it contains ads and in-app purchases that you can disable or buy if you want.
- Q: Is Chicken Gun APK safe? A: Yes, it is safe to download and install if you get it from a trusted source such as [APKCombo], [APKLeon], or [APKBloch], websites that offer free and safe downloads of Android games and apps. You can also scan the APK file with an antivirus app before installing it to make sure.
- Q: How can I update Chicken Gun APK? A: Download and install the latest version of the APK file from the same source you got it from. You can also check for updates in the game under Settings > About > Check for updates.
- Q: How can I play Chicken Gun APK on PC? A: Use an Android emulator such as [BlueStacks], [NoxPlayer], or [LDPlayer], software that lets you run Android apps and games on your PC. Download and install an emulator on your PC, install Chicken Gun APK in the emulator, and play the game as you would on your device.
- Q: How can I contact the developer of Chicken Gun APK? A: Send an email to [email protected]. You can also follow their [Facebook page] or [YouTube channel] for more updates and news about the game.

## chicken gun apk latest version

**DOWNLOAD ✏ ✏ ✏ https://bltlly.com/2v6JFN**
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/depends.py
DELETED
@@ -1,176 +0,0 @@
import sys
import marshal
import contextlib
import dis

from setuptools.extern.packaging import version

from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import _imp


__all__ = [
    'Require', 'find_module', 'get_module_constant', 'extract_constant'
]


class Require:
    """A prerequisite to building or installing a distribution"""

    def __init__(
            self, name, requested_version, module, homepage='',
            attribute=None, format=None):

        if format is None and requested_version is not None:
            format = version.Version

        if format is not None:
            requested_version = format(requested_version)
            if attribute is None:
                attribute = '__version__'

        self.__dict__.update(locals())
        del self.self

    def full_name(self):
        """Return full package/distribution name, w/version"""
        if self.requested_version is not None:
            return '%s-%s' % (self.name, self.requested_version)
        return self.name

    def version_ok(self, version):
        """Is 'version' sufficiently up-to-date?"""
        return self.attribute is None or self.format is None or \
            str(version) != "unknown" and self.format(version) >= self.requested_version

    def get_version(self, paths=None, default="unknown"):
        """Get version number of installed module, 'None', or 'default'

        Search 'paths' for module. If not found, return 'None'. If found,
        return the extracted version attribute, or 'default' if no version
        attribute was specified, or the value cannot be determined without
        importing the module. The version is formatted according to the
        requirement's version format (if any), unless it is 'None' or the
        supplied 'default'.
        """

        if self.attribute is None:
            try:
                f, p, i = find_module(self.module, paths)
                if f:
                    f.close()
                return default
            except ImportError:
                return None

        v = get_module_constant(self.module, self.attribute, default, paths)

        if v is not None and v is not default and self.format is not None:
            return self.format(v)

        return v

    def is_present(self, paths=None):
        """Return true if dependency is present on 'paths'"""
        return self.get_version(paths) is not None

    def is_current(self, paths=None):
        """Return true if dependency is present and up-to-date on 'paths'"""
        version = self.get_version(paths)
        if version is None:
            return False
        return self.version_ok(str(version))


def maybe_close(f):
    @contextlib.contextmanager
    def empty():
        yield
        return
    if not f:
        return empty()

    return contextlib.closing(f)


def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'

    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'. If the module defines 'symbol' as a constant, return the
    constant. Otherwise, return 'default'."""

    try:
        f, path, (suffix, mode, kind) = info = find_module(module, paths)
    except ImportError:
        # Module doesn't exist
        return None

    with maybe_close(f):
        if kind == PY_COMPILED:
            f.read(8)  # skip magic & date
            code = marshal.load(f)
        elif kind == PY_FROZEN:
            code = _imp.get_frozen_object(module, paths)
        elif kind == PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it. :(
            imported = _imp.get_module(module, paths, info)
            return getattr(imported, symbol, None)

    return extract_constant(code, symbol, default)


def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value. If 'symbol' is bound to an expression,
    return 'default'. Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'. 'symbol' must
    be a global, or at least a non-"fast" local in the code block. That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None

    name_idx = list(code.co_names).index(symbol)

    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default

    for byte_code in dis.Bytecode(code):
        op = byte_code.opcode
        arg = byte_code.arg

        if op == LOAD_CONST:
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            return const
        else:
            const = default


def _update_globals():
    """
    Patch the globals to remove the objects not available on some platforms.

    XXX it'd be better to test assertions about bytecode instead.
    """

    if not sys.platform.startswith('java') and sys.platform != 'cli':
        return
    incompatible = 'extract_constant', 'get_module_constant'
    for name in incompatible:
        del globals()[name]
        __all__.remove(name)


_update_globals()
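A minimal sketch of extract_constant above, which reads a constant assignment out of compiled bytecode without importing the module. Note that the hard-coded opcode numbers in the implementation tie it to specific CPython bytecode layouts; the import path here is an assumption:

from setuptools.depends import extract_constant  # import path assumed

code = compile("__version__ = '1.2.3'", '<demo>', 'exec')
print(extract_constant(code, '__version__'))  # -> '1.2.3'
print(extract_constant(code, 'missing'))      # -> None (name never assigned)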
spaces/CVPR/Text2Human/Text2Human/models/losses/accuracy.py
DELETED
@@ -1,46 +0,0 @@
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
        target (torch.Tensor): The target of each prediction, shape (N, , ...)
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: If the input ``topk`` is a single integer,
            the function will return a single float as accuracy. If
            ``topk`` is a tuple containing multiple integers, the
            function will return a tuple containing accuracies of
            each ``topk`` number.
    """
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    maxk = max(topk)
    if pred.size(0) == 0:
        accu = [pred.new_tensor(0.) for i in range(len(topk))]
        return accu[0] if return_single else accu
    assert pred.ndim == target.ndim + 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    pred_value, pred_label = pred.topk(maxk, dim=1)
    # transpose to shape (maxk, N, ...)
    pred_label = pred_label.transpose(0, 1)
    correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
    if thresh is not None:
        # Only prediction values larger than thresh are counted as correct
        correct = correct & (pred_value > thresh).t()
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / target.numel()))
    return res[0] if return_single else res
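
A minimal sketch (not part of the diff) exercising the deleted accuracy() helper; the logits and targets below are made up for illustration:

import torch

pred = torch.tensor([[0.1, 0.7, 0.2],
                     [0.5, 0.1, 0.4]])   # (N=2, num_class=3)
target = torch.tensor([1, 2])            # sample 2 is missed at top-1, caught at top-2
top1, top2 = accuracy(pred, target, topk=(1, 2))
print(top1.item(), top2.item())          # 50.0 100.0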
spaces/CVPR/TokenCut/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: TokenCut
emoji: 😎
colorFrom: indigo
colorTo: gray
sdk: gradio
sdk_version: 3.0.15
app_file: app.py
pinned: false
---

This Demo is the TokenCut demo, the original demo is from https://huggingface.co/spaces/akhaliq/TokenCut. Thanks for Ahsen Khaliq's nicely contribution.
spaces/ChenWu98/Stable-CycleDiffusion/ptp_utils.py
DELETED
@@ -1,130 +0,0 @@
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import torch
from typing import Optional, Union, Tuple, Dict


def register_attention_control(model, controller):
    def ca_forward(self, place_in_unet):

        def forward(x, context=None, mask=None):
            batch_size, sequence_length, dim = x.shape
            h = self.heads
            q = self.to_q(x)
            is_cross = context is not None
            context = context if is_cross else x
            k = self.to_k(context)
            v = self.to_v(context)
            q = self.reshape_heads_to_batch_dim(q)
            k = self.reshape_heads_to_batch_dim(k)
            v = self.reshape_heads_to_batch_dim(v)

            sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale

            if mask is not None:
                mask = mask.reshape(batch_size, -1)
                max_neg_value = -torch.finfo(sim.dtype).max
                mask = mask[:, None, :].repeat(h, 1, 1)
                sim.masked_fill_(~mask, max_neg_value)

            # attention, what we cannot get enough of
            attn = sim.softmax(dim=-1)
            attn = controller(attn, is_cross, place_in_unet)
            out = torch.einsum("b i j, b j d -> b i d", attn, v)
            out = self.reshape_batch_dim_to_heads(out)

            # TODO: Chen (new version of diffusers)
            # return self.to_out(out)
            # linear proj
            out = self.to_out[0](out)
            # dropout
            out = self.to_out[1](out)
            return out

        return forward

    def register_recr(net_, count, place_in_unet):
        if net_.__class__.__name__ == 'CrossAttention':
            net_.forward = ca_forward(net_, place_in_unet)
            return count + 1
        elif hasattr(net_, 'children'):
            for net__ in net_.children():
                count = register_recr(net__, count, place_in_unet)
        return count

    cross_att_count = 0
    sub_nets = model.unet.named_children()
    for net in sub_nets:
        if "down" in net[0]:
            cross_att_count += register_recr(net[1], 0, "down")
        elif "up" in net[0]:
            cross_att_count += register_recr(net[1], 0, "up")
        elif "mid" in net[0]:
            cross_att_count += register_recr(net[1], 0, "mid")
    controller.num_att_layers = cross_att_count


def get_word_inds(text: str, word_place: int, tokenizer):
    split_text = text.split(" ")
    if type(word_place) is str:
        word_place = [i for i, word in enumerate(split_text) if word_place == word]
    elif type(word_place) is int:
        word_place = [word_place]
    out = []
    if len(word_place) > 0:
        words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
        cur_len, ptr = 0, 0

        for i in range(len(words_encode)):
            cur_len += len(words_encode[i])
            if ptr in word_place:
                out.append(i + 1)
            if cur_len >= len(split_text[ptr]):
                ptr += 1
                cur_len = 0
    return np.array(out)


def update_alpha_time_word(alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor]=None):
    if type(bounds) is float:
        bounds = 0, bounds
    start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
    if word_inds is None:
        word_inds = torch.arange(alpha.shape[2])
    alpha[: start, prompt_ind, word_inds] = 0
    alpha[start: end, prompt_ind, word_inds] = 1
    alpha[end:, prompt_ind, word_inds] = 0
    return alpha


def get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
                                   tokenizer, max_num_words=77):
    if type(cross_replace_steps) is not dict:
        cross_replace_steps = {"default_": cross_replace_steps}
    if "default_" not in cross_replace_steps:
        cross_replace_steps["default_"] = (0., 1.)
    alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
    for i in range(len(prompts) - 1):
        alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"],
                                                  i)
    for key, item in cross_replace_steps.items():
        if key != "default_":
            inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
            for i, ind in enumerate(inds):
                if len(ind) > 0:
                    alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
    alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)  # time, batch, heads, pixels, words
    return alpha_time_words
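
A minimal sketch (not part of the diff) of update_alpha_time_word above: for one prompt it switches cross-attention injection on for the fraction of diffusion steps given by bounds, restricted to the chosen word indices. Shapes are illustrative:

import torch

alpha = torch.zeros(11, 2, 77)   # (num_steps + 1, len(prompts) - 1, max_num_words)
alpha = update_alpha_time_word(alpha, bounds=0.4, prompt_ind=0,
                               word_inds=torch.tensor([3, 4]))
print(alpha[:, 0, 3])            # 1.0 for the first ~40% of steps (rounded down), 0.0 after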
spaces/Cvandi/remake/app.py
DELETED
@@ -1,68 +0,0 @@
import os
os.system("pip install gradio==2.9b23")
import random
import gradio as gr
from PIL import Image
import torch
from random import randint
import sys
from subprocess import call
import psutil


torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg')


def run_cmd(command):
    try:
        print(command)
        call(command, shell=True)
    except KeyboardInterrupt:
        print("Process interrupted")
        sys.exit(1)
run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .")
run_cmd("pip install basicsr")
run_cmd("pip freeze")

os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .")


def inference(img,mode):
    _id = randint(1, 10000)
    INPUT_DIR = "/tmp/input_image" + str(_id) + "/"
    OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/"
    run_cmd("rm -rf " + INPUT_DIR)
    run_cmd("rm -rf " + OUTPUT_DIR)
    run_cmd("mkdir " + INPUT_DIR)
    run_cmd("mkdir " + OUTPUT_DIR)
    basewidth = 256
    wpercent = (basewidth/float(img.size[0]))
    hsize = int((float(img.size[1])*float(wpercent)))
    img = img.resize((basewidth,hsize), Image.ANTIALIAS)
    img.save(INPUT_DIR + "1.jpg", "JPEG")
    if mode == "base":
        run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
    else:
        os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR)
    return os.path.join(OUTPUT_DIR, "1_out.jpg")


title = "Real-ESRGAN"
description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2107.10833'>Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data</a> | <a href='https://github.com/xinntao/Real-ESRGAN'>Github Repo</a></p>"

gr.Interface(
    inference,
    [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(["base","anime"], type="value", default="base", label="model type")],
    gr.outputs.Image(type="file", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ['bear.jpg','base'],
        ['anime.png','anime']
    ]).launch()
spaces/DJQmUKV/rvc-inference/infer_pack/models_onnx_moess.py
DELETED
@@ -1,849 +0,0 @@
import math, pdb, os
from time import time as ttime
import torch
from torch import nn
from torch.nn import functional as F
from infer_pack import modules
from infer_pack import attentions
from infer_pack import commons
from infer_pack.commons import init_weights, get_padding
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from infer_pack.commons import init_weights
import numpy as np
from infer_pack import commons


class TextEncoder256(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0 == True:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, phone, pitch, lengths):
        if pitch == None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        stats = self.proj(x) * x_mask

        m, logs = torch.split(stats, self.out_channels, dim=1)
        return m, logs, x_mask


class TextEncoder256Sim(nn.Module):
    def __init__(
        self,
        out_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        f0=True,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.emb_phone = nn.Linear(256, hidden_channels)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0 == True:
            self.emb_pitch = nn.Embedding(256, hidden_channels)  # pitch 256
        self.encoder = attentions.Encoder(
            hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)

    def forward(self, phone, pitch, lengths):
        if pitch == None:
            x = self.emb_phone(phone)
        else:
            x = self.emb_phone(phone) + self.emb_pitch(pitch)
        x = x * math.sqrt(self.hidden_channels)  # [b, t, h]
        x = self.lrelu(x)
        x = torch.transpose(x, 1, -1)  # [b, h, t]
        x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.encoder(x * x_mask, x_mask)
        x = self.proj(x) * x_mask
        return x, x_mask


class ResidualCouplingBlock(nn.Module):
    def __init__(
        self,
        channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        n_flows=4,
        gin_channels=0,
    ):
        super().__init__()
        self.channels = channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.n_flows = n_flows
        self.gin_channels = gin_channels

        self.flows = nn.ModuleList()
        for i in range(n_flows):
            self.flows.append(
                modules.ResidualCouplingLayer(
                    channels,
                    hidden_channels,
                    kernel_size,
                    dilation_rate,
                    n_layers,
                    gin_channels=gin_channels,
                    mean_only=True,
                )
            )
            self.flows.append(modules.Flip())

    def forward(self, x, x_mask, g=None, reverse=False):
        if not reverse:
            for flow in self.flows:
                x, _ = flow(x, x_mask, g=g, reverse=reverse)
        else:
            for flow in reversed(self.flows):
                x = flow(x, x_mask, g=g, reverse=reverse)
        return x

    def remove_weight_norm(self):
        for i in range(self.n_flows):
            self.flows[i * 2].remove_weight_norm()


class PosteriorEncoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        hidden_channels,
        kernel_size,
        dilation_rate,
        n_layers,
        gin_channels=0,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.dilation_rate = dilation_rate
        self.n_layers = n_layers
        self.gin_channels = gin_channels

        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
        self.enc = modules.WN(
            hidden_channels,
            kernel_size,
            dilation_rate,
            n_layers,
            gin_channels=gin_channels,
        )
        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

    def forward(self, x, x_lengths, g=None):
        x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
            x.dtype
        )
        x = self.pre(x) * x_mask
        x = self.enc(x, x_mask, g=g)
        stats = self.proj(x) * x_mask
        m, logs = torch.split(stats, self.out_channels, dim=1)
        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
        return z, m, logs, x_mask

    def remove_weight_norm(self):
        self.enc.remove_weight_norm()


class Generator(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels=0,
    ):
        super(Generator, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

    def forward(self, x, g=None):
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)

        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


class SineGen(torch.nn.Module):
    """Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-wavefrom (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_thoreshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SinGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always sin(np.pi) or cos(0)
    """

    def __init__(
        self,
        samp_rate,
        harmonic_num=0,
        sine_amp=0.1,
        noise_std=0.003,
        voiced_threshold=0,
        flag_for_pulse=False,
    ):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.dim = self.harmonic_num + 1
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = torch.ones_like(f0)
        uv = uv * (f0 > self.voiced_threshold)
        return uv

    def forward(self, f0, upp):
        """sine_tensor, uv = forward(f0)
        input F0: tensor(batchsize=1, length, dim=1)
        f0 for unvoiced steps should be 0
        output sine_tensor: tensor(batchsize=1, length, dim)
        output uv: tensor(batchsize=1, length, 1)
        """
        with torch.no_grad():
            f0 = f0[:, None].transpose(1, 2)
            f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
            # fundamental component
            f0_buf[:, :, 0] = f0[:, :, 0]
            for idx in np.arange(self.harmonic_num):
                f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
                    idx + 2
                )  # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
            rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the n_har products cannot be optimized away later
            rand_ini = torch.rand(
                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
            )
            rand_ini[:, 0] = 0
            rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
            tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # a % 1 here would keep the following cumsum from being optimized
            tmp_over_one *= upp
            tmp_over_one = F.interpolate(
                tmp_over_one.transpose(2, 1),
                scale_factor=upp,
                mode="linear",
                align_corners=True,
            ).transpose(2, 1)
            rad_values = F.interpolate(
                rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(
                2, 1
            )
            tmp_over_one %= 1
            tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
            cumsum_shift = torch.zeros_like(rad_values)
            cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
            sine_waves = torch.sin(
                torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
            )
            sine_waves = sine_waves * self.sine_amp
            uv = self._f02uv(f0)
            uv = F.interpolate(
                uv.transpose(2, 1), scale_factor=upp, mode="nearest"
            ).transpose(2, 1)
            noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
            noise = noise_amp * torch.randn_like(sine_waves)
            sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise


class SourceModuleHnNSF(torch.nn.Module):
    """SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonic above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threhold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length 1)
    uv (batchsize, length, 1)
    """

    def __init__(
        self,
        sampling_rate,
        harmonic_num=0,
        sine_amp=0.1,
        add_noise_std=0.003,
        voiced_threshod=0,
        is_half=True,
    ):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std
        self.is_half = is_half
        # to produce sine waveforms
        self.l_sin_gen = SineGen(
            sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
        )

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x, upp=None):
        sine_wavs, uv, _ = self.l_sin_gen(x, upp)
        if self.is_half:
            sine_wavs = sine_wavs.half()
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))
        return sine_merge, None, None  # noise, uv


class GeneratorNSF(torch.nn.Module):
    def __init__(
        self,
        initial_channel,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        gin_channels,
        sr,
        is_half=False,
    ):
        super(GeneratorNSF, self).__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)

        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sr, harmonic_num=0, is_half=is_half
        )
        self.noise_convs = nn.ModuleList()
        self.conv_pre = Conv1d(
            initial_channel, upsample_initial_channel, 7, 1, padding=3
        )
        resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            c_cur = upsample_initial_channel // (2 ** (i + 1))
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2 ** (i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )
            if i + 1 < len(upsample_rates):
                stride_f0 = np.prod(upsample_rates[i + 1 :])
                self.noise_convs.append(
                    Conv1d(
                        1,
                        c_cur,
                        kernel_size=stride_f0 * 2,
                        stride=stride_f0,
                        padding=stride_f0 // 2,
                    )
                )
            else:
                self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(resblock_kernel_sizes, resblock_dilation_sizes)
            ):
                self.resblocks.append(resblock(ch, k, d))

        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
        self.ups.apply(init_weights)

        if gin_channels != 0:
            self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)

        self.upp = np.prod(upsample_rates)

    def forward(self, x, f0, g=None):
        har_source, noi_source, uv = self.m_source(f0, self.upp)
        har_source = har_source.transpose(1, 2)
        x = self.conv_pre(x)
        if g is not None:
            x = x + self.cond(g)

        for i in range(self.num_upsamples):
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            x = self.ups[i](x)
            x_source = self.noise_convs[i](har_source)
            x = x + x_source
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels
        x = F.leaky_relu(x)
        x = self.conv_post(x)
        x = torch.tanh(x)
        return x

    def remove_weight_norm(self):
        for l in self.ups:
            remove_weight_norm(l)
        for l in self.resblocks:
            l.remove_weight_norm()


sr2sr = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}


class SynthesizerTrnMs256NSFsidM(nn.Module):
    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        gin_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        if type(sr) == type("strr"):
            sr = sr2sr[sr]
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            sr=sr,
            is_half=kwargs["is_half"],
        )
        self.enc_q = PosteriorEncoder(
            spec_channels,
            inter_channels,
            hidden_channels,
            5,
            1,
            16,
            gin_channels=gin_channels,
        )
        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None):
        g = self.emb_g(sid).unsqueeze(-1)
        m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
        z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
        z = self.flow(z_p, x_mask, g=g, reverse=True)
        o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
        return o


class SynthesizerTrnMs256NSFsid_sim(nn.Module):
    """
    Synthesizer for Training
    """

    def __init__(
        self,
        spec_channels,
        segment_size,
        inter_channels,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size,
        p_dropout,
        resblock,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        upsample_rates,
        upsample_initial_channel,
        upsample_kernel_sizes,
        spk_embed_dim,
        # hop_length,
        gin_channels=0,
        use_sdp=True,
        **kwargs
    ):
        super().__init__()
        self.spec_channels = spec_channels
        self.inter_channels = inter_channels
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.resblock = resblock
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.upsample_rates = upsample_rates
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.segment_size = segment_size
        self.gin_channels = gin_channels
        # self.hop_length = hop_length#
        self.spk_embed_dim = spk_embed_dim
        self.enc_p = TextEncoder256Sim(
            inter_channels,
            hidden_channels,
            filter_channels,
            n_heads,
            n_layers,
            kernel_size,
            p_dropout,
        )
        self.dec = GeneratorNSF(
            inter_channels,
            resblock,
            resblock_kernel_sizes,
            resblock_dilation_sizes,
            upsample_rates,
            upsample_initial_channel,
            upsample_kernel_sizes,
            gin_channels=gin_channels,
            is_half=kwargs["is_half"],
        )

        self.flow = ResidualCouplingBlock(
            inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)

    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
        self.flow.remove_weight_norm()
        self.enc_q.remove_weight_norm()

    def forward(
        self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
    ):  # y (the spec) is no longer needed here
        g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1)  # [b, 256, 1]; the 1 is t, broadcast
        x, x_mask = self.enc_p(phone, pitch, phone_lengths)
        x = self.flow(x, x_mask, g=g, reverse=True)
        o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
        return o


class MultiPeriodDiscriminator(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(MultiPeriodDiscriminator, self).__init__()
        periods = [2, 3, 5, 7, 11, 17]
        # periods = [3, 5, 7, 11, 17, 23, 37]

        discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
        discs = discs + [
            DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
        ]
        self.discriminators = nn.ModuleList(discs)

    def forward(self, y, y_hat):
        y_d_rs = []
        y_d_gs = []
        fmap_rs = []
        fmap_gs = []
        for i, d in enumerate(self.discriminators):
            y_d_r, fmap_r = d(y)
            y_d_g, fmap_g = d(y_hat)
            # for j in range(len(fmap_r)):
            #     print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)

        return y_d_rs, y_d_gs, fmap_rs, fmap_gs


class DiscriminatorS(torch.nn.Module):
    def __init__(self, use_spectral_norm=False):
        super(DiscriminatorS, self).__init__()
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(Conv1d(1, 16, 15, 1, padding=7)),
                norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
                norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
                norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
                norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
                norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
            ]
        )
        self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))

    def forward(self, x):
        fmap = []

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap


class DiscriminatorP(torch.nn.Module):
    def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        self.convs = nn.ModuleList(
            [
                norm_f(
                    Conv2d(
                        1,
                        32,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        32,
                        128,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        128,
                        512,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        512,
                        1024,
                        (kernel_size, 1),
                        (stride, 1),
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
                norm_f(
                    Conv2d(
                        1024,
                        1024,
                        (kernel_size, 1),
                        1,
                        padding=(get_padding(kernel_size, 1), 0),
                    )
                ),
            ]
        )
        self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))

    def forward(self, x):
        fmap = []

        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0:  # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (0, n_pad), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)

        for l in self.convs:
            x = l(x)
            x = F.leaky_relu(x, modules.LRELU_SLOPE)
            fmap.append(x)
        x = self.conv_post(x)
        fmap.append(x)
        x = torch.flatten(x, 1, -1)

        return x, fmap
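
A minimal sketch (not part of the diff) of the 1d-to-2d step in DiscriminatorP.forward above: the waveform is reflect-padded to a multiple of period, then folded so each column of the 2d view holds samples one period apart. Numbers are illustrative:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 10)                  # (b, c, t)
period = 3
n_pad = period - (x.shape[-1] % period)    # 2 samples of reflect padding
x = F.pad(x, (0, n_pad), "reflect")
x = x.view(1, 1, x.shape[-1] // period, period)
print(x.shape)                             # torch.Size([1, 1, 4, 3])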
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImageGrab.py
DELETED
@@ -1,169 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# screen grabber
#
# History:
# 2001-04-26 fl  created
# 2001-09-17 fl  use builtin driver, if present
# 2002-11-19 fl  added grabclipboard support
#
# Copyright (c) 2001-2002 by Secret Labs AB
# Copyright (c) 2001-2002 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

import io
import os
import shutil
import subprocess
import sys
import tempfile

from . import Image


def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):
    if xdisplay is None:
        if sys.platform == "darwin":
            fh, filepath = tempfile.mkstemp(".png")
            os.close(fh)
            args = ["screencapture"]
            if bbox:
                left, top, right, bottom = bbox
                args += ["-R", f"{left},{top},{right-left},{bottom-top}"]
            subprocess.call(args + ["-x", filepath])
            im = Image.open(filepath)
            im.load()
            os.unlink(filepath)
            if bbox:
                im_resized = im.resize((right - left, bottom - top))
                im.close()
                return im_resized
            return im
        elif sys.platform == "win32":
            offset, size, data = Image.core.grabscreen_win32(
                include_layered_windows, all_screens
            )
            im = Image.frombytes(
                "RGB",
                size,
                data,
                # RGB, 32-bit line padding, origin lower left corner
                "raw",
                "BGR",
                (size[0] * 3 + 3) & -4,
                -1,
            )
            if bbox:
                x0, y0 = offset
                left, top, right, bottom = bbox
                im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
            return im
    try:
        if not Image.core.HAVE_XCB:
            msg = "Pillow was built without XCB support"
            raise OSError(msg)
        size, data = Image.core.grabscreen_x11(xdisplay)
    except OSError:
        if (
            xdisplay is None
            and sys.platform not in ("darwin", "win32")
            and shutil.which("gnome-screenshot")
        ):
            fh, filepath = tempfile.mkstemp(".png")
            os.close(fh)
            subprocess.call(["gnome-screenshot", "-f", filepath])
            im = Image.open(filepath)
            im.load()
            os.unlink(filepath)
            if bbox:
                im_cropped = im.crop(bbox)
                im.close()
                return im_cropped
            return im
        else:
            raise
    else:
        im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1)
        if bbox:
            im = im.crop(bbox)
        return im


def grabclipboard():
    if sys.platform == "darwin":
        fh, filepath = tempfile.mkstemp(".png")
        os.close(fh)
        commands = [
            'set theFile to (open for access POSIX file "'
            + filepath
            + '" with write permission)',
            "try",
            "    write (the clipboard as «class PNGf») to theFile",
            "end try",
            "close access theFile",
        ]
        script = ["osascript"]
        for command in commands:
            script += ["-e", command]
        subprocess.call(script)

        im = None
        if os.stat(filepath).st_size != 0:
            im = Image.open(filepath)
            im.load()
        os.unlink(filepath)
        return im
    elif sys.platform == "win32":
        fmt, data = Image.core.grabclipboard_win32()
        if fmt == "file":  # CF_HDROP
            import struct

            o = struct.unpack_from("I", data)[0]
            if data[16] != 0:
                files = data[o:].decode("utf-16le").split("\0")
            else:
                files = data[o:].decode("mbcs").split("\0")
            return files[: files.index("")]
        if isinstance(data, bytes):
            data = io.BytesIO(data)
        if fmt == "png":
            from . import PngImagePlugin

            return PngImagePlugin.PngImageFile(data)
        elif fmt == "DIB":
            from . import BmpImagePlugin

            return BmpImagePlugin.DibImageFile(data)
        return None
    else:
        if shutil.which("wl-paste"):
            output = subprocess.check_output(["wl-paste", "-l"]).decode()
            mimetypes = output.splitlines()
            if "image/png" in mimetypes:
                mimetype = "image/png"
            elif mimetypes:
                mimetype = mimetypes[0]
            else:
                mimetype = None

            args = ["wl-paste"]
            if mimetype:
                args.extend(["-t", mimetype])
        elif shutil.which("xclip"):
            args = ["xclip", "-selection", "clipboard", "-t", "image/png", "-o"]
        else:
            msg = "wl-paste or xclip is required for ImageGrab.grabclipboard() on Linux"
            raise NotImplementedError(msg)
        p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        err = p.stderr
        if err:
            msg = f"{args[0]} error: {err.strip().decode()}"
            raise ChildProcessError(msg)
        data = io.BytesIO(p.stdout)
        im = Image.open(data)
        im.load()
        return im
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_fileresponse.py
DELETED
@@ -1,288 +0,0 @@
import asyncio
import mimetypes
import os
import pathlib
import sys
from typing import (  # noqa
    IO,
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Iterator,
    List,
    Optional,
    Tuple,
    Union,
    cast,
)

from . import hdrs
from .abc import AbstractStreamWriter
from .helpers import ETAG_ANY, ETag
from .typedefs import Final, LooseHeaders
from .web_exceptions import (
    HTTPNotModified,
    HTTPPartialContent,
    HTTPPreconditionFailed,
    HTTPRequestRangeNotSatisfiable,
)
from .web_response import StreamResponse

__all__ = ("FileResponse",)

if TYPE_CHECKING:  # pragma: no cover
    from .web_request import BaseRequest


_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]


NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE"))


class FileResponse(StreamResponse):
    """A response object can be used to send files."""

    def __init__(
        self,
        path: Union[str, pathlib.Path],
        chunk_size: int = 256 * 1024,
        status: int = 200,
        reason: Optional[str] = None,
        headers: Optional[LooseHeaders] = None,
    ) -> None:
        super().__init__(status=status, reason=reason, headers=headers)

        if isinstance(path, str):
            path = pathlib.Path(path)

        self._path = path
        self._chunk_size = chunk_size

    async def _sendfile_fallback(
        self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int
    ) -> AbstractStreamWriter:
        # To keep memory usage low,fobj is transferred in chunks
        # controlled by the constructor's chunk_size argument.

        chunk_size = self._chunk_size
        loop = asyncio.get_event_loop()

        await loop.run_in_executor(None, fobj.seek, offset)

        chunk = await loop.run_in_executor(None, fobj.read, chunk_size)
        while chunk:
            await writer.write(chunk)
            count = count - chunk_size
            if count <= 0:
                break
            chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count))

        await writer.drain()
        return writer

    async def _sendfile(
        self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int
    ) -> AbstractStreamWriter:
        writer = await super().prepare(request)
        assert writer is not None

        if NOSENDFILE or sys.version_info < (3, 7) or self.compression:
            return await self._sendfile_fallback(writer, fobj, offset, count)

        loop = request._loop
        transport = request.transport
        assert transport is not None

        try:
            await loop.sendfile(transport, fobj, offset, count)
        except NotImplementedError:
            return await self._sendfile_fallback(writer, fobj, offset, count)

        await super().write_eof()
        return writer

    @staticmethod
    def _strong_etag_match(etag_value: str, etags: Tuple[ETag, ...]) -> bool:
        if len(etags) == 1 and etags[0].value == ETAG_ANY:
            return True
        return any(etag.value == etag_value for etag in etags if not etag.is_weak)

    async def _not_modified(
        self, request: "BaseRequest", etag_value: str, last_modified: float
    ) -> Optional[AbstractStreamWriter]:
        self.set_status(HTTPNotModified.status_code)
        self._length_check = False
        self.etag = etag_value  # type: ignore[assignment]
        self.last_modified = last_modified  # type: ignore[assignment]
        # Delete any Content-Length headers provided by user. HTTP 304
        # should always have empty response body
        return await super().prepare(request)

    async def _precondition_failed(
        self, request: "BaseRequest"
    ) -> Optional[AbstractStreamWriter]:
        self.set_status(HTTPPreconditionFailed.status_code)
        self.content_length = 0
        return await super().prepare(request)

    async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
        filepath = self._path

        gzip = False
        if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""):
            gzip_path = filepath.with_name(filepath.name + ".gz")

            if gzip_path.is_file():
                filepath = gzip_path
                gzip = True

        loop = asyncio.get_event_loop()
        st: os.stat_result = await loop.run_in_executor(None, filepath.stat)

        etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}"
        last_modified = st.st_mtime

        # https://tools.ietf.org/html/rfc7232#section-6
        ifmatch = request.if_match
        if ifmatch is not None and not self._strong_etag_match(etag_value, ifmatch):
            return await self._precondition_failed(request)

        unmodsince = request.if_unmodified_since
        if (
            unmodsince is not None
            and ifmatch is None
            and st.st_mtime > unmodsince.timestamp()
        ):
            return await self._precondition_failed(request)

        ifnonematch = request.if_none_match
        if ifnonematch is not None and self._strong_etag_match(etag_value, ifnonematch):
            return await self._not_modified(request, etag_value, last_modified)

        modsince = request.if_modified_since
        if (
            modsince is not None
            and ifnonematch is None
            and st.st_mtime <= modsince.timestamp()
        ):
            return await self._not_modified(request, etag_value, last_modified)

        if hdrs.CONTENT_TYPE not in self.headers:
            ct, encoding = mimetypes.guess_type(str(filepath))
            if not ct:
                ct = "application/octet-stream"
            should_set_ct = True
        else:
            encoding = "gzip" if gzip else None
            should_set_ct = False

        status = self._status
        file_size = st.st_size
        count = file_size

        start = None

        ifrange = request.if_range
        if ifrange is None or st.st_mtime <= ifrange.timestamp():
            # If-Range header check:
            # condition = cached date >= last modification date
            # return 206 if True else 200.
            # if False:
            #   Range header would not be processed, return 200
            # if True but Range header missing
            #   return 200
            try:
                rng = request.http_range
                start = rng.start
                end = rng.stop
            except ValueError:
                # https://tools.ietf.org/html/rfc7233:
                # A server generating a 416 (Range Not Satisfiable) response to
                # a byte-range request SHOULD send a Content-Range header field
                # with an unsatisfied-range value.
                # The complete-length in a 416 response indicates the current
                # length of the selected representation.
                #
                # Will do the same below. Many servers ignore this and do not
                # send a Content-Range header with HTTP 416
                self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
                self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
                return await super().prepare(request)

            # If a range request has been made, convert start, end slice
            # notation into file pointer offset and count
            if start is not None or end is not None:
                if start < 0 and end is None:  # return tail of file
                    start += file_size
                    if start < 0:
                        # if Range:bytes=-1000 in request header but file size
                        # is only 200, there would be trouble without this
                        start = 0
                    count = file_size - start
                else:
                    # rfc7233:If the last-byte-pos value is
                    # absent, or if the value is greater than or equal to
                    # the current length of the representation data,
                    # the byte range is interpreted as the remainder
                    # of the representation (i.e., the server replaces the
                    # value of last-byte-pos with a value that is one less than
                    # the current length of the selected representation).
                    count = (
                        min(end if end is not None else file_size, file_size) - start
                    )

                if start >= file_size:
                    # HTTP 416 should be returned in this case.
                    #
                    # According to https://tools.ietf.org/html/rfc7233:
                    # If a valid byte-range-set includes at least one
                    # byte-range-spec with a first-byte-pos that is less than
|
242 |
-
# the current length of the representation, or at least one
|
243 |
-
# suffix-byte-range-spec with a non-zero suffix-length,
|
244 |
-
# then the byte-range-set is satisfiable. Otherwise, the
|
245 |
-
# byte-range-set is unsatisfiable.
|
246 |
-
self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
|
247 |
-
self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
|
248 |
-
return await super().prepare(request)
|
249 |
-
|
250 |
-
status = HTTPPartialContent.status_code
|
251 |
-
# Even though you are sending the whole file, you should still
|
252 |
-
# return a HTTP 206 for a Range request.
|
253 |
-
self.set_status(status)
|
254 |
-
|
255 |
-
if should_set_ct:
|
256 |
-
self.content_type = ct # type: ignore[assignment]
|
257 |
-
if encoding:
|
258 |
-
self.headers[hdrs.CONTENT_ENCODING] = encoding
|
259 |
-
if gzip:
|
260 |
-
self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING
|
261 |
-
|
262 |
-
self.etag = etag_value # type: ignore[assignment]
|
263 |
-
self.last_modified = st.st_mtime # type: ignore[assignment]
|
264 |
-
self.content_length = count
|
265 |
-
|
266 |
-
self.headers[hdrs.ACCEPT_RANGES] = "bytes"
|
267 |
-
|
268 |
-
real_start = cast(int, start)
|
269 |
-
|
270 |
-
if status == HTTPPartialContent.status_code:
|
271 |
-
self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format(
|
272 |
-
real_start, real_start + count - 1, file_size
|
273 |
-
)
|
274 |
-
|
275 |
-
# If we are sending 0 bytes calling sendfile() will throw a ValueError
|
276 |
-
if count == 0 or request.method == hdrs.METH_HEAD or self.status in [204, 304]:
|
277 |
-
return await super().prepare(request)
|
278 |
-
|
279 |
-
fobj = await loop.run_in_executor(None, filepath.open, "rb")
|
280 |
-
if start: # be aware that start could be None or int=0 here.
|
281 |
-
offset = start
|
282 |
-
else:
|
283 |
-
offset = 0
|
284 |
-
|
285 |
-
try:
|
286 |
-
return await self._sendfile(request, fobj, offset, count)
|
287 |
-
finally:
|
288 |
-
await loop.run_in_executor(None, fobj.close)
|
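This prepare() implementation is where FileResponse negotiates conditional headers and byte ranges before handing off to _sendfile(). As a minimal client-side sketch, a Range request against any aiohttp static route exercises the 206 path above; the host, port, and file path here are illustrative placeholders, not taken from this diff:

# Sketch: request the first 100 bytes of a statically served file.
# Assumes an aiohttp server listening on localhost:8080 that serves
# /static (both placeholders).
import asyncio

import aiohttp


async def fetch_range() -> None:
    async with aiohttp.ClientSession() as session:
        headers = {"Range": "bytes=0-99"}
        async with session.get(
            "http://localhost:8080/static/example.txt", headers=headers
        ) as resp:
            # A satisfiable Range yields 206 plus a Content-Range header
            # such as "bytes 0-99/1234", as set in prepare() above.
            print(resp.status, resp.headers.get("Content-Range"))
            body = await resp.read()
            print(len(body), "bytes received")


asyncio.run(fetch_range())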
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_winconsole.py
DELETED
@@ -1,279 +0,0 @@
# This module is based on the excellent work by Adam Bartoš who
# provided a lot of what went into the implementation here in
# the discussion to issue1602 in the Python bug tracker.
#
# There are some general differences in regards to how this works
# compared to the original patches as we do not need to patch
# the entire interpreter but just work in our little world of
# echo and prompt.
import io
import sys
import time
import typing as t
from ctypes import byref
from ctypes import c_char
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_ssize_t
from ctypes import c_ulong
from ctypes import c_void_p
from ctypes import POINTER
from ctypes import py_object
from ctypes import Structure
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
from ctypes.wintypes import LPCWSTR
from ctypes.wintypes import LPWSTR

from ._compat import _NonClosingTextIOWrapper

assert sys.platform == "win32"
import msvcrt  # noqa: E402
from ctypes import windll  # noqa: E402
from ctypes import WINFUNCTYPE  # noqa: E402

c_ssize_p = POINTER(c_ssize_t)

kernel32 = windll.kernel32
GetStdHandle = kernel32.GetStdHandle
ReadConsoleW = kernel32.ReadConsoleW
WriteConsoleW = kernel32.WriteConsoleW
GetConsoleMode = kernel32.GetConsoleMode
GetLastError = kernel32.GetLastError
GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
    ("CommandLineToArgvW", windll.shell32)
)
LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32))

STDIN_HANDLE = GetStdHandle(-10)
STDOUT_HANDLE = GetStdHandle(-11)
STDERR_HANDLE = GetStdHandle(-12)

PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1

ERROR_SUCCESS = 0
ERROR_NOT_ENOUGH_MEMORY = 8
ERROR_OPERATION_ABORTED = 995

STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2

EOF = b"\x1a"
MAX_BYTES_WRITTEN = 32767

try:
    from ctypes import pythonapi
except ImportError:
    # On PyPy we cannot get buffers so our ability to operate here is
    # severely limited.
    get_buffer = None
else:

    class Py_buffer(Structure):
        _fields_ = [
            ("buf", c_void_p),
            ("obj", py_object),
            ("len", c_ssize_t),
            ("itemsize", c_ssize_t),
            ("readonly", c_int),
            ("ndim", c_int),
            ("format", c_char_p),
            ("shape", c_ssize_p),
            ("strides", c_ssize_p),
            ("suboffsets", c_ssize_p),
            ("internal", c_void_p),
        ]

    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
    PyBuffer_Release = pythonapi.PyBuffer_Release

    def get_buffer(obj, writable=False):
        buf = Py_buffer()
        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
        PyObject_GetBuffer(py_object(obj), byref(buf), flags)

        try:
            buffer_type = c_char * buf.len
            return buffer_type.from_address(buf.buf)
        finally:
            PyBuffer_Release(byref(buf))


class _WindowsConsoleRawIOBase(io.RawIOBase):
    def __init__(self, handle):
        self.handle = handle

    def isatty(self):
        super().isatty()
        return True


class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
    def readable(self):
        return True

    def readinto(self, b):
        bytes_to_be_read = len(b)
        if not bytes_to_be_read:
            return 0
        elif bytes_to_be_read % 2:
            raise ValueError(
                "cannot read odd number of bytes from UTF-16-LE encoded console"
            )

        buffer = get_buffer(b, writable=True)
        code_units_to_be_read = bytes_to_be_read // 2
        code_units_read = c_ulong()

        rv = ReadConsoleW(
            HANDLE(self.handle),
            buffer,
            code_units_to_be_read,
            byref(code_units_read),
            None,
        )
        if GetLastError() == ERROR_OPERATION_ABORTED:
            # wait for KeyboardInterrupt
            time.sleep(0.1)
        if not rv:
            raise OSError(f"Windows error: {GetLastError()}")

        if buffer[0] == EOF:
            return 0
        return 2 * code_units_read.value


class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
    def writable(self):
        return True

    @staticmethod
    def _get_error_message(errno):
        if errno == ERROR_SUCCESS:
            return "ERROR_SUCCESS"
        elif errno == ERROR_NOT_ENOUGH_MEMORY:
            return "ERROR_NOT_ENOUGH_MEMORY"
        return f"Windows error {errno}"

    def write(self, b):
        bytes_to_be_written = len(b)
        buf = get_buffer(b)
        code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
        code_units_written = c_ulong()

        WriteConsoleW(
            HANDLE(self.handle),
            buf,
            code_units_to_be_written,
            byref(code_units_written),
            None,
        )
        bytes_written = 2 * code_units_written.value

        if bytes_written == 0 and bytes_to_be_written > 0:
            raise OSError(self._get_error_message(GetLastError()))
        return bytes_written


class ConsoleStream:
    def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None:
        self._text_stream = text_stream
        self.buffer = byte_stream

    @property
    def name(self) -> str:
        return self.buffer.name

    def write(self, x: t.AnyStr) -> int:
        if isinstance(x, str):
            return self._text_stream.write(x)
        try:
            self.flush()
        except Exception:
            pass
        return self.buffer.write(x)

    def writelines(self, lines: t.Iterable[t.AnyStr]) -> None:
        for line in lines:
            self.write(line)

    def __getattr__(self, name: str) -> t.Any:
        return getattr(self._text_stream, name)

    def isatty(self) -> bool:
        return self.buffer.isatty()

    def __repr__(self):
        return f"<ConsoleStream name={self.name!r} encoding={self.encoding!r}>"


def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO:
    text_stream = _NonClosingTextIOWrapper(
        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))


def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO:
    text_stream = _NonClosingTextIOWrapper(
        io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))


def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO:
    text_stream = _NonClosingTextIOWrapper(
        io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
        "utf-16-le",
        "strict",
        line_buffering=True,
    )
    return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))


_stream_factories: t.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = {
    0: _get_text_stdin,
    1: _get_text_stdout,
    2: _get_text_stderr,
}


def _is_console(f: t.TextIO) -> bool:
    if not hasattr(f, "fileno"):
        return False

    try:
        fileno = f.fileno()
    except (OSError, io.UnsupportedOperation):
        return False

    handle = msvcrt.get_osfhandle(fileno)
    return bool(GetConsoleMode(handle, byref(DWORD())))


def _get_windows_console_stream(
    f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
    if (
        get_buffer is not None
        and encoding in {"utf-16-le", None}
        and errors in {"strict", None}
        and _is_console(f)
    ):
        func = _stream_factories.get(f.fileno())
        if func is not None:
            b = getattr(f, "buffer", None)

            if b is None:
                return None

            return func(b)
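The module's entry point is _get_windows_console_stream(), which click's I/O layer calls to decide whether a stream should be rewrapped for Unicode-safe console output. A sketch of that call, runnable only on Windows because of the sys.platform assertion above; the sample strings are illustrative:

# Sketch: wrap sys.stdout with the console-aware stream when applicable.
# Only meaningful on Windows; elsewhere the module refuses to import.
import sys

from click._winconsole import _get_windows_console_stream

wrapped = _get_windows_console_stream(sys.stdout, encoding=None, errors=None)
if wrapped is not None:
    # stdout is a real console: text goes out via WriteConsoleW.
    wrapped.write("héllo wörld\n")
else:
    # stdout is redirected (file, pipe): the ordinary stream is fine.
    sys.stdout.write("hello world\n")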
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_h_e_a_d.py
DELETED
@@ -1,124 +0,0 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import (
    timestampFromString,
    timestampToString,
    timestampNow,
)
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff  # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging


log = logging.getLogger(__name__)

headFormat = """
        >  # big endian
        tableVersion:       16.16F
        fontRevision:       16.16F
        checkSumAdjustment: I
        magicNumber:        I
        flags:              H
        unitsPerEm:         H
        created:            Q
        modified:           Q
        xMin:               h
        yMin:               h
        xMax:               h
        yMax:               h
        macStyle:           H
        lowestRecPPEM:      H
        fontDirectionHint:  h
        indexToLocFormat:   h
        glyphDataFormat:    h
"""


class table__h_e_a_d(DefaultTable.DefaultTable):

    dependencies = ["maxp", "loca", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(headFormat, data, self)
        if rest:
            # this is quite illegal, but there seem to be fonts out there that do this
            log.warning("extra bytes at the end of 'head' table")
            assert rest == b"\0\0"

        # For timestamp fields, ignore the top four bytes. Some fonts have
        # bogus values there. Since till 2038 those bytes only can be zero,
        # ignore them.
        #
        # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
        for stamp in "created", "modified":
            value = getattr(self, stamp)
            if value > 0xFFFFFFFF:
                log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
                value &= 0xFFFFFFFF
                setattr(self, stamp, value)
            if value < 0x7C259DC0:  # January 1, 1970 00:00:00
                log.warning(
                    "'%s' timestamp seems very low; regarding as unix timestamp", stamp
                )
                value += 0x7C259DC0
                setattr(self, stamp, value)

    def compile(self, ttFont):
        if ttFont.recalcBBoxes:
            # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
                self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
            elif "CFF2" in ttFont:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
                charStrings = topDict.CharStrings
                fontBBox = None
                for charString in charStrings.values():
                    bounds = charString.calcBounds(charStrings)
                    if bounds is not None:
                        if fontBBox is not None:
                            fontBBox = unionRect(fontBBox, bounds)
                        else:
                            fontBBox = bounds
                if fontBBox is not None:
                    self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
        if ttFont.recalcTimestamp:
            self.modified = timestampNow()
        data = sstruct.pack(headFormat, self)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        _, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in fixes:
                value = floatToFixedToStr(value, precisionBits=fixes[name])
            elif name in ("created", "modified"):
                value = timestampToString(value)
            elif name in ("magicNumber", "checkSumAdjustment"):
                if value < 0:
                    value = value + 0x100000000
                value = hex(value)
                if value[-1:] == "L":
                    value = value[:-1]
            elif name in ("macStyle", "flags"):
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        fixes = sstruct.getformat(headFormat)[2]
        if name in fixes:
            value = strToFixedToFloat(value, precisionBits=fixes[name])
        elif name in ("created", "modified"):
            value = timestampFromString(value)
        elif name in ("macStyle", "flags"):
            value = binary2num(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
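Everything this table class decompiles ends up as plain attributes on the table object. A short sketch of reading them back through fontTools' public TTFont API; "MyFont.ttf" is a placeholder path, not a file in this repository:

# Sketch: inspect the 'head' table of a font via fontTools' public API.
from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")
head = font["head"]  # decompile() above populated these attributes

print("unitsPerEm:", head.unitsPerEm)
print("bbox:", (head.xMin, head.yMin, head.xMax, head.yMax))
# indexToLocFormat selects short (0) or long (1) 'loca' offsets.
print("indexToLocFormat:", head.indexToLocFormat)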
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/ranged_response.py
DELETED
@@ -1,185 +0,0 @@
# Taken from https://gist.github.com/kevinastone/a6a62db57577b3f24e8a6865ed311463
# Context: https://github.com/encode/starlette/pull/1090
from __future__ import annotations

import os
import re
import stat
from typing import NamedTuple
from urllib.parse import quote

import aiofiles
from aiofiles.os import stat as aio_stat
from starlette.datastructures import Headers
from starlette.exceptions import HTTPException
from starlette.responses import Response, guess_type
from starlette.staticfiles import StaticFiles
from starlette.types import Receive, Scope, Send

RANGE_REGEX = re.compile(r"^bytes=(?P<start>\d+)-(?P<end>\d*)$")


class ClosedRange(NamedTuple):
    start: int
    end: int

    def __len__(self) -> int:
        return self.end - self.start + 1

    def __bool__(self) -> bool:
        return len(self) > 0


class OpenRange(NamedTuple):
    start: int
    end: int | None = None

    def clamp(self, start: int, end: int) -> ClosedRange:
        begin = max(self.start, start)
        end = min(x for x in (self.end, end) if x)

        begin = min(begin, end)
        end = max(begin, end)

        return ClosedRange(begin, end)


class RangedFileResponse(Response):
    chunk_size = 4096

    def __init__(
        self,
        path: str | os.PathLike,
        range: OpenRange,
        headers: dict[str, str] | None = None,
        media_type: str | None = None,
        filename: str | None = None,
        stat_result: os.stat_result | None = None,
        method: str | None = None,
    ) -> None:
        assert aiofiles is not None, "'aiofiles' must be installed to use FileResponse"
        self.path = path
        self.range = range
        self.filename = filename
        self.background = None
        self.send_header_only = method is not None and method.upper() == "HEAD"
        if media_type is None:
            media_type = guess_type(filename or path)[0] or "text/plain"
        self.media_type = media_type
        self.init_headers(headers or {})
        if self.filename is not None:
            content_disposition_filename = quote(self.filename)
            if content_disposition_filename != self.filename:
                content_disposition = (
                    f"attachment; filename*=utf-8''{content_disposition_filename}"
                )
            else:
                content_disposition = f'attachment; filename="{self.filename}"'
            self.headers.setdefault("content-disposition", content_disposition)
        self.stat_result = stat_result

    def set_range_headers(self, range: ClosedRange) -> None:
        assert self.stat_result
        total_length = self.stat_result.st_size
        content_length = len(range)
        self.headers[
            "content-range"
        ] = f"bytes {range.start}-{range.end}/{total_length}"
        self.headers["content-length"] = str(content_length)
        pass

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if self.stat_result is None:
            try:
                stat_result = await aio_stat(self.path)
                self.stat_result = stat_result
            except FileNotFoundError as fnfe:
                raise RuntimeError(
                    f"File at path {self.path} does not exist."
                ) from fnfe
            else:
                mode = stat_result.st_mode
                if not stat.S_ISREG(mode):
                    raise RuntimeError(f"File at path {self.path} is not a file.")

        byte_range = self.range.clamp(0, self.stat_result.st_size)
        self.set_range_headers(byte_range)

        async with aiofiles.open(self.path, mode="rb") as file:
            await file.seek(byte_range.start)
            await send(
                {
                    "type": "http.response.start",
                    "status": 206,
                    "headers": self.raw_headers,
                }
            )
            if self.send_header_only:
                await send(
                    {"type": "http.response.body", "body": b"", "more_body": False}
                )
            else:
                remaining_bytes = len(byte_range)

                if not byte_range:
                    await send(
                        {"type": "http.response.body", "body": b"", "more_body": False}
                    )
                    return

                while remaining_bytes > 0:
                    chunk_size = min(self.chunk_size, remaining_bytes)
                    chunk = await file.read(chunk_size)
                    remaining_bytes -= len(chunk)
                    await send(
                        {
                            "type": "http.response.body",
                            "body": chunk,
                            "more_body": remaining_bytes > 0,
                        }
                    )


class RangedStaticFiles(StaticFiles):
    def file_response(
        self,
        full_path: str | os.PathLike,
        stat_result: os.stat_result,
        scope: Scope,
        status_code: int = 200,
    ) -> Response:
        request_headers = Headers(scope=scope)

        if request_headers.get("range"):
            response = self.ranged_file_response(
                full_path, stat_result=stat_result, scope=scope
            )
        else:
            response = super().file_response(
                full_path, stat_result=stat_result, scope=scope, status_code=status_code
            )
        response.headers["accept-ranges"] = "bytes"
        return response

    def ranged_file_response(
        self,
        full_path: str | os.PathLike,
        stat_result: os.stat_result,
        scope: Scope,
    ) -> Response:
        method = scope["method"]
        request_headers = Headers(scope=scope)

        range_header = request_headers["range"]

        match = RANGE_REGEX.search(range_header)
        if not match:
            raise HTTPException(400)

        start, end = match.group("start"), match.group("end")

        range = OpenRange(int(start), int(end) if end else None)

        return RangedFileResponse(
            full_path, range, stat_result=stat_result, method=method
        )
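RangedStaticFiles is a drop-in replacement for Starlette's StaticFiles that routes any request carrying a Range header through RangedFileResponse, producing a 206 partial response. A minimal mounting sketch, assuming the module is importable under the path shown above and that a ./static directory exists; both are illustrative:

# Sketch: serve ./static with byte-range support using the class above.
from starlette.applications import Starlette
from starlette.routing import Mount

from gradio.ranged_response import RangedStaticFiles

app = Starlette(
    routes=[Mount("/static", app=RangedStaticFiles(directory="static"), name="static")]
)
# A request such as
#   GET /static/video.mp4
#   Range: bytes=0-1023
# now yields "206 Partial Content" with Content-Range "bytes 0-1023/<size>".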
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Copy-9f1657c4.js
DELETED
@@ -1,2 +0,0 @@
import{S as p,e as c,s as h,J as a,K as e,p as u,M as i,n as o,A as d}from"./index-1d65707a.js";function v(l){let t,s;return{c(){t=a("svg"),s=a("polyline"),e(s,"points","20 6 9 17 4 12"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 24 24"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","3"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round")},m(n,r){u(n,t,r),i(t,s)},p:o,i:o,o,d(n){n&&d(t)}}}class m extends p{constructor(t){super(),c(this,t,null,v,h,{})}}function w(l){let t,s,n;return{c(){t=a("svg"),s=a("path"),n=a("path"),e(s,"fill","currentColor"),e(s,"d","M28 10v18H10V10h18m0-2H10a2 2 0 0 0-2 2v18a2 2 0 0 0 2 2h18a2 2 0 0 0 2-2V10a2 2 0 0 0-2-2Z"),e(n,"fill","currentColor"),e(n,"d","M4 18H2V4a2 2 0 0 1 2-2h14v2H4Z"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","100%"),e(t,"height","100%"),e(t,"viewBox","0 0 32 32")},m(r,g){u(r,t,g),i(t,s),i(t,n)},p:o,i:o,o,d(r){r&&d(t)}}}export{x as C,m as a};
//# sourceMappingURL=Copy-9f1657c4.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/index.html
DELETED
@@ -1,84 +0,0 @@
<!doctype html>
<html
  lang="en"
  style="
    margin: 0;
    padding: 0;
    min-height: 100%;
    display: flex;
    flex-direction: column;
  "
>
  <head>
    <meta charset="utf-8" />
    <meta
      name="viewport"
      content="width=device-width, initial-scale=1, shrink-to-fit=no, maximum-scale=1"
    />

    <meta property="og:url" content="https://gradio.app/" />
    <meta property="og:type" content="website" />
    <meta property="og:image" content="{{ config['thumbnail'] or '' }}" />
    <meta property="og:title" content="{{ config['title'] or '' }}" />
    <meta
      property="og:description"
      content="{{ config['simple_description'] or '' }}"
    />
    <meta name="twitter:card" content="summary_large_image" />
    <meta name="twitter:creator" content="@teamGradio" />
    <meta name="twitter:title" content="{{ config['title'] or '' }}" />
    <meta
      name="twitter:description"
      content="{{ config['simple_description'] or '' }}"
    />
    <meta name="twitter:image" content="{{ config['thumbnail'] or '' }}" />

    <script>
      window.__gradio_mode__ = "app";
    </script>

    <script>window.gradio_config = {{ config | toorjson }};</script>

    <link rel="preconnect" href="https://fonts.googleapis.com" />
    <link
      rel="preconnect"
      href="https://fonts.gstatic.com"
      crossorigin="anonymous"
    />
    <script
      src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.6/iframeResizer.contentWindow.min.js"
      async
    ></script>
    <script type="module" crossorigin src="https://gradio.s3-us-west-2.amazonaws.com/3.37.0/assets/index-1d65707a.js"></script>

  </head>

  <body
    style="
      width: 100%;
      margin: 0;
      padding: 0;
      display: flex;
      flex-direction: column;
      flex-grow: 1;
    "
  >
    <gradio-app
      control_page_title="true"
      embed="false"
      eager="true"
      style="display: flex; flex-direction: column; flex-grow: 1"
    >
    </gradio-app>
    <script>
      const ce = document.getElementsByTagName("gradio-app");
      if (ce[0]) {
        ce[0].addEventListener("domchange", () => {
          document.body.style.padding = "0";
        });
        document.body.style.padding = "0";
      }
    </script>
  </body>
</html>
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/__init__.py
DELETED
@@ -1,62 +0,0 @@
# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230),
# containing no networking code at all, loosely modelled on hyper-h2's generic
# implementation of HTTP/2 (and in particular the h2.connection.H2Connection
# class). There's still a bunch of subtle details you need to get right if you
# want to make this actually useful, because it doesn't implement all the
# semantics to check that what you're asking to write to the wire is sensible,
# but at least it gets you out of dealing with the wire itself.

from h11._connection import Connection, NEED_DATA, PAUSED
from h11._events import (
    ConnectionClosed,
    Data,
    EndOfMessage,
    Event,
    InformationalResponse,
    Request,
    Response,
)
from h11._state import (
    CLIENT,
    CLOSED,
    DONE,
    ERROR,
    IDLE,
    MIGHT_SWITCH_PROTOCOL,
    MUST_CLOSE,
    SEND_BODY,
    SEND_RESPONSE,
    SERVER,
    SWITCHED_PROTOCOL,
)
from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError
from h11._version import __version__

PRODUCT_ID = "python-h11/" + __version__


__all__ = (
    "Connection",
    "NEED_DATA",
    "PAUSED",
    "ConnectionClosed",
    "Data",
    "EndOfMessage",
    "Event",
    "InformationalResponse",
    "Request",
    "Response",
    "CLIENT",
    "CLOSED",
    "DONE",
    "ERROR",
    "IDLE",
    "MUST_CLOSE",
    "SEND_BODY",
    "SEND_RESPONSE",
    "SERVER",
    "SWITCHED_PROTOCOL",
    "ProtocolError",
    "LocalProtocolError",
    "RemoteProtocolError",
)
spaces/Datasculptor/AIart_sources_of_inspiration/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Identifying Painting Authors
emoji: 🎨
colorFrom: indigo
colorTo: blue
sdk: gradio
sdk_version: 3.12.0
app_file: app.py
pinned: false
duplicated_from: Datasculptor/Predicting_Authors
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference