Commit cd758f0
Parent(s): 907e826

Update parquet files (step 22 of 296)
This view is limited to 50 files because the commit contains too many changes.
- spaces/0xSynapse/LlamaGPT/app.py +0 -408
- spaces/101-5/gpt4free/g4f/.v1/testing/aicolors_test.py +0 -6
- spaces/17TheWord/RealESRGAN/realesrgan/utils.py +0 -280
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Badmashiyaan Fun Never Ends Movie In Hindi Mp4.md +0 -25
- spaces/1gistliPinn/ChatGPT4/Examples/DVDpedia 6.0.1 Crack macOS MacOSX The Ultimate Movie Cataloging Software.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Evangelion 111 Vostfr Ddl TOP.md +0 -6
- spaces/1phancelerku/anime-remove-background/Convert YouTube Videos to MP3 Files for Free and Easy Listening.md +0 -150
- spaces/1phancelerku/anime-remove-background/Enjoy Unlimited Lives and Boosters with Candy Crush Saga APK.md +0 -87
- spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/style.css +0 -28
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/vocoders/__init__.py +0 -1
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rnn.py +0 -261
- spaces/AIGText/GlyphControl/ldm/modules/midas/midas/midas_net.py +0 -76
- spaces/AIML-TUDA/does-clip-know-my-face/README.md +0 -64
- spaces/AIWaves/Debate/src/agents/Memory/base_Memory.py +0 -32
- spaces/ASJMO/freegpt/server/babel.py +0 -48
- spaces/Aashir01/Live_Transcription/README.md +0 -13
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/fused_bias_act.py +0 -211
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/paths_config.py +0 -24
- spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/misc.py +0 -294
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention.py +0 -390
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_config_docstrings.py +0 -84
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py +0 -36
- spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/hourglass.py +0 -198
- spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py +0 -2
- spaces/Artrajz/vits-simple-api/static/css/bootstrap.min.css +0 -0
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/inspect.py +0 -92
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/base.py +0 -20
- spaces/Awesimo/jojogan/e4e/models/stylegan2/op/__init__.py +0 -0
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/blocks.py +0 -111
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_export_caffe2.py +0 -52
- spaces/BalaBhaskarudu/Balu/app.py +0 -34
- spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py +0 -123
- spaces/BenjaminB/pyscript-demo/style.css +0 -28
- spaces/Benson/text-generation/Examples/Chessclub.com Download.md +0 -74
- spaces/Benson/text-generation/Examples/Descargar Carretes De Instagram De Alta Calidad.md +0 -69
- spaces/Benson/text-generation/Examples/Descargar El Juego Completo De La Saga De Verano 2022.md +0 -140
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/base.py +0 -26
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_loop.py +0 -43
- spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/ArrowTable.ts +0 -224
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/partition.h +0 -23
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/transform.h +0 -23
- spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/train.py +0 -312
- spaces/CikeyQI/meme-api/meme_generator/memes/gif_subtitle/__init__.py +0 -153
- spaces/CirnoW/anime-ai-detect/README.md +0 -13
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/log.py +0 -47
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/solver/__init__.py +0 -4
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/security/http.py +0 -165
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Image-1cf93ae5.js +0 -2
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Model3D-db673911.js +0 -2
- spaces/Dabs/wordcloud/app.py +0 -38
spaces/0xSynapse/LlamaGPT/app.py
DELETED
@@ -1,408 +0,0 @@
-"""Run codes."""
-# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
-# ruff: noqa: E501
-import gc
-import os
-import platform
-import random
-import time
-from dataclasses import asdict, dataclass
-from pathlib import Path
-
-# from types import SimpleNamespace
-import gradio as gr
-import psutil
-from about_time import about_time
-from ctransformers import AutoModelForCausalLM
-from dl_hf_model import dl_hf_model
-from loguru import logger
-
-
-
-
-# url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/blob/main/llama-2-13b-chat.ggmlv3.q2_K.bin"
-#url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"  # 2.87G
-url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q4_K_M.bin"  # 2.87G
-
-
-prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
-
-### Instruction: {user_prompt}
-
-### Response:
-"""
-
-prompt_template = """System: You are a helpful,
-respectful and honest assistant. Always answer as
-helpfully as possible, while being safe. Your answers
-should not include any harmful, unethical, racist,
-sexist, toxic, dangerous, or illegal content. Please
-ensure that your responses are socially unbiased and
-positive in nature. If a question does not make any
-sense, or is not factually coherent, explain why instead
-of answering something not correct. If you don't know
-the answer to a question, please don't share false
-information.
-User: {prompt}
-Assistant: """
-
-prompt_template = """System: You are a helpful assistant.
-User: {prompt}
-Assistant: """
-
-prompt_template = """Question: {question}
-Answer: Let's work this out in a step by step way to be sure we have the right answer."""
-
-prompt_template = """[INST] <>
-You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step.
-<>
-
-What NFL team won the Super Bowl in the year Justin Bieber was born?
-[/INST]"""
-
-prompt_template = """[INST] <<SYS>>
-You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
-
-{question} [/INST]
-"""
-
-prompt_template = """[INST] <<SYS>>
-You are a helpful assistant.
-<</SYS>>
-
-{question} [/INST]
-"""
-
-_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
-stop_string = [elm.split(":")[0] + ":" for elm in _][-2]
-
-logger.debug(f"{stop_string=}")
-
-_ = psutil.cpu_count(logical=False) - 1
-cpu_count: int = int(_) if _ else 1
-logger.debug(f"{cpu_count=}")
-
-LLM = None
-gc.collect()
-
-try:
-    model_loc, file_size = dl_hf_model(url)
-except Exception as exc_:
-    logger.error(exc_)
-    raise SystemExit(1) from exc_
-
-LLM = AutoModelForCausalLM.from_pretrained(
-    model_loc,
-    model_type="llama",
-    # threads=cpu_count,
-)
-
-logger.info(f"done load llm {model_loc=} {file_size=}G")
-
-os.environ["TZ"] = "Asia/Shanghai"
-try:
-    time.tzset()  # type: ignore # pylint: disable=no-member
-except Exception:
-    # Windows
-    logger.warning("Windows, cant run time.tzset()")
-
-_ = """
-ns = SimpleNamespace(
-    response="",
-    generator=(_ for _ in []),
-)
-# """
-
-@dataclass
-class GenerationConfig:
-    temperature: float = 0.7
-    top_k: int = 50
-    top_p: float = 0.9
-    repetition_penalty: float = 1.0
-    max_new_tokens: int = 512
-    seed: int = 42
-    reset: bool = False
-    stream: bool = True
-    # threads: int = cpu_count
-    # stop: list[str] = field(default_factory=lambda: [stop_string])
-
-
-def generate(
-    question: str,
-    llm=LLM,
-    config: GenerationConfig = GenerationConfig(),
-):
-    """Run model inference, will return a Generator if streaming is true."""
-    # _ = prompt_template.format(question=question)
-    # print(_)
-
-    prompt = prompt_template.format(question=question)
-
-    return llm(
-        prompt,
-        **asdict(config),
-    )
-
-
-logger.debug(f"{asdict(GenerationConfig())=}")
-
-
-def user(user_message, history):
-    # return user_message, history + [[user_message, None]]
-    history.append([user_message, None])
-    return user_message, history  # keep user_message
-
-
-def user1(user_message, history):
-    # return user_message, history + [[user_message, None]]
-    history.append([user_message, None])
-    return "", history  # clear user_message
-
-
-def bot_(history):
-    user_message = history[-1][0]
-    resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
-    bot_message = user_message + ": " + resp
-    history[-1][1] = ""
-    for character in bot_message:
-        history[-1][1] += character
-        time.sleep(0.02)
-        yield history
-
-    history[-1][1] = resp
-    yield history
-
-
-def bot(history):
-    user_message = history[-1][0]
-    response = []
-
-    logger.debug(f"{user_message=}")
-
-    with about_time() as atime:  # type: ignore
-        flag = 1
-        prefix = ""
-        then = time.time()
-
-        logger.debug("about to generate")
-
-        config = GenerationConfig(reset=True)
-        for elm in generate(user_message, config=config):
-            if flag == 1:
-                logger.debug("in the loop")
-                prefix = f"({time.time() - then:.2f}s) "
-                flag = 0
-                print(prefix, end="", flush=True)
-                logger.debug(f"{prefix=}")
-            print(elm, end="", flush=True)
-            # logger.debug(f"{elm}")
-
-            response.append(elm)
-            history[-1][1] = prefix + "".join(response)
-            yield history
-
-    _ = (
-        f"(time elapsed: {atime.duration_human}, "  # type: ignore
-        f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
-    )
-
-    history[-1][1] = "".join(response) + f"\n{_}"
-    yield history
-
-
-def predict_api(prompt):
-    logger.debug(f"{prompt=}")
-    try:
-        # user_prompt = prompt
-        config = GenerationConfig(
-            temperature=0.2,
-            top_k=10,
-            top_p=0.9,
-            repetition_penalty=1.0,
-            max_new_tokens=512,  # adjust as needed
-            seed=42,
-            reset=True,  # reset history (cache)
-            stream=False,
-            # threads=cpu_count,
-            # stop=prompt_prefix[1:2],
-        )
-
-        response = generate(
-            prompt,
-            config=config,
-        )
-
-        logger.debug(f"api: {response=}")
-    except Exception as exc:
-        logger.error(exc)
-        response = f"{exc=}"
-    # bot = {"inputs": [response]}
-    # bot = [(prompt, response)]
-
-    return response
-
-
-css = """
-    .importantButton {
-        background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
-        border: none !important;
-    }
-    .importantButton:hover {
-        background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
-        border: none !important;
-    }
-    .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
-    .xsmall {font-size: x-small;}
-"""
-etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
-examples_list = [
-    ["What is the capital of India"],
-    ["How to play Chess? Provide detailed steps."],
-    ["If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying , then how long will it take to dry a cloth?"],
-    ["is infinity + 1 bigger than infinity?"],
-    ["Explain the plot of Oppenheimer 2023 movie in a sentence."],
-    ["How long does it take to become proficient in French, and what are the best methods for retaining information?"],
-    ["What are some common mistakes to avoid when writing code?"],
-    ["Build a prompt to generate a beautiful portrait of a horse"],
-    ["Suggest four metaphors to describe the benefits of AI"],
-    ["Write most important points of Bhagavad Gita"],
-    ["Write a summary Why is it so hard to understand Quantum mechanics"],
-
-]
-
-logger.info("start block")
-
-with gr.Blocks(
-    title="LlamaGPT🤖",
-    theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
-    css=css,
-) as block:
-    # buff_var = gr.State("")
-    with gr.Accordion("LlamaGPT🧠", open=False, style={"text-align": "center", "font-weight": "bold"}):
-
-        gr.Markdown(
-            f"""<div style="text-align: center;">
-            <h5>Gradio Demo for Meta's Llama 2 7B-chat</h5><br>
-            Few examples are there as prompts to test the model. You probably should try on your own related prompts to test the bot.
-            </div>""",
-            elem_classes="xsmall",
-        )
-
-    # chatbot = gr.Chatbot().style(height=700)  # 500
-    chatbot = gr.Chatbot(height=500)
-
-    # buff = gr.Textbox(show_label=False, visible=True)
-
-    with gr.Row():
-        with gr.Column(scale=5):
-            msg = gr.Textbox(
-                label="Chat Message Box",
-                placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
-                show_label=False,
-                # container=False,
-                lines=6,
-                max_lines=30,
-                show_copy_button=True,
-                # ).style(container=False)
-            )
-        with gr.Column(scale=1, min_width=50):
-            with gr.Row():
-                submit = gr.Button("Submit", elem_classes="xsmall")
-                stop = gr.Button("Stop", visible=True)
-                clear = gr.Button("Clear History", visible=True)
-    with gr.Row(visible=False):
-        with gr.Accordion("Advanced Options:", open=False):
-            with gr.Row():
-                with gr.Column(scale=2):
-                    system = gr.Textbox(
-                        label="System Prompt",
-                        value=prompt_template,
-                        show_label=False,
-                        container=False,
-                        # ).style(container=False)
-                    )
-                with gr.Column():
-                    with gr.Row():
-                        change = gr.Button("Change System Prompt")
-                        reset = gr.Button("Reset System Prompt")
-
-    with gr.Accordion("Example Inputs", open=True):
-        examples = gr.Examples(
-            examples=examples_list,
-            inputs=[msg],
-            examples_per_page=40,
-        )
-
-    # with gr.Row():
-    with gr.Accordion("Disclaimer", open=False):
-        _ = Path(model_loc).name
-        gr.Markdown(
-            f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce "
-            "factually accurate information. {_} was trained on various public datasets; while great efforts "
-            "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
-            "biased, or otherwise offensive outputs.",
-            elem_classes=["disclaimer"],
-        )
-
-    msg_submit_event = msg.submit(
-        # fn=conversation.user_turn,
-        fn=user,
-        inputs=[msg, chatbot],
-        outputs=[msg, chatbot],
-        queue=True,
-        show_progress="full",
-        # api_name=None,
-    ).then(bot, chatbot, chatbot, queue=True)
-    submit_click_event = submit.click(
-        # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
-        fn=user1,  # clear msg
-        inputs=[msg, chatbot],
-        outputs=[msg, chatbot],
-        queue=True,
-        # queue=False,
-        show_progress="full",
-        # api_name=None,
-    ).then(bot, chatbot, chatbot, queue=True)
-    stop.click(
-        fn=None,
-        inputs=None,
-        outputs=None,
-        cancels=[msg_submit_event, submit_click_event],
-        queue=False,
-    )
-    clear.click(lambda: None, None, chatbot, queue=False)
-
-    with gr.Accordion("For Chat/Translation API", open=False, visible=False):
-        input_text = gr.Text()
-        api_btn = gr.Button("Go", variant="primary")
-        out_text = gr.Text()
-
-    api_btn.click(
-        predict_api,
-        input_text,
-        out_text,
-        api_name="api",
-    )
-
-    # block.load(update_buff, [], buff, every=1)
-    # block.load(update_buff, [buff_var], [buff_var, buff], every=1)
-
-# concurrency_count=5, max_size=20
-# max_size=36, concurrency_count=14
-# CPU cpu_count=2 16G, model 7G
-# CPU UPGRADE cpu_count=8 32G, model 7G
-
-# does not work
-_ = """
-# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
-# concurrency_count = max(_, 1)
-if psutil.cpu_count(logical=False) >= 8:
-    # concurrency_count = max(int(32 / file_size) - 1, 1)
-else:
-    # concurrency_count = max(int(16 / file_size) - 1, 1)
-# """
-
-concurrency_count = 1
-logger.info(f"{concurrency_count=}")
-
-block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
spaces/101-5/gpt4free/g4f/.v1/testing/aicolors_test.py
DELETED
@@ -1,6 +0,0 @@
-from gpt4free import aicolors
-
-prompt = "Light green color"
-req = aicolors.Completion.create(prompt=prompt)
-
-print(req)
spaces/17TheWord/RealESRGAN/realesrgan/utils.py
DELETED
@@ -1,280 +0,0 @@
-import cv2
-import math
-import numpy as np
-import os
-import queue
-import threading
-import torch
-from basicsr.utils.download_util import load_file_from_url
-from torch.nn import functional as F
-
-ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-
-class RealESRGANer():
-    """A helper class for upsampling images with RealESRGAN.
-
-    Args:
-        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
-        model_path (str): The path to the pretrained model. It can be urls (will first download it automatically).
-        model (nn.Module): The defined network. Default: None.
-        tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop
-            input images into tiles, and then process each of them. Finally, they will be merged into one image.
-            0 denotes for do not use tile. Default: 0.
-        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
-        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
-        half (float): Whether to use half precision during inference. Default: False.
-    """
-
-    def __init__(self, scale, model_path, model=None, tile=0, tile_pad=10, pre_pad=10, half=False):
-        self.scale = scale
-        self.tile_size = tile
-        self.tile_pad = tile_pad
-        self.pre_pad = pre_pad
-        self.mod_scale = None
-        self.half = half
-
-        # initialize model
-        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        # if the model_path starts with https, it will first download models to the folder: realesrgan/weights
-        if model_path.startswith('https://'):
-            model_path = load_file_from_url(
-                url=model_path, model_dir=os.path.join(ROOT_DIR, 'realesrgan/weights'), progress=True, file_name=None)
-        loadnet = torch.load(model_path, map_location=torch.device('cpu'))
-        # prefer to use params_ema
-        if 'params_ema' in loadnet:
-            keyname = 'params_ema'
-        else:
-            keyname = 'params'
-        model.load_state_dict(loadnet[keyname], strict=True)
-        model.eval()
-        self.model = model.to(self.device)
-        if self.half:
-            self.model = self.model.half()
-
-    def pre_process(self, img):
-        """Pre-process, such as pre-pad and mod pad, so that the images can be divisible
-        """
-        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
-        self.img = img.unsqueeze(0).to(self.device)
-        if self.half:
-            self.img = self.img.half()
-
-        # pre_pad
-        if self.pre_pad != 0:
-            self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
-        # mod pad for divisible borders
-        if self.scale == 2:
-            self.mod_scale = 2
-        elif self.scale == 1:
-            self.mod_scale = 4
-        if self.mod_scale is not None:
-            self.mod_pad_h, self.mod_pad_w = 0, 0
-            _, _, h, w = self.img.size()
-            if (h % self.mod_scale != 0):
-                self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
-            if (w % self.mod_scale != 0):
-                self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
-            self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')
-
-    def process(self):
-        # model inference
-        self.output = self.model(self.img)
-
-    def tile_process(self):
-        """It will first crop input images to tiles, and then process each tile.
-        Finally, all the processed tiles are merged into one images.
-
-        Modified from: https://github.com/ata4/esrgan-launcher
-        """
-        batch, channel, height, width = self.img.shape
-        output_height = height * self.scale
-        output_width = width * self.scale
-        output_shape = (batch, channel, output_height, output_width)
-
-        # start with black image
-        self.output = self.img.new_zeros(output_shape)
-        tiles_x = math.ceil(width / self.tile_size)
-        tiles_y = math.ceil(height / self.tile_size)
-
-        # loop over all tiles
-        for y in range(tiles_y):
-            for x in range(tiles_x):
-                # extract tile from input image
-                ofs_x = x * self.tile_size
-                ofs_y = y * self.tile_size
-                # input tile area on total image
-                input_start_x = ofs_x
-                input_end_x = min(ofs_x + self.tile_size, width)
-                input_start_y = ofs_y
-                input_end_y = min(ofs_y + self.tile_size, height)
-
-                # input tile area on total image with padding
-                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
-                input_end_x_pad = min(input_end_x + self.tile_pad, width)
-                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
-                input_end_y_pad = min(input_end_y + self.tile_pad, height)
-
-                # input tile dimensions
-                input_tile_width = input_end_x - input_start_x
-                input_tile_height = input_end_y - input_start_y
-                tile_idx = y * tiles_x + x + 1
-                input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
-
-                # upscale tile
-                try:
-                    with torch.no_grad():
-                        output_tile = self.model(input_tile)
-                except RuntimeError as error:
-                    print('Error', error)
-                    print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
-
-                # output tile area on total image
-                output_start_x = input_start_x * self.scale
-                output_end_x = input_end_x * self.scale
-                output_start_y = input_start_y * self.scale
-                output_end_y = input_end_y * self.scale
-
-                # output tile area without padding
-                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
-                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
-                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
-                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
-
-                # put tile into output image
-                self.output[:, :, output_start_y:output_end_y,
-                            output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
-                                                                       output_start_x_tile:output_end_x_tile]
-
-    def post_process(self):
-        # remove extra pad
-        if self.mod_scale is not None:
-            _, _, h, w = self.output.size()
-            self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
-        # remove prepad
-        if self.pre_pad != 0:
-            _, _, h, w = self.output.size()
-            self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
-        return self.output
-
-    @torch.no_grad()
-    def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
-        h_input, w_input = img.shape[0:2]
-        # img: numpy
-        img = img.astype(np.float32)
-        if np.max(img) > 256:  # 16-bit image
-            max_range = 65535
-            print('\tInput is a 16-bit image')
-        else:
-            max_range = 255
-        img = img / max_range
-        if len(img.shape) == 2:  # gray image
-            img_mode = 'L'
-            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
-        elif img.shape[2] == 4:  # RGBA image with alpha channel
-            img_mode = 'RGBA'
-            alpha = img[:, :, 3]
-            img = img[:, :, 0:3]
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-            if alpha_upsampler == 'realesrgan':
-                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
-        else:
-            img_mode = 'RGB'
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-        # ------------------- process image (without the alpha channel) ------------------- #
-        self.pre_process(img)
-        if self.tile_size > 0:
-            self.tile_process()
-        else:
-            self.process()
-        output_img = self.post_process()
-        output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy()
-        output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
-        if img_mode == 'L':
-            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
-
-        # ------------------- process the alpha channel if necessary ------------------- #
-        if img_mode == 'RGBA':
-            if alpha_upsampler == 'realesrgan':
-                self.pre_process(alpha)
-                if self.tile_size > 0:
-                    self.tile_process()
-                else:
-                    self.process()
-                output_alpha = self.post_process()
-                output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
-                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
-                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
-            else:  # use the cv2 resize for alpha channel
-                h, w = alpha.shape[0:2]
-                output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)
-
-            # merge the alpha channel
-            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
-            output_img[:, :, 3] = output_alpha
-
-        # ------------------------------ return ------------------------------ #
-        if max_range == 65535:  # 16-bit image
-            output = (output_img * 65535.0).round().astype(np.uint16)
-        else:
-            output = (output_img * 255.0).round().astype(np.uint8)
-
-        if outscale is not None and outscale != float(self.scale):
-            output = cv2.resize(
-                output, (
-                    int(w_input * outscale),
-                    int(h_input * outscale),
-                ), interpolation=cv2.INTER_LANCZOS4)
-
-        return output, img_mode
-
-
-class PrefetchReader(threading.Thread):
-    """Prefetch images.
-
-    Args:
-        img_list (list[str]): A image list of image paths to be read.
-        num_prefetch_queue (int): Number of prefetch queue.
-    """
-
-    def __init__(self, img_list, num_prefetch_queue):
-        super().__init__()
-        self.que = queue.Queue(num_prefetch_queue)
-        self.img_list = img_list
-
-    def run(self):
-        for img_path in self.img_list:
-            img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
-            self.que.put(img)
-
-        self.que.put(None)
-
-    def __next__(self):
-        next_item = self.que.get()
-        if next_item is None:
-            raise StopIteration
-        return next_item
-
-    def __iter__(self):
-        return self
-
-
-class IOConsumer(threading.Thread):
-
-    def __init__(self, opt, que, qid):
-        super().__init__()
-        self._queue = que
-        self.qid = qid
-        self.opt = opt
-
-    def run(self):
-        while True:
-            msg = self._queue.get()
-            if isinstance(msg, str) and msg == 'quit':
-                break
-
-            output = msg['output']
-            save_path = msg['save_path']
-            cv2.imwrite(save_path, output)
-        print(f'IO worker {self.qid} is done.')
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Badmashiyaan Fun Never Ends Movie In Hindi Mp4.md
DELETED
@@ -1,25 +0,0 @@
-
-<h1>How to Download Badmashiyaan Fun Never Ends Movie In Hindi Mp4</h1>
-<p>Badmashiyaan Fun Never Ends is a 2015 Hindi romantic comedy film that revolves around the lives of five people who cross paths in unexpected ways. The film stars Sharib Hashmi, Sidhant Gupta, Suzanna Mukherjee, Karan Mehra and Gunjan Malhotra in the lead roles. The film is directed by Amit Khanna and produced by Vijay Gutte.</p>
-<h2>Download Badmashiyaan Fun Never Ends Movie In Hindi Mp4</h2><br /><p><b><b>Download Zip</b> ———>>> <a href="https://byltly.com/2uKyHI">https://byltly.com/2uKyHI</a></b></p><br /><br />
-<p>If you are looking for a fun and entertaining movie to watch with your friends or family, you might want to download Badmashiyaan Fun Never Ends movie in Hindi mp4 format. Mp4 is a popular video format that can be played on various devices such as smartphones, tablets, laptops and TVs. Mp4 also offers good quality and compression, which means you can enjoy the movie without taking up too much space on your device.</p>
-<p>But how can you download Badmashiyaan Fun Never Ends movie in Hindi mp4? Here are some easy steps to follow:</p>
-<ol>
-<li>Find a reliable and legal website that offers Badmashiyaan Fun Never Ends movie in Hindi mp4 for download. You can search on Google or use a website like Movies on Google Play that has a wide collection of movies in different languages and formats.</li>
-<li>Select the movie and click on the download button. You might need to sign up or pay a small fee depending on the website. Make sure you have enough storage space on your device before downloading.</li>
-<li>Wait for the download to complete. Depending on your internet speed and the size of the file, this might take some time. You can check the progress of the download on your device or browser.</li>
-<li>Once the download is done, you can enjoy watching Badmashiyaan Fun Never Ends movie in Hindi mp4 anytime and anywhere you want. You can also share it with your friends or family if you like.</li>
-</ol>
-<p>That's it! You have successfully downloaded Badmashiyaan Fun Never Ends movie in Hindi mp4. Now you can laugh along with the hilarious antics of Dev, Nari, Palak, Pinkesh and Jassi as they find love and mischief in this fun-filled film.</p>
-
-<h2>Badmashiyaan Fun Never Ends Movie Review</h2>
-<p>Now that you know how to download Badmashiyaan Fun Never Ends movie in Hindi mp4, you might be wondering if it is worth watching. Well, to be honest, the movie is not very impressive or enjoyable. It has a weak plot, mediocre acting and a boring soundtrack. The movie tries to be a romantic comedy, but fails to make you laugh or feel for the characters.</p>
-<p></p>
-<p>The movie has five main characters who are involved in a complicated web of love and deceit. Naari is a con girl who dates men and runs away with their money. Dev is a cafe owner who falls for Naari and gets dumped by her. Palak is a girl who lives next door to Dev and develops feelings for him. Pinkesh is Dev's friend and a private investigator who is also obsessed with Naari. Jassi is a reformed gangster who becomes Naari's next target.</p>
-<p>The movie follows their stories as they cross paths in one eventful day. The movie tries to show how fate plays a role in bringing people together or apart. However, the movie does not have any logic or coherence in its narration. The characters are poorly written and have no depth or personality. The dialogues are dull and cliched. The situations are unrealistic and forced.</p>
-<p>The movie also lacks any humor or romance. The comedy scenes are not funny at all and rely on cheap jokes and slapstick. The romance scenes are not romantic at all and lack any chemistry or emotion. The movie does not make you care about any of the characters or their relationships.</p>
-<p>The movie also has a disappointing soundtrack that does not add any value to the movie. The songs are forgettable and do not suit the mood or theme of the movie. The background score is also bland and repetitive.</p>
-<p>The only saving grace of the movie is Karan Mehra's comic performance as Pinkesh. He manages to bring some laughter with his quirky expressions and dialogues. He is the only character who has some charm and likability in the movie.</p>
-<p>Overall, Badmashiyaan Fun Never Ends is a movie that you can easily skip and not miss anything. It is a waste of time and money and does not offer any entertainment or satisfaction. It is a poorly made movie that does not live up to its title.</p> cec2833e83<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/DVDpedia 6.0.1 Crack macOS MacOSX The Ultimate Movie Cataloging Software.md
DELETED
@@ -1,6 +0,0 @@
-<h2>DVDpedia 6.0.1 Crack macOS MacOSX</h2><br /><p><b><b>Download File</b> ––– <a href="https://imgfil.com/2uy02w">https://imgfil.com/2uy02w</a></b></p><br /><br />
-<br />
-aaccfb2cb3<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Evangelion 111 Vostfr Ddl TOP.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Evangelion 111 Vostfr Ddl</h2><br /><p><b><b>Download File</b> ✓✓✓ <a href="https://imgfil.com/2uy02i">https://imgfil.com/2uy02i</a></b></p><br /><br />
-
-[url=http://plan-gay.net/filmvf-vostfr/293982/insurrection-streaming-complet ... [url=http://hogew0.com/Anime/armitage-iii-polymatrix-dub.73867/] ... [url=http://y2mk89.com/info/evangelion-the-end-of-evangelion]Evangelion: The ... 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1phancelerku/anime-remove-background/Convert YouTube Videos to MP3 Files for Free and Easy Listening.md
DELETED
@@ -1,150 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>How to Download Music from YouTube for Free</h1>
|
3 |
-
<p>YouTube is one of the most popular platforms for streaming videos and music online. Millions of people use YouTube every day to listen to their favorite songs, artists, and genres. But what if you want to download music from YouTube and listen to it offline, without ads or interruptions? Is it possible, legal, and ethical?</p>
|
4 |
-
<h2>how to download music from youtube for free</h2><br /><p><b><b>DOWNLOAD</b> → <a href="https://jinyurl.com/2uNOPp">https://jinyurl.com/2uNOPp</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<p>In this article, we will show you four different methods to download music from YouTube for free. We will explain how each method works, what are the advantages and disadvantages, and what are the legal and ethical implications. By the end of this article, you will be able to choose the best method for your needs and enjoy your favorite music offline.</p>
|
7 |
-
<h3>Why download music from YouTube?</h3>
|
8 |
-
<p>There are many reasons why you might want to download music from YouTube. Some of them are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You want to listen to music offline, without internet connection or Wi-Fi.</li>
|
11 |
-
<li>You want to save data or battery on your mobile device.</li>
|
12 |
-
<li>You want to create your own playlists or mixtapes.</li>
|
13 |
-
<li>You want to use the music for your own creative projects, such as videos, podcasts, or presentations.</li>
|
14 |
-
<li>You want to have a backup of your favorite songs in case they get removed or blocked on YouTube.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>What are the legal and ethical issues?</h3>
|
17 |
-
<p>Before you start downloading music from YouTube, you should be aware of the legal and ethical issues involved. Downloading music from YouTube is not always allowed by the site's terms of service or by the law. You should only download music that is yours or that falls under the Creative Commons license, which means that the creator has given permission for others to use their work. You should also respect the rights of the artists and record labels who own the music and pay them for their work if possible. Downloading music from YouTube without permission or payment can be considered piracy and copyright infringement, which can have serious consequences.</p>
|
18 |
-
<p>How to convert YouTube videos to MP3 files<br />
|
19 |
-
How to download songs from YouTube Music Premium<br />
|
20 |
-
How to use 4K Video Downloader to save audio from YouTube<br />
|
21 |
-
How to extract music from YouTube videos with MediaHuman<br />
|
22 |
-
How to download royalty-free music from YouTube Audio Library<br />
|
23 |
-
How to use online converters to download music from YouTube<br />
|
24 |
-
How to download playlists from YouTube Music app<br />
|
25 |
-
How to find and download Creative Commons music from YouTube<br />
|
26 |
-
How to use VLC media player to record audio from YouTube<br />
|
27 |
-
How to download music from YouTube using Firefox extensions<br />
|
28 |
-
How to download music from YouTube using Chrome extensions<br />
|
29 |
-
How to download music from YouTube using Safari extensions<br />
|
30 |
-
How to download music from YouTube using Opera extensions<br />
|
31 |
-
How to download music from YouTube using Microsoft Edge extensions<br />
|
32 |
-
How to download music from YouTube using Android apps<br />
|
33 |
-
How to download music from YouTube using iOS apps<br />
|
34 |
-
How to download music from YouTube using Windows apps<br />
|
35 |
-
How to download music from YouTube using Mac apps<br />
|
36 |
-
How to download music from YouTube using Linux apps<br />
|
37 |
-
How to download music from YouTube using web browsers<br />
|
38 |
-
How to download high-quality music from YouTube<br />
|
39 |
-
How to download 320kbps MP3 files from YouTube<br />
|
40 |
-
How to download FLAC files from YouTube<br />
|
41 |
-
How to download WAV files from YouTube<br />
|
42 |
-
How to download M4A files from YouTube<br />
|
43 |
-
How to edit and trim audio files downloaded from YouTube<br />
|
44 |
-
How to add metadata and album art to audio files downloaded from YouTube<br />
|
45 |
-
How to transfer audio files downloaded from YouTube to other devices<br />
|
46 |
-
How to burn audio files downloaded from YouTube to CDs or DVDs<br />
|
47 |
-
How to upload audio files downloaded from YouTube to cloud storage services<br />
|
48 |
-
How to create ringtones from audio files downloaded from YouTube<br />
|
49 |
-
How to make remixes or mashups from audio files downloaded from YouTube<br />
|
50 |
-
How to use audio files downloaded from YouTube in your own projects<br />
|
51 |
-
How to cite audio files downloaded from YouTube in your academic papers or presentations<br />
|
52 |
-
How to avoid copyright infringement when downloading music from YouTube<br />
|
53 |
-
How to check the license of the music before downloading it from YouTube<br />
|
54 |
-
How to report illegal or inappropriate music on YouTube<br />
|
55 |
-
How to request permission from the artist or rights holder before downloading music from YouTube<br />
|
56 |
-
How to support the artists whose music you download from YouTube<br />
|
57 |
-
How to subscribe and pay for YouTube Music or YouTube Premium services</p>
|
58 |
-
<h2>Method 1: Using YouTube Music Premium</h2>
|
59 |
-
<p>The easiest and most reliable way to download music from YouTube is to subscribe to YouTube Music Premium, a service that lets you listen to YouTube music offline. YouTube Music Premium costs $9.99 per month or $11.99 per month if you also want access to YouTube Premium, which includes ad-free videos and other features. Note that paying for YouTube Music Premium does not give you access to YouTube Premium, but paying for YouTube Premium does give you access to YouTube Music Premium.</p>
|
60 |
-
<h3>How to subscribe to YouTube Music Premium</h3>
|
61 |
-
<p>To subscribe to YouTube Music Premium, you need to have a Google account and a valid payment method. You can sign up for a free trial of one month before you start paying. Here are the steps to subscribe:</p>
|
62 |
-
<ol>
|
63 |
-
<li>Go to [6](https://music.youtube.com/) in your web browser or open the YouTube Music app on your mobile device.</li>
|
64 |
-
<li>Click or tap on your profile icon in the top-right corner of the screen.</li>
|
65 |
-
<li>Select "Upgrade" or "Get Music Premium".</li>
|
66 |
-
<li>Choose your payment method and enter your details.</li>
|
67 |
-
<li>Confirm your subscription and enjoy your free trial.</li>
|
68 |
-
</ol>
|
69 |
-
<h3>How to download music from YouTube Music app</h3>
|
70 |
-
<p>To download music from YouTube Music app, you need to have an active subscription and a mobile device with enough storage space. You can download individual songs, albums, playlists, or your entire library. Here are the steps to download music from YouTube Music app:</p>
|
71 |
-
<ol>
|
72 |
-
<li>Open the YouTube Music app on your mobile device and sign in with your Google account.</li>
|
73 |
-
<li>Find the song, album, playlist, or library that you want to download.</li>
|
74 |
-
<li>Tap on the three-dot menu icon next to the item and select "Download".</li>
|
75 |
-
<li>Wait for the download to complete. You can check the progress in the "Library" tab under "Downloads".</li>
|
76 |
-
<li>To listen to your downloaded music offline, go to the "Library" tab and turn on the "Offline" toggle at the top of the screen.</li>
|
77 |
-
</ol>
|
78 |
-
<h2>Method 2: Using 4K Video Downloader</h2>
|
79 |
-
<p>Another way to download music from YouTube is to use a software called 4K Video Downloader, which lets you download videos and audio from YouTube and other sites. 4K Video Downloader is free to use, but you can upgrade to a premium version for $15 that removes ads and allows unlimited downloads. 4K Video Downloader is available for Windows, Mac, and Linux.</p>
|
80 |
-
<h3>How to download and install 4K Video Downloader</h3>
|
81 |
-
<p>To download and install 4K Video Downloader, you need to have a computer with enough storage space and an internet connection. Here are the steps to download and install 4K Video Downloader:</p>
|
82 |
-
<ol>
|
83 |
-
<li>Go to [5](https://www.4kdownload.com/products/product-videodownloader) in your web browser and click on the "Get 4K Video Downloader" button.</li>
|
84 |
-
<li>Choose your operating system and download the installer file.</li>
|
85 |
-
<li>Run the installer file and follow the instructions to install 4K Video Downloader on your computer.</li>
|
86 |
-
<li>Launch 4K Video Downloader and agree to the terms of service.</li>
|
87 |
-
</ol>
|
88 |
-
<h3>How to download music from YouTube using 4K Video Downloader</h3>
|
89 |
-
<p>To download music from YouTube using 4K Video Downloader, you need to have the software installed on your computer and a YouTube video URL. Here are the steps to download music from YouTube using 4K Video Downloader:</p>
|
90 |
-
<ol>
|
91 |
-
<li>Open your web browser and go to YouTube. Find the video that contains the music that you want to download and copy its URL.</li>
|
92 |
-
<li>Open 4K Video Downloader and click on the "Paste Link" button at the top-left corner of the screen.</li>
|
93 |
-
<li>The software will analyze the video and show you a list of options. Choose the format and quality that you want for your audio file. You can also choose the destination folder for your file.</li>
|
94 |
-
<li>Click on the "Download" button and wait for the download to finish. You can check the progress in the "Downloads" tab.</li>
|
95 |
-
<li>To listen to your downloaded music, go to the destination folder and open the file with your preferred media player.</li>
|
96 |
-
</ol> <h2>Method 3: Using MediaHuman</h2>
|
97 |
-
<p>A third way to download music from YouTube is to use a software called MediaHuman, which lets you download videos and audio from YouTube and other sites. MediaHuman is free to use, but you can upgrade to a premium version for $19.95 that removes ads and allows unlimited downloads. MediaHuman is available for Windows, Mac, and Linux.</p>
|
98 |
-
<h3>How to download and install MediaHuman</h3>
|
99 |
-
<p>To download and install MediaHuman, you need to have a computer with enough storage space and an internet connection. Here are the steps to download and install MediaHuman:</p>
|
100 |
-
<ol>
|
101 |
-
<li>Go to [4](https://www.mediahuman.com/download.html) in your web browser and click on the "Download" button for your operating system.</li>
|
102 |
-
<li>Download the installer file and run it.</li>
|
103 |
-
<li>Follow the instructions to install MediaHuman on your computer.</li>
|
104 |
-
<li>Launch MediaHuman and agree to the terms of service.</li>
|
105 |
-
</ol>
|
106 |
-
<h3>How to download music from YouTube using MediaHuman</h3>
|
107 |
-
<p>To download music from YouTube using MediaHuman, you need to have the software installed on your computer and a YouTube video URL. Here are the steps to download music from YouTube using MediaHuman:</p>
|
108 |
-
<ol>
|
109 |
-
<li>Open your web browser and go to YouTube. Find the video that contains the music that you want to download and copy its URL.</li>
|
110 |
-
<li>Open MediaHuman and click on the "+" button at the top-right corner of the screen.</li>
|
111 |
-
<li>The software will automatically paste the URL and start downloading the audio file. You can change the format, quality, and destination folder of your file in the settings.</li>
|
112 |
-
<li>Wait for the download to complete. You can check the progress in the "Downloads" tab.</li>
|
113 |
-
<li>To listen to your downloaded music, go to the destination folder and open the file with your preferred media player.</li>
|
114 |
-
</ol>
|
115 |
-
<h2>Method 4: Using Online Converters</h2>
|
116 |
-
<p>The last way to download music from YouTube is to use online converters, which are websites that let you convert videos and audio from YouTube and other sites. Online converters are free to use, but they may have limitations on the number of downloads, file size, format, quality, or speed. Online converters are also less reliable and secure than software, as they may contain ads, malware, or viruses.</p>
|
117 |
-
<h3>How to use online converters to download music from YouTube</h3>
|
118 |
-
<p>To use online converters to download music from YouTube, you need to have a web browser, an internet connection, and a YouTube video URL. Here are the steps to use online converters to download music from YouTube:</p>
|
119 |
-
<ol>
|
120 |
-
<li>Open your web browser and go to YouTube. Find the video that contains the music that you want to download and copy its URL.</li>
|
121 |
-
<li>Go to an online converter website, such as [3](https://ytmp3.cc/en13/), [2](https://youtubetomp3music.com/en1/), or [1](https://www.onlinevideoconverter.com/mp3-converter).</li>
|
122 |
-
<li>Paste the URL in the input box and choose the format and quality that you want for your audio file.</li>
|
123 |
-
<li>Click on the "Convert" or "Download" button and wait for the conversion to finish.</li>
|
124 |
-
<li>Click on the "Download" or "Save" button and save the file on your computer or mobile device.</li>
|
125 |
-
</ol>
|
126 |
-
<h3>What are the pros and cons of online converters?</h3>
|
127 |
-
<p>Online converters have some pros and cons that you should consider before using them. Some of them are:</p>
|
128 |
-
| Pros | Cons | | --- | --- | | They are easy and fast to use. | They may have low quality or limited options. | | They do not require installation or registration. | They may have ads or pop-ups that can be annoying or harmful. | | They work on any device or browser. | They may not be safe or secure for your data or device. | <h2>Conclusion</h2>
|
129 |
-
<p>In this article, we have shown you four different methods to download music from YouTube for free. We have explained how each method works, what are the advantages and disadvantages, and what are the legal and ethical implications. We hope that this article has helped you choose the best method for your needs and enjoy your favorite music offline.</p>
|
130 |
-
Here are some FAQs that you might have after reading this article: <h4>Q: Can I download any music from YouTube?</h4>
|
131 |
-
<p>A: No, you can only download music that is yours or that falls under the Creative Commons license, which means that the creator has given permission for others to use their work. Downloading music without permission or payment can be illegal and unethical.</p>
|
132 |
-
<h <h4>Q: Which method is the best for downloading music from YouTube?</h4>
|
133 |
-
<p>A: There is no definitive answer to this question, as different methods have different pros and cons. The best method for you depends on your preferences, needs, and resources. You should consider factors such as quality, speed, convenience, cost, reliability, and security when choosing a method.</p>
|
134 |
-
<h4>Q: How can I download music from YouTube to my iPhone or iPad?</h4>
|
135 |
-
<p>A: You can use any of the methods mentioned in this article to download music from YouTube to your iPhone or iPad, but you may need to transfer the files from your computer to your device using iTunes or a third-party app. Alternatively, you can use an app that allows you to download music directly from YouTube to your device, such as [Documents by Readdle] or [Musify].</p>
|
136 |
-
<h4>Q: How can I download music from YouTube to my Android phone or tablet?</h4>
|
137 |
-
<p>A: You can use any of the methods mentioned in this article to download music from YouTube to your Android phone or tablet, but you may need to change the settings of your device to allow downloads from unknown sources. Alternatively, you can use an app that allows you to download music directly from YouTube to your device, such as [TubeMate] or [SnapTube].</p>
|
138 |
-
<h4>Q: How can I download music from YouTube to my MP3 player?</h4>
|
139 |
-
<p>A: You can use any of the methods mentioned in this article to download music from YouTube to your MP3 player, but you may need to convert the files to MP3 format if they are not already. You can use a software or an online converter to do this. Then, you can transfer the files from your computer to your MP3 player using a USB cable or a memory card.</p>
|
140 |
-
<h4>Q: How can I download music from YouTube legally and ethically?</h4>
|
141 |
-
<p>A: You can download music from YouTube legally and ethically by following these tips:</p>
|
142 |
-
<ul>
|
143 |
-
<li>Only download music that is yours or that falls under the Creative Commons license.</li>
|
144 |
-
<li>Respect the rights of the artists and record labels who own the music and pay them for their work if possible.</li>
|
145 |
-
<li>Do not distribute or sell the downloaded music without permission.</li>
|
146 |
-
<li>Do not use the downloaded music for commercial or illegal purposes.</li>
|
147 |
-
<li>Credit the source of the music and link back to the original video.</li>
|
148 |
-
</ul>
|
spaces/1phancelerku/anime-remove-background/Enjoy Unlimited Lives and Boosters with Candy Crush Saga APK.md
DELETED
@@ -1,87 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Apk4fun Candy Crush Saga: A Sweet and Fun Puzzle Game</h1>
|
3 |
-
<p>If you are looking for a casual game that can keep you entertained for hours, you should try Apk4fun Candy Crush Saga. This is a popular puzzle game that has millions of fans around the world. In this article, we will tell you what Apk4fun Candy Crush Saga is, why you should play it, and how to download and install it on your Android device.</p>
|
4 |
-
<h2>apk4fun candy crush saga</h2><br /><p><b><b>Download Zip</b> ✔ <a href="https://jinyurl.com/2uNQUQ">https://jinyurl.com/2uNQUQ</a></b></p><br /><br />
|
5 |
-
<h2>What is Apk4fun Candy Crush Saga?</h2>
|
6 |
-
<p>Apk4fun Candy Crush Saga is a free casual game for Android devices that you can download from the official website of Apk4fun. Apk4fun is a platform that offers a variety of games and apps for Android users. You can find games of different genres, such as action, adventure, arcade, puzzle, racing, simulation, sports, and more.</p>
|
7 |
-
<h3>A match-3 puzzle game with colorful candies</h3>
|
8 |
-
<p>Apk4fun Candy Crush Saga is a match-3 puzzle game that involves swapping and matching candies of the same color to clear them from the board. The game has hundreds of levels, each with a different goal and layout. Some levels require you to collect a certain number of candies, some require you to clear jelly or frosting from the board, some require you to bring down ingredients to the bottom, and some require you to score a certain number of points within a time limit or a limited number of moves.</p>
|
9 |
-
<h3>A game with hundreds of levels and challenges</h3>
|
10 |
-
<p>One of the reasons why Apk4fun Candy Crush Saga is so popular is that it has hundreds of levels that offer different challenges and surprises. The game is constantly updated with new episodes and events that add more fun and variety to the gameplay. You can also unlock special candies and boosters that can help you complete difficult levels. Some of the special candies are striped candies, wrapped candies, color bombs, jelly fish, coconut wheels, and more. Some of the boosters are lollipop hammers, free switches, extra moves, extra time, and more.</p>
|
11 |
-
<h2>Why should you play Apk4fun Candy Crush Saga?</h2>
|
12 |
-
<p>There are many reasons why you should play Apk4fun Candy Crush Saga. Here are some of them:</p>
|
13 |
-
<h3>It is easy to play but hard to master</h3>
|
14 |
-
<p>Apk4fun Candy Crush Saga is a game that anyone can play, regardless of their age or skill level. The game has simple controls and rules that are easy to understand. You just need to swipe your finger on the screen to swap and match candies. However, the game also requires strategy and planning to complete the levels efficiently. You need to think ahead and use your moves wisely. You also need to deal with obstacles and blockers that can make the levels harder.</p>
|
15 |
-
<p>apk4fun candy crush saga download free<br />
|
16 |
-
apk4fun candy crush saga latest version<br />
|
17 |
-
apk4fun candy crush saga mod apk<br />
|
18 |
-
apk4fun candy crush saga cheats and tips<br />
|
19 |
-
apk4fun candy crush saga update<br />
|
20 |
-
apk4fun candy crush saga hack<br />
|
21 |
-
apk4fun candy crush saga offline<br />
|
22 |
-
apk4fun candy crush saga for android<br />
|
23 |
-
apk4fun candy crush saga for pc<br />
|
24 |
-
apk4fun candy crush saga online<br />
|
25 |
-
apk4fun candy crush saga game play<br />
|
26 |
-
apk4fun candy crush saga reviews<br />
|
27 |
-
apk4fun candy crush saga levels<br />
|
28 |
-
apk4fun candy crush saga unlimited lives<br />
|
29 |
-
apk4fun candy crush saga boosters<br />
|
30 |
-
apk4fun candy crush saga friends<br />
|
31 |
-
apk4fun candy crush saga jelly<br />
|
32 |
-
apk4fun candy crush saga soda<br />
|
33 |
-
apk4fun candy crush saga wiki<br />
|
34 |
-
apk4fun candy crush saga facebook<br />
|
35 |
-
apk4fun candy crush saga windows 10<br />
|
36 |
-
apk4fun candy crush saga install<br />
|
37 |
-
apk4fun candy crush saga android 11<br />
|
38 |
-
apk4fun candy crush saga new features<br />
|
39 |
-
apk4fun candy crush saga events<br />
|
40 |
-
apk4fun candy crush saga rewards<br />
|
41 |
-
apk4fun candy crush saga challenges<br />
|
42 |
-
apk4fun candy crush saga leaderboard<br />
|
43 |
-
apk4fun candy crush saga gold bars<br />
|
44 |
-
apk4fun candy crush saga episodes<br />
|
45 |
-
apk4fun candy crush saga characters<br />
|
46 |
-
apk4fun candy crush saga costumes<br />
|
47 |
-
apk4fun candy crush saga themes<br />
|
48 |
-
apk4fun candy crush saga wallpapers<br />
|
49 |
-
apk4fun candy crush saga stickers<br />
|
50 |
-
apk4fun candy crush saga emojis<br />
|
51 |
-
apk4fun candy crush saga memes<br />
|
52 |
-
apk4fun candy crush saga trivia<br />
|
53 |
-
apk4fun candy crush saga quiz<br />
|
54 |
-
apk4fun candy crush saga puzzles<br />
|
55 |
-
apk4fun candy crush saga merchandise<br />
|
56 |
-
apk4fun candy crush saga toys<br />
|
57 |
-
apk4fun candy crush saga books<br />
|
58 |
-
apk4fun candy crush saga comics<br />
|
59 |
-
apk4fun candy crush saga movies<br />
|
60 |
-
apk4fun candy crush saga tv show<br />
|
61 |
-
apk4fun candy crush saga music<br />
|
62 |
-
apk4fun candy crush saga songs<br />
|
63 |
-
apk4fun candy crush saga soundtracks</p>
|
64 |
-
<h3>It is fun and addictive</h3>
|
65 |
-
<p>Apk4fun Candy Crush Saga is a game that can keep you hooked for hours. It has a colorful and cute design that appeals to many players, and a satisfying sound effect plays when you match candies and clear them from the board. The game rewards you with stars, points, coins, lives, and other prizes when you complete levels, and a progress map shows how far you have gone in the game.</p>
|
66 |
-
<h3>It has amazing graphics and sound effects</h3>
|
67 |
-
<p>Apk4fun Candy Crush Saga is a game [...]</p> <p>The second step is to search for Candy Crush Saga in the games category. You can use the search bar at the top of the website or browse through the categories and subcategories. You can also filter the games by popularity, rating, date, or alphabet.</p>
|
68 |
-
<h3>Download the latest version of the game</h3>
|
69 |
-
<p>The third step is to download the latest version of Apk4fun Candy Crush Saga. You can find the download link on the game page, along with the game description, screenshots, reviews, and other information. You can also see the file size, version number, and update date of the game. You need to click on the download link and wait for the file to be downloaded to your device.</p>
|
70 |
-
<h3>Install the game on your device and enjoy</h3>
|
71 |
-
<p>The final step is to install Apk4fun Candy Crush Saga on your device and enjoy playing it. You need to locate the downloaded file on your device and tap on it to start the installation process. You may need to enable the installation of apps from unknown sources in your device settings. You need to follow the instructions on the screen and grant the necessary permissions to the game. Once the installation is complete, you can launch the game and start matching candies.</p>
|
72 |
-
<h2>Conclusion</h2>
|
73 |
-
<p>Apk4fun Candy Crush Saga is a sweet and fun puzzle game that you can play on your Android device. It is a match-3 game that involves swapping and matching candies of the same color to clear them from the board. It has hundreds of levels, each with a different goal and challenge. It also has amazing graphics, sound effects, social features, and rewards. You can download and install Apk4fun Candy Crush Saga from the official website of Apk4fun by following the steps we have explained in this article. We hope you enjoy playing this game and have a great time.</p>
|
74 |
-
<h2>FAQs</h2>
|
75 |
-
<p>Here are some frequently asked questions about Apk4fun Candy Crush Saga:</p>
|
76 |
-
<h3>Q: Is Apk4fun Candy Crush Saga safe to download and play?</h3>
|
77 |
-
<p>A: Yes, Apk4fun Candy Crush Saga is safe to download and play. Apk4fun is a trusted platform that offers verified and tested games and apps for Android users. You should always download Apk4fun Candy Crush Saga from the official website of Apk4fun and avoid other sources that may contain viruses or malware.</p>
|
78 |
-
<h3>Q: How can I update Apk4fun Candy Crush Saga?</h3>
|
79 |
-
<p>A: You can update Apk4fun Candy Crush Saga by visiting the official website of Apk4fun and downloading the latest version of the game. You can also check for updates in the game settings or notifications. You should always update Apk4fun Candy Crush Saga to enjoy new features, levels, events, and bug fixes.</p>
|
80 |
-
<h3>Q: How can I sync Apk4fun Candy Crush Saga with Facebook?</h3>
|
81 |
-
<p>A: You can sync Apk4fun Candy Crush Saga with Facebook by connecting your Facebook account in the game settings. This will allow you to save your progress, access your game on multiple devices, see your friends' scores, send and receive lives, gifts, and messages, and join tournaments and leaderboards.</p>
|
82 |
-
<h3>Q: How can I get more lives in Apk4fun Candy Crush Saga?</h3>
|
83 |
-
<p>A: You can get more lives in Apk4fun Candy Crush Saga by waiting for them to refill over time, asking your friends to send you lives, buying them with coins or real money, or using boosters or special candies that can give you extra lives.</p>
|
84 |
-
<h3>Q: How can I contact Apk4fun or Candy Crush Saga support?</h3>
|
85 |
-
<p>A: You can contact Apk4fun by visiting their website and filling out their contact form or sending them an email at <a href="">[email protected]</a>. You can also follow them on Facebook, Twitter, Instagram, or YouTube for updates and news. You can contact Candy Crush Saga support by visiting their website and filling out their support form or sending them an email at <a href="">[email protected]</a>. You can also follow them on Facebook, Twitter, Instagram, or YouTube for updates and news.</p>
|
spaces/AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE/style.css
DELETED
@@ -1,28 +0,0 @@
|
|
1 |
-
body {
|
2 |
-
padding: 2rem;
|
3 |
-
font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
|
4 |
-
}
|
5 |
-
|
6 |
-
h1 {
|
7 |
-
font-size: 16px;
|
8 |
-
margin-top: 0;
|
9 |
-
}
|
10 |
-
|
11 |
-
p {
|
12 |
-
color: rgb(107, 114, 128);
|
13 |
-
font-size: 15px;
|
14 |
-
margin-bottom: 10px;
|
15 |
-
margin-top: 5px;
|
16 |
-
}
|
17 |
-
|
18 |
-
.card {
|
19 |
-
max-width: 620px;
|
20 |
-
margin: 0 auto;
|
21 |
-
padding: 16px;
|
22 |
-
border: 1px solid lightgray;
|
23 |
-
border-radius: 16px;
|
24 |
-
}
|
25 |
-
|
26 |
-
.card p:last-child {
|
27 |
-
margin-bottom: 0;
|
28 |
-
}
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/vocoders/__init__.py
DELETED
@@ -1 +0,0 @@
|
|
1 |
-
from vocoders import hifigan
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/rnn.py
DELETED
@@ -1,261 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
from torch import nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
|
5 |
-
|
6 |
-
class PreNet(nn.Module):
|
7 |
-
def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
|
8 |
-
super().__init__()
|
9 |
-
self.fc1 = nn.Linear(in_dims, fc1_dims)
|
10 |
-
self.fc2 = nn.Linear(fc1_dims, fc2_dims)
|
11 |
-
self.p = dropout
|
12 |
-
|
13 |
-
def forward(self, x):
|
14 |
-
x = self.fc1(x)
|
15 |
-
x = F.relu(x)
|
16 |
-
x = F.dropout(x, self.p, training=self.training)
|
17 |
-
x = self.fc2(x)
|
18 |
-
x = F.relu(x)
|
19 |
-
x = F.dropout(x, self.p, training=self.training)
|
20 |
-
return x
|
21 |
-
|
22 |
-
|
23 |
-
class HighwayNetwork(nn.Module):
|
24 |
-
def __init__(self, size):
|
25 |
-
super().__init__()
|
26 |
-
self.W1 = nn.Linear(size, size)
|
27 |
-
self.W2 = nn.Linear(size, size)
|
28 |
-
self.W1.bias.data.fill_(0.)
|
29 |
-
|
30 |
-
def forward(self, x):
|
31 |
-
x1 = self.W1(x)
|
32 |
-
x2 = self.W2(x)
|
33 |
-
g = torch.sigmoid(x2)
|
34 |
-
y = g * F.relu(x1) + (1. - g) * x
|
35 |
-
return y
|
36 |
-
|
37 |
-
|
38 |
-
class BatchNormConv(nn.Module):
|
39 |
-
def __init__(self, in_channels, out_channels, kernel, relu=True):
|
40 |
-
super().__init__()
|
41 |
-
self.conv = nn.Conv1d(in_channels, out_channels, kernel, stride=1, padding=kernel // 2, bias=False)
|
42 |
-
self.bnorm = nn.BatchNorm1d(out_channels)
|
43 |
-
self.relu = relu
|
44 |
-
|
45 |
-
def forward(self, x):
|
46 |
-
x = self.conv(x)
|
47 |
-
x = F.relu(x) if self.relu is True else x
|
48 |
-
return self.bnorm(x)
|
49 |
-
|
50 |
-
|
51 |
-
class ConvNorm(torch.nn.Module):
|
52 |
-
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
|
53 |
-
padding=None, dilation=1, bias=True, w_init_gain='linear'):
|
54 |
-
super(ConvNorm, self).__init__()
|
55 |
-
if padding is None:
|
56 |
-
assert (kernel_size % 2 == 1)
|
57 |
-
padding = int(dilation * (kernel_size - 1) / 2)
|
58 |
-
|
59 |
-
self.conv = torch.nn.Conv1d(in_channels, out_channels,
|
60 |
-
kernel_size=kernel_size, stride=stride,
|
61 |
-
padding=padding, dilation=dilation,
|
62 |
-
bias=bias)
|
63 |
-
|
64 |
-
torch.nn.init.xavier_uniform_(
|
65 |
-
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
|
66 |
-
|
67 |
-
def forward(self, signal):
|
68 |
-
conv_signal = self.conv(signal)
|
69 |
-
return conv_signal
|
70 |
-
|
71 |
-
|
72 |
-
class CBHG(nn.Module):
|
73 |
-
def __init__(self, K, in_channels, channels, proj_channels, num_highways):
|
74 |
-
super().__init__()
|
75 |
-
|
76 |
-
# List of all rnns to call `flatten_parameters()` on
|
77 |
-
self._to_flatten = []
|
78 |
-
|
79 |
-
self.bank_kernels = [i for i in range(1, K + 1)]
|
80 |
-
self.conv1d_bank = nn.ModuleList()
|
81 |
-
for k in self.bank_kernels:
|
82 |
-
conv = BatchNormConv(in_channels, channels, k)
|
83 |
-
self.conv1d_bank.append(conv)
|
84 |
-
|
85 |
-
self.maxpool = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
|
86 |
-
|
87 |
-
self.conv_project1 = BatchNormConv(len(self.bank_kernels) * channels, proj_channels[0], 3)
|
88 |
-
self.conv_project2 = BatchNormConv(proj_channels[0], proj_channels[1], 3, relu=False)
|
89 |
-
|
90 |
-
# Fix the highway input if necessary
|
91 |
-
if proj_channels[-1] != channels:
|
92 |
-
self.highway_mismatch = True
|
93 |
-
self.pre_highway = nn.Linear(proj_channels[-1], channels, bias=False)
|
94 |
-
else:
|
95 |
-
self.highway_mismatch = False
|
96 |
-
|
97 |
-
self.highways = nn.ModuleList()
|
98 |
-
for i in range(num_highways):
|
99 |
-
hn = HighwayNetwork(channels)
|
100 |
-
self.highways.append(hn)
|
101 |
-
|
102 |
-
self.rnn = nn.GRU(channels, channels, batch_first=True, bidirectional=True)
|
103 |
-
self._to_flatten.append(self.rnn)
|
104 |
-
|
105 |
-
# Avoid fragmentation of RNN parameters and associated warning
|
106 |
-
self._flatten_parameters()
|
107 |
-
|
108 |
-
def forward(self, x):
|
109 |
-
# Although we `_flatten_parameters()` on init, when using DataParallel
|
110 |
-
# the model gets replicated, making it no longer guaranteed that the
|
111 |
-
# weights are contiguous in GPU memory. Hence, we must call it again
|
112 |
-
self._flatten_parameters()
|
113 |
-
|
114 |
-
# Save these for later
|
115 |
-
residual = x
|
116 |
-
seq_len = x.size(-1)
|
117 |
-
conv_bank = []
|
118 |
-
|
119 |
-
# Convolution Bank
|
120 |
-
for conv in self.conv1d_bank:
|
121 |
-
c = conv(x) # Convolution
|
122 |
-
conv_bank.append(c[:, :, :seq_len])
|
123 |
-
|
124 |
-
# Stack along the channel axis
|
125 |
-
conv_bank = torch.cat(conv_bank, dim=1)
|
126 |
-
|
127 |
-
# drop the extra padding column so the length matches the residual
|
128 |
-
x = self.maxpool(conv_bank)[:, :, :seq_len]
|
129 |
-
|
130 |
-
# Conv1d projections
|
131 |
-
x = self.conv_project1(x)
|
132 |
-
x = self.conv_project2(x)
|
133 |
-
|
134 |
-
# Residual Connect
|
135 |
-
x = x + residual
|
136 |
-
|
137 |
-
# Through the highways
|
138 |
-
x = x.transpose(1, 2)
|
139 |
-
if self.highway_mismatch is True:
|
140 |
-
x = self.pre_highway(x)
|
141 |
-
for h in self.highways:
|
142 |
-
x = h(x)
|
143 |
-
|
144 |
-
# And then the RNN
|
145 |
-
x, _ = self.rnn(x)
|
146 |
-
return x
|
147 |
-
|
148 |
-
def _flatten_parameters(self):
|
149 |
-
"""Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used
|
150 |
-
to improve efficiency and avoid PyTorch yelling at us."""
|
151 |
-
[m.flatten_parameters() for m in self._to_flatten]
|
152 |
-
|
153 |
-
|
154 |
-
class TacotronEncoder(nn.Module):
|
155 |
-
def __init__(self, embed_dims, num_chars, cbhg_channels, K, num_highways, dropout):
|
156 |
-
super().__init__()
|
157 |
-
self.embedding = nn.Embedding(num_chars, embed_dims)
|
158 |
-
self.pre_net = PreNet(embed_dims, embed_dims, embed_dims, dropout=dropout)
|
159 |
-
self.cbhg = CBHG(K=K, in_channels=cbhg_channels, channels=cbhg_channels,
|
160 |
-
proj_channels=[cbhg_channels, cbhg_channels],
|
161 |
-
num_highways=num_highways)
|
162 |
-
self.proj_out = nn.Linear(cbhg_channels * 2, cbhg_channels)
|
163 |
-
|
164 |
-
def forward(self, x):
|
165 |
-
x = self.embedding(x)
|
166 |
-
x = self.pre_net(x)
|
167 |
-
x.transpose_(1, 2)
|
168 |
-
x = self.cbhg(x)
|
169 |
-
x = self.proj_out(x)
|
170 |
-
return x
|
171 |
-
|
172 |
-
|
173 |
-
class RNNEncoder(nn.Module):
|
174 |
-
def __init__(self, num_chars, embedding_dim, n_convolutions=3, kernel_size=5):
|
175 |
-
super(RNNEncoder, self).__init__()
|
176 |
-
self.embedding = nn.Embedding(num_chars, embedding_dim, padding_idx=0)
|
177 |
-
convolutions = []
|
178 |
-
for _ in range(n_convolutions):
|
179 |
-
conv_layer = nn.Sequential(
|
180 |
-
ConvNorm(embedding_dim,
|
181 |
-
embedding_dim,
|
182 |
-
kernel_size=kernel_size, stride=1,
|
183 |
-
padding=int((kernel_size - 1) / 2),
|
184 |
-
dilation=1, w_init_gain='relu'),
|
185 |
-
nn.BatchNorm1d(embedding_dim))
|
186 |
-
convolutions.append(conv_layer)
|
187 |
-
self.convolutions = nn.ModuleList(convolutions)
|
188 |
-
|
189 |
-
self.lstm = nn.LSTM(embedding_dim, int(embedding_dim / 2), 1,
|
190 |
-
batch_first=True, bidirectional=True)
|
191 |
-
|
192 |
-
def forward(self, x):
|
193 |
-
input_lengths = (x > 0).sum(-1)
|
194 |
-
input_lengths = input_lengths.cpu().numpy()
|
195 |
-
|
196 |
-
x = self.embedding(x)
|
197 |
-
x = x.transpose(1, 2) # [B, H, T]
|
198 |
-
for conv in self.convolutions:
|
199 |
-
x = F.dropout(F.relu(conv(x)), 0.5, self.training) + x
|
200 |
-
x = x.transpose(1, 2) # [B, T, H]
|
201 |
-
|
202 |
-
# PyTorch tensors are not reversible, hence the conversion
|
203 |
-
x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False)
|
204 |
-
|
205 |
-
self.lstm.flatten_parameters()
|
206 |
-
outputs, _ = self.lstm(x)
|
207 |
-
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
|
208 |
-
|
209 |
-
return outputs
|
210 |
-
|
211 |
-
|
212 |
-
class DecoderRNN(torch.nn.Module):
|
213 |
-
def __init__(self, hidden_size, decoder_rnn_dim, dropout):
|
214 |
-
super(DecoderRNN, self).__init__()
|
215 |
-
self.in_conv1d = nn.Sequential(
|
216 |
-
torch.nn.Conv1d(
|
217 |
-
in_channels=hidden_size,
|
218 |
-
out_channels=hidden_size,
|
219 |
-
kernel_size=9, padding=4,
|
220 |
-
),
|
221 |
-
torch.nn.ReLU(),
|
222 |
-
torch.nn.Conv1d(
|
223 |
-
in_channels=hidden_size,
|
224 |
-
out_channels=hidden_size,
|
225 |
-
kernel_size=9, padding=4,
|
226 |
-
),
|
227 |
-
)
|
228 |
-
self.ln = nn.LayerNorm(hidden_size)
|
229 |
-
if decoder_rnn_dim == 0:
|
230 |
-
decoder_rnn_dim = hidden_size * 2
|
231 |
-
self.rnn = torch.nn.LSTM(
|
232 |
-
input_size=hidden_size,
|
233 |
-
hidden_size=decoder_rnn_dim,
|
234 |
-
num_layers=1,
|
235 |
-
batch_first=True,
|
236 |
-
bidirectional=True,
|
237 |
-
dropout=dropout
|
238 |
-
)
|
239 |
-
self.rnn.flatten_parameters()
|
240 |
-
self.conv1d = torch.nn.Conv1d(
|
241 |
-
in_channels=decoder_rnn_dim * 2,
|
242 |
-
out_channels=hidden_size,
|
243 |
-
kernel_size=3,
|
244 |
-
padding=1,
|
245 |
-
)
|
246 |
-
|
247 |
-
def forward(self, x):
|
248 |
-
input_masks = x.abs().sum(-1).ne(0).data[:, :, None]
|
249 |
-
input_lengths = input_masks.sum([-1, -2])
|
250 |
-
input_lengths = input_lengths.cpu().numpy()
|
251 |
-
|
252 |
-
x = self.in_conv1d(x.transpose(1, 2)).transpose(1, 2)
|
253 |
-
x = self.ln(x)
|
254 |
-
x = nn.utils.rnn.pack_padded_sequence(x, input_lengths, batch_first=True, enforce_sorted=False)
|
255 |
-
self.rnn.flatten_parameters()
|
256 |
-
x, _ = self.rnn(x) # [B, T, C]
|
257 |
-
x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
|
258 |
-
x = x * input_masks
|
259 |
-
pre_mel = self.conv1d(x.transpose(1, 2)).transpose(1, 2) # [B, T, C]
|
260 |
-
pre_mel = pre_mel * input_masks
|
261 |
-
return pre_mel
|
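Below is a hedged usage sketch (not part of the deleted file) showing how the TacotronEncoder above could be instantiated and called; every hyperparameter value is an illustrative assumption, not this project's actual configuration. Note that embed_dims must equal cbhg_channels, because the PreNet output feeds straight into the CBHG.

import torch

# illustrative hyperparameters, not the project's real config
encoder = TacotronEncoder(embed_dims=256, num_chars=100,
                          cbhg_channels=256, K=16,
                          num_highways=4, dropout=0.5)
tokens = torch.randint(1, 100, (2, 50))  # [batch, seq_len] character ids
out = encoder(tokens)                    # -> [2, 50, 256]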
spaces/AIGText/GlyphControl/ldm/modules/midas/midas/midas_net.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
|
2 |
-
This file contains code that is adapted from
|
3 |
-
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
|
4 |
-
"""
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
|
8 |
-
from .base_model import BaseModel
|
9 |
-
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
|
10 |
-
|
11 |
-
|
12 |
-
class MidasNet(BaseModel):
|
13 |
-
"""Network for monocular depth estimation.
|
14 |
-
"""
|
15 |
-
|
16 |
-
def __init__(self, path=None, features=256, non_negative=True):
|
17 |
-
"""Init.
|
18 |
-
|
19 |
-
Args:
|
20 |
-
path (str, optional): Path to saved model. Defaults to None.
|
21 |
-
features (int, optional): Number of features. Defaults to 256.
|
22 |
-
non_negative (bool, optional): clamp the output to be non-negative via a final ReLU. Defaults to True.
|
23 |
-
"""
|
24 |
-
print("Loading weights: ", path)
|
25 |
-
|
26 |
-
super(MidasNet, self).__init__()
|
27 |
-
|
28 |
-
use_pretrained = False if path is None else True
|
29 |
-
|
30 |
-
self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
|
31 |
-
|
32 |
-
self.scratch.refinenet4 = FeatureFusionBlock(features)
|
33 |
-
self.scratch.refinenet3 = FeatureFusionBlock(features)
|
34 |
-
self.scratch.refinenet2 = FeatureFusionBlock(features)
|
35 |
-
self.scratch.refinenet1 = FeatureFusionBlock(features)
|
36 |
-
|
37 |
-
self.scratch.output_conv = nn.Sequential(
|
38 |
-
nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
|
39 |
-
Interpolate(scale_factor=2, mode="bilinear"),
|
40 |
-
nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
|
41 |
-
nn.ReLU(True),
|
42 |
-
nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
|
43 |
-
nn.ReLU(True) if non_negative else nn.Identity(),
|
44 |
-
)
|
45 |
-
|
46 |
-
if path:
|
47 |
-
self.load(path)
|
48 |
-
|
49 |
-
def forward(self, x):
|
50 |
-
"""Forward pass.
|
51 |
-
|
52 |
-
Args:
|
53 |
-
x (tensor): input data (image)
|
54 |
-
|
55 |
-
Returns:
|
56 |
-
tensor: depth
|
57 |
-
"""
|
58 |
-
|
59 |
-
layer_1 = self.pretrained.layer1(x)
|
60 |
-
layer_2 = self.pretrained.layer2(layer_1)
|
61 |
-
layer_3 = self.pretrained.layer3(layer_2)
|
62 |
-
layer_4 = self.pretrained.layer4(layer_3)
|
63 |
-
|
64 |
-
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
65 |
-
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
66 |
-
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
67 |
-
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
68 |
-
|
69 |
-
path_4 = self.scratch.refinenet4(layer_4_rn)
|
70 |
-
path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
|
71 |
-
path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
|
72 |
-
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
73 |
-
|
74 |
-
out = self.scratch.output_conv(path_1)
|
75 |
-
|
76 |
-
return torch.squeeze(out, dim=1)
|
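A hedged inference sketch (not from the repo) to make the expected shapes concrete; it assumes the ResNeXt101-WSL backbone can be fetched via torch.hub, and with path=None the weights stay random, so the output is only shape-correct, not a meaningful depth map.

import torch

model = MidasNet(path=None, features=256, non_negative=True)
model.eval()
img = torch.randn(1, 3, 384, 384)  # one normalized RGB image (size assumed)
with torch.no_grad():
    depth = model(img)             # -> [1, 384, 384] inverse-depth map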
spaces/AIML-TUDA/does-clip-know-my-face/README.md
DELETED
@@ -1,64 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Does Clip Know My Face?
|
3 |
-
emoji: 🧑
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: red
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.18.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc-by-sa-4.0
|
11 |
-
python_version: 3.10.0
|
12 |
-
---
|
13 |
-
|
14 |
-
# Example Images License Information
|
15 |
-
|
16 |
-
### Barbara Schöneberger
|
17 |
-
|
18 |
-
| Image Name | Image Url | Author | License |
|
19 |
-
|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|--------------|
|
20 |
-
| Barbara_Schöneberger_0.jpg | [https://upload.wikimedia.org/wikipedia/commons/1/1d/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_13.jpg](https://upload.wikimedia.org/wikipedia/commons/1/1d/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_13.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
21 |
-
| Barbara_Schöneberger_1.jpg | [https://upload.wikimedia.org/wikipedia/commons/9/9d/Barbara_Sch%C3%B6neberger_%282007%29.jpg](https://upload.wikimedia.org/wikipedia/commons/9/9d/Barbara_Sch%C3%B6neberger_%282007%29.jpg) | Pottschalk | CC-BY-SA-3.0 |
|
22 |
-
| Barbara_Schöneberger_2.jpg | [https://upload.wikimedia.org/wikipedia/commons/f/f0/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_03.jpg](https://upload.wikimedia.org/wikipedia/commons/f/f0/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_03.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
23 |
-
| Barbara_Schöneberger_3.jpg | [https://upload.wikimedia.org/wikipedia/commons/f/fa/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_12.jpg](https://upload.wikimedia.org/wikipedia/commons/f/fa/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_12.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
24 |
-
| Barbara_Schöneberger_4.jpg | [https://upload.wikimedia.org/wikipedia/commons/0/0a/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_01.jpg](https://upload.wikimedia.org/wikipedia/commons/0/0a/Barbara_Sch%C3%B6neberger_-_Deutscher_Radiopreis_Hamburg_2016_01.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
25 |
-
|
26 |
-
### Carolin Kebekus
|
27 |
-
|
28 |
-
| Image Name | Image Url | Author | License |
|
29 |
-
|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|--------------|
|
30 |
-
| Carolin_Kebekus_0.jpg | [https://upload.wikimedia.org/wikipedia/commons/c/ce/Carolin_Kebekus_-_2019102193318_2019-04-12_Radio_Regenbogen_Award_2019_-_Sven_-_1D_X_MK_II_-_0905_-_AK8I0075.jpg](https://upload.wikimedia.org/wikipedia/commons/c/ce/Carolin_Kebekus_-_2019102193318_2019-04-12_Radio_Regenbogen_Award_2019_-_Sven_-_1D_X_MK_II_-_0905_-_AK8I0075.jpg) | Sven Mandel | CC-BY-SA-4.0 |
|
31 |
-
| Carolin_Kebekus_1.jpg | [https://upload.wikimedia.org/wikipedia/commons/4/45/Carolin-Kebekus-Bonn.jpg](https://upload.wikimedia.org/wikipedia/commons/4/45/Carolin-Kebekus-Bonn.jpg) | Superbass | CC-BY-SA-3.0 |
|
32 |
-
| Carolin_Kebekus_2.jpg | [https://upload.wikimedia.org/wikipedia/commons/4/45/Carolin-Kebekus-Bonn.jpg](https://upload.wikimedia.org/wikipedia/commons/4/45/Carolin-Kebekus-Bonn.jpg) | Sven Mandel | CC-BY-SA-4.0 |
|
33 |
-
| Carolin_Kebekus_3.jpg | [https://upload.wikimedia.org/wikipedia/commons/0/02/Carolin_Kebekus-5848.jpg](https://upload.wikimedia.org/wikipedia/commons/0/02/Carolin_Kebekus-5848.jpg) | Harald Krichel | CC-BY-SA-3.0 |
|
34 |
-
| Carolin_Kebekus_4.jpg | [https://upload.wikimedia.org/wikipedia/commons/e/e1/2021-09-16-Carolin_Kebekus_Deutscher_Fernsehpreis_2021_-3757.jpg](https://upload.wikimedia.org/wikipedia/commons/e/e1/2021-09-16-Carolin_Kebekus_Deutscher_Fernsehpreis_2021_-3757.jpg) | Superbass | CC-BY-SA-4.0 |
|
35 |
-
|
36 |
-
### Max Giermann
|
37 |
-
|
38 |
-
| Image Name | Image Url | Author | License |
|
39 |
-
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|--------------|
|
40 |
-
| Max_Giermann_0.jpg | [https://upload.wikimedia.org/wikipedia/commons/4/4b/2018-01-26-DFP_2018-7513.jpg](https://upload.wikimedia.org/wikipedia/commons/4/4b/2018-01-26-DFP_2018-7513.jpg) | Superbass | CC-BY-SA-4.0 |
|
41 |
-
| Max_Giermann_1.jpg | [https://upload.wikimedia.org/wikipedia/commons/f/f6/Deutscher_Fernsehpreis_2012_-_Max_Giermann.jpg](https://upload.wikimedia.org/wikipedia/commons/f/f6/Deutscher_Fernsehpreis_2012_-_Max_Giermann.jpg) | JCS | CC-BY-3.0 |
|
42 |
-
| Max_Giermann_2.jpg | [https://upload.wikimedia.org/wikipedia/commons/1/1c/Hessischer_Filmpreis_2017_-_Max_Giermann_2.JPG](https://upload.wikimedia.org/wikipedia/commons/1/1c/Hessischer_Filmpreis_2017_-_Max_Giermann_2.JPG) | JCS | CC-BY-3.0 |
|
43 |
-
| Max_Giermann_3.jpg | [https://upload.wikimedia.org/wikipedia/commons/1/1d/Max_Giermann_%28extra_3%29_01.jpg](https://upload.wikimedia.org/wikipedia/commons/1/1d/Max_Giermann_%28extra_3%29_01.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
44 |
-
| Max_Giermann_4.jpg | [https://upload.wikimedia.org/wikipedia/commons/8/85/Max_Giermann_%28extra_3%29_03.jpg](https://upload.wikimedia.org/wikipedia/commons/8/85/Max_Giermann_%28extra_3%29_03.jpg) | Frank Schwichtenberg | CC-BY-SA-3.0 |
|
45 |
-
|
46 |
-
### Nicole De Boer
|
47 |
-
|
48 |
-
| Image Name | Image Url | Author | License |
|
49 |
-
|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|
|
50 |
-
| Nicole_De_Boer_0.jpg | [https://upload.wikimedia.org/wikipedia/commons/0/03/Praha%2C_Lhotka%2C_KC_Novodvorsk%C3%A1%2C_CzechTREK_2013_%2827%29.jpg](https://upload.wikimedia.org/wikipedia/commons/0/03/Praha%2C_Lhotka%2C_KC_Novodvorsk%C3%A1%2C_CzechTREK_2013_%2827%29.jpg) | Harold | CC-BY-SA-3.0 |
|
51 |
-
| Nicole_De_Boer_1.jpg | [https://upload.wikimedia.org/wikipedia/commons/d/db/Nicole_DeBoer_at_Toronto_Comicon_1.jpg](https://upload.wikimedia.org/wikipedia/commons/d/db/Nicole_DeBoer_at_Toronto_Comicon_1.jpg) | Tabercil | CC-BY-SA-3.0 |
|
52 |
-
| Nicole_De_Boer_2.jpg | [https://upload.wikimedia.org/wikipedia/commons/4/4b/Nicole_de_Boer_at_Toronto_Comicon_2_%28cropped%29.jpg](https://upload.wikimedia.org/wikipedia/commons/4/4b/Nicole_de_Boer_at_Toronto_Comicon_2_%28cropped%29.jpg) | Tabercil | CC-BY-SA-3.0 |
|
53 |
-
| Nicole_De_Boer_3.jpg | [https://upload.wikimedia.org/wikipedia/commons/b/b9/Nicole_de_boer_LFCC2015.jpg](https://upload.wikimedia.org/wikipedia/commons/b/b9/Nicole_de_boer_LFCC2015.jpg) | Dazzoboy | CC-BY-SA-4.0 |
|
54 |
-
| Nicole_De_Boer_4.jpg | [https://upload.wikimedia.org/wikipedia/commons/9/90/Nicole_de_Boer_at_Toronto_Comicon_2.jpg](https://upload.wikimedia.org/wikipedia/commons/9/90/Nicole_de_Boer_at_Toronto_Comicon_2.jpg) | Tabercil | CC-BY-SA-3.0 |
|
55 |
-
|
56 |
-
### T. J. Thyne
|
57 |
-
|
58 |
-
| Image Name | Image Url | Author | License |
|
59 |
-
|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|--------------|
|
60 |
-
| T._J._Thyne_0.jpg | [https://live.staticflickr.com/7036/6837850246_c09a148d70_o.jpg](https://live.staticflickr.com/7036/6837850246_c09a148d70_o.jpg) | Genevieve | CC-BY-2.0 |
|
61 |
-
| T._J._Thyne_1.jpg | [https://live.staticflickr.com/3273/5705869811_d9ff808383_o.jpg](https://live.staticflickr.com/3273/5705869811_d9ff808383_o.jpg) | Genevieve | CC-BY-2.0 |
|
62 |
-
| T._J._Thyne_2.jpg | [https://upload.wikimedia.org/wikipedia/commons/d/d8/TJThyneFanExpo2017.jpg](https://upload.wikimedia.org/wikipedia/commons/d/d8/TJThyneFanExpo2017.jpg) | Christian Dahl-Lacroix | CC-BY-SA-4.0 |
|
63 |
-
| T._J._Thyne_3.jpg | [https://live.staticflickr.com/7041/6984629777_8a415b72d9_b.jpg](https://live.staticflickr.com/7041/6984629777_8a415b72d9_b.jpg) | Genevieve | CC-BY-2.0 |
|
64 |
-
| T._J._Thyne_4.jpg | [https://live.staticflickr.com/7042/6837821654_d65ab80913_b.jpg](https://live.staticflickr.com/7042/6837821654_d65ab80913_b.jpg) | Genevieve | CC-BY-2.0 |
|
spaces/AIWaves/Debate/src/agents/Memory/base_Memory.py
DELETED
@@ -1,32 +0,0 @@
|
|
1 |
-
from Prompt import *
|
2 |
-
class Memory:
|
3 |
-
def __init__(self,role,name,content) -> None:
|
4 |
-
self.send_role = role
|
5 |
-
self.send_name = name
|
6 |
-
self.content = content
|
7 |
-
|
8 |
-
def get_gpt_message(self,role):
|
9 |
-
return {"role":role,"content":self.content}
|
10 |
-
|
11 |
-
@classmethod
|
12 |
-
def get_chat_history(cls, messages, agent_name=None):
|
13 |
-
"""
|
14 |
-
Splice a list of memories into a single chat-history string
|
15 |
-
input :
|
16 |
-
messages(list) : list of memory(Memory)
|
17 |
-
Return :
|
18 |
-
chat_history(str) : the spliced history as one string
|
19 |
-
"""
|
20 |
-
chat_history = ""
|
21 |
-
for message in messages:
|
22 |
-
name,role,content = message.send_name,message.send_role,message.content
|
23 |
-
if agent_name and agent_name==name:
|
24 |
-
name = "you"
|
25 |
-
chat_history += eval(Single_message)
|
26 |
-
chat_history = eval(Chat_total_message)
|
27 |
-
return chat_history
|
28 |
-
|
29 |
-
def get_query(self):
|
30 |
-
"Return : query(str):last sentence"
|
31 |
-
name,role,content = self.send_name,self.send_role,self.content
|
32 |
-
return eval(Single_message)
|
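A hedged usage sketch; it assumes the Prompt module defines Single_message and Chat_total_message as eval-able f-string templates (for example Single_message = 'f"{name} ({role}): {content}"'), whose actual contents are not shown in this diff.

# hypothetical messages for illustration
m1 = Memory("user", "Alice", "What is your opening statement?")
m2 = Memory("assistant", "Bob", "I argue for the motion.")

# splice both memories into one history string; Bob's own name is
# rendered as "you" because agent_name matches
history = Memory.get_chat_history([m1, m2], agent_name="Bob")

# format a single memory for the OpenAI chat API
print(m2.get_gpt_message("assistant"))
# -> {'role': 'assistant', 'content': 'I argue for the motion.'}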
spaces/ASJMO/freegpt/server/babel.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import subprocess
|
3 |
-
from flask import request, session, jsonify
|
4 |
-
from flask_babel import Babel
|
5 |
-
|
6 |
-
|
7 |
-
def get_languages_from_dir(directory):
|
8 |
-
"""Return a list of directory names in the given directory."""
|
9 |
-
return [name for name in os.listdir(directory)
|
10 |
-
if os.path.isdir(os.path.join(directory, name))]
|
11 |
-
|
12 |
-
|
13 |
-
BABEL_DEFAULT_LOCALE = 'en_US'
|
14 |
-
BABEL_LANGUAGES = get_languages_from_dir('translations')
|
15 |
-
|
16 |
-
|
17 |
-
def create_babel(app):
|
18 |
-
"""Create and initialize a Babel instance with the given Flask app."""
|
19 |
-
babel = Babel(app)
|
20 |
-
app.config['BABEL_DEFAULT_LOCALE'] = BABEL_DEFAULT_LOCALE
|
21 |
-
app.config['BABEL_LANGUAGES'] = BABEL_LANGUAGES
|
22 |
-
|
23 |
-
babel.init_app(app, locale_selector=get_locale)
|
24 |
-
compile_translations()
|
25 |
-
|
26 |
-
|
27 |
-
def get_locale():
|
28 |
-
"""Get the user's locale from the session or the request's accepted languages."""
|
29 |
-
return session.get('language') or request.accept_languages.best_match(BABEL_LANGUAGES)
|
30 |
-
|
31 |
-
|
32 |
-
def get_languages():
|
33 |
-
"""Return a list of available languages in JSON format."""
|
34 |
-
return jsonify(BABEL_LANGUAGES)
|
35 |
-
|
36 |
-
|
37 |
-
def compile_translations():
|
38 |
-
"""Compile the translation files."""
|
39 |
-
result = subprocess.run(
|
40 |
-
['pybabel', 'compile', '-d', 'translations'],
|
41 |
-
stdout=subprocess.PIPE,
|
42 |
-
)
|
43 |
-
|
44 |
-
if result.returncode != 0:
|
45 |
-
raise Exception(
|
46 |
-
f'Compiling translations failed:\n{result.stdout.decode()}')
|
47 |
-
|
48 |
-
print('Translations compiled successfully')
|
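A hedged wiring example, assuming a standard Flask app, a translations/ directory with one subfolder per locale, and pybabel available on PATH:

from flask import Flask

app = Flask(__name__)
app.secret_key = "change-me"  # needed because get_locale reads the session
create_babel(app)             # registers Babel and compiles the .po files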
spaces/Aashir01/Live_Transcription/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Video Subtitles Online
|
3 |
-
emoji: 🐨
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.34.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: afl-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/ops/fused_bias_act.py
DELETED
@@ -1,211 +0,0 @@
|
|
1 |
-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
2 |
-
#
|
3 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
4 |
-
# and proprietary rights in and to this software, related documentation
|
5 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
6 |
-
# distribution of this software and related documentation without an express
|
7 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
8 |
-
|
9 |
-
"""Custom TensorFlow ops for efficient bias and activation."""
|
10 |
-
|
11 |
-
import os
|
12 |
-
import numpy as np
|
13 |
-
import tensorflow as tf
|
14 |
-
from .. import custom_ops
|
15 |
-
from ...util import EasyDict
|
16 |
-
|
17 |
-
def _get_plugin():
|
18 |
-
return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
|
19 |
-
|
20 |
-
#----------------------------------------------------------------------------
|
21 |
-
|
22 |
-
activation_funcs = {
|
23 |
-
'linear': EasyDict(func=lambda x, **_: x, def_alpha=None, def_gain=1.0, cuda_idx=1, ref='y', zero_2nd_grad=True),
|
24 |
-
'relu': EasyDict(func=lambda x, **_: tf.nn.relu(x), def_alpha=None, def_gain=np.sqrt(2), cuda_idx=2, ref='y', zero_2nd_grad=True),
|
25 |
-
'lrelu': EasyDict(func=lambda x, alpha, **_: tf.nn.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', zero_2nd_grad=True),
|
26 |
-
'tanh': EasyDict(func=lambda x, **_: tf.nn.tanh(x), def_alpha=None, def_gain=1.0, cuda_idx=4, ref='y', zero_2nd_grad=False),
|
27 |
-
'sigmoid': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x), def_alpha=None, def_gain=1.0, cuda_idx=5, ref='y', zero_2nd_grad=False),
|
28 |
-
'elu': EasyDict(func=lambda x, **_: tf.nn.elu(x), def_alpha=None, def_gain=1.0, cuda_idx=6, ref='y', zero_2nd_grad=False),
|
29 |
-
'selu': EasyDict(func=lambda x, **_: tf.nn.selu(x), def_alpha=None, def_gain=1.0, cuda_idx=7, ref='y', zero_2nd_grad=False),
|
30 |
-
'softplus': EasyDict(func=lambda x, **_: tf.nn.softplus(x), def_alpha=None, def_gain=1.0, cuda_idx=8, ref='y', zero_2nd_grad=False),
|
31 |
-
'swish': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x) * x, def_alpha=None, def_gain=np.sqrt(2), cuda_idx=9, ref='x', zero_2nd_grad=False),
|
32 |
-
}
|
33 |
-
|
34 |
-
#----------------------------------------------------------------------------
|
35 |
-
|
36 |
-
def fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
|
37 |
-
r"""Fused bias and activation function.
|
38 |
-
|
39 |
-
Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
|
40 |
-
and scales the result by `gain`. Each of the steps is optional. In most cases,
|
41 |
-
the fused op is considerably more efficient than performing the same calculation
|
42 |
-
using standard TensorFlow ops. It supports first and second order gradients,
|
43 |
-
but not third order gradients.
|
44 |
-
|
45 |
-
Args:
|
46 |
-
x: Input activation tensor. Can have any shape, but if `b` is defined, the
|
47 |
-
dimension corresponding to `axis`, as well as the rank, must be known.
|
48 |
-
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
|
49 |
-
as `x`. The shape must be known, and it must match the dimension of `x`
|
50 |
-
corresponding to `axis`.
|
51 |
-
axis: The dimension in `x` corresponding to the elements of `b`.
|
52 |
-
The value of `axis` is ignored if `b` is not specified.
|
53 |
-
act: Name of the activation function to evaluate, or `"linear"` to disable.
|
54 |
-
Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
|
55 |
-
See `activation_funcs` for a full list. `None` is not allowed.
|
56 |
-
alpha: Shape parameter for the activation function, or `None` to use the default.
|
57 |
-
gain: Scaling factor for the output tensor, or `None` to use default.
|
58 |
-
See `activation_funcs` for the default scaling of each activation function.
|
59 |
-
If unsure, consider specifying `1.0`.
|
60 |
-
clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
|
61 |
-
the clamping (default).
|
62 |
-
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
|
63 |
-
|
64 |
-
Returns:
|
65 |
-
Tensor of the same shape and datatype as `x`.
|
66 |
-
"""
|
67 |
-
|
68 |
-
impl_dict = {
|
69 |
-
'ref': _fused_bias_act_ref,
|
70 |
-
'cuda': _fused_bias_act_cuda,
|
71 |
-
}
|
72 |
-
return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain, clamp=clamp)
|
73 |
-
|
74 |
-
#----------------------------------------------------------------------------
|
75 |
-
|
76 |
-
def _fused_bias_act_ref(x, b, axis, act, alpha, gain, clamp):
|
77 |
-
"""Slow reference implementation of `fused_bias_act()` using standard TensorFlow ops."""
|
78 |
-
|
79 |
-
# Validate arguments.
|
80 |
-
x = tf.convert_to_tensor(x)
|
81 |
-
b = tf.convert_to_tensor(b) if b is not None else tf.constant([], dtype=x.dtype)
|
82 |
-
act_spec = activation_funcs[act]
|
83 |
-
assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
|
84 |
-
assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
|
85 |
-
if alpha is None:
|
86 |
-
alpha = act_spec.def_alpha
|
87 |
-
if gain is None:
|
88 |
-
gain = act_spec.def_gain
|
89 |
-
|
90 |
-
# Add bias.
|
91 |
-
if b.shape[0] != 0:
|
92 |
-
x += tf.reshape(b, [-1 if i == axis else 1 for i in range(x.shape.rank)])
|
93 |
-
|
94 |
-
# Evaluate activation function.
|
95 |
-
x = act_spec.func(x, alpha=alpha)
|
96 |
-
|
97 |
-
# Scale by gain.
|
98 |
-
if gain != 1:
|
99 |
-
x *= gain
|
100 |
-
|
101 |
-
# Clamp.
|
102 |
-
if clamp is not None:
|
103 |
-
clamp = np.asarray(clamp, dtype=x.dtype.name)
|
104 |
-
assert clamp.shape == () and clamp >= 0
|
105 |
-
x = tf.clip_by_value(x, -clamp, clamp)
|
106 |
-
return x
|
107 |
-
|
108 |
-
#----------------------------------------------------------------------------
|
109 |
-
|
110 |
-
def _fused_bias_act_cuda(x, b, axis, act, alpha, gain, clamp):
|
111 |
-
"""Fast CUDA implementation of `fused_bias_act()` using custom ops."""
|
112 |
-
|
113 |
-
# Validate arguments.
|
114 |
-
x = tf.convert_to_tensor(x)
|
115 |
-
empty_tensor = tf.constant([], dtype=x.dtype)
|
116 |
-
b = tf.convert_to_tensor(b) if b is not None else empty_tensor
|
117 |
-
act_spec = activation_funcs[act]
|
118 |
-
assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
|
119 |
-
assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
|
120 |
-
if alpha is None:
|
121 |
-
alpha = act_spec.def_alpha
|
122 |
-
if gain is None:
|
123 |
-
gain = act_spec.def_gain
|
124 |
-
|
125 |
-
# Special cases.
|
126 |
-
if act == 'linear' and b.shape[0] == 0 and gain == 1.0 and clamp is None:
|
127 |
-
return x
|
128 |
-
if act_spec.cuda_idx is None:
|
129 |
-
return _fused_bias_act_ref(x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain, clamp=clamp)
|
130 |
-
|
131 |
-
# CUDA op.
|
132 |
-
cuda_op = _get_plugin().fused_bias_act
|
133 |
-
cuda_kwargs = dict(axis=int(axis), act=int(act_spec.cuda_idx), gain=float(gain))
|
134 |
-
if alpha is not None:
|
135 |
-
cuda_kwargs['alpha'] = float(alpha)
|
136 |
-
if clamp is not None:
|
137 |
-
clamp = np.asarray(clamp, dtype=x.dtype.name)
|
138 |
-
assert clamp.shape == () and clamp >= 0
|
139 |
-
cuda_kwargs['clamp'] = float(clamp.astype(np.float32))
|
140 |
-
def ref(tensor, name):
|
141 |
-
return tensor if act_spec.ref == name else empty_tensor
|
142 |
-
|
143 |
-
# Forward pass: y = func(x, b).
|
144 |
-
def func_y(x, b):
|
145 |
-
y = cuda_op(x=x, b=b, xref=empty_tensor, yref=empty_tensor, grad=0, **cuda_kwargs)
|
146 |
-
y.set_shape(x.shape)
|
147 |
-
return y
|
148 |
-
|
149 |
-
# Backward pass: dx, db = grad(dy, x, y)
|
150 |
-
def grad_dx(dy, x, y):
|
151 |
-
dx = cuda_op(x=dy, b=empty_tensor, xref=ref(x,'x'), yref=ref(y,'y'), grad=1, **cuda_kwargs)
|
152 |
-
dx.set_shape(x.shape)
|
153 |
-
return dx
|
154 |
-
def grad_db(dx):
|
155 |
-
if b.shape[0] == 0:
|
156 |
-
return empty_tensor
|
157 |
-
db = dx
|
158 |
-
if axis < x.shape.rank - 1:
|
159 |
-
db = tf.reduce_sum(db, list(range(axis + 1, x.shape.rank)))
|
160 |
-
if axis > 0:
|
161 |
-
db = tf.reduce_sum(db, list(range(axis)))
|
162 |
-
db.set_shape(b.shape)
|
163 |
-
return db
|
164 |
-
|
165 |
-
# Second order gradients: d_dy, d_x = grad2(d_dx, d_db, x, y)
|
166 |
-
def grad2_d_dy(d_dx, d_db, x, y):
|
167 |
-
d_dy = cuda_op(x=d_dx, b=d_db, xref=ref(x,'x'), yref=ref(y,'y'), grad=1, **cuda_kwargs)
|
168 |
-
d_dy.set_shape(x.shape)
|
169 |
-
return d_dy
|
170 |
-
def grad2_d_x(d_dx, d_db, x, y):
|
171 |
-
d_x = cuda_op(x=d_dx, b=d_db, xref=ref(x,'x'), yref=ref(y,'y'), grad=2, **cuda_kwargs)
|
172 |
-
d_x.set_shape(x.shape)
|
173 |
-
return d_x
|
174 |
-
|
175 |
-
# Fast version for piecewise-linear activation funcs.
|
176 |
-
@tf.custom_gradient
|
177 |
-
def func_zero_2nd_grad(x, b):
|
178 |
-
y = func_y(x, b)
|
179 |
-
@tf.custom_gradient
|
180 |
-
def grad(dy):
|
181 |
-
dx = grad_dx(dy, x, y)
|
182 |
-
db = grad_db(dx)
|
183 |
-
def grad2(d_dx, d_db):
|
184 |
-
d_dy = grad2_d_dy(d_dx, d_db, x, y)
|
185 |
-
return d_dy
|
186 |
-
return (dx, db), grad2
|
187 |
-
return y, grad
|
188 |
-
|
189 |
-
# Slow version for general activation funcs.
|
190 |
-
@tf.custom_gradient
|
191 |
-
def func_nonzero_2nd_grad(x, b):
|
192 |
-
y = func_y(x, b)
|
193 |
-
def grad_wrap(dy):
|
194 |
-
@tf.custom_gradient
|
195 |
-
def grad_impl(dy, x):
|
196 |
-
dx = grad_dx(dy, x, y)
|
197 |
-
db = grad_db(dx)
|
198 |
-
def grad2(d_dx, d_db):
|
199 |
-
d_dy = grad2_d_dy(d_dx, d_db, x, y)
|
200 |
-
d_x = grad2_d_x(d_dx, d_db, x, y)
|
201 |
-
return d_dy, d_x
|
202 |
-
return (dx, db), grad2
|
203 |
-
return grad_impl(dy, x)
|
204 |
-
return y, grad_wrap
|
205 |
-
|
206 |
-
# Which version to use?
|
207 |
-
if act_spec.zero_2nd_grad:
|
208 |
-
return func_zero_2nd_grad(x, b)
|
209 |
-
return func_nonzero_2nd_grad(x, b)
|
210 |
-
|
211 |
-
#----------------------------------------------------------------------------
|
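A hedged call sketch (TF1-style graph code, as used by this tflib; impl='ref' sidesteps the custom CUDA plugin, and the shapes are illustrative):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(4, 512).astype(np.float32))  # activations
b = tf.constant(np.zeros(512, np.float32))                   # per-channel bias
# adds b along axis 1, applies leaky ReLU, scales by lrelu's default gain sqrt(2)
y = fused_bias_act(x, b=b, axis=1, act='lrelu', impl='ref')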
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_configs/paths_config.py
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
# Pretrained models paths
|
4 |
-
e4e = './pti/e4e_w+.pt'
|
5 |
-
stylegan2_ada_shhq = './pretrained_models/stylegan_human_v2_1024.pkl'
|
6 |
-
ir_se50 = '' # './model_ir_se50.pth'
|
7 |
-
|
8 |
-
# Dirs for output files
|
9 |
-
checkpoints_dir = './outputs/pti/checkpoints/'
|
10 |
-
embedding_base_dir = './outputs/pti/embeddings'
|
11 |
-
experiments_output_dir = './outputs/pti/'
|
12 |
-
|
13 |
-
# Input info
|
14 |
-
# Input dir, where the images reside
|
15 |
-
input_data_path = 'aligned_image/'
|
16 |
-
# Inversion identifier, used to keeping track of the inversion results. Both the latent code and the generator
|
17 |
-
input_data_id = 'test'
|
18 |
-
|
19 |
-
# Keywords
|
20 |
-
pti_results_keyword = 'PTI'
|
21 |
-
e4e_results_keyword = 'e4e'
|
22 |
-
sg2_results_keyword = 'SG2'
|
23 |
-
sg2_plus_results_keyword = 'SG2_Plus'
|
24 |
-
multi_id_model_type = 'multi_id'
|
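A hedged usage note (the import path below is an assumption based on the file's location): other PTI scripts typically read these constants rather than hard-coding paths.

from pti.pti_configs import paths_config  # import path assumed

ckpt_dir = paths_config.checkpoints_dir   # './outputs/pti/checkpoints/'
run_name = paths_config.input_data_id     # 'test'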
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/misc.py
DELETED
@@ -1,294 +0,0 @@
|
|
1 |
-
# Copyright (c) SenseTime Research. All rights reserved.
|
2 |
-
|
3 |
-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
4 |
-
#
|
5 |
-
# NVIDIA CORPORATION and its licensors retain all intellectual property
|
6 |
-
# and proprietary rights in and to this software, related documentation
|
7 |
-
# and any modifications thereto. Any use, reproduction, disclosure or
|
8 |
-
# distribution of this software and related documentation without an express
|
9 |
-
# license agreement from NVIDIA CORPORATION is strictly prohibited.
|
10 |
-
|
11 |
-
import re
|
12 |
-
import contextlib
|
13 |
-
import numpy as np
|
14 |
-
import torch
|
15 |
-
import warnings
|
16 |
-
import dnnlib
|
17 |
-
|
18 |
-
# ----------------------------------------------------------------------------
|
19 |
-
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
|
20 |
-
# same constant is used multiple times.
|
21 |
-
|
22 |
-
_constant_cache = dict()
|
23 |
-
|
24 |
-
|
25 |
-
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
|
26 |
-
value = np.asarray(value)
|
27 |
-
if shape is not None:
|
28 |
-
shape = tuple(shape)
|
29 |
-
if dtype is None:
|
30 |
-
dtype = torch.get_default_dtype()
|
31 |
-
if device is None:
|
32 |
-
device = torch.device('cpu')
|
33 |
-
if memory_format is None:
|
34 |
-
memory_format = torch.contiguous_format
|
35 |
-
|
36 |
-
key = (value.shape, value.dtype, value.tobytes(),
|
37 |
-
shape, dtype, device, memory_format)
|
38 |
-
tensor = _constant_cache.get(key, None)
|
39 |
-
if tensor is None:
|
40 |
-
tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
|
41 |
-
if shape is not None:
|
42 |
-
tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
|
43 |
-
            tensor = tensor.contiguous(memory_format=memory_format)
        _constant_cache[key] = tensor
    return tensor

# ----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.

try:
    nan_to_num = torch.nan_to_num  # 1.8.0a0
except AttributeError:
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None):  # pylint: disable=redefined-builtin
        assert isinstance(input, torch.Tensor)
        if posinf is None:
            posinf = torch.finfo(input.dtype).max
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        assert nan == 0
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)

# ----------------------------------------------------------------------------
# Symbolic assert.

try:
    symbolic_assert = torch._assert  # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    symbolic_assert = torch.Assert  # 1.7.0

# ----------------------------------------------------------------------------
# Context manager to suppress known warnings in torch.jit.trace().

class suppress_tracer_warnings(warnings.catch_warnings):
    def __enter__(self):
        super().__enter__()
        warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
        return self

# ----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().

def assert_shape(tensor, ref_shape):
    if tensor.ndim != len(ref_shape):
        raise AssertionError(
            f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
        if ref_size is None:
            pass
        elif isinstance(ref_size, torch.Tensor):
            with suppress_tracer_warnings():  # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(size), ref_size),
                                f'Wrong size for dimension {idx}')
        elif isinstance(size, torch.Tensor):
            with suppress_tracer_warnings():  # as_tensor results are registered as constants
                symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)),
                                f'Wrong size for dimension {idx}: expected {ref_size}')
        elif size != ref_size:
            raise AssertionError(
                f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')

# ----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().

def profiled_function(fn):
    def decorator(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    decorator.__name__ = fn.__name__
    return decorator

# ----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.

class InfiniteSampler(torch.utils.data.Sampler):
    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size

    def __iter__(self):
        order = np.arange(len(self.dataset))
        rnd = None
        window = 0
        if self.shuffle:
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))

        idx = 0
        while True:
            i = idx % order.size
            if idx % self.num_replicas == self.rank:
                yield order[i]
                if window >= 2:
                    j = (i - rnd.randint(window)) % order.size
                    order[i], order[j] = order[j], order[i]
            idx += 1

# ----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.

def params_and_buffers(module):
    assert isinstance(module, torch.nn.Module)
    return list(module.parameters()) + list(module.buffers())

def named_params_and_buffers(module):
    assert isinstance(module, torch.nn.Module)
    return list(module.named_parameters()) + list(module.named_buffers())

def copy_params_and_buffers(src_module, dst_module, require_all=False):
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    src_tensors = {name: tensor for name, tensor in named_params_and_buffers(src_module)}
    for name, tensor in named_params_and_buffers(dst_module):
        assert (name in src_tensors) or (not require_all)
        if name in src_tensors:
            tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)

# ----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.

@contextlib.contextmanager
def ddp_sync(module, sync):
    assert isinstance(module, torch.nn.Module)
    if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
        yield
    else:
        with module.no_sync():
            yield

# ----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.

def check_ddp_consistency(module, ignore_regex=None):
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        other = tensor.clone()
        torch.distributed.broadcast(tensor=other, src=0)
        assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname

# ----------------------------------------------------------------------------
# Print summary table of module hierarchy.

def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))

    # Register hooks.
    entries = []
    nesting = [0]

    def pre_hook(_mod, _inputs):
        nesting[0] += 1

    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))

    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]

    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()

    # Identify unique outputs, parameters, and buffers.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}

    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries
                   if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]

    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]

    # Print table.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs

# ----------------------------------------------------------------------------
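
A quick usage sketch for two of the helpers above (not part of the deleted file; the dataset, shapes, and batch size are illustrative placeholders):

import torch

# Hypothetical smoke test for assert_shape() and InfiniteSampler().
dataset = torch.utils.data.TensorDataset(torch.randn(10, 3, 32, 32))
sampler = InfiniteSampler(dataset, rank=0, num_replicas=1, seed=0)
loader = iter(torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=4))

images, = next(loader)
assert_shape(images, [4, 3, 32, 32])      # exact shape match
assert_shape(images, [None, 3, 32, 32])   # None lets the batch dimension vary

The sampler never raises StopIteration, so the training loop decides when to stop pulling batches.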
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/attention.py
DELETED
@@ -1,390 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
from .lora import LoRACompatibleLinear


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (:
            obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (:
            obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support with `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = LoRACompatibleLinear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """
    The approximate form of Gaussian Error Linear Unit (GELU)

    For more details, see section 2: https://arxiv.org/abs/1606.08415
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """
    Norm layer adaptive layer norm zero (adaLN-Zero).
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()

        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """
    GroupNorm layer modified to incorporate timestep embeddings.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
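
For orientation, a standalone block like this consumes (batch, tokens, channels) tensors. A minimal smoke test, assuming the diffusers package is installed so the relative imports above resolve (the sizes are illustrative placeholders):

import torch
from diffusers.models.attention import BasicTransformerBlock

# Hypothetical example: no cross-attention dim is given, so attn2 is None
# and the block is pure self-attention + feed-forward.
block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
hidden_states = torch.randn(2, 64, 320)   # (batch, tokens, channels)
out = block(hidden_states)
print(out.shape)                          # torch.Size([2, 64, 320])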
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/check_config_docstrings.py
DELETED
@@ -1,84 +0,0 @@
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
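
The check hinges entirely on the `_re_checkpoint` pattern; a self-contained sketch of what it extracts (the docstring text here is an invented placeholder):

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = "Instantiating a configuration similar to [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(_re_checkpoint.findall(docstring))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]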
spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py
DELETED
@@ -1,36 +0,0 @@
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w32',
    backbone=dict(
        _delete_=True,
        type='HRNet',
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256)))),
    neck=dict(
        _delete_=True,
        type='HRFPN',
        in_channels=[32, 64, 128, 256],
        out_channels=256))
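
Configs like this are merged with their `_base_` file at load time, and `_delete_=True` replaces the inherited backbone/neck wholesale instead of merging into them. A minimal sketch of inspecting the merged result, assuming mmcv is installed and the path is resolved from the mmdetection repo root:

from mmcv import Config

# Hypothetical inspection of the merged config.
cfg = Config.fromfile('configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py')
print(cfg.model.backbone.type)      # 'HRNet' (the base ResNet-50 backbone was deleted)
print(cfg.model.neck.in_channels)   # [32, 64, 128, 256]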
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/hourglass.py
DELETED
@@ -1,198 +0,0 @@
import torch.nn as nn
from mmcv.cnn import ConvModule

from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import BasicBlock


class HourglassModule(nn.Module):
    """Hourglass Module for HourglassNet backbone.

    Generate module recursively and use BasicBlock as the base unit.

    Args:
        depth (int): Depth of current HourglassModule.
        stage_channels (list[int]): Feature channels of sub-modules in current
            and follow-up HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in current and
            follow-up HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
    """

    def __init__(self,
                 depth,
                 stage_channels,
                 stage_blocks,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(HourglassModule, self).__init__()

        self.depth = depth

        cur_block = stage_blocks[0]
        next_block = stage_blocks[1]

        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]

        self.up1 = ResLayer(
            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)

        self.low1 = ResLayer(
            BasicBlock,
            cur_channel,
            next_channel,
            cur_block,
            stride=2,
            norm_cfg=norm_cfg)

        if self.depth > 1:
            self.low2 = HourglassModule(depth - 1, stage_channels[1:],
                                        stage_blocks[1:])
        else:
            self.low2 = ResLayer(
                BasicBlock,
                next_channel,
                next_channel,
                next_block,
                norm_cfg=norm_cfg)

        self.low3 = ResLayer(
            BasicBlock,
            next_channel,
            cur_channel,
            cur_block,
            norm_cfg=norm_cfg,
            downsample_first=False)

        self.up2 = nn.Upsample(scale_factor=2)

    def forward(self, x):
        """Forward function."""
        up1 = self.up1(x)
        low1 = self.low1(x)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        up2 = self.up2(low3)
        return up1 + up2


@BACKBONES.register_module()
class HourglassNet(nn.Module):
    """HourglassNet backbone.

    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .

    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (list[int]): Feature channel of each sub-module in a
            HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.

    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self,
                 downsample_times=5,
                 num_stacks=2,
                 stage_channels=(256, 256, 384, 384, 384, 512),
                 stage_blocks=(2, 2, 2, 2, 2, 4),
                 feat_channel=256,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(HourglassNet, self).__init__()

        self.num_stacks = num_stacks
        assert self.num_stacks >= 1
        assert len(stage_channels) == len(stage_blocks)
        assert len(stage_channels) > downsample_times

        cur_channel = stage_channels[0]

        self.stem = nn.Sequential(
            ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
            ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))

        self.hourglass_modules = nn.ModuleList([
            HourglassModule(downsample_times, stage_channels, stage_blocks)
            for _ in range(num_stacks)
        ])

        self.inters = ResLayer(
            BasicBlock,
            cur_channel,
            cur_channel,
            num_stacks - 1,
            norm_cfg=norm_cfg)

        self.conv1x1s = nn.ModuleList([
            ConvModule(
                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.out_convs = nn.ModuleList([
            ConvModule(
                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
            for _ in range(num_stacks)
        ])

        self.remap_convs = nn.ModuleList([
            ConvModule(
                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
            for _ in range(num_stacks - 1)
        ])

        self.relu = nn.ReLU(inplace=True)

    def init_weights(self, pretrained=None):
        """Init module weights.

        We do nothing in this function because all modules we used
        (ConvModule, BasicBlock and etc.) have default initialization, and
        currently we don't provide pretrained model of HourglassNet.

        Detector's __init__() will call backbone's init_weights() with
        pretrained as input, so we keep this function.
        """
        # Training Centripetal Model needs to reset parameters for Conv2d
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x):
        """Forward function."""
        inter_feat = self.stem(x)
        out_feats = []

        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]

            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)

            if ind < self.num_stacks - 1:
                inter_feat = self.conv1x1s[ind](
                    inter_feat) + self.remap_convs[ind](out_feat)
                inter_feat = self.inters[ind](self.relu(inter_feat))

        return out_feats
spaces/Andy1621/uniformer_image_segmentation/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
_base_ = './apcnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Artrajz/vits-simple-api/static/css/bootstrap.min.css
DELETED
The diff for this file is too large to render. See raw diff.
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/inspect.py
DELETED
@@ -1,92 +0,0 @@
import logging
from optparse import Values
from typing import Any, Dict, List

from pip._vendor.packaging.markers import default_environment
from pip._vendor.rich import print_json

from pip import __version__
from pip._internal.cli import cmdoptions
from pip._internal.cli.req_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.metadata import BaseDistribution, get_environment
from pip._internal.utils.compat import stdlib_pkgs
from pip._internal.utils.urls import path_to_url

logger = logging.getLogger(__name__)


class InspectCommand(Command):
    """
    Inspect the content of a Python environment and produce a report in JSON format.
    """

    ignore_require_venv = True
    usage = """
      %prog [options]"""

    def add_options(self) -> None:
        self.cmd_opts.add_option(
            "--local",
            action="store_true",
            default=False,
            help=(
                "If in a virtualenv that has global access, do not list "
                "globally-installed packages."
            ),
        )
        self.cmd_opts.add_option(
            "--user",
            dest="user",
            action="store_true",
            default=False,
            help="Only output packages installed in user-site.",
        )
        self.cmd_opts.add_option(cmdoptions.list_path())
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        cmdoptions.check_list_path_option(options)
        dists = get_environment(options.path).iter_installed_distributions(
            local_only=options.local,
            user_only=options.user,
            skip=set(stdlib_pkgs),
        )
        output = {
            "version": "1",
            "pip_version": __version__,
            "installed": [self._dist_to_dict(dist) for dist in dists],
            "environment": default_environment(),
            # TODO tags? scheme?
        }
        print_json(data=output)
        return SUCCESS

    def _dist_to_dict(self, dist: BaseDistribution) -> Dict[str, Any]:
        res: Dict[str, Any] = {
            "metadata": dist.metadata_dict,
            "metadata_location": dist.info_location,
        }
        # direct_url. Note that we don't have download_info (as in the installation
        # report) since it is not recorded in installed metadata.
        direct_url = dist.direct_url
        if direct_url is not None:
            res["direct_url"] = direct_url.to_dict()
        else:
            # Emulate direct_url for legacy editable installs.
            editable_project_location = dist.editable_project_location
            if editable_project_location is not None:
                res["direct_url"] = {
                    "url": path_to_url(editable_project_location),
                    "dir_info": {
                        "editable": True,
                    },
                }
        # installer
        installer = dist.installer
        if dist.installer:
            res["installer"] = installer
        # requested
        if dist.installed_with_dist_info:
            res["requested"] = dist.requested
        return res
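
The JSON report this command prints can be consumed programmatically. A rough sketch (not part of the deleted file; it assumes pip >= 22.2 is available and that the metadata keys follow the lowercase PEP 566 naming):

import json
import subprocess

# Hypothetical consumer of the `pip inspect` report.
raw = subprocess.run(
    ["python", "-m", "pip", "inspect"],
    check=True, capture_output=True, text=True,
).stdout
report = json.loads(raw)
print("pip", report["pip_version"])
for dist in report["installed"]:
    metadata = dist["metadata"]
    print(metadata.get("name"), metadata.get("version"))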
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/base.py
DELETED
@@ -1,20 +0,0 @@
from typing import Callable, List, Optional

from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_set import RequirementSet

InstallRequirementProvider = Callable[
    [str, Optional[InstallRequirement]], InstallRequirement
]


class BaseResolver:
    def resolve(
        self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
    ) -> RequirementSet:
        raise NotImplementedError()

    def get_installation_order(
        self, req_set: RequirementSet
    ) -> List[InstallRequirement]:
        raise NotImplementedError()
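
BaseResolver is only the abstract contract shared by pip's legacy and resolvelib resolvers. An illustrative skeleton of a subclass (hypothetical, not a real pip resolver; it assumes RequirementSet exposes add_named_requirement() and a requirements dict, as in recent pip versions):

class NoOpResolver(BaseResolver):
    """Hypothetical resolver that installs exactly what was asked for."""

    def resolve(self, root_reqs, check_supported_wheels):
        req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
        for req in root_reqs:
            req_set.add_named_requirement(req)  # assumes every req is named
        return req_set

    def get_installation_order(self, req_set):
        # Naive order: install in the order the requirements were collected.
        return list(req_set.requirements.values())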
spaces/Awesimo/jojogan/e4e/models/stylegan2/op/__init__.py
DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/blocks.py
DELETED
@@ -1,111 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.

import fvcore.nn.weight_init as weight_init
from torch import nn

from .batch_norm import FrozenBatchNorm2d, get_norm
from .wrappers import Conv2d


"""
CNN building blocks.
"""


class CNNBlockBase(nn.Module):
    """
    A CNN block is assumed to have input channels, output channels and a stride.
    The input and output of `forward()` method must be NCHW tensors.
    The method can perform arbitrary computation but must match the given
    channels and stride specification.

    Attribute:
        in_channels (int):
        out_channels (int):
        stride (int):
    """

    def __init__(self, in_channels, out_channels, stride):
        """
        The `__init__` method of any subclass should also contain these arguments.

        Args:
            in_channels (int):
            out_channels (int):
            stride (int):
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def freeze(self):
        """
        Make this block not trainable.
        This method sets all parameters to `requires_grad=False`,
        and convert all BatchNorm layers to FrozenBatchNorm

        Returns:
            the block itself
        """
        for p in self.parameters():
            p.requires_grad = False
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
        return self


class DepthwiseSeparableConv2d(nn.Module):
    """
    A kxk depthwise convolution + a 1x1 convolution.

    In :paper:`xception`, norm & activation are applied on the second conv.
    :paper:`mobilenet` uses norm & activation on both convs.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        padding=1,
        dilation=1,
        *,
        norm1=None,
        activation1=None,
        norm2=None,
        activation2=None,
    ):
        """
        Args:
            norm1, norm2 (str or callable): normalization for the two conv layers.
            activation1, activation2 (callable(Tensor) -> Tensor): activation
                function for the two conv layers.
        """
        super().__init__()
        self.depthwise = Conv2d(
            in_channels,
            in_channels,
            kernel_size=kernel_size,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=not norm1,
            norm=get_norm(norm1, in_channels),
            activation=activation1,
        )
        self.pointwise = Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            bias=not norm2,
            norm=get_norm(norm2, out_channels),
            activation=activation2,
        )

        # default initialization
        weight_init.c2_msra_fill(self.depthwise)
        weight_init.c2_msra_fill(self.pointwise)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
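
A short sketch of the factorization in use (illustrative channel counts, assuming detectron2 is installed): the depthwise 3x3 + pointwise 1x1 pair stands in for a dense 3x3 convolution.

import torch
import torch.nn.functional as F
from detectron2.layers.blocks import DepthwiseSeparableConv2d

# Hypothetical usage: 32 -> 64 channels, with BN + ReLU on the pointwise conv
# (the :paper:`xception` placement described in the docstring above).
conv = DepthwiseSeparableConv2d(
    32, 64, kernel_size=3, padding=1,
    norm2="BN", activation2=F.relu_,
)
x = torch.randn(2, 32, 56, 56)
print(conv(x).shape)  # torch.Size([2, 64, 56, 56])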
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_export_caffe2.py
DELETED
@@ -1,52 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# -*- coding: utf-8 -*-

import copy
import os
import tempfile
import unittest
import torch

from detectron2 import model_zoo
from detectron2.export import Caffe2Model, Caffe2Tracer
from detectron2.utils.logger import setup_logger
from detectron2.utils.testing import get_sample_coco_image


# TODO: this test requires manifold access, see: T88318502
# Running it on CircleCI causes crash, not sure why.
@unittest.skipIf(os.environ.get("CIRCLECI"), "Caffe2 tests crash on CircleCI.")
class TestCaffe2Export(unittest.TestCase):
    def setUp(self):
        setup_logger()

    def _test_model(self, config_path, device="cpu"):
        cfg = model_zoo.get_config(config_path)
        cfg.MODEL.DEVICE = device
        model = model_zoo.get(config_path, trained=True, device=device)

        inputs = [{"image": get_sample_coco_image()}]
        tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs))

        with tempfile.TemporaryDirectory(prefix="detectron2_unittest") as d:
            if not os.environ.get("CI"):
                # This requires onnx, which is not yet available on public CI
                c2_model = tracer.export_caffe2()
                c2_model.save_protobuf(d)
                c2_model.save_graph(os.path.join(d, "test.svg"), inputs=copy.deepcopy(inputs))

                c2_model = Caffe2Model.load_protobuf(d)
                c2_model(inputs)[0]["instances"]

            ts_model = tracer.export_torchscript()
            ts_model.save(os.path.join(d, "model.ts"))

    def testMaskRCNN(self):
        self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")

    @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
    def testMaskRCNNGPU(self):
        self._test_model("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", device="cuda")

    def testRetinaNet(self):
        self._test_model("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
spaces/BalaBhaskarudu/Balu/app.py
DELETED
@@ -1,34 +0,0 @@
import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory

# Read the key from the environment by name; ChatOpenAI picks up
# OPENAI_API_KEY automatically. (The original passed a literal key string
# as the variable *name*, which always returns None and leaks the secret.)
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

template = """You are a sports-loving high school student with a keen interest in multiple sports, from soccer and basketball to tennis and swimming. You closely follow sports events, stats, and news, making you the go-to person for all sports-related discussions and predictions.
{chat_history}
User: {user_message}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"], template=template
)

memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo"),
    prompt=prompt,
    verbose=True,
    memory=memory,
)


def get_text_response(user_message, history):
    response = llm_chain.predict(user_message=user_message)
    return response


demo = gr.ChatInterface(get_text_response)

if __name__ == "__main__":
    demo.launch()  # To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`.
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from torch import nn
|
5 |
-
|
6 |
-
from . import layers_537238KB as layers
|
7 |
-
|
8 |
-
|
9 |
-
class BaseASPPNet(nn.Module):
|
10 |
-
def __init__(self, nin, ch, dilations=(4, 8, 16)):
|
11 |
-
super(BaseASPPNet, self).__init__()
|
12 |
-
self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
|
13 |
-
        self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
        self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
        self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)

        self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)

        self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
        self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
        self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
        self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)

    def __call__(self, x):
        h, e1 = self.enc1(x)
        h, e2 = self.enc2(h)
        h, e3 = self.enc3(h)
        h, e4 = self.enc4(h)

        h = self.aspp(h)

        h = self.dec4(h, e4)
        h = self.dec3(h, e3)
        h = self.dec2(h, e2)
        h = self.dec1(h, e1)

        return h


class CascadedASPPNet(nn.Module):
    def __init__(self, n_fft):
        super(CascadedASPPNet, self).__init__()
        self.stg1_low_band_net = BaseASPPNet(2, 64)
        self.stg1_high_band_net = BaseASPPNet(2, 64)

        self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
        self.stg2_full_band_net = BaseASPPNet(32, 64)

        self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
        self.stg3_full_band_net = BaseASPPNet(64, 128)

        self.out = nn.Conv2d(128, 2, 1, bias=False)
        self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
        self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)

        self.max_bin = n_fft // 2
        self.output_bin = n_fft // 2 + 1

        self.offset = 128

    def forward(self, x, aggressiveness=None):
        mix = x.detach()
        x = x.clone()

        x = x[:, :, : self.max_bin]

        bandw = x.size()[2] // 2
        aux1 = torch.cat(
            [
                self.stg1_low_band_net(x[:, :, :bandw]),
                self.stg1_high_band_net(x[:, :, bandw:]),
            ],
            dim=2,
        )

        h = torch.cat([x, aux1], dim=1)
        aux2 = self.stg2_full_band_net(self.stg2_bridge(h))

        h = torch.cat([x, aux1, aux2], dim=1)
        h = self.stg3_full_band_net(self.stg3_bridge(h))

        mask = torch.sigmoid(self.out(h))
        mask = F.pad(
            input=mask,
            pad=(0, 0, 0, self.output_bin - mask.size()[2]),
            mode="replicate",
        )

        if self.training:
            aux1 = torch.sigmoid(self.aux1_out(aux1))
            aux1 = F.pad(
                input=aux1,
                pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
                mode="replicate",
            )
            aux2 = torch.sigmoid(self.aux2_out(aux2))
            aux2 = F.pad(
                input=aux2,
                pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
                mode="replicate",
            )
            return mask * mix, aux1 * mix, aux2 * mix
        else:
            if aggressiveness:
                mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
                    mask[:, :, : aggressiveness["split_bin"]],
                    1 + aggressiveness["value"] / 3,
                )
                mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
                    mask[:, :, aggressiveness["split_bin"] :],
                    1 + aggressiveness["value"],
                )

            return mask * mix

    def predict(self, x_mag, aggressiveness=None):
        h = self.forward(x_mag, aggressiveness)

        if self.offset > 0:
            h = h[:, :, :, self.offset : -self.offset]
            assert h.size()[3] > 0

        return h
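For context, a minimal inference sketch of how a cascaded mask net like this is typically driven. The module name `nets` and the spectrogram shapes are assumptions for illustration, not taken from this repository:

import torch

import nets  # hypothetical name for the module deleted above

model = nets.CascadedASPPNet(n_fft=1024)
model.eval()

# One magnitude spectrogram: (batch, channels=2, freq_bins, time_frames).
# forward() keeps only the first n_fft // 2 = 512 frequency bins.
x_mag = torch.rand(1, 2, 1024 // 2 + 1, 512)

with torch.no_grad():
    # predict() crops `offset` frames from each side, so the input must be
    # wider than 2 * offset = 256 frames; aggressiveness sharpens the mask.
    masked = model.predict(x_mag, aggressiveness={"value": 0.1, "split_bin": 85})

print(masked.shape)  # masked spectrogram, 256 frames after cropping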
spaces/BenjaminB/pyscript-demo/style.css
DELETED
@@ -1,28 +0,0 @@
body {
  padding: 2rem;
  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
  font-size: 16px;
  margin-top: 0;
}

p {
  color: rgb(107, 114, 128);
  font-size: 15px;
  margin-bottom: 10px;
  margin-top: 5px;
}

.card {
  max-width: 620px;
  margin: 0 auto;
  padding: 16px;
  border: 1px solid lightgray;
  border-radius: 16px;
}

.card p:last-child {
  margin-bottom: 0;
}
spaces/Benson/text-generation/Examples/Chessclub.com Download.md
DELETED
@@ -1,74 +0,0 @@
<h1>Learn Chess the Right Way PDF Free Download</h1>
<p>Do you want to learn chess or improve your chess skills? Are you looking for a reliable and effective way to master the game? If so, you have come to the right place. In this article, I will show you how you can learn chess the right way with a series of books by Susan Polgar, a former world champion and award-winning coach. I will also tell you how you can download these books in PDF format for free. But first, let me tell you why chess is a great game for everyone.</p>
<h2>Why Chess Is a Great Game for Everyone</h2>
<p>Chess is one of the oldest and most popular games in the world. It is played by millions of people of all ages and backgrounds. Chess is not only fun and challenging but also good for your brain and your life. Here are some of the benefits of chess:</p>
<h2>chessclub.com download</h2><br /><p><b><b>Download File</b> ……… <a href="https://bltlly.com/2v6MzX">https://bltlly.com/2v6MzX</a></b></p><br /><br />
<ul>
<li>Chess improves your memory, concentration, logic, creativity, problem-solving, and decision-making.</li>
<li>Chess teaches you how to plan ahead, think critically, analyze situations, and learn from your mistakes.</li>
<li>Chess boosts your self-confidence, self-discipline, self-esteem, and sportsmanship.</li>
<li>Chess fosters your social skills, communication skills, and cultural awareness.</li>
<li>Chess reduces stress, anxiety, depression, and boredom.</li>
</ul>
<p>As you can see, chess is more than a game. It is a powerful tool for personal development and enrichment. So how can you get started with chess? The good news is that chess is easy to learn and accessible to everyone. All you need is a chessboard and pieces, which you can buy online or at any toy store. You can also play chess online or on your phone or computer with various apps and websites. You can also join a chess club or community in your area or online and meet other chess enthusiasts.</p>

<h2>What You Need to Know Before Playing Chess</h2>
<p>Before diving into Learn Chess the Right Way or any other chess resource, you need to know a few basic things about chess. Chess is a game played by two players on a square board with 64 squares of alternating colors (white and black). Each player has 16 pieces of one color (white or black) consisting of a king, a queen, two rooks, two bishops, two knights, and eight pawns. The pieces have different shapes and values and can move in different ways according to certain rules. The object of the game is to checkmate the opponent's king (putting it in a position where it cannot escape capture), to force the opponent to resign (give up), or to draw (agree to end the game in a tie).</p>
<p>To play chess properly, you need to know how to set up the board and pieces correctly (the white square should be in the right-hand corner and the white queen should stand on a white square), how to move each piece (the king moves one square in any direction; the queen moves any number of squares in any direction; the rook moves any number of squares horizontally or vertically; the bishop moves any number of squares diagonally; the knight moves in an L shape, two squares horizontally or vertically followed by one square; the pawn moves one square forward, or two on its first move, and captures diagonally), how to capture and exchange pieces (taking the opponent's piece and replacing it with your own), how to checkmate (putting the opponent's king in inescapable danger), how to castle (moving the king and rook together for safety and mobility), how to capture en passant (taking a pawn that moved two squares as if it had moved one), how to promote a pawn (replacing it with a queen, rook, bishop, or knight when it reaches the end of the board), and how to write down your moves using algebraic notation (using letters and numbers to indicate the squares and pieces involved).</p>

<h2>How to Improve Your Chess Skills with Puzzles and Exercises</h2>
<p>Once you know the rules of chess, you may wonder how to improve your skills and become a better player. One of the best ways to do that is to practice puzzles and exercises. Puzzles and exercises are chess problems that test your ability to find the best move or sequence of moves in a given position. They can help you improve your calculation, visualization, tactics, strategy, endgame play, and overall chess understanding. Here are some of the types of puzzles and exercises you can practice:</p>
<ul>
<li>Tactics: These are puzzles that involve finding a way to gain an advantage or win material or the game by using tricks such as forks, pins, skewers, double attacks, discovered attacks, checks, captures, etc.</li>
<li>Strategy: These are puzzles that involve finding a way to improve your position or create a plan by using principles such as development, control of the center, space, pawn structure, king safety, etc.</li>
<li>Endgame: These are puzzles that involve finding a way to win or draw a game when few pieces are left on the board by using techniques such as opposition, triangulation, zugzwang, stalemate, etc.</li>
</ul>
<p>You can find puzzles and exercises in books, magazines, websites, apps, or online platforms. Some of them are sorted by level, topic, or difficulty. Some of them have hints or solutions. Some of them are timed or rated. You can choose the ones that suit your preferences and goals. The key is to practice regularly and consistently. Try to solve at least one puzzle or exercise every day. You will be surprised by how much you can improve your chess skills with puzzles and exercises.</p>
<h2>Learn Chess the Right Way by Susan Polgar</h2>

<p>Learn Chess the Right Way covers all aspects of chess in a systematic and progressive way. Each book contains 500 puzzles and exercises carefully selected and arranged by Susan Polgar. The books are designed to help you develop your skills step by step, from simple to complex concepts. They are also fun and engaging, with colorful illustrations and clear explanations. Here is what each book covers:</p>
<ul>
<li>Book 1: Must-Know Checkmates: This book teaches you how to checkmate your opponent in various situations using different pieces and patterns.</li>
<li>Book 2: Winning Material: This book teaches you how to win material from your opponent using tactics such as forks, pins, skewers, double attacks, etc.</li>
<li>Book 3: Foolproof Endgames: This book teaches you how to win or draw endgames using techniques such as opposition, triangulation, zugzwang, stalemate, etc.</li>
<li>Book 4: Sacrifice to Win: This book teaches you how to sacrifice material for an advantage or a win using tactics such as deflection, decoy, clearance, interference, etc.</li>
<li>Book 5: Essential Endgames: This book teaches you how to play endgames with different pieces and pawns using principles such as activity, coordination, king safety, etc.</li>
</ul>
<p>If you want to learn chess the right way with puzzles and exercises, you should definitely check out Learn Chess the Right Way by Susan Polgar. You can buy these books online or at any bookstore. You can also download them in PDF format for free. Here is how.</p>
<p></p>
<h2>How to Download Learn Chess the Right Way PDF for Free</h2>
<p>If you want to download Learn Chess the Right Way PDF for free, you have two options. One is to use a file-sharing website that hosts the PDF files of the books. The other is to use a torrent website that lets you download the files over a peer-to-peer network. Here are the steps to download Learn Chess the Right Way PDF for free using either option:</p>
<ol>

<li>Look for a website that has the PDF files of the books. You can check the reviews, ratings, comments, or previews of the files to make sure they are legitimate and complete.</li>
<li>Click the link or button that says "Download" or "Get" or something similar. You may have to register, log in, or complete a survey or captcha before downloading the files.</li>
<li>Save the files to your device or cloud storage. You may need a PDF reader or app to open and view the files.</li>
</ol>
<p>Alternatively, you can use a torrent website to download Learn Chess the Right Way PDF for free. Here are the steps:</p>
<ol>
<li>Go to a search engine like Google or Bing and type "Learn Chess the Right Way torrent PDF" or something similar.</li>
<li>Look for a torrent website that has the torrent files of the books. You can check the reviews, ratings, comments, or seeds and leeches of the files to make sure they are legitimate and complete.</li>
<li>Click the link or button that says "Download" or "Magnet" or something similar. You may have to register, log in, or complete a survey or captcha before downloading the files.</li>
<li>Save the torrent files to your device or cloud storage. You will need a torrent client like BitTorrent or uTorrent to open and download the files.</li>
</ol>

<ul>
<li>Buy the books online or at any bookstore. They are not very expensive and are worth every penny. You will get the best quality and format of the books and support the author and publisher.</li>
<li>Borrow the books from a library or a friend. You can check whether your local library or a friend has the books and borrow them for a limited time. You can also return the favor by lending them your books or recommending them to others.</li>
<li>Subscribe to an online platform or service that offers the books. You can check whether an online platform or service has the books in its catalog and subscribe to it for a fee or a trial. You can access the books anytime and anywhere and will also have access to other chess resources.</li>
</ul>
<p>These are some of the alternatives to downloading Learn Chess the Right Way PDF for free. They are legal, ethical, and beneficial for you and the chess community. I hope you choose one of them and enjoy learning chess the right way with Susan Polgar.</p>
<h2>Conclusion</h2>
<p>In conclusion, chess is a great game for everyone that can improve your brain and your life. To learn chess or improve your chess skills, you need to know the basic rules of chess and practice puzzles and exercises regularly. One of the best sources of puzzles and exercises is Learn Chess the Right Way by Susan Polgar, a series of five books that teach chess from beginner to advanced level using puzzles and exercises. You can buy, borrow, or subscribe to these books online or at any bookstore or library. You should not download these books in PDF format for free, as it is illegal, unethical, and harmful. I hope this article has helped you learn more about chess and Learn Chess the Right Way by Susan Polgar. If you have any questions or comments, please feel free to leave them below. Thanks for reading, and happy chess learning!</p>
<h3>Frequently Asked Questions</h3>

<ol>
<li>Q: How long does it take to learn chess?<br>A: It depends on your level, goals, motivation, and practice. You can learn the basic rules of chess in a few hours or days, but it takes years or even a lifetime to master the game.</li>
<li>Q: How can I find a chess coach or mentor?<br>A: You can look for a chess coach or mentor in your area or online. You can ask your friends, family, or chess club for recommendations. You can also search online for websites, platforms, or apps that offer chess coaching or mentoring services.</li>
<li>Q: How can I measure my chess progress?<br>A: You can measure your chess progress by playing against other players or computers and analyzing your results. You can also take exams, tests, or assessments that evaluate your skills and knowledge. You can also use ratings or rankings that compare your performance with other players.</li>
<li>Q: What are some other good books for learning chess?<br>A: There are many good books for learning chess at different levels and on different topics. Some of them are Chess Fundamentals by Jose Capablanca, Logical Chess Move by Move by Irving Chernev, The Amateur's Mind by Jeremy Silman, My System by Aron Nimzowitsch, Modern Chess Strategy by Ludek Pachman, The Art of Attack in Chess by Vladimir Vukovic, Endgame Strategy by Mikhail Shereshevsky, etc.</li>
<li>Q: Where can I play chess online?<br>A: There are many websites, platforms, or apps that let you play chess online against other players or computers. Some of them are Chess.com, Lichess.org, Chess24.com, Chessbase.com, Chesskid.com, etc.</li>
</ol></p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar Carretes De Instagram De Alta Calidad.md
DELETED
@@ -1,69 +0,0 @@
<br />
<h1>How to Download Instagram Photos Online: A Complete Guide</h1>
<p>Instagram is one of the most popular social media platforms in the world, with over one billion monthly active users. It lets you share your photos and videos with your followers and discover new content from other users. However, sometimes you may want to download Instagram photos online for various reasons, such as saving them for offline viewing, creating backups, or editing them on your computer.</p>
<h2>Introduction</h2>
<p>In this article, we will show you how to download Instagram photos online using different methods. We will also explain why you might want to download Instagram photos online and what the benefits of using an online downloader are. By the end of this article, you will be able to download any Instagram photo you want in a matter of seconds.</p>
<h2>download high-quality instagram reels</h2><br /><p><b><b>Download File</b> ✺ <a href="https://bltlly.com/2v6K5T">https://bltlly.com/2v6K5T</a></b></p><br /><br />
<h2>Why Download Instagram Photos Online?</h2>
<p>There are many reasons why you might want to download Instagram photos online. Some of them are:</p>
<ul>
<li>You want to save your favorite photos for offline viewing or sharing with others.</li>
<li>You want to create backups of your photos in case you lose access to your account or device.</li>
<li>You want to edit your photos on your computer using advanced tools or software.</li>
<li>You want to repost or reuse your photos on other platforms or websites.</li>
<li>You want to download photos from other users you admire or follow.</li>
</ul>
<h2>What Are the Benefits of Using an Online Downloader?</h2>
<p>Using an online downloader is one of the easiest and fastest ways to download Instagram photos online. Some of the benefits of using an online downloader are:</p>
<ul>
<li>You don't need to install any app or software on your device.</li>
<li>You can access it from any browser or device.</li>
<li>You can download photos in high quality and original resolution.</li>
<li>You can download multiple photos at once.</li>

</ul>
<h2>How to Download Instagram Photos Online Using Different Methods</h2>
<p>There are many online tools that let you download Instagram photos online. Here are some of the best ones we recommend:</p>
<h3>Method 1: Using Inflact Photo Downloader</h3>
<p>Inflact Photo Downloader is a free and easy-to-use service that lets you save Instagram photos to any device. Here is how to use it:</p>
<h4>Step 1: Copy the Instagram Photo URL</h4>
<p>Open the Instagram app on your phone or go to the Instagram.com website on your PC and log in to your account. Find the photo you want to download and click the three-dot icon above the post. Then select the Copy Link option.</p>
<p></p>
<h4>Step 2: Paste the URL into Inflact Photo Downloader</h4>
<p>Go back to the Inflact Photo Downloader page and paste the URL into the field next to the Download button. Then click the Download button.</p>
<h4>Step 3: Download the Photo in High Quality</h4>
<p>After clicking the Download button, you will see a preview of the photo and a Download Photo button below it. Click the Download Photo button and save the photo to your device.</p>
<h3>Method 2: Using iGram Video and Photo Downloader</h3>
<p>iGram Video and Photo Downloader is another free and simple service that lets you download Instagram videos and photos online. Here is how to use it:</p>
<h4>Step 1: Copy the Instagram Video or Photo URL</h4>
<p>Open the Instagram app on your phone or go to the Instagram.com website on your PC and log in to your account. Find the video or photo you want to download and click the three-dot icon above the post. Then select the Copy Link option.</p>
<h4>Step 2: Paste the URL into iGram Video and Photo Downloader</h4>
<p>Go back to the iGram Video and Photo Downloader page and paste the URL into the field next to the Download button. Then click the Download button.</p>

<p>After clicking the Download button, you will see a list of the quality and format options available for the video or photo. Choose the one that suits your needs and click the Download button next to it. Then save the video or photo to your device.</p>
<h3>Method 3: Using SaveInsta Video, Photo, Reels, Story, and IGTV Downloader</h3>
<p>SaveInsta Downloader is a versatile and powerful service that lets you download any type of Instagram content online, including videos, photos, reels, stories, and IGTVs. Here is how to use it:</p>
<h4>Step 1: Copy the Instagram Content URL</h4>
<p>Open the Instagram app on your phone or go to the Instagram.com website on your PC and log in to your account. Find the content you want to download and click the three-dot icon above the post. Then select the Copy Link option.</p>
<h4>Step 2: Paste the URL into SaveInsta Downloader</h4>
<p>Go back to the SaveInsta Downloader page and paste the URL into the field next to the Download button. Then click the Download button.</p>
<h4>Step 3: Select the Content Type and Download It</h4>
<p>After clicking the Download button, you will see a list of the content types available for the URL. Choose the one that matches your content and click the Download button next to it. Then save the content to your device.</p>
<h2>Conclusion</h2>
<p>In this article, we have shown you how to download Instagram photos online using different methods. We have also explained why you might want to download Instagram photos online and what the benefits of using an online downloader are. We hope this article has been helpful and informative for you.</p>
<p>If you have any questions or comments, feel free to leave a comment below. We would love to hear from you.</p>
<p>Also, if you liked this article, please share it with your friends and family who may find it useful. Thanks for reading!</p>
<h3>Frequently Asked Questions</h3>
<ol>

<p>Yes, you can download Instagram photos online without an account as long as they come from public accounts. However, if you want to download photos from private accounts, you will need to log in with your Instagram credentials.</p>
<li><b>Can I download Instagram photos online in bulk?</b></li>
<p>Yes, some online downloaders let you download Instagram photos online in bulk by entering multiple URLs at once. For example, Inflact Photo Downloader lets you download up to 10 photos at a time.</p>
<li><b>Can I download Instagram photos online in the original resolution?</b></li>
<p>Yes, most online downloaders let you download Instagram photos online in the original resolution and quality. However, some may compress or resize the photos depending on their server capacity or bandwidth limitations.</p>
<li><b>Can I download Instagram photos online from stories or reels?</b></li>
<p>Yes, some online downloaders let you download Instagram photos online from stories or reels as well as from regular posts. For example, SaveInsta Downloader lets you download any type of Instagram content online.</p>
<li><b>Can I download Instagram photos legally?</b></li>
<p>Yes, you can download Instagram photos online legally as long as you respect the intellectual property rights of the original creators and do not use the photos for commercial purposes without their permission. You should also give proper credit and attribution to the source when reposting or sharing the photos online.</p> 64aa2da5cf<br />
<br />
<br />
spaces/Benson/text-generation/Examples/Descargar El Juego Completo De La Saga De Verano 2022.md
DELETED
@@ -1,140 +0,0 @@
<br />
<br>
<table>
<tr>
<td>
<h1>Summertime Saga 2022 Full Game Download: A Beginner's Guide</h1>
<p>If you are looking for a fun and exciting game that combines adventure, romance, comedy, and adult content, then you should definitely check out Summertime Saga. This is a point-and-click graphic adventure game inspired by classics like Leisure Suit Larry and Monkey Island, but with a modern twist and plenty of spicy scenes.</p>
<p>In this guide, we will show you how to download Summertime Saga for PC, how to play it, how to unlock new content, how to update it, and answer some of the most frequently asked questions about this game. So, without further ado, let's get started!</p>
<h2>download the summertime saga 2022 full game</h2><br /><p><b><b>Download</b> >> <a href="https://bltlly.com/2v6KMa">https://bltlly.com/2v6KMa</a></b></p><br /><br />
<h2>What Is Summertime Saga?</h2>
<p>Summertime Saga is a game developed by DarkCookie and his team. It is set in a small suburban town where you play as a young man who is trying to cope with the sudden death of his father. Along the way, you will meet many interesting characters, explore different locations, complete various quests, and have some naughty fun.</p>
<p>The game has many features that make it stand out from other games in this genre. Some of them are:</p>
<ul>
<li>A huge open world with over 70 locations to visit</li>
<li>More than 65 storylines and quests to complete</li>
<li>More than 3000 images and animations to enjoy</li>
<li>Lots of mini-games and activities to play</li>
<li>A dating system with multiple romance options</li>
<li>A character customization system with different outfits and accessories</li>
<li>A stats system with skills, money, inventory, and reputation</li>
<li>A dark mode option for night-time play</li>
<li>A save and load system with multiple slots and cloud support</li>
<li>A regular update schedule with new content and bug fixes</li>
</ul>

<h2>How to Download Summertime Saga for PC?</h2>
<p>Downloading Summertime Saga for PC is very easy and straightforward. All you need to do is follow these steps:</p>
<ol>
<li>Go to the official Summertime Saga website: <a href="">https://summertimesaga.com/</a></li>
<li>Click the "Download" button in the top right corner of the page.</li>
<li>Select the version that matches your operating system (Windows, Mac, or Linux).</li>
<li>Wait for the download to finish. The file size is about 1 GB.</li>
<li>Extract the zip file to a folder of your choice.</li>
<li>Double-click the "SummertimeSaga.exe" file to launch the game.</li>
</ol>
<p>That's it! You have successfully downloaded and installed Summertime Saga on your PC. Now you can start playing and enjoy the game.</p>
<p>However, before you do that, you should check whether your PC meets the minimum system requirements to run the game smoothly. Here they are:</p>
<ul>
<li>OS: Windows XP or higher, Mac OS X 10.9 or higher, Linux x86/x86_64</li>
<li>CPU: 2 GHz dual-core processor or better</li>
<li>RAM: 2 GB or more</li>
<li>Graphics: OpenGL 2.0 compatible with 512 MB of RAM or better (some devices may need a lower resolution)</li>
<li>Storage: 2 GB or more available space</li>
</ul>
<p>If your PC meets these requirements, you should have no trouble playing Summertime Saga. However, if you run into any problems or errors, you can check the FAQ section on the website or contact the support team for help.</p>
<p></p>
<h2>How to Play Summertime Saga?</h2>
<p>Playing Summertime Saga is very simple and intuitive. The game has a point-and-click interface that lets you interact with the characters, objects, and locations in the game world. You can also use keyboard shortcuts for some actions, such as saving, loading, skipping, etc.</p>

<p>The game screen consists of several elements that help you navigate and play. These are:</p>
<ul>
<li>The dialogue box: This is where you can read the characters' text and dialogue. You can also choose your response when multiple options are available.</li>
<li>The character portraits: These are the images of the characters that appear next to the dialogue box. They show their expressions and emotions during the conversation.</li>
<li>The map: This is where you can see the different locations you can visit in the game world. You can click on them to travel there.</li>
<li>The time: This is where you can see the current date and time in the game. The game has a day-night cycle that affects some events and activities.</li>
<li>The stats: This is where you can see your character's attributes, such as money, energy, charisma, intelligence, strength, etc. You can increase them by doing certain actions or completing certain quests.</li>
<li>The inventory: This is where you can see your character's items, such as clothes, accessories, gifts, etc. You can use them or give them to other characters depending on the situation.</li>
<li>The phone: This is where you can access your character's phone features, such as contacts, messages, gallery, etc. You can use them to communicate with other characters or view some pictures or videos.</li>
</ul>
<p>The game has many characters and locations that you can interact with and explore. Each character has their own personality, backstory, and storyline that you can discover and follow. Each location has its own events, activities, and secrets that you can uncover and enjoy.</p>
<p>To give you an idea of what to expect in the game, here is a brief description of some of the main characters and locations in Summertime Saga:</p>
<h3>Characters</h3>
<ul>

<li><b>Mia:</b> She is your classmate and Erik's crush. She is a sweet and innocent girl who comes from a strict religious family. She is curious about the world and wants to have some fun.</li>
<li><b>Roxxy:</b> She is your classmate and the school's cheerleading captain. She is a spoiled and arrogant girl who likes to bully others. She has a secret relationship with Dexter, the school's quarterback.</li>
<li><b>Jenny:</b> She is your stepsister and roommate. She is a rude and rebellious girl who likes to tease and annoy you. She has a hidden soft side that she rarely shows.</li>
<li><b>Mrs. Johnson:</b> She is your neighbor and Erik's mother. She is a lonely and depressed woman who suffers from alcoholism. She has a strained relationship with her husband, who is always away on business.</li>
<li><b>Mrs. Smith:</b> She is your neighbor and Mia's mother. She is a strict and conservative woman who follows her church's rules. She disapproves of Mia's friendship with you and Erik.</li>
<li><b>Ms. Bissette:</b> She is your French teacher at school. She is a young and attractive woman with a passion for teaching and learning. She has a crush on you but tries to hide it.</li>
<li><b>Ms. Dewitt:</b> She is your history teacher at school. She is an old and grumpy woman who hates her job and her students. She has a mysterious past involving some dark secrets.</li>
<li><b>Aunt Diane:</b> She is your aunt and your father's sister. She lives on a farm outside of town. She is a kind and caring woman who loves gardening and cooking. She has a special bond with you, but she also has some hidden desires.</li>
<li><b>Cassie:</b> She is your cousin and Aunt Diane's daughter. She lives in the city with her boyfriend. She is a wild and adventurous girl who likes to have fun and party. She visits you sometimes, but she also has some hidden motives.</li>
</ul>
<h3>Locations</h3>
<ul>

<li><b>Erik's house:</b> This is where Erik lives with his mother, Mrs. Johnson. You can visit him anytime except when he is at school or sleeping. You can hang out with him in his basement, where he has his gaming setup, his comic collection, etc.</li>
<li><b>Mia's house:</b> This is where Mia lives with her parents, Mrs. and Mr. Smith. You can visit her anytime except when she is at school or sleeping. You can hang out with her in her room, where she has her books, her music, etc.</li>
<li><b>Roxxy's trailer:</b> This is where Roxxy lives with her mother Crystal and her sister Becca. You can visit her anytime except when she is at school or sleeping. You can hang out with her in her trailer, where she has her clothes, her makeup, etc.</li>
<li><b>School:</b> This is where you go to study every weekday from 8 AM to 4 PM. You can attend different classes, such as French, history, math, etc., where you can learn new things or take exams. You can also interact with other students and teachers in the hallways, the cafeteria, the library, etc.</li>
<li><b>Mall:</b> This is where you can go to buy different items or services, such as clothes, accessories, gifts, food, etc. You can also find some entertainment options here, such as the movie theater, the arcade, the tattoo parlor, etc.</li>
<li><b>Park:</b> This is where you can go to relax and enjoy nature. You can find some activities here, such as fishing, jogging, picnicking, etc. You can also meet some characters here, such as Eve, Kevin, Annie, etc.</li>
<li><b>Beach:</b> This is where you can go to have fun in the sun and the sea. You can find some activities here, such as swimming, surfing, sunbathing, etc. You can also meet some characters here, such as Ms. Ross, Captain Terry, Consuela, etc.</li>

<li><b>Police station:</b> This is where you can go to deal with legal matters or crimes. You can find some services here, such as the front desk, the interrogation room, the jail cell, etc. You can also meet some characters here, such as Officer Debbie, Earl, Tony, etc.</li>
<li><b>Farm:</b> This is where your Aunt Diane and cousin Cassie live. You can visit them anytime except when they are sleeping. You can help them with their farm chores, such as milking cows, collecting eggs, harvesting crops, etc. You can also hang out with them in their house or barn.</li>
</ul>
<p>These are just some of the main characters and locations in Summertime Saga. There are many more that you can discover and explore in the game. Each one has its own unique story and content that you can enjoy.</p>
<h2>How to Unlock New Content in Summertime Saga?</h2>
<p>One of the best things about Summertime Saga is that it has a lot of content you can unlock and experience in the game. There are different ways to do this, depending on the type of content you are looking for.</p>
<p>If you want to unlock new storylines and quests in the game, you need to progress through the game and increase your stats. Each character has their own storyline and quest that you can follow and complete by doing certain actions or meeting certain requirements. For example, if you want to unlock Mia's storyline and quest, you need to befriend her and increase your charisma by reading books or taking French classes.</p>
<p>If you want to unlock new scenes and endings in the game, you need to use cheats or mods. These are special codes or files that you can enter or install in the game to access hidden or extra content that is not otherwise available. For example, if you want to unlock all the scenes and endings in the game without playing through the whole game, you can use a cheat code that gives you all the items and stats in the game.</p>

<ol>
<li>Go to the official Summertime Saga website: <a href="">https://summertimesaga.com/</a></li>
<li>Click the "Download" button in the top right corner of the page.</li>
<li>Select the version that matches your operating system (Windows, Mac, or Linux).</li>
<li>Wait for the download to finish. The file size may vary depending on the update.</li>
<li>Extract the zip file to a folder of your choice.</li>
<li>Double-click the "SummertimeSaga.exe" file to launch the game.</li>
</ol>
<p>The game will automatically detect your previous save files and load them. Now you can enjoy the new content and features of the game.</p>
<p>To give you an idea of what to expect in the latest update, here is a preview of some of the new content and features in Summertime Saga:</p>
<ul>
<li>A new character: Daisy, a cowgirl who works on Aunt Diane's farm.</li>
<li>A new location: The tattoo parlor, where you can get some ink done by Eve.</li>
<li>A new storyline: The tattoo quest, where you can help Eve with her tattoo business and earn some rewards.</li>
<li>A new feature: The dark mode option, where you can switch to a darker theme for night-time play.</li>
<li>Lots of improvements and bug fixes.</li>
</ul>
<h2>Conclusion</h2>
<p>Summertime Saga is a game that offers a lot of fun and excitement for players who love adventure, romance, comedy, and adult content. It has a huge open world with over 70 locations to visit, more than 20 characters to interact with, more than 65 storylines and quests to complete, more than 3000 images and animations to enjoy, and lots of mini-games and activities to play. It also has a dating system, a character customization system, a stats system, a save and load system, a regular update schedule, and a dark mode option.</p>

<p>So, what are you waiting for? Download Summertime Saga today and enjoy this amazing game!</p>
<h2>Frequently Asked Questions</h2>
<p>Here are some of the most frequently asked questions about Summertime Saga:</p>
<h3>Q1: Is Summertime Saga free?</h3>
<p>A1: Yes, Summertime Saga is free to download and play. However, if you want to support the developers and get some perks, such as early access to new updates, exclusive content, etc., you can become a patron on their Patreon page: <a href="">https:/www.patreon.com/summertimesaga</a></p>
<h3>Q2: Is Summertime Saga safe?</h3>
<p>A2: Yes, Summertime Saga is safe to download and play. However, you should always download it from the official website or other trusted sources. You should also scan it with an antivirus program before installing it. Keep in mind as well that Summertime Saga contains adult content that is not suitable for minors or sensitive people.</p>
<h3>Q3: How long is Summertime Saga?</h3>
<p>A3: Summertime Saga is a very long game that can take hundreds of hours to complete. It depends on how you play it and how much content you want to explore. However, if you want to complete all the storylines and quests in the game, you can expect to spend at least 50 hours on it.</p>
<h3>Q4: Can I play Summertime Saga on mobile?</h3>
<p>A4: Yes, you can play Summertime Saga on mobile devices such as smartphones or tablets. However, keep in mind that the mobile version of the game is not as optimized or stable as the PC version. You may experience some lag or crashing issues on some devices. You may also have to lower the resolution or quality settings to make it run smoothly.</p>
<h3>Q5: Where can I find more information about Summertime Saga?</h3>
<p>A5: If you want to find more information about Summertime Saga, such as news, updates, tips, guides, etc., you can visit these sources:</p>
<ul>
<li>The official Summertime Saga website: <a href="">https://summertimesaga.com/</a></li>

<li>The official Summertime Saga Discord server: <a href="">https://discord.gg/summertimesaga</a></li>
<li>The official Summertime Saga wiki: <a href="">https://wiki.summertimesaga.com/</a></li>
<li>The official Summertime Saga Reddit community: <a href="">https://www.reddit.com/r/SummertimeSaga/</a></li>
</ul>
<p>These are some of the best places to find more information about Summertime Saga. You can also search other websites, blogs, videos, etc.</p>
</td>
</tr>
</table></p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/base.py
DELETED
@@ -1,26 +0,0 @@
class BaseRetryBackoff:
    def delay_amount(self, context):
        """Calculate how long we should delay before retrying.

        :type context: RetryContext

        """
        raise NotImplementedError("delay_amount")


class BaseRetryableChecker:
    """Base class for determining if a retry should happen.

    This base class checks for specific retryable conditions.
    A single retryable checker doesn't necessarily indicate a retry
    will happen.  It's up to the ``RetryPolicy`` to use its
    ``BaseRetryableCheckers`` to make the final decision on whether a retry
    should happen.
    """

    def is_retryable(self, context):
        """Returns True if retryable, False if not.

        :type context: RetryContext
        """
        raise NotImplementedError("is_retryable")
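These two bases split the retry contract in half: a backoff computes how long to sleep, a checker votes on whether a retry is warranted at all. A hedged sketch of how they might be subclassed; botocore's real concrete classes live elsewhere in the package, and the `attempt_number` attribute on the context is an assumption here:

import random

class ExponentialBackoffSketch(BaseRetryBackoff):
    def __init__(self, base=2, max_backoff=20, rand=random.random):
        self._base = base
        self._max_backoff = max_backoff
        self._rand = rand

    def delay_amount(self, context):
        # Full jitter: a random fraction of an exponentially growing
        # window, capped at max_backoff seconds.
        return min(
            self._rand() * (self._base ** context.attempt_number),
            self._max_backoff,
        )

class MaxAttemptsCheckerSketch(BaseRetryableChecker):
    def __init__(self, max_attempts=3):
        self._max_attempts = max_attempts

    def is_retryable(self, context):
        # Allow a retry only while we are under the attempt budget.
        return context.attempt_number < self._max_attempts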
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_loop.py
DELETED
@@ -1,43 +0,0 @@
from typing import Iterable, Tuple, TypeVar

T = TypeVar("T")


def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
    """Iterate and generate a tuple with a flag for first value."""
    iter_values = iter(values)
    try:
        value = next(iter_values)
    except StopIteration:
        return
    yield True, value
    for value in iter_values:
        yield False, value


def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
    """Iterate and generate a tuple with a flag for last value."""
    iter_values = iter(values)
    try:
        previous_value = next(iter_values)
    except StopIteration:
        return
    for value in iter_values:
        yield False, previous_value
        previous_value = value
    yield True, previous_value


def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
    """Iterate and generate a tuple with a flag for first and last value."""
    iter_values = iter(values)
    try:
        previous_value = next(iter_values)
    except StopIteration:
        return
    first = True
    for value in iter_values:
        yield first, False, previous_value
        first = False
        previous_value = value
    yield first, True, previous_value
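These helpers let rendering code special-case the first or last element of any iterable without calling len(), which also works for generators. A quick usage sketch with the functions above in scope:

for first, last, value in loop_first_last(["a", "b", "c"]):
    print(first, last, value)
# True False a
# False False b
# False True c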
spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/ArrowTable.ts
DELETED
@@ -1,224 +0,0 @@
/**
 * @license
 * Copyright 2018-2019 Streamlit Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { Table, Type } from "apache-arrow"

type CellType = "blank" | "index" | "columns" | "data"

export interface ArrowDataframeProto {
  data: ArrowTableProto
  height: string
  width: string
}

export interface ArrowTableProto {
  data: Uint8Array
  index: Uint8Array
  columns: Uint8Array
  styler: Styler
}

interface Cell {
  classNames: string
  content: string
  id?: string
  type: CellType
}

interface Styler {
  caption?: string
  displayValuesTable: Table
  styles?: string
  uuid: string
}

export class ArrowTable {
  private readonly dataTable: Table
  private readonly indexTable: Table
  private readonly columnsTable: Table
  private readonly styler?: Styler

  constructor(
    dataBuffer: Uint8Array,
    indexBuffer: Uint8Array,
    columnsBuffer: Uint8Array,
    styler?: any
  ) {
    this.dataTable = Table.from(dataBuffer)
    this.indexTable = Table.from(indexBuffer)
    this.columnsTable = Table.from(columnsBuffer)
    this.styler = styler
      ? {
          caption: styler.get("caption"),
          displayValuesTable: Table.from(styler.get("displayValues")),
          styles: styler.get("styles"),
          uuid: styler.get("uuid"),
        }
      : undefined
  }

  get rows(): number {
    return this.indexTable.length + this.columnsTable.numCols
  }

  get columns(): number {
    return this.indexTable.numCols + this.columnsTable.length
  }

  get headerRows(): number {
    return this.rows - this.dataRows
  }

  get headerColumns(): number {
    return this.columns - this.dataColumns
  }

  get dataRows(): number {
    return this.dataTable.length
  }

  get dataColumns(): number {
    return this.dataTable.numCols
  }

  get uuid(): string | undefined {
    return this.styler && this.styler.uuid
  }

  get caption(): string | undefined {
    return this.styler && this.styler.caption
  }

  get styles(): string | undefined {
    return this.styler && this.styler.styles
  }

  get table(): Table {
    return this.dataTable
  }

  get index(): Table {
    return this.indexTable
  }

  get columnTable(): Table {
    return this.columnsTable
  }

  public getCell = (rowIndex: number, columnIndex: number): Cell => {
    const isBlankCell =
      rowIndex < this.headerRows && columnIndex < this.headerColumns
    const isIndexCell =
      rowIndex >= this.headerRows && columnIndex < this.headerColumns
    const isColumnsCell =
      rowIndex < this.headerRows && columnIndex >= this.headerColumns

    if (isBlankCell) {
      const classNames = ["blank"]
      if (columnIndex > 0) {
        classNames.push("level" + rowIndex)
      }

      return {
        type: "blank",
        classNames: classNames.join(" "),
        content: "",
      }
    } else if (isColumnsCell) {
      const dataColumnIndex = columnIndex - this.headerColumns
      const classNames = [
        "col_heading",
        "level" + rowIndex,
        "col" + dataColumnIndex,
      ]

      return {
        type: "columns",
        classNames: classNames.join(" "),
        content: this.getContent(this.columnsTable, dataColumnIndex, rowIndex),
      }
    } else if (isIndexCell) {
      const dataRowIndex = rowIndex - this.headerRows
      const classNames = [
        "row_heading",
        "level" + columnIndex,
        "row" + dataRowIndex,
      ]

      return {
        type: "index",
        id: `T_${this.uuid}level${columnIndex}_row${dataRowIndex}`,
        classNames: classNames.join(" "),
        content: this.getContent(this.indexTable, dataRowIndex, columnIndex),
      }
    } else {
      const dataRowIndex = rowIndex - this.headerRows
      const dataColumnIndex = columnIndex - this.headerColumns
      const classNames = [
        "data",
        "row" + dataRowIndex,
        "col" + dataColumnIndex,
      ]
      const content = this.styler
        ? this.getContent(
            this.styler.displayValuesTable,
            dataRowIndex,
            dataColumnIndex
          )
        : this.getContent(this.dataTable, dataRowIndex, dataColumnIndex)

      return {
        type: "data",
        id: `T_${this.uuid}row${dataRowIndex}_col${dataColumnIndex}`,
        classNames: classNames.join(" "),
        content,
      }
    }
  }

  public getContent = (
    table: Table,
    rowIndex: number,
    columnIndex: number
  ): any => {
    const column = table.getColumnAt(columnIndex)
    if (column === null) {
      return ""
    }

    const columnTypeId = this.getColumnTypeId(table, columnIndex)
    switch (columnTypeId) {
      case Type.Timestamp: {
        return this.nanosToDate(column.get(rowIndex))
      }
      default: {
        return column.get(rowIndex)
      }
    }
  }

  /**
   * Returns apache-arrow specific typeId of column.
   */
  private getColumnTypeId(table: Table, columnIndex: number): Type {
    return table.schema.fields[columnIndex].type.typeId
  }

  private nanosToDate(nanos: number): Date {
    return new Date(nanos / 1e6)
  }
}
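The branching in getCell is plain offset arithmetic over a grid that reserves headerRows rows for column labels and headerColumns columns for the index. A small Python restatement of the same classification, with names of our own choosing rather than from the component:

def cell_type(row, col, header_rows, header_cols):
    # Top-left corner is blank; the rest splits into column headers,
    # the row index, and the data body.
    if row < header_rows and col < header_cols:
        return "blank"
    if row < header_rows:
        return "columns"
    if col < header_cols:
        return "index"
    return "data"

# For a 1-level index and 1-level columns (header_rows = header_cols = 1):
assert cell_type(0, 0, 1, 1) == "blank"
assert cell_type(0, 3, 1, 1) == "columns"
assert cell_type(2, 0, 1, 1) == "index"
assert cell_type(2, 3, 1, 1) == "data"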
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/partition.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits partition
#include <thrust/system/detail/sequential/partition.h>
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/transform.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// omp inherits transform
#include <thrust/system/cpp/detail/transform.h>
spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/train.py
DELETED
@@ -1,312 +0,0 @@
-
-import os
-import argparse
-import random
-import logging
-import torch
-import wandb
-
-import numpy as np
-import torch.nn as nn
-import torch.optim as optim
-import matplotlib.pyplot as plt
-import matplotlib.ticker as ticker
-from torchvision import transforms
-from torch.utils.data import DataLoader
-from pathlib import Path
-
-from utils import __balance_val_split, __split_of_train_sequence
-from datasets.czech_slr_dataset import CzechSLRDataset
-from spoter.spoter_model import SPOTER
-from spoter.utils import train_epoch, evaluate
-from spoter.gaussian_noise import GaussianNoise
-
-
-def get_default_args():
-    parser = argparse.ArgumentParser(add_help=False)
-
-    parser.add_argument("--experiment_name", type=str, default="lsa_64_spoter",
-                        help="Name of the experiment after which the logs and plots will be named")
-    parser.add_argument("--num_classes", type=int, default=64, help="Number of classes to be recognized by the model")
-    parser.add_argument("--hidden_dim", type=int, default=108,
-                        help="Hidden dimension of the underlying Transformer model")
-    parser.add_argument("--seed", type=int, default=379,
-                        help="Seed with which to initialize all the random components of the training")
-
-    # Data
-    parser.add_argument("--training_set_path", type=str, default="", help="Path to the training dataset CSV file")
-    parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file")
-    parser.add_argument("--experimental_train_split", type=float, default=None,
-                        help="Determines how big a portion of the training set should be employed (intended for the "
-                             "gradually enlarging training set experiment from the paper)")
-
-    parser.add_argument("--validation_set", type=str, choices=["from-file", "split-from-train", "none"],
-                        default="from-file", help="Type of validation set construction. See README for further reference")
-    parser.add_argument("--validation_set_size", type=float,
-                        help="Proportion of the training set to be split as validation set, if 'validation_set' is set"
-                             " to 'split-from-train'")
-    parser.add_argument("--validation_set_path", type=str, default="", help="Path to the validation dataset CSV file")
-
-    # Training hyperparameters
-    parser.add_argument("--epochs", type=int, default=100, help="Number of epochs to train the model for")
-    parser.add_argument("--lr", type=float, default=0.001, help="Learning rate for the model training")
-    parser.add_argument("--log_freq", type=int, default=1,
-                        help="Log frequency (frequency of printing all the training info)")
-
-    # Checkpointing
-    parser.add_argument("--save_checkpoints", type=bool, default=True,
-                        help="Determines whether to save weights checkpoints")
-
-    # Scheduler
-    parser.add_argument("--scheduler_factor", type=float, default=0.1, help="Factor for the ReduceLROnPlateau scheduler")
-    parser.add_argument("--scheduler_patience", type=int, default=5,
-                        help="Patience for the ReduceLROnPlateau scheduler")
-
-    # Gaussian noise normalization
-    parser.add_argument("--gaussian_mean", type=float, default=0, help="Mean parameter for Gaussian noise layer")
-    parser.add_argument("--gaussian_std", type=float, default=0.001,
-                        help="Standard deviation parameter for Gaussian noise layer")
-
-    parser.add_argument("--augmentations_probability", type=float, default=0.5, help="")  # 0.462
-    parser.add_argument("--rotate_angle", type=int, default=17, help="")  # 17
-    parser.add_argument("--perspective_transform_ratio", type=float, default=0.2, help="")  # 0.1682
-    parser.add_argument("--squeeze_ratio", type=float, default=0.4, help="")  # 0.3971
-    parser.add_argument("--arm_joint_rotate_angle", type=int, default=4, help="")  # 3
-    parser.add_argument("--arm_joint_rotate_probability", type=float, default=0.4, help="")  # 0.3596
-
-    # Visualization
-    parser.add_argument("--plot_stats", type=bool, default=True,
-                        help="Determines whether continuous statistics should be plotted at the end")
-    parser.add_argument("--plot_lr", type=bool, default=True,
-                        help="Determines whether the LR should be plotted at the end")
-
-    # WANDB
-    parser.add_argument("--wandb_key", type=str, default="", help="")
-    parser.add_argument("--wandb_entity", type=str, default="", help="")
-
-    return parser
-
-
-def train(args):
-
-    if args.wandb_key:
-        wandb.login(key=args.wandb_key)
-        wandb.init(project=args.experiment_name, entity=args.wandb_entity)
-        wandb.config.update(args)
-
-    # MARK: TRAINING PREPARATION AND MODULES
-    if args.wandb_key:
-        args.experiment_name = args.experiment_name + "_lr" + wandb.run.id
-
-    # Initialize all the random seeds
-    random.seed(args.seed)
-    np.random.seed(args.seed)
-    os.environ["PYTHONHASHSEED"] = str(args.seed)
-    torch.manual_seed(args.seed)
-    torch.cuda.manual_seed(args.seed)
-    torch.cuda.manual_seed_all(args.seed)
-    torch.backends.cudnn.deterministic = True
-    g = torch.Generator()
-    g.manual_seed(args.seed)
-
-    # Set the output format to print into the console and save into LOG file
-    logging.basicConfig(
-        level=logging.INFO,
-        format="%(asctime)s [%(levelname)s] %(message)s",
-        handlers=[
-            logging.FileHandler(args.experiment_name + "_" + str(args.experimental_train_split).replace(".", "") + ".log")
-        ]
-    )
-
-    # Set device to CUDA only if applicable
-    device = torch.device("cpu")
-    if torch.cuda.is_available():
-        device = torch.device("cuda")
-
-    # Construct the model
-    slrt_model = SPOTER(num_classes=args.num_classes, hidden_dim=args.hidden_dim)
-    slrt_model.train(True)
-    slrt_model.to(device)
-
-    # Construct the other modules
-    cel_criterion = nn.CrossEntropyLoss()
-    sgd_optimizer = optim.SGD(slrt_model.parameters(), lr=args.lr)
-    scheduler = optim.lr_scheduler.ReduceLROnPlateau(sgd_optimizer, factor=args.scheduler_factor, patience=args.scheduler_patience)
-
-    # Ensure that the path for checkpointing and for images both exist
-    Path("out-checkpoints/" + args.experiment_name + "/").mkdir(parents=True, exist_ok=True)
-    Path("out-img/").mkdir(parents=True, exist_ok=True)
-
-    # MARK: DATA
-
-    # Training set
-    transform = transforms.Compose([GaussianNoise(args.gaussian_mean, args.gaussian_std)])
-    augmentations_config = {
-        "rotate-angle": args.rotate_angle,
-        "perspective-transform-ratio": args.perspective_transform_ratio,
-        "squeeze-ratio": args.squeeze_ratio,
-        "arm-joint-rotate-angle": args.arm_joint_rotate_angle,
-        "arm-joint-rotate-probability": args.arm_joint_rotate_probability
-    }
-
-    train_set = CzechSLRDataset(args.training_set_path, transform=transform, augmentations=True,
-                                augmentations_prob=args.augmentations_probability, augmentations_config=augmentations_config)
-
-    # Validation set
-    if args.validation_set == "from-file":
-        val_set = CzechSLRDataset(args.validation_set_path)
-        val_loader = DataLoader(val_set, shuffle=True, generator=g)
-
-    elif args.validation_set == "split-from-train":
-        train_set, val_set = __balance_val_split(train_set, 0.2)
-
-        val_set.transform = None
-        val_set.augmentations = False
-        val_loader = DataLoader(val_set, shuffle=True, generator=g)
-
-    else:
-        val_loader = None
-
-    # Testing set
-    if args.testing_set_path:
-        eval_set = CzechSLRDataset(args.testing_set_path)
-        eval_loader = DataLoader(eval_set, shuffle=True, generator=g)
-
-    else:
-        eval_loader = None
-
-    # Final training set refinements
-    if args.experimental_train_split:
-        train_set = __split_of_train_sequence(train_set, args.experimental_train_split)
-
-    train_loader = DataLoader(train_set, shuffle=True, generator=g)
-
-    # MARK: TRAINING
-    train_acc, val_acc = 0, 0
-    losses, train_accs, val_accs = [], [], []
-    lr_progress = []
-    top_train_acc, top_val_acc = 0, 0
-    checkpoint_index = 0
-
-    if args.experimental_train_split:
-        print("Starting " + args.experiment_name + "_" + str(args.experimental_train_split).replace(".", "") + "...\n\n")
-        logging.info("Starting " + args.experiment_name + "_" + str(args.experimental_train_split).replace(".", "") + "...\n\n")
-
-    else:
-        print("Starting " + args.experiment_name + "...\n\n")
-        logging.info("Starting " + args.experiment_name + "...\n\n")
-
-    for epoch in range(args.epochs):
-        train_loss, _, _, train_acc = train_epoch(slrt_model, train_loader, cel_criterion, sgd_optimizer, device)
-        losses.append(train_loss.item() / len(train_loader))
-        train_accs.append(train_acc)
-
-        if val_loader:
-            slrt_model.train(False)
-            _, _, val_acc = evaluate(slrt_model, val_loader, device)
-            slrt_model.train(True)
-            val_accs.append(val_acc)
-
-        # Save checkpoints if they are best in the current subset
-        if args.save_checkpoints:
-            if train_acc > top_train_acc:
-                top_train_acc = train_acc
-                torch.save(slrt_model, "out-checkpoints/" + args.experiment_name + "/checkpoint_t_" + str(checkpoint_index) + ".pth")
-
-            if val_acc > top_val_acc:
-                top_val_acc = val_acc
-                torch.save(slrt_model, "out-checkpoints/" + args.experiment_name + "/checkpoint_v_" + str(checkpoint_index) + ".pth")
-
-        if epoch % args.log_freq == 0:
-            print("[" + str(epoch + 1) + "] TRAIN loss: " + str(train_loss.item() / len(train_loader)) + " acc: " + str(train_acc))
-            logging.info("[" + str(epoch + 1) + "] TRAIN loss: " + str(train_loss.item() / len(train_loader)) + " acc: " + str(train_acc))
-
-            if args.wandb_key:
-                wandb.log({
-                    "epoch": int(epoch + 1),
-                    "train-loss": float(train_loss.item() / len(train_loader)),
-                    "train-accuracy": train_acc
-                })
-
-            if val_loader:
-                print("[" + str(epoch + 1) + "] VALIDATION acc: " + str(val_acc))
-                logging.info("[" + str(epoch + 1) + "] VALIDATION acc: " + str(val_acc))
-
-                if args.wandb_key:
-                    wandb.log({
-                        "validation-accuracy": val_acc
-                    })
-
-            print("")
-            logging.info("")
-
-        # Reset the top accuracies on static subsets
-        if epoch % 10 == 0:
-            top_train_acc, top_val_acc = 0, 0
-            checkpoint_index += 1
-
-        lr_progress.append(sgd_optimizer.param_groups[0]["lr"])
-
-    # MARK: TESTING
-
-    print("\nTesting checkpointed models starting...\n")
-    logging.info("\nTesting checkpointed models starting...\n")
-
-    top_result, top_result_name = 0, ""
-
-    if eval_loader:
-        for i in range(checkpoint_index):
-            for checkpoint_id in ["t", "v"]:
-                # tested_model = VisionTransformer(dim=2, mlp_dim=108, num_classes=100, depth=12, heads=8)
-                tested_model = torch.load("out-checkpoints/" + args.experiment_name + "/checkpoint_" + checkpoint_id + "_" + str(i) + ".pth")
-                tested_model.train(False)
-                _, _, eval_acc = evaluate(tested_model, eval_loader, device, print_stats=True)
-
-                if eval_acc > top_result:
-                    top_result = eval_acc
-                    top_result_name = args.experiment_name + "/checkpoint_" + checkpoint_id + "_" + str(i)
-
-                print("checkpoint_" + checkpoint_id + "_" + str(i) + " -> " + str(eval_acc))
-                logging.info("checkpoint_" + checkpoint_id + "_" + str(i) + " -> " + str(eval_acc))
-
-        print("\nThe top result was recorded at " + str(top_result) + " testing accuracy. The best checkpoint is " + top_result_name + ".")
-        logging.info("\nThe top result was recorded at " + str(top_result) + " testing accuracy. The best checkpoint is " + top_result_name + ".")
-
-        if args.wandb_key:
-            wandb.run.summary["best-accuracy"] = top_result
-            wandb.run.summary["best-checkpoint"] = top_result_name
-
-    # PLOT 0: Performance (loss, accuracies) chart plotting
-    if args.plot_stats:
-        fig, ax = plt.subplots()
-        ax.plot(range(1, len(losses) + 1), losses, c="#D64436", label="Training loss")
-        ax.plot(range(1, len(train_accs) + 1), train_accs, c="#00B09B", label="Training accuracy")
-
-        if val_loader:
-            ax.plot(range(1, len(val_accs) + 1), val_accs, c="#E0A938", label="Validation accuracy")
-
-        ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
-
-        ax.set(xlabel="Epoch", ylabel="Accuracy / Loss", title="")
-        plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.05), ncol=4, fancybox=True, shadow=True, fontsize="xx-small")
-        ax.grid()
-
-        fig.savefig("out-img/" + args.experiment_name + "_loss.png")
-
-    # PLOT 1: Learning rate progress
-    if args.plot_lr:
-        fig1, ax1 = plt.subplots()
-        ax1.plot(range(1, len(lr_progress) + 1), lr_progress, label="LR")
-        ax1.set(xlabel="Epoch", ylabel="LR", title="")
-        ax1.grid()
-
-        fig1.savefig("out-img/" + args.experiment_name + "_lr.png")
-
-    print("\nAny desired statistics have been plotted.\nThe experiment is finished.")
-    logging.info("\nAny desired statistics have been plotted.\nThe experiment is finished.")
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser("", parents=[get_default_args()], add_help=False)
-    args = parser.parse_args()
-    train(args)
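One easy-to-miss detail in the deleted script is its checkpoint windowing: every 10 epochs the running best accuracies reset and checkpoint_index advances, so each checkpoint_t_<i>.pth / checkpoint_v_<i>.pth holds the best model of one 10-epoch window. A self-contained sketch of that logic (the accuracy is a stand-in, not real training output):

top_train_acc, checkpoint_index = 0.0, 0
for epoch in range(30):
    train_acc = (epoch % 10) / 10.0           # stand-in for a real training accuracy
    if train_acc > top_train_acc:             # mirrors the save_checkpoints branch
        top_train_acc = train_acc
        print(f"would save checkpoint_t_{checkpoint_index}.pth (acc={train_acc:.1f})")
    if epoch % 10 == 0:                       # new window: reset bests, bump the index
        top_train_acc, checkpoint_index = 0.0, checkpoint_index + 1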
spaces/CikeyQI/meme-api/meme_generator/memes/gif_subtitle/__init__.py
DELETED
@@ -1,153 +0,0 @@
-from pathlib import Path
-from typing import List, Tuple
-
-from pil_utils import BuildImage
-
-from meme_generator import add_meme
-from meme_generator.exception import TextOverLength
-from meme_generator.utils import save_gif
-
-img_dir = Path(__file__).parent / "images"
-
-
-def make_gif(
-    key: str,
-    texts: List[str],
-    pieces: Tuple[Tuple[int, int], ...],
-    fontsize: int = 20,
-    padding_x: int = 5,
-    padding_y: int = 5,
-):
-    img = BuildImage.open(img_dir / f"{key}.gif").image
-    frames: List[BuildImage] = []
-    for i in range(img.n_frames):
-        img.seek(i)
-        frames.append(BuildImage(img.convert("RGB")))
-
-    parts = [frames[start:end] for start, end in pieces]
-    for part, text in zip(parts, texts):
-        for frame in part:
-            try:
-                frame.draw_text(
-                    (padding_x, 0, frame.width - padding_x, frame.height - padding_y),
-                    text,
-                    max_fontsize=fontsize,
-                    min_fontsize=fontsize,
-                    fill="white",
-                    stroke_ratio=0.05,
-                    stroke_fill="black",
-                    valign="bottom",
-                )
-            except ValueError:
-                raise TextOverLength(text)
-
-    return save_gif([frame.image for frame in frames], img.info["duration"] / 1000)
-
-
-def add_gif_meme(
-    key: str,
-    keywords: List[str],
-    pieces: Tuple[Tuple[int, int], ...],
-    examples: Tuple[str, ...],
-    **kwargs,
-):
-    def gif_func(images, texts: List[str], args):
-        return make_gif(key, texts, pieces, **kwargs)
-
-    text_num = len(pieces)
-    add_meme(
-        key,
-        gif_func,
-        min_texts=text_num,
-        max_texts=text_num,
-        default_texts=list(examples),
-        keywords=keywords,
-    )
-
-
-add_gif_meme(
-    "wangjingze",
-    ["王境泽"],
-    ((0, 9), (12, 24), (25, 35), (37, 48)),
-    ("我就是饿死", "死外边 从这里跳下去", "不会吃你们一点东西", "真香"),
-)
-
-# fmt: off
-add_gif_meme(
-    "weisuoyuwei",
-    ["为所欲为"],
-    ((11, 14), (27, 38), (42, 61), (63, 81), (82, 95), (96, 105), (111, 131), (145, 157), (157, 167),),
-    ("好啊", "就算你是一流工程师", "就算你出报告再完美", "我叫你改报告你就要改", "毕竟我是客户", "客户了不起啊", "Sorry 客户真的了不起", "以后叫他天天改报告", "天天改 天天改"),
-    fontsize=19,
-)
-# fmt: on
-
-add_gif_meme(
-    "chanshenzi",
-    ["馋身子"],
-    ((0, 16), (16, 31), (33, 40)),
-    ("你那叫喜欢吗?", "你那是馋她身子", "你下贱!"),
-    fontsize=18,
-)
-
-add_gif_meme(
-    "qiegewala",
-    ["切格瓦拉"],
-    ((0, 15), (16, 31), (31, 38), (38, 48), (49, 68), (68, 86)),
-    ("没有钱啊 肯定要做的啊", "不做的话没有钱用", "那你不会去打工啊", "有手有脚的", "打工是不可能打工的", "这辈子不可能打工的"),
-)
-
-add_gif_meme(
-    "shuifandui",
-    ["谁反对"],
-    ((3, 14), (21, 26), (31, 38), (40, 45)),
-    ("我话说完了", "谁赞成", "谁反对", "我反对"),
-    fontsize=19,
-)
-
-add_gif_meme(
-    "zengxiaoxian",
-    ["曾小贤"],
-    ((3, 15), (24, 30), (30, 46), (56, 63)),
-    ("平时你打电子游戏吗", "偶尔", "星际还是魔兽", "连连看"),
-    fontsize=21,
-)
-
-add_gif_meme(
-    "yalidaye",
-    ["压力大爷"],
-    ((0, 16), (21, 47), (52, 77)),
-    ("外界都说我们压力大", "我觉得吧压力也没有那么大", "主要是28岁了还没媳妇儿"),
-    fontsize=21,
-)
-
-add_gif_meme(
-    "nihaosaoa",
-    ["你好骚啊"],
-    ((0, 14), (16, 26), (42, 61)),
-    ("既然追求刺激", "就贯彻到底了", "你好骚啊"),
-    fontsize=17,
-)
-
-add_gif_meme(
-    "shishilani",
-    ["食屎啦你"],
-    ((14, 21), (23, 36), (38, 46), (60, 66)),
-    ("穿西装打领带", "拿大哥大有什么用", "跟着这样的大哥", "食屎啦你"),
-    fontsize=17,
-)
-
-add_gif_meme(
-    "wunian",
-    ["五年怎么过的"],
-    ((11, 20), (35, 50), (59, 77), (82, 95)),
-    ("五年", "你知道我这五年是怎么过的吗", "我每天躲在家里玩贪玩蓝月", "你知道有多好玩吗"),
-    fontsize=16,
-)
-
-add_gif_meme(
-    "maikease",
-    ["麦克阿瑟说"],
-    ((0, 22), (24, 46), (48, 70), (72, 84)),
-    ("美国前五星上将麦克阿瑟", "曾这样评价道", "如果让我去阻止xxx", "那么我宁愿去阻止上帝"),
-)
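The pieces tuples are the heart of each registration above: every (start, end) pair names the half-open frame range that carries one subtitle, and make_gif slices the decoded frames accordingly. A minimal sketch with stand-in data:

frames = list(range(50))                            # stand-in for decoded GIF frames
pieces = ((0, 9), (12, 24), (25, 35), (37, 48))     # the "wangjingze" ranges above
texts = ["caption 1", "caption 2", "caption 3", "caption 4"]
parts = [frames[start:end] for start, end in pieces]  # same slicing as make_gif
for (start, end), text in zip(pieces, texts):
    print(f"frames {start}-{end - 1}: {text}")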
spaces/CirnoW/anime-ai-detect/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Anime Ai Detect
-emoji: 🤖
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: true
-duplicated_from: saltacc/anime-ai-detect
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/log.py
DELETED
@@ -1,47 +0,0 @@
-# coding=utf-8
-'''
-Created on 2016-10-12
-
-@author: dengdan
-'''
-import datetime
-import logging
-import util
-import sys
-
-
-def get_date_str():
-    now = datetime.datetime.now()
-    return now.strftime('%Y-%m-%d %H:%M:%S')
-
-
-def init_logger(log_file=None, log_path=None, log_level=logging.DEBUG, mode='w', stdout=True):
-    """
-    log_path: directory in which the log file is created
-    mode: 'a', append; 'w', overwrite any existing file.
-    """
-    fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s'
-    if log_path is None:
-        log_path = '~/temp/log/'
-    if log_file is None:
-        log_file = 'log_' + get_date_str() + '.log'
-    log_file = util.io.join_path(log_path, log_file)
-    # logging is not configured yet at this point, so plain print is used
-    print('log file path: ' + log_file)
-    util.io.make_parent_dir(log_file)
-    logging.basicConfig(level=log_level,
-                        format=fmt,
-                        filename=util.io.get_absolute_path(log_file),
-                        filemode=mode)
-
-    if stdout:
-        console = logging.StreamHandler(stream=sys.stdout)
-        console.setLevel(log_level)
-        formatter = logging.Formatter(fmt)
-        console.setFormatter(formatter)
-        logging.getLogger('').addHandler(console)
-
-    # Optionally mirror to stderr as well:
-    # console = logging.StreamHandler(stream=sys.stderr)
-    # console.setLevel(log_level)
-    # formatter = logging.Formatter(fmt)
-    # console.setFormatter(formatter)
-    # logging.getLogger('').addHandler(console)
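The pattern init_logger implements, a file handler configured through basicConfig plus a StreamHandler mirrored to stdout, needs nothing from the project-specific util.io module; a self-contained standard-library sketch (demo.log is an illustrative filename):

import logging
import sys

fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.DEBUG, format=fmt, filename='demo.log', filemode='w')

console = logging.StreamHandler(stream=sys.stdout)  # echo every record to stdout
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(fmt))
logging.getLogger('').addHandler(console)

logging.info('written to demo.log and echoed to stdout')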
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/solver/__init__.py
DELETED
@@ -1,4 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-from .build import make_optimizer
-from .build import make_lr_scheduler
-from .lr_scheduler import WarmupMultiStepLR
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/security/http.py
DELETED
@@ -1,165 +0,0 @@
-import binascii
-from base64 import b64decode
-from typing import Optional
-
-from fastapi.exceptions import HTTPException
-from fastapi.openapi.models import HTTPBase as HTTPBaseModel
-from fastapi.openapi.models import HTTPBearer as HTTPBearerModel
-from fastapi.security.base import SecurityBase
-from fastapi.security.utils import get_authorization_scheme_param
-from pydantic import BaseModel
-from starlette.requests import Request
-from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
-
-
-class HTTPBasicCredentials(BaseModel):
-    username: str
-    password: str
-
-
-class HTTPAuthorizationCredentials(BaseModel):
-    scheme: str
-    credentials: str
-
-
-class HTTPBase(SecurityBase):
-    def __init__(
-        self,
-        *,
-        scheme: str,
-        scheme_name: Optional[str] = None,
-        description: Optional[str] = None,
-        auto_error: bool = True,
-    ):
-        self.model = HTTPBaseModel(scheme=scheme, description=description)
-        self.scheme_name = scheme_name or self.__class__.__name__
-        self.auto_error = auto_error
-
-    async def __call__(
-        self, request: Request
-    ) -> Optional[HTTPAuthorizationCredentials]:
-        authorization = request.headers.get("Authorization")
-        scheme, credentials = get_authorization_scheme_param(authorization)
-        if not (authorization and scheme and credentials):
-            if self.auto_error:
-                raise HTTPException(
-                    status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
-                )
-            else:
-                return None
-        return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
-
-
-class HTTPBasic(HTTPBase):
-    def __init__(
-        self,
-        *,
-        scheme_name: Optional[str] = None,
-        realm: Optional[str] = None,
-        description: Optional[str] = None,
-        auto_error: bool = True,
-    ):
-        self.model = HTTPBaseModel(scheme="basic", description=description)
-        self.scheme_name = scheme_name or self.__class__.__name__
-        self.realm = realm
-        self.auto_error = auto_error
-
-    async def __call__(  # type: ignore
-        self, request: Request
-    ) -> Optional[HTTPBasicCredentials]:
-        authorization = request.headers.get("Authorization")
-        scheme, param = get_authorization_scheme_param(authorization)
-        if self.realm:
-            unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'}
-        else:
-            unauthorized_headers = {"WWW-Authenticate": "Basic"}
-        if not authorization or scheme.lower() != "basic":
-            if self.auto_error:
-                raise HTTPException(
-                    status_code=HTTP_401_UNAUTHORIZED,
-                    detail="Not authenticated",
-                    headers=unauthorized_headers,
-                )
-            else:
-                return None
-        invalid_user_credentials_exc = HTTPException(
-            status_code=HTTP_401_UNAUTHORIZED,
-            detail="Invalid authentication credentials",
-            headers=unauthorized_headers,
-        )
-        try:
-            data = b64decode(param).decode("ascii")
-        except (ValueError, UnicodeDecodeError, binascii.Error):
-            raise invalid_user_credentials_exc
-        username, separator, password = data.partition(":")
-        if not separator:
-            raise invalid_user_credentials_exc
-        return HTTPBasicCredentials(username=username, password=password)
-
-
-class HTTPBearer(HTTPBase):
-    def __init__(
-        self,
-        *,
-        bearerFormat: Optional[str] = None,
-        scheme_name: Optional[str] = None,
-        description: Optional[str] = None,
-        auto_error: bool = True,
-    ):
-        self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description)
-        self.scheme_name = scheme_name or self.__class__.__name__
-        self.auto_error = auto_error
-
-    async def __call__(
-        self, request: Request
-    ) -> Optional[HTTPAuthorizationCredentials]:
-        authorization = request.headers.get("Authorization")
-        scheme, credentials = get_authorization_scheme_param(authorization)
-        if not (authorization and scheme and credentials):
-            if self.auto_error:
-                raise HTTPException(
-                    status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
-                )
-            else:
-                return None
-        if scheme.lower() != "bearer":
-            if self.auto_error:
-                raise HTTPException(
-                    status_code=HTTP_403_FORBIDDEN,
-                    detail="Invalid authentication credentials",
-                )
-            else:
-                return None
-        return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
-
-
-class HTTPDigest(HTTPBase):
-    def __init__(
-        self,
-        *,
-        scheme_name: Optional[str] = None,
-        description: Optional[str] = None,
-        auto_error: bool = True,
-    ):
-        self.model = HTTPBaseModel(scheme="digest", description=description)
-        self.scheme_name = scheme_name or self.__class__.__name__
-        self.auto_error = auto_error
-
-    async def __call__(
-        self, request: Request
-    ) -> Optional[HTTPAuthorizationCredentials]:
-        authorization = request.headers.get("Authorization")
-        scheme, credentials = get_authorization_scheme_param(authorization)
-        if not (authorization and scheme and credentials):
-            if self.auto_error:
-                raise HTTPException(
-                    status_code=HTTP_403_FORBIDDEN, detail="Not authenticated"
-                )
-            else:
-                return None
-        if scheme.lower() != "digest":
-            raise HTTPException(
-                status_code=HTTP_403_FORBIDDEN,
-                detail="Invalid authentication credentials",
-            )
-        return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
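For context, these security classes are consumed through FastAPI's dependency injection; a minimal sketch (the app and route are illustrative, the API calls are standard FastAPI):

from fastapi import Depends, FastAPI
from fastapi.security import HTTPBasic, HTTPBasicCredentials

app = FastAPI()
security = HTTPBasic(realm="demo")  # realm feeds the WWW-Authenticate header above

@app.get("/whoami")
def whoami(credentials: HTTPBasicCredentials = Depends(security)):
    # HTTPBasic.__call__ has already parsed and base64-decoded the header,
    # or raised 401 with the Basic challenge if it was missing or malformed
    return {"username": credentials.username}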
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Image-1cf93ae5.js
DELETED
@@ -1,2 +0,0 @@
-import{S as g,e as u,s as d,N as y,T as f,K as c,U as i,p as o,n as r,A as v}from"./index-1d65707a.js";function b(t){let e,s;return{c(){e=y("img"),f(e.src,s=t[1]+t[0])||c(e,"src",s),c(e,"class","svelte-gqt00k"),i(e,"table",t[2]==="table"),i(e,"gallery",t[2]==="gallery"),i(e,"selected",t[3])},m(l,a){o(l,e,a)},p(l,[a]){a&3&&!f(e.src,s=l[1]+l[0])&&c(e,"src",s),a&4&&i(e,"table",l[2]==="table"),a&4&&i(e,"gallery",l[2]==="gallery"),a&8&&i(e,"selected",l[3])},i:r,o:r,d(l){l&&v(e)}}}function q(t,e,s){let{value:l}=e,{samples_dir:a}=e,{type:m}=e,{selected:_=!1}=e;return t.$$set=n=>{"value"in n&&s(0,l=n.value),"samples_dir"in n&&s(1,a=n.samples_dir),"type"in n&&s(2,m=n.type),"selected"in n&&s(3,_=n.selected)},[l,a,m,_]}class I extends g{constructor(e){super(),u(this,e,q,b,d,{value:0,samples_dir:1,type:2,selected:3})}}const E=I;export{E};
-//# sourceMappingURL=Image-1cf93ae5.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Model3D-db673911.js
DELETED
@@ -1,2 +0,0 @@
-import{S as o,e as d,s as u,N as _,P as g,K as r,U as i,p as v,M as y,R as m,n as c,A as b}from"./index-3370be2a.js";function M(a){let e,s;return{c(){e=_("div"),s=g(a[0]),r(e,"class","svelte-1ayixqk"),i(e,"table",a[1]==="table"),i(e,"gallery",a[1]==="gallery"),i(e,"selected",a[2])},m(t,l){v(t,e,l),y(e,s)},p(t,[l]){l&1&&m(s,t[0]),l&2&&i(e,"table",t[1]==="table"),l&2&&i(e,"gallery",t[1]==="gallery"),l&4&&i(e,"selected",t[2])},i:c,o:c,d(t){t&&b(e)}}}function D(a,e,s){let{value:t}=e,{type:l}=e,{selected:f=!1}=e;return a.$$set=n=>{"value"in n&&s(0,t=n.value),"type"in n&&s(1,l=n.type),"selected"in n&&s(2,f=n.selected)},[t,l,f]}class h extends o{constructor(e){super(),d(this,e,D,M,u,{value:0,type:1,selected:2})}}const E=h;export{E};
-//# sourceMappingURL=Model3D-db673911.js.map
spaces/Dabs/wordcloud/app.py
DELETED
@@ -1,38 +0,0 @@
-from wordcloud import WordCloud, get_single_color_func
-from stop_words import get_stop_words
-import numpy as np
-from PIL import Image
-import matplotlib.pyplot as plt
-from collections import Counter
-import gradio as gr
-
-
-def create_wc(text, lang, custom_sw, input_img, color_rgb):
-    STOPWORDS = set(get_stop_words(lang))
-    STOPWORDS.update(custom_sw.replace(" ", "").split(","))
-    words = text.lower().split(" ")
-    words = [word for word in words if word not in STOPWORDS]
-    mask = np.array(input_img)
-    text_dict = Counter(words)
-    wordcloud = WordCloud(background_color="rgba(0, 0, 0, 0)", mode="RGBA", mask=mask, width=1000, height=1500, stopwords=STOPWORDS).generate_from_frequencies(text_dict)
-    # wordcloud.recolor(colormap=colormap)
-    wordcloud.recolor(color_func=get_single_color_func(f'rgb({color_rgb})'))
-
-    return wordcloud
-
-text_example = """
-Harry Potter is a series of seven fantasy novels written by British author J. K. Rowling. The novels chronicle the lives of a young wizard, Harry Potter, and his friends Hermione Granger and Ron Weasley, all of whom are students at Hogwarts School of Witchcraft and Wizardry. The main story arc concerns Harry's struggle against Lord Voldemort, a dark wizard who intends to become immortal, overthrow the wizard governing body known as the Ministry of Magic and subjugate all wizards and Muggles (non-magical people).
-The series was originally published in English by Bloomsbury in the United Kingdom and Scholastic Press in the United States. All versions around the world are printed by Grafica Veneta in Italy.[1] A series of many genres, including fantasy, drama, coming of age, and the British school story (which includes elements of mystery, thriller, adventure, horror, and romance), the world of Harry Potter explores numerous themes and includes many cultural meanings and references.[2] According to Rowling, the main theme is death.[3] Other major themes in the series include prejudice, corruption, and madness.[4]
-Since the release of the first novel, Harry Potter and the Philosopher's Stone, on 26 June 1997, the books have found immense popularity, positive reviews, and commercial success worldwide. They have attracted a wide adult audience as well as younger readers and are often considered cornerstones of modern young adult literature.[5] As of February 2018, the books have sold more than 500 million copies worldwide, making them the best-selling book series in history, and have been translated into eighty languages.[6] The last four books consecutively set records as the fastest-selling books in history, with the final instalment selling roughly 2.7 million copies in the United Kingdom and 8.3 million copies in the United States within twenty-four hours of its release.
-The original seven books were adapted into an eight-part namesake film series by Warner Bros. Pictures. In 2016, the total value of the Harry Potter franchise was estimated at $25 billion,[7] making Harry Potter one of the highest-grossing media franchises of all time. Harry Potter and the Cursed Child is a play based on a story co-written by Rowling.
-The success of the books and films has allowed the Harry Potter franchise to expand with numerous derivative works, a travelling exhibition that premiered in Chicago in 2009, a studio tour in London that opened in 2012, a digital platform on which J. K. Rowling updates the series with new information and insight, and a pentalogy of spin-off films premiering in November 2016 with Fantastic Beasts and Where to Find Them, among many other developments. Themed attractions, collectively known as The Wizarding World of Harry Potter, have been built at several Universal Parks & Resorts amusement parks around the world.
-"""
-
-iface = gr.Interface(create_wc,
-                     ["text", gr.inputs.Dropdown(["en", "es"]), "text", "image", "text"],
-                     "pil",
-                     examples=[[text_example, "en", "harry, potter", "glasses.png", "128,0,0"]],
-                     title="Wordcloud",
-                     description="Create a wordcloud from a text. Use the custom sw field to input custom stopwords separated by comma")
-
-iface.launch()
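The single-color recoloring step in create_wc is easy to try in isolation; a minimal sketch (the text and output filename are illustrative):

from wordcloud import WordCloud, get_single_color_func

wc = WordCloud(width=400, height=200).generate("harry potter wizard magic hogwarts school")
wc.recolor(color_func=get_single_color_func("rgb(128,0,0)"))  # one dark-red hue for every word
wc.to_file("demo.png")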