parquet-converter committed
Commit e6f326b · 1 parent: 878e5b1

Update parquet files (step 79 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. spaces/101-5/gpt4free/g4f/.v1/unfinished/t3nsor/__init__.py +0 -136
  2. spaces/17TheWord/vits-models/app.py +0 -265
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Code Visual To Flowchart 41 Crack A Must-Have for Software Engineers and Programmers.md +0 -145
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Up on Poppy Hill English Dub 1080p The Best Way to Enjoy the Studio Ghibli Classic.md +0 -12
  5. spaces/1gistliPinn/ChatGPT4/Examples/Basta Guardare Il Cielo Film Completo Ita Download.md +0 -5
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case Paris - The Hidden Object Game That Takes You to the Heart of Romance.md +0 -113
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download File dari Google Drive Tanpa Buka Halaman Web.md +0 -163
  8. spaces/1phancelerku/anime-remove-background/Archero Mod APK iOS Everything You Need to Know About the Game and the Hack.md +0 -133
  9. spaces/1phancelerku/anime-remove-background/Download Aim King 8 Ball Pool APK and Become a Pro in No Time.md +0 -136
  10. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py +0 -187
  11. spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/got.py +0 -71
  12. spaces/AIWaves/Software_Company/src/agents/Component/PromptComponent.py +0 -133
  13. spaces/AP123/dreamgaussian/cam_utils.py +0 -146
  14. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py +0 -4
  15. spaces/Abhi5ingh/fashionsd/app.py +0 -168
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/hsladjustpipeline.js +0 -2
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/IsInTouching.js +0 -19
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/InjectProperties.js +0 -32
  19. spaces/AlexWelcing/MusicLM/__init__.py +0 -3
  20. spaces/Ali-Omrani/CCR/app.py +0 -105
  21. spaces/Alican/pixera/models/__init__.py +0 -67
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/prior_transformer.py +0 -364
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim.py +0 -148
  24. spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/README.md +0 -42
  25. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py +0 -599
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/photometric.py +0 -428
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py +0 -50
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/markup.py +0 -246
  29. spaces/Audio-AGI/WavJourney/code_generator.py +0 -188
  30. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/catalog.py +0 -236
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md +0 -9
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/__init__.py +0 -1
  33. spaces/AzumaSeren100/XuanShen-Bert-VITS2/server.py +0 -124
  34. spaces/Benson/text-generation/Examples/Descargar Gratis Gta 5 Mvil Apk Para Android.md +0 -42
  35. spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+page.server.ts +0 -33
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/standard.py +0 -532
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel.py +0 -37
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build.py +0 -153
  39. spaces/CALM/Dashboard/streamlit_observable/frontend/src/Observable.tsx +0 -161
  40. spaces/ChrisCaviar/ControlNet-v1-1/app_mlsd.py +0 -113
  41. spaces/Cloudfeng/anime-remove-background/README.md +0 -14
  42. spaces/CofAI/chat.b4/client/css/global.css +0 -70
  43. spaces/CoreyMorris/MMLU-by-task-Leaderboard/README.md +0 -9
  44. spaces/Cpp4App/Cpp4App/CDM/result_processing/eval_classes.py +0 -215
  45. spaces/DaFujaTyping/hf-Chat-ui/src/lib/buildPrompt.ts +0 -30
  46. spaces/Dagfinn1962/prodia2/main.css +0 -67
  47. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/led_loss.py +0 -47
  48. spaces/EPFL-VILAB/MultiMAE/multimae/multimae_utils.py +0 -253
  49. spaces/EXPOSUREEE/Ai-Image-Enhancer/scripts/generate_meta_info.py +0 -58
  50. spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py +0 -126
spaces/101-5/gpt4free/g4f/.v1/unfinished/t3nsor/__init__.py DELETED
@@ -1,136 +0,0 @@
- from time import time
-
- from requests import post
-
- headers = {
-     'authority': 'www.t3nsor.tech',
-     'accept': '*/*',
-     'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-     'cache-control': 'no-cache',
-     'content-type': 'application/json',
-     'origin': 'https://www.t3nsor.tech',
-     'pragma': 'no-cache',
-     'referer': 'https://www.t3nsor.tech/',
-     'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-     'sec-ch-ua-mobile': '?0',
-     'sec-ch-ua-platform': '"macOS"',
-     'sec-fetch-dest': 'empty',
-     'sec-fetch-mode': 'cors',
-     'sec-fetch-site': 'same-origin',
-     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- }
-
-
- class T3nsorResponse:
-     class Completion:
-         class Choices:
-             def __init__(self, choice: dict) -> None:
-                 self.text = choice['text']
-                 self.content = self.text.encode()
-                 self.index = choice['index']
-                 self.logprobs = choice['logprobs']
-                 self.finish_reason = choice['finish_reason']
-
-             def __repr__(self) -> str:
-                 return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
-
-         def __init__(self, choices: dict) -> None:
-             self.choices = [self.Choices(choice) for choice in choices]
-
-     class Usage:
-         def __init__(self, usage_dict: dict) -> None:
-             self.prompt_tokens = usage_dict['prompt_chars']
-             self.completion_tokens = usage_dict['completion_chars']
-             self.total_tokens = usage_dict['total_chars']
-
-         def __repr__(self):
-             return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
-     def __init__(self, response_dict: dict) -> None:
-         self.response_dict = response_dict
-         self.id = response_dict['id']
-         self.object = response_dict['object']
-         self.created = response_dict['created']
-         self.model = response_dict['model']
-         self.completion = self.Completion(response_dict['choices'])
-         self.usage = self.Usage(response_dict['usage'])
-
-     def json(self) -> dict:
-         return self.response_dict
-
-
- class Completion:
-     model = {
-         'model': {
-             'id': 'gpt-3.5-turbo',
-             'name': 'Default (GPT-3.5)'
-         }
-     }
-
-     def create(
-             prompt: str = 'hello world',
-             messages: list = []) -> T3nsorResponse:
-         response = post('https://www.t3nsor.tech/api/chat', headers=headers, json=Completion.model | {
-             'messages': messages,
-             'key': '',
-             'prompt': prompt
-         })
-
-         return T3nsorResponse({
-             'id': f'cmpl-1337-{int(time())}',
-             'object': 'text_completion',
-             'created': int(time()),
-             'model': Completion.model,
-             'choices': [{
-                 'text': response.text,
-                 'index': 0,
-                 'logprobs': None,
-                 'finish_reason': 'stop'
-             }],
-             'usage': {
-                 'prompt_chars': len(prompt),
-                 'completion_chars': len(response.text),
-                 'total_chars': len(prompt) + len(response.text)
-             }
-         })
-
-
- class StreamCompletion:
-     model = {
-         'model': {
-             'id': 'gpt-3.5-turbo',
-             'name': 'Default (GPT-3.5)'
-         }
-     }
-
-     def create(
-             prompt: str = 'hello world',
-             messages: list = []) -> T3nsorResponse:
-         print('t3nsor api is down, this may not work, refer to another module')
-
-         response = post('https://www.t3nsor.tech/api/chat', headers=headers, stream=True, json=Completion.model | {
-             'messages': messages,
-             'key': '',
-             'prompt': prompt
-         })
-
-         for chunk in response.iter_content(chunk_size=2046):
-             yield T3nsorResponse({
-                 'id': f'cmpl-1337-{int(time())}',
-                 'object': 'text_completion',
-                 'created': int(time()),
-                 'model': Completion.model,
-
-                 'choices': [{
-                     'text': chunk.decode(),
-                     'index': 0,
-                     'logprobs': None,
-                     'finish_reason': 'stop'
-                 }],
-
-                 'usage': {
-                     'prompt_chars': len(prompt),
-                     'completion_chars': len(chunk.decode()),
-                     'total_chars': len(prompt) + len(chunk.decode())
-                 }
-             })
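
For reference, the deleted module above mimics an OpenAI-style completion API on top of the t3nsor.tech chat endpoint. A minimal usage sketch, assuming the package directory containing this `__init__.py` were importable as `t3nsor` and the endpoint still answered (the module's own `print` warns the API is down):

```python
# Hypothetical usage of the deleted wrapper. The import name and a responsive
# www.t3nsor.tech endpoint are assumptions, not guarantees.
import t3nsor

# Non-streaming: returns a T3nsorResponse shaped like an OpenAI completion.
response = t3nsor.Completion.create(
    prompt='hello world',
    messages=[],  # prior chat turns, passed straight through to the endpoint
)
print(response.completion.choices[0].text)
print(response.usage.total_tokens)  # note: counted in characters, not tokens

# Streaming: StreamCompletion.create is a generator of per-chunk responses.
for chunk in t3nsor.StreamCompletion.create(prompt='hello world'):
    print(chunk.completion.choices[0].text, end='')
```

Note that `Completion.create` and `StreamCompletion.create` are declared without `self` or `@staticmethod`, so they only work when called on the class itself, as above.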
spaces/17TheWord/vits-models/app.py DELETED
@@ -1,265 +0,0 @@
- # coding=utf-8
- import os
- import re
- import argparse
- import utils
- import commons
- import json
- import torch
- import gradio as gr
- from models import SynthesizerTrn
- from text import text_to_sequence, _clean_text
- from torch import no_grad, LongTensor
- import gradio.processing_utils as gr_processing_utils
- import logging
- logging.getLogger('numba').setLevel(logging.WARNING)
- limitation = os.getenv("SYSTEM") == "spaces"  # limit text and audio length in huggingface spaces
-
- hps_ms = utils.get_hparams_from_file(r'config/config.json')
-
- audio_postprocess_ori = gr.Audio.postprocess
-
- def audio_postprocess(self, y):
-     data = audio_postprocess_ori(self, y)
-     if data is None:
-         return None
-     return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
-
-
- gr.Audio.postprocess = audio_postprocess
-
- def get_text(text, hps, is_symbol):
-     text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
-     if hps.data.add_blank:
-         text_norm = commons.intersperse(text_norm, 0)
-     text_norm = LongTensor(text_norm)
-     return text_norm, clean_text
-
- def create_tts_fn(net_g_ms, speaker_id):
-     def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
-         text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
-         if limitation:
-             text_len = len(re.sub("\[([A-Z]{2})\]", "", text))
-             max_len = 100
-             if is_symbol:
-                 max_len *= 3
-             if text_len > max_len:
-                 return "Error: Text is too long", None
-         if not is_symbol:
-             if language == 0:
-                 text = f"[ZH]{text}[ZH]"
-             elif language == 1:
-                 text = f"[JA]{text}[JA]"
-             else:
-                 text = f"{text}"
-         stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
-         with no_grad():
-             x_tst = stn_tst.unsqueeze(0).to(device)
-             x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
-             sid = LongTensor([speaker_id]).to(device)
-             audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
-                                    length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
-
-         return "Success", (22050, audio)
-     return tts_fn
-
- def create_to_symbol_fn(hps):
-     def to_symbol_fn(is_symbol_input, input_text, temp_text, temp_lang):
-         if temp_lang == 'Chinese':
-             clean_text = f'[ZH]{input_text}[ZH]'
-         elif temp_lang == "Japanese":
-             clean_text = f'[JA]{input_text}[JA]'
-         else:
-             clean_text = input_text
-         return (_clean_text(clean_text, hps.data.text_cleaners), input_text) if is_symbol_input else (temp_text, temp_text)
-
-     return to_symbol_fn
- def change_lang(language):
-     if language == 0:
-         return 0.6, 0.668, 1.2, "Chinese"
-     elif language == 1:
-         return 0.6, 0.668, 1, "Japanese"
-     else:
-         return 0.6, 0.668, 1, "Mix"
-
- download_audio_js = """
- () =>{{
-     let root = document.querySelector("body > gradio-app");
-     if (root.shadowRoot != null)
-         root = root.shadowRoot;
-     let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
-     let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
-     if (audio == undefined)
-         return;
-     text = text.value;
-     if (text == undefined)
-         text = Math.floor(Math.random()*100000000);
-     audio = audio.src;
-     let oA = document.createElement("a");
-     oA.download = text.substr(0, 20)+'.wav';
-     oA.href = audio;
-     document.body.appendChild(oA);
-     oA.click();
-     oA.remove();
- }}
- """
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--device', type=str, default='cpu')
-     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
-     args = parser.parse_args()
-     device = torch.device(args.device)
-
-     models = []
-     with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
-         models_info = json.load(f)
-     for i, info in models_info.items():
-         sid = info['sid']
-         name_en = info['name_en']
-         name_zh = info['name_zh']
-         title = info['title']
-         cover = f"pretrained_models/{i}/{info['cover']}"
-         example = info['example']
-         language = info['language']
-         net_g_ms = SynthesizerTrn(
-             len(hps_ms.symbols),
-             hps_ms.data.filter_length // 2 + 1,
-             hps_ms.train.segment_size // hps_ms.data.hop_length,
-             n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
-             **hps_ms.model)
-         utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
-         _ = net_g_ms.eval().to(device)
-         models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
-     with gr.Blocks() as app:
-         gr.Markdown(
-             "# <center> vits-models\n"
-             "## <center> Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
-             "## <center> ·请不要生成会对个人以及组织造成侵害的内容\n"
-             "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=sayashi.vits-models)\n\n"
-             "[Open In Colab]"
-             "(https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)"
-             " without queue and length limitation.(无需等待队列,并且没有长度限制)\n\n"
-             "[Finetune your own model](https://github.com/SayaSS/vits-finetuning)"
-         )
-
-         with gr.Tabs():
-             with gr.TabItem("EN"):
-                 for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
-                     with gr.TabItem(name_en):
-                         with gr.Row():
-                             gr.Markdown(
-                                 '<div align="center">'
-                                 f'<a><strong>{title}</strong></a>'
-                                 f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
-                                 '</div>'
-                             )
-                         with gr.Row():
-                             with gr.Column():
-                                 input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
-                                 lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
-                                                    type="index", value=language)
-                                 temp_lang = gr.Variable(value=language)
-                                 with gr.Accordion(label="Advanced Options", open=False):
-                                     temp_text_var = gr.Variable()
-                                     symbol_input = gr.Checkbox(value=False, label="Symbol input")
-                                     symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
-                                                              samples=[[x] for x in hps_ms.symbols])
-                                     symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
-                                 btn = gr.Button(value="Generate", variant="primary")
-                                 with gr.Row():
-                                     ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
-                                     nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
-                                     ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
-                             with gr.Column():
-                                 o1 = gr.Textbox(label="Output Message")
-                                 o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
-                                 download = gr.Button("Download Audio")
-                                 btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
-                                 download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
-                                 lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls, temp_lang])
-                                 symbol_input.change(
-                                     to_symbol_fn,
-                                     [symbol_input, input_text, temp_text_var, temp_lang],
-                                     [input_text, temp_text_var]
-                                 )
-                                 symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
-                                                   _js=f"""
-                                 (i,symbols) => {{
-                                     let root = document.querySelector("body > gradio-app");
-                                     if (root.shadowRoot != null)
-                                         root = root.shadowRoot;
-                                     let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
-                                     let startPos = text_input.selectionStart;
-                                     let endPos = text_input.selectionEnd;
-                                     let oldTxt = text_input.value;
-                                     let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
-                                     text_input.value = result;
-                                     let x = window.scrollX, y = window.scrollY;
-                                     text_input.focus();
-                                     text_input.selectionStart = startPos + symbols[i].length;
-                                     text_input.selectionEnd = startPos + symbols[i].length;
-                                     text_input.blur();
-                                     window.scrollTo(x, y);
-                                     return text_input.value;
-                                 }}""")
-             with gr.TabItem("中文"):
-                 for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
-                     with gr.TabItem(name_zh):
-                         with gr.Row():
-                             gr.Markdown(
-                                 '<div align="center">'
-                                 f'<a><strong>{title}</strong></a>'
-                                 f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
-                                 '</div>'
-                             )
-                         with gr.Row():
-                             with gr.Column():
-                                 input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
-                                 lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
-                                                    type="index", value="中文"if language == "Chinese" else "日语")
-                                 temp_lang = gr.Variable(value=language)
-                                 with gr.Accordion(label="高级选项", open=False):
-                                     temp_text_var = gr.Variable()
-                                     symbol_input = gr.Checkbox(value=False, label="符号输入")
-                                     symbol_list = gr.Dataset(label="符号列表", components=[input_text],
-                                                              samples=[[x] for x in hps_ms.symbols])
-                                     symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
-                                 btn = gr.Button(value="生成", variant="primary")
-                                 with gr.Row():
-                                     ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
-                                     nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
-                                     ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
-                             with gr.Column():
-                                 o1 = gr.Textbox(label="输出信息")
-                                 o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
-                                 download = gr.Button("下载音频")
-                                 btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
-                                 download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
-                                 lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
-                                 symbol_input.change(
-                                     to_symbol_fn,
-                                     [symbol_input, input_text, temp_text_var, temp_lang],
-                                     [input_text, temp_text_var]
-                                 )
-                                 symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
-                                                   _js=f"""
-                                 (i,symbols) => {{
-                                     let root = document.querySelector("body > gradio-app");
-                                     if (root.shadowRoot != null)
-                                         root = root.shadowRoot;
-                                     let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
-                                     let startPos = text_input.selectionStart;
-                                     let endPos = text_input.selectionEnd;
-                                     let oldTxt = text_input.value;
-                                     let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
-                                     text_input.value = result;
-                                     let x = window.scrollX, y = window.scrollY;
-                                     text_input.focus();
-                                     text_input.selectionStart = startPos + symbols[i].length;
-                                     text_input.selectionEnd = startPos + symbols[i].length;
-                                     text_input.blur();
-                                     window.scrollTo(x, y);
-                                     return text_input.value;
-                                 }}""")
-     app.queue(concurrency_count=1).launch(show_api=False, share=args.share)
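
For reference, the deleted Space above builds one Gradio tab per model listed in `pretrained_models/info.json` and wires each tab's Generate button to a closure from `create_tts_fn`. A minimal sketch of calling such a closure directly, assuming the repo's checkpoints and its `utils`, `models`, and `text` helpers are available locally:

```python
# Hypothetical direct use of the closure built by create_tts_fn; parameter
# values mirror the UI's slider defaults, everything else is an assumption.
tts_fn = create_tts_fn(net_g_ms, speaker_id=0)
status, (sample_rate, audio) = tts_fn(
    'こんにちは',  # text; wrapped as [JA]...[JA] internally when language == 1
    1,             # language index: 0 = Chinese, 1 = Japanese, other = mixed
    0.6,           # noise_scale: controls emotional variation
    0.668,         # noise_scale_w: controls phoneme duration noise
    1.0,           # length_scale: overall speed (higher = slower speech)
    False,         # is_symbol: True feeds raw symbols, bypassing language tags
)
# status is "Success" (or an error string); audio is a float32 numpy array
# at sample_rate == 22050 Hz, ready for gr.Audio or soundfile.write.
```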
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Code Visual To Flowchart 41 Crack A Must-Have for Software Engineers and Programmers.md DELETED
@@ -1,145 +0,0 @@
- <br />
- <h1>Code Visual To Flowchart 41 Crack: How to Generate Flowcharts from Code Easily</h1>
- <p>Are you a programmer who wants to document your source code or reverse engineer a program? Do you want to create professional-looking flowcharts from your code without spending hours on drawing and editing? If yes, then you need Code Visual To Flowchart 41 Crack, a powerful tool that can automatically generate flowcharts from code in various programming languages. In this article, you will learn what Code Visual To Flowchart is, why you should use it, how to download and install it, how to use it, and what benefits you can get from it. Let's get started!</p>
- <h2>Introduction</h2>
- <h3>What is Code Visual To Flowchart?</h3>
- <p>Code Visual To Flowchart is an automatic flow chart generator software that can reverse engineer a program, create programming flowcharts from code, and document source code. It can generate Bmp, Visio, Word, Excel, PowerPoint, and HTML flowcharts documents from code. It supports the following programming languages: C, C++, VC++ (Visual C++ .NET), VB (Visual Basic), VBA, Qbasic (quickbasic), VBScript (VBS), ASP, Visual C# (C sharp), Visual Basic .NET (VB.NET), Visual J# .NET, VC++.NET, ASP.NET, Java, JSP, JavaScript (JScript), Delphi (Object Pascal), PowerBuilder (PowerScript), PHP, Visual FoxPro, PL/SQL, T-SQL (Transact-sql) and Perl.</p>
- <h2>Code Visual To Flowchart 41 Crack</h2><br /><p><b><b>DOWNLOAD</b> &#9733; <a href="https://byltly.com/2uKwwm">https://byltly.com/2uKwwm</a></b></p><br /><br />
- <h3>Why use Code Visual To Flowchart?</h3>
- <p>Code Visual To Flowchart can help you in many ways. Here are some of the reasons why you should use it:</p>
- <ul>
- <li>It can help you understand complex code logic and structure by visualizing it in a clear and intuitive way.</li>
- <li>It can help you document your code for yourself or others by generating flowcharts that show the logic flow and algorithm of your program.</li>
- <li>It can help you debug your code by showing you where the errors or bugs are in your code.</li>
- <li>It can help you learn new programming languages by showing you how the syntax and structure of different languages work.</li>
- <li>It can help you improve your coding skills by showing you how to write better and more efficient code.</li>
- </ul>
- <h3>How to download and install Code Visual To Flowchart 41 Crack?</h3>
- <p>To download and install Code Visual To Flowchart 41 Crack, you need to follow these steps:</p>
- <p>Code Visual To Flowchart 41 Crack download<br />
- Code Visual To Flowchart 41 Crack free<br />
- Code Visual To Flowchart 41 Crack full version<br />
- Code Visual To Flowchart 41 Crack serial key<br />
- Code Visual To Flowchart 41 Crack license key<br />
- Code Visual To Flowchart 41 Crack activation code<br />
- Code Visual To Flowchart 41 Crack patch<br />
- Code Visual To Flowchart 41 Crack keygen<br />
- Code Visual To Flowchart 41 Crack torrent<br />
- Code Visual To Flowchart 41 Crack online<br />
- Code Visual To Flowchart 41 Crack generator<br />
- Code Visual To Flowchart 41 Crack software<br />
- Code Visual To Flowchart 41 Crack tool<br />
- Code Visual To Flowchart 41 Crack converter<br />
- Code Visual To Flowchart 41 Crack editor<br />
- Code Visual To Flowchart 41 Crack viewer<br />
- Code Visual To Flowchart 41 Crack maker<br />
- Code Visual To Flowchart 41 Crack builder<br />
- Code Visual To Flowchart 41 Crack creator<br />
- Code Visual To Flowchart 41 Crack diagram<br />
- Code Visual To Flowchart 41 Crack chart<br />
- Code Visual To Flowchart 41 Crack graph<br />
- Code Visual To Flowchart 41 Crack design<br />
- Code Visual To Flowchart 41 Crack layout<br />
- Code Visual To Flowchart 41 Crack format<br />
- Code Visual To Flowchart 41 Crack style<br />
- Code Visual To Flowchart 41 Crack template<br />
- Code Visual To Flowchart 41 Crack example<br />
- Code Visual To Flowchart 41 Crack sample<br />
- Code Visual To Flowchart 41 Crack tutorial<br />
- Code Visual To Flowchart 41 Crack guide<br />
- Code Visual To Flowchart 41 Crack manual<br />
- Code Visual To Flowchart 41 Crack instruction<br />
- Code Visual To Flowchart 41 Crack review<br />
- Code Visual To Flowchart 41 Crack rating<br />
- Code Visual To Flowchart 41 Crack feedback<br />
- Code Visual To Flowchart 41 Crack testimonial<br />
- Code Visual To Flowchart 41 Crack comparison<br />
- Code Visual To Flowchart 41 Crack alternative<br />
- Code Visual To Flowchart 41 Crack solution<br />
- Code Visual To Flowchart 41 Crack method<br />
- Code Visual To Flowchart 41 Crack technique<br />
- Code Visual To Flowchart 41 Crack approach<br />
- Code Visual To Flowchart 41 Crack strategy<br />
- Code Visual To Flowchart 41 Crack tip<br />
- Code Visual To Flowchart 41 Crack trick<br />
- Code Visual To Flowchart 41 Crack hack<br />
- Code Visual To Flowchart 41 Crack cheat<br />
- Code Visual To Flowchart 41 Crack mod<br />
- Code Visual To Flowchart 41 Crack update</p>
- <ol>
- <li>Go to <a href="https://code-visual-to-flowchart.software.informer.com/4.2/">this link</a> and click on the green Download button.</li>
- <li>Save the cvf.exe file on your computer and run it.</li>
- <li>Follow the instructions on the screen to complete the installation process.</li>
- <li>Copy the crack file from the downloaded folder and paste it into the installation directory of Code Visual To Flowchart.</li>
- <li>Run Code Visual To Flowchart as administrator and enjoy!</li>
- </ol>
- <h2>How to use Code Visual To Flowchart 41 Crack?</h2>
- <h3>Step 1: Open your code file in Code Visual To Flowchart</h3>
- <p>To open your code file in Code Visual To Flowchart, you need to do the following:</p>
- <ol>
- <li>Launch Code Visual To Flowchart from your desktop or start menu.</li>
- <li>Click on the File menu and select Open Source File.</li>
- <li>Browse to the location of your code file and select it.</li>
- <li>Click on Open.</li>
- </ol>
- <p>You will see your code displayed in the left pane of the main window. You can also drag and drop your code file into the left pane.</p>
- <h3>Step 2: Choose the output format and options</h3>
- <p>To choose the output format and options for your flowchart, you need to do the following:</p>
- <ol>
- <li>Click on the Output menu and select Output Format.</li>
- <li>Select the format that you want for your flowchart. You can choose from Bmp, Visio, Word, Excel, PowerPoint, or HTML.</li>
- <li>Click on OK.</li>
- <li>Click on the Output menu again and select Output Options.</li>
- <li>Select the options that you want for your flowchart. You can choose from different styles, colors, fonts, sizes, shapes, etc.</li>
- <li>Click on OK.</li>
- </ol>
- <h3>Step 3: Generate the flowchart and save it</h3>
- <p>To generate the flowchart and save it, you need to do the following:</p>
- <ol>
- <li>Click on the Chart menu and select Generate Chart.</li>
- <li>You will see your flowchart displayed in the right pane of the main window. You can zoom in or out by using the mouse wheel or the toolbar buttons. You can also drag or resize the chart by using the mouse cursor.</li>
- <li>To save your flowchart as a file, click on the File menu and select Save Chart As.</li>
- <li>Browse to the location where you want to save your file and enter a name for it.</li>
- <li>Select the file type that matches your output format. For example, if you chose HTML as your output format, select HTML files as your file type.</li>
- <li>Click on Save.</li>
- </ol>
- <p>You have successfully generated a flowchart from your code using Code Visual To Flowchart 41 Crack!</p>
- <h2>Benefits of using Code Visual To Flowchart 41 Crack</h2>
- <h3>Save time and effort</h3>
- <p>By using Code Visual To Flowchart 41 Crack, you can save a lot of time and effort that you would otherwise spend on drawing and editing flowcharts manually. You don't need to worry about aligning or connecting shapes or symbols. You don't need to worry about formatting or styling your chart. You don't need to worry about updating or modifying your chart when your code changes. All you need to do is open your code file in Code Visual To Flowchart 41 Crack and let it do all the work for you!</p>
- <h3>Improve code readability and understanding</h3>
- <p>By using Code Visual To Flowchart 41 Crack, you can improve your code readability and understanding by visualizing it in a clear and intuitive way. You can see how your code flows from one statement to another. You can see how your code branches into different paths based on conditions or loops. You can see how your code calls different functions or subroutines. You can see how your code handles errors or exceptions. You can see how your code interacts with external resources or inputs/outputs. You can see all these details at a glance by looking at your flowchart!</p>
- <h3>Document and debug your code easily</h3>
- <p>By using Code Visual To Flowchart 41 Crack, you can document and debug your code easily by generating flowcharts that show the logic flow and algorithm of your program. You can use these flowcharts as documentation for yourself or others who need to understand or maintain your code. You can also use these flowcharts as debugging tools for finding errors or bugs in your code. You can compare your flowcharts with your expected results or specifications. You can trace where your code goes wrong or fails by following the arrows in your flowcharts. You can fix or improve your code accordingly by referring back to your flowcharts!</p>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>In this article, you learned what Code Visual To Flowchart is, why you should use it, how to download and install it, how to use it, and what benefits you can get from it. You learned that Code Visual To Flowchart is an automatic flow chart generator software that can reverse engineer a program, create programming flowcharts from code, and document source code. You learned that Code Visual To Flowchart can help you save time and effort, improve code readability and understanding, and document and debug your code easily. You learned how to download and install Code Visual To Flowchart 41 Crack, how to open your code file in Code Visual To Flowchart, how to choose the output format and options for your flowchart, how to generate the flowchart and save it, and how to use the flowchart for various purposes.</p>
- <h3>Call to action</h3>
- <p>If you are a programmer who wants to document your source code or reverse engineer a program, you should definitely try Code Visual To Flowchart 41 Crack. It is a powerful tool that can automatically generate flowcharts from code in various programming languages. It can help you save time and effort, improve code readability and understanding, and document and debug your code easily. You can download Code Visual To Flowchart 41 Crack from <a href="https://code-visual-to-flowchart.software.informer.com/4.2/">this link</a> and follow the steps in this article to use it. Don't miss this opportunity to create professional-looking flowcharts from your code without spending hours on drawing and editing. Download Code Visual To Flowchart 41 Crack today and see the difference for yourself!</p>
- <h2>FAQs</h2>
- <h3>What is a flowchart?</h3>
- <p>A flowchart is a graphical representation of the steps or logic of a process or program. It uses different shapes or symbols to represent different types of actions or decisions, and arrows to show the direction or sequence of the flow.</p>
- <h3>What are the benefits of using flowcharts?</h3>
- <p>Flowcharts can help you in many ways, such as:</p>
- <ul>
- <li>They can help you visualize complex processes or programs in a clear and intuitive way.</li>
- <li>They can help you communicate your ideas or solutions to others effectively.</li>
- <li>They can help you identify problems or errors in your processes or programs quickly.</li>
- <li>They can help you optimize or improve your processes or programs efficiently.</li>
- </ul>
- <h3>What are the features of Code Visual To Flowchart?</h3>
- <p>Code Visual To Flowchart has many features that make it a powerful tool for generating flowcharts from code, such as:</p>
- <ul>
- <li>It can support multiple programming languages, such as C, C++, Java, PHP, etc.</li>
- <li>It can generate different types of flowcharts, such as Bmp, Visio, Word, Excel, PowerPoint, or HTML.</li>
- <li>It can customize the style, color, font, size, shape, etc. of your flowcharts.</li>
- <li>It can sync your code and flowchart automatically when you edit either of them.</li>
- <li>It can export your flowcharts as files or images for further use.</li>
- </ul>
- <h3>How to get Code Visual To Flowchart 41 Crack?</h3>
- <p>To get Code Visual To Flowchart 41 Crack, you need to download the cvf.exe file from <a href="https://code-visual-to-flowchart.software.informer.com/4.2/">this link</a>, install it on your computer, copy the crack file from the downloaded folder and paste it into the installation directory of Code Visual To Flowchart, run Code Visual To Flowchart as administrator and enjoy!</p>
- <h3>Is Code Visual To Flowchart 41 Crack safe to use?</h3>
- <p>Code Visual To Flowchart 41 Crack is safe to use as long as you download it from a reliable source and scan it with an antivirus program before running it. However, we do not recommend using cracked software as it may violate the terms and conditions of the original software developer and may cause legal issues. We suggest that you purchase the official version of Code Visual To Flowchart from <a href="http://www.fatesoft.com/">this link</a> if you want to support the developer and enjoy more features and updates.</p>
- 0a6ba089eb<br />
- <br />
- <br />
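
Setting the cracked GUI tool aside, the core idea the deleted article describes, mechanically deriving a flowchart from source code, can be reproduced in a few lines with the open-source `pyflowchart` package. This is an illustrative analogue, not the tool the article covers:

```python
# Sketch of code-to-flowchart generation with pyflowchart (pip install
# pyflowchart); named here as an alternative, not the article's software.
from pyflowchart import Flowchart

source = '''
def classify(n):
    if n % 2 == 0:
        return "even"
    return "odd"
'''

fc = Flowchart.from_code(source)   # parse the Python AST into chart nodes
print(fc.flowchart())              # emits flowchart.js DSL for rendering
```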
spaces/1acneusushi/gradio-2dmoleculeeditor/data/From Up on Poppy Hill English Dub 1080p The Best Way to Enjoy the Studio Ghibli Classic.md DELETED
@@ -1,12 +0,0 @@
- <br />
- <h1>From Up on Poppy Hill English Dub 1080p: A Review of the Charming Anime Film by Studio Ghibli</h1>
- If you are looking for a heartwarming, wholesome, and nostalgic anime film to watch, you might want to check out From Up on Poppy Hill (2011), a historical drama by Studio Ghibli, the acclaimed animation studio behind classics like Spirited Away (2001), Howl's Moving Castle (2004), and My Neighbor Totoro (1988). From Up on Poppy Hill is set in Yokohama in 1963, a year before Japan hosts the Olympics for the first time after World War II. It tells the story of Umi, a high school girl who runs a boarding house by the sea while her mother is abroad, and Shun, a boy who leads a student club that fights to save their old clubhouse from demolition. As they work together to preserve their school's history and culture, they also develop a friendship that blossoms into romance. However, a buried secret from their past threatens to tear them apart. In this review, I will explain why From Up on Poppy Hill is a charming anime film that deserves your attention. I will discuss its historical and cultural context, its characters and relationships, its artistic and technical aspects, and its themes and messages. I will also give you my personal opinion on the film's strengths and weaknesses, as well as a recommendation for who should watch it and where to find it online. <h2>The Historical and Cultural Context of From Up on Poppy Hill</h2>
- One of the most interesting aspects of From Up on Poppy Hill is its historical and cultural context. The film depicts Japan in a time of transition, when it was recovering from the devastation of war and preparing to host the Olympics as a symbol of its new start. The film shows both the excitement and the anxiety that people felt about the future, as well as the nostalgia for the past. The film also explores the contrast between tradition and modernity, and the importance of preserving cultural heritage. Umi represents tradition, as she follows her father's naval flag ritual every morning, cooks traditional meals for her boarders, and respects her elders. Shun represents modernity, as he rides a motorbike, publishes a school newspaper, and advocates for social change. Their clubhouse, called the Latin Quarter (or Quartier Latin in French), is a place where both traditions and innovations coexist. It is filled with books, antiques, musical instruments, scientific equipment, art works, and other treasures that reflect Japan's rich history and culture. The film also pays homage to French culture and literature, which influenced many Japanese artists in the post-war era. The title of the film is inspired by a French song called "Coquelicot-zaka kara" (From Coquelicot Hill), which Umi sings in one scene. The Latin Quarter is named after a famous district in Paris where many intellectuals gathered. The film also references works by French authors like Antoine de Saint-Exupéry (The Little Prince), Jules Verne (Twenty Thousand Leagues Under the Sea), Victor Hugo (Les Misérables), Alexandre Dumas (The Count of Monte Cristo), Honoré de Balzac (The Human Comedy), Charles Baudelaire (The Flowers of Evil), Jean-Paul Sartre (No Exit), Albert Camus (The Stranger), Jean Cocteau (Beauty and the Beast), etc. <h2>The Characters and Relationships in From Up on Poppy Hill</h2>
- Another appealing aspect of From Up on Poppy Hill is its characters and relationships. The film features a large cast of colorful and memorable characters who add humor, drama, and charm to the story. The main protagonists are Umi and Shun, two high school students who are both mature, kind, and responsible beyond their years. Umi is a hardworking and caring girl who lost her father in the Korean War and manages a boarding house while her mother is studying in America. She is also good at cooking, sewing, and gardening. Shun is an adventurous and charismatic boy who was adopted as an infant and does not know his biological parents. He is also passionate about journalism, poetry, and sailing. They meet when Shun sees Umi's flag signal from his clubhouse and writes an article about it. They soon become friends and join forces to save their clubhouse from being torn down by the school administration. As they spend more time together, they also develop romantic feelings for each other. However, they face a major obstacle when they discover that they might be siblings, as they share the same photograph of their fathers who were friends in the navy. This shocking revelation tests their relationship and forces them to confront their pasts. The supporting characters are also well-developed and contribute to the plot and atmosphere of the film. They include: - The members of the Latin Quarter club, a group of eccentric and enthusiastic students who have various hobbies and interests, such as astronomy, philosophy, biology, drama, music, etc. They organize a festival to showcase their clubhouse and appeal to the public opinion. - Umi's family and friends, who live with her in the boarding house or visit her often. They include her grandmother, who runs the house; her younger sister Sora, who attends elementary school; her older sister Riku, who works as a nurse; her aunt Kyoko, who is a photographer; her childhood friend Sachiko, who helps her with chores; and Miki, a boarder who works as a typist. - Other minor characters, such as Tokumaru-sensei, the school principal who wants to demolish the clubhouse; Yoshio Onodera, Shun's adoptive father who runs a shipyard; Akio Kazama, a journalist who helps Shun find out his true identity; Ryoko Matsuzaki, a beautiful student council president who has a crush on Shun; Shiro Mizunuma, a handsome student council vice president who has a crush on Umi; etc. <h2>The Artistic and Technical Aspects of From Up on Poppy Hill</h2>
- From Up on Poppy Hill is also remarkable for its artistic and technical aspects. The film showcases Studio Ghibli's signature animation style and quality, which combines hand-drawn 2D animation with computer-generated 3D effects. The film uses vivid colors, realistic lighting, and detailed backgrounds to create a stunning visual experience. The film also features beautiful music and sound effects that enhance its mood and emotion. The film's soundtrack was composed by Satoshi Takebe, who blended orchestral music with jazz elements. The film's theme song was performed by Aoi Teshima, who sang both in Japanese and in French. The film's sound effects are realistic and immersive, such as the waves of the sea, the wind of the hill, and the noise of the city. The film's voice acting and dubbing are also excellent, featuring a talented cast of actors who bring their characters to life. The English dub features stars like Sarah Bolger, Anton Yelchin, Gillian Anderson, Beau Bridges, Jamie Lee Curtis, Bruce Dern, Christina Hendricks, Ron Howard, and Chris Noth. The film was directed by Goro Miyazaki, the son of Hayao Miyazaki, the legendary founder of Studio Ghibli. This was his second feature film after Tales from Earthsea (2006), which received mixed reviews from critics and fans. From Up on Poppy Hill was a more successful and acclaimed project for him, as he collaborated with his father on the screenplay, which was based on a manga by Chizuru Takahashi and Tetsuro Sayama. The film won several awards, including the Japan Academy Prize for Animation of the Year and the Asia Pacific Screen Award for Best Animated Feature Film. <h2>The Themes and Messages of From Up on Poppy Hill</h2>
- From Up on Poppy Hill is a film that explores various themes and messages that resonate with audiences of all ages and backgrounds. Some of the main themes and messages are: - The importance of family, community, and identity. The film shows how Umi and Shun struggle with their family histories and identities, as they search for their roots and their place in the world. They also learn to appreciate their adoptive families and communities, who support them and love them unconditionally. They realize that family is not only defined by blood, but also by bonds. - The value of courage, honesty, and resilience. The film shows how Umi and Shun face their challenges and overcome their fears with courage, honesty, and resilience. They do not give up on their dreams and goals, even when they encounter difficulties and setbacks. They also do not lie or hide their feelings, even when they are painful or uncomfortable. They face the truth and accept the consequences. - The appreciation of beauty, simplicity, and nostalgia. The film shows how Umi and Shun find beauty, simplicity, and nostalgia in their everyday lives. They enjoy the simple pleasures of cooking, gardening, sailing, reading, and spending time with each other. They also cherish the memories of their pasts, such as Umi's flag ritual, Shun's poem book, and their fathers' photograph. They appreciate what they have and what they had. <h1>Conclusion</h1>
- In conclusion, From Up on Poppy Hill is a charming anime film that offers a delightful and touching experience for viewers. It is a film that combines historical and cultural context, characters and relationships, artistic and technical aspects, and themes and messages in a harmonious and engaging way. It is a film that celebrates love, life, and legacy in a changing world. In my opinion, From Up on Poppy Hill is one of the best films by Studio Ghibli. It is not as fantastical or adventurous as some of their other works, but it is more realistic and relatable. It is a film that captures the essence of human emotions and interactions in a simple yet profound way. It is a film that makes me smile, cry, and think. I would recommend this film to anyone who loves anime, history, romance, or drama. I would also recommend this film to anyone who wants to watch a wholesome, heartwarming, and nostalgic story that will make them feel good. You can watch this film online on platforms like Microsoft Store, Animefever, Internet Archive, or Bilibili. You can also buy or rent this film on DVD or Blu-ray. I hope you enjoyed this review and found it helpful. If you have any questions or comments, please feel free to share them with me. Thank you for reading! <h2>FAQs</h2>
- - Q: What is the meaning of Umi's flag signal? - A: Umi's flag signal is a naval code that means "I pray for safe voyages". She does it every morning to honor her father who died in the Korean War. - Q: What is the significance of Shun's poem book? - A: Shun's poem book is a collection of poems by Kenji Miyazawa, a Japanese poet and author who wrote about nature and social issues. Shun inherited it from his biological father who was also a poet and a sailor. - Q: What is the origin of the Latin Quarter club? - A: The Latin Quarter club was founded by Umi and Shun's fathers when they were students at the same school. They named it after the Parisian district where they met and became friends. - Q: What is the outcome of the clubhouse protest? - A: The clubhouse protest succeeds in convincing the school administration and the public opinion to spare the clubhouse from demolition. The students are allowed to keep their club activities and their cultural heritage. - Q: What is the resolution of Umi and Shun's relationship? - A: Umi and Shun find out that they are not siblings, but cousins. They are relieved and happy to learn that they can be together without any guilt or shame. They confess their love for each other and kiss under Umi's flag signal. </p>
- <h2>From Up on Poppy Hill English Dub 1080p</h2><br /><p><b><b>Download</b> &#8230;&#8230;&#8230; <a href="https://byltly.com/2uKAb5">https://byltly.com/2uKAb5</a></b></p><br /><br /> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Basta Guardare Il Cielo Film Completo Ita Download.md DELETED
@@ -1,5 +0,0 @@
-
- <p>Why do people complain that Zucconi doesn't weigh in on the political debate? Elsewhere he talks up a storm. And he throws punches, too.<br />Yesterday, 14 hours ago: "It would have been enough to watch the live TV broadcast to witness the suicide of the Senate of the Republic. Renzi is trying to kill a dead man."<br />Which ties back to the everyone-is-a-thief, everyone-is-dead refrain. Here and there Grillo has told some truths too. People picked up on them, and that explains his success (after all, "what is real is rational and what is rational is real", remember?).</p>
- <h2>Basta Guardare Il Cielo Film Completo Ita Download</h2><br /><p><b><b>Download Zip</b> &#10084; <a href="https://imgfil.com/2uy07H">https://imgfil.com/2uy07H</a></b></p><br /><br /> aaccfb2cb3<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Criminal Case Paris - The Hidden Object Game That Takes You to the Heart of Romance.md DELETED
@@ -1,113 +0,0 @@
1
- <br />
2
- <h1>Criminal Case: City of Romance - A Review</h1>
3
- <p>If you are a fan of hidden object, adventure games, you might want to check out Criminal Case: City of Romance. This is the eighth installment of the popular Criminal Case series, which has been downloaded by millions of players around the world. In this game, you join the Parisian Police Squad to solve a series of murder cases in the city of love. You will investigate crime scenes, examine clues, interrogate suspects, and catch killers, while exploring different districts and themes based on romance. You can also play with your friends and compete for the best detective title.</p>
4
- <p>In this article, I will give you a brief overview of the game, its gameplay, graphics, sound, pros and cons, and my final verdict. I will also share some tips and tricks to help you enjoy the game more. So, let's get started!</p>
5
- <h2>criminal case city of romance download</h2><br /><p><b><b>Download File</b> &#10145; <a href="https://urlin.us/2uSVPC">https://urlin.us/2uSVPC</a></b></p><br /><br />
6
- <h2>Introduction</h2>
7
- <p>Criminal Case: City of Romance is a free-to-play game that was released in 2020 by Pretty Simple, a French game developer. It is available for iOS and Android devices, as well as on Facebook. The game is divided into six districts across Paris, each with its own romantic theme: Fantasy, Attraction, Obsession, Jealousy, Separation, and Engagement. There are 17 cases in total, each with a unique storyline and characters.</p>
8
- <p>To play the game, you need to download it from the App Store or Google Play, or access it from Facebook. You will need an internet connection to play the game. You will also need energy to investigate crime scenes, which can be replenished by waiting, watching ads, buying with real money, or asking your friends for help. You can also earn coins, stars, cash, and other rewards by playing the game.</p>
9
- <h2>Gameplay</h2>
10
- <p>The gameplay of Criminal Case: City of Romance is similar to other games in the Criminal Case series. You will be assigned a case by your chief inspector Hugo Mercier, who will guide you throughout the investigation. You will then go to various crime scenes to look for clues. You will need to find all the items on a list within a time limit. The faster you find them, the higher your score will be. You can also use hints or boosters to help you find items faster.</p>
11
- <p>Once you have collected enough clues from a crime scene, you will be able to analyze them in the laboratory with your forensic expert Enzo Jonas. You will need to perform various mini-games such as matching fingerprints, assembling puzzles, identifying substances, etc. Some analyses may take time to complete, but you can speed them up by using cash or watching ads.</p>
12
- <p>After analyzing clues, you will be able to interrogate witnesses and suspects with your partner Riya Laghari. You will need to ask them questions based on the evidence you have found. Some suspects may lie or hide information from you, so you will need to pay attention to their <p>expressions and body language. You can also use stars to arrest suspects or confirm their alibis. You will need to arrest the right killer at the end of each case, based on the evidence and motive you have gathered.</p>
13
- <p>Besides solving cases, you can also explore different districts and themes in the game. You will encounter various romantic scenarios, such as a fairy tale wedding, a masquerade ball, a love triangle, a secret admirer, etc. You will also meet different characters, such as a prince charming, a femme fatale, a celebrity chef, a fashion designer, etc. You can interact with them and learn more about their stories and personalities.</p>
14
- <p>To make the game more fun and challenging, you can also follow some tips and tricks. For example, you can play the game every day to get daily bonuses and rewards. You can also replay crime scenes to earn more stars and coins. You can also join a team or create your own to chat with other players and exchange gifts. You can also participate in special events and tournaments to win exclusive items and prizes.</p>
15
- <h2>Graphics and Sound</h2>
16
- <p>The graphics and sound of Criminal Case: City of Romance are impressive and immersive. The game has a colorful and detailed visual design that captures the beauty and charm of Paris. The crime scenes are well-designed and realistic, with various objects and backgrounds that fit the theme of each district. The characters are also well-drawn and animated, with different expressions and outfits that reflect their moods and roles.</p>
17
- <p>The game also has catchy, atmospheric music and sound effects that enhance the mood and tone of the game. The music is varied and fits each district and theme, ranging from classical to jazz to pop. The sound effects are realistic and engaging, such as the sounds of clicking items, analyzing clues, and interrogating suspects. The game also has voice-overs for some characters, such as your chief inspector, your partner, and your forensic expert.</p>
- <h2>Pros and Cons</h2>
- <p>Like any other game, Criminal Case: City of Romance has its pros and cons. Here are some of them:</p>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>- Engaging and addictive gameplay that keeps you hooked on solving cases</td>
- <td>- Limited energy that may prevent you from playing for long periods</td>
- </tr>
- <tr>
- <td>- Interesting and diverse storylines and characters that make you curious about their secrets</td>
- <td>- Repetitive and predictable patterns that may make some cases easy or boring</td>
- </tr>
- <tr>
- <td>- Stunning and realistic graphics that make you feel like you are in Paris</td>
- <td>- High storage space and data usage that may slow down your device or connection</td>
- </tr>
- <tr>
- <td>- Fun and lively music and sound effects that create a great atmosphere for the game</td>
- <td>- No option to mute or adjust the volume of the music or sound effects</td>
- </tr>
- <tr>
- <td>- Social and competitive features that allow you to play with your friends and other players</td>
- <td>- In-app purchases that may give some players an unfair advantage or pressure you to spend money</td>
- </tr>
- </table>
- <p>Overall, I think the pros outweigh the cons, and I would recommend this game to anyone who likes hidden object and adventure games.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, Criminal Case: City of Romance is a fun and exciting game that lets you experience the thrill of solving murder cases in the city of love. You will enjoy the gameplay, graphics, sound, storylines, characters, themes, and features of this game. You will also learn some facts about Parisian culture and history along the way. If you are looking for a game that combines mystery, romance, adventure, and challenge, this is the game for you.</p>
- <p>I hope you found this article helpful and informative. If you have any questions or feedback about the game or this article, please feel free to contact me by email. I would love to hear from you. Thank you for reading!</p>
- <h3>Frequently Asked Questions (FAQs)</h3>
- <h4>Q: How can I get more energy in Criminal Case: City of Romance?</h4>
- <p>A: There are several ways to get more energy in the game. You can wait for it to regenerate over time (1 point every 3 minutes), watch ads (20 points per ad), buy it with real money (various packages available), ask your friends for help (1 point per friend), or get it from your team members (5 points per member).</p>
- <h4>Q: How can I get more stars in Criminal Case: City of Romance?</h4>
- <p>A: Stars are used to perform various actions in the game, such as arresting suspects, confirming alibis, unlocking new districts, etc. You can earn stars by playing crime scenes and getting high scores. You can also replay crime scenes to earn more stars. You can also buy stars with real money (various packages available).</p>
- <h4>Q: How can I get more coins in Criminal Case: City of Romance?</h4>
- <p>A: Coins are used to buy items and outfits in the game, as well as hints and boosters. You can earn coins by playing crime scenes and getting high scores. You can also get coins from your friends, team members, or daily bonuses. You can also watch ads to get coins (100 coins per ad). You can also buy coins with real money (various packages available).</p>
- <h4>Q: How can I get more cash in Criminal Case: City of Romance?</h4>
- <p>A: Cash is the premium currency in the game, which can be used to speed up analyses, unlock new districts, buy exclusive items and outfits, etc. You can earn cash by leveling up, completing achievements, or participating in special events and tournaments. You can also buy cash with real money (various packages available).</p>
- <h4>Q: How can I get more items and outfits in Criminal Case: City of Romance?</h4>
- <p>A: Items and outfits are used to customize your avatar and improve your skills in the game. You can get items and outfits by buying them with coins or cash in the shop, or by winning them from special events and tournaments. You can also get items and outfits from your friends, team members, or daily bonuses.</p>
- <h4>Q: How can I join or create a team in Criminal Case: City of Romance?</h4>
- <p>A: A team is a group of players who can chat, exchange gifts, and help each other in the game. You can join or create a team by tapping on the team icon on the bottom right corner of the screen. You will need to be at least level 10 to join or create a team. You can search for an existing team by name or ID, or browse through the list of recommended teams. You can also create your own team by choosing a name, an ID, a description, a badge, and a language. You can invite your friends to join your team, or accept requests from other players who want to join.</p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download File dari Google Drive Tanpa Buka Halaman Web.md DELETED
@@ -1,163 +0,0 @@
- 
- <h1>How to Download Files from Google Drive</h1>
- <p>Google Drive is a cloud-based storage and syncing service from Google. It is designed to give you free 15 GB of storage for all your files, including photos, documents, music, and so much more. You can access your files from any device, as long as you have an internet connection.</p>
- <p>But what if you want to download your files from Google Drive to your computer or mobile device? Maybe you want to back up your data, work offline, or share your files with others. In this article, we will show you how to download files from Google Drive easily and quickly.</p>
- <p>Before we get started, let's go over some of the benefits and features of using Google Drive, as well as how to create a Google account and access Google Drive.</p>
- <h2>Introduction</h2>
- <h3>What is Google Drive and Why Use It</h3>
- <p>Google Drive is more than just a place to store your files. It is also a platform that allows you to create, share, and collaborate on various types of content. Here are some of the reasons why you might want to use Google Drive:</p>
- <ul>
- <li>You can store any type of file in Google Drive, such as PDFs, images, videos, audio, etc.</li>
- <li>You can access your files from any device, whether it's a computer, tablet, or smartphone.</li>
- <li>You can organize your files into folders and subfolders, and search for them easily.</li>
- <li>You can share your files with anyone, either by inviting them by email or by creating a link that anyone can use.</li>
- <li>You can control how others can use your files, such as whether they can view, comment, or edit them.</li>
- <li>You can use Google Docs, Sheets, and Slides, cloud-based apps that let you create and edit documents, spreadsheets, and presentations online. You can also share and comment on these files with others in real time.</li>
- <li>You can use Drive for desktop, a tool that lets you sync your files between your computer and Google Drive.</li>
- <li>You can use the Google Drive app on your mobile device to access your files, scan documents, and upload photos.</li>
- <li>You can use Google Photos, a service that lets you store and manage your photos and videos in Google Drive.</li>
- <li>You can use Google Backup and Sync, a tool that lets you back up your files and folders from your computer to Google Drive.</li>
- </ul>
- <p>As you can see, Google Drive offers a lot of benefits and features that can help you store, manage, and work with your files. But how do you get started with Google Drive?</p>
- <h3>How to Create a Google Account and Access Google Drive</h3>
- <p>To use Google Drive, you need to have a Google account. A Google account is a free account that gives you access to various Google services, such as Gmail, YouTube, Maps, Calendar, etc. If you already have a Google account, you can skip this step. If not, here's how to create one:</p>
- <ol>
- <li>Go to <a href="https://accounts.google.com/signup">https://accounts.google.com/signup</a> and fill out the form with your name, email address, password, and other details.</li>
- <li>Verify your email address by clicking on the link that Google sends you.</li>
- <li>Agree to the terms and conditions and privacy policy of Google.</li>
- <li>Congratulations! You have created your Google account. You can now access Google Drive and other Google services.</li>
- </ol>
- <p>To access Google Drive, you have two options:</p>
- <ul>
- <li>You can go to <a href="https://drive.google.com">https://drive.google.com</a> and sign in with your Google account.</li>
- <li>You can download the Google Drive app on your computer or mobile device and sign in with your Google account.</li>
- </ul>
- <p>Either way, you will see the main interface of Google Drive, where you can view, upload, create, and share your files.</p>
- <h2>How to Share Files on Google Drive</h2>
- <h3>How to Share Files with Specific People or Groups</h3>
- <p>One of the main features of Google Drive is the ability to share your files with others. You can share your files with specific people or groups by inviting them by email. Here's how:</p>
- <ol>
- <li>Select the file or folder that you want to share on Google Drive.</li>
- <li>Click on the Share button on the top right corner of the screen.</li>
- <li>A pop-up window will appear where you can enter the email addresses of the people or groups that you want to share with. You can also add a note or message if you want.</li>
- <li>Choose the level of access that you want to give them. You can choose between Viewer, Commenter, or Editor. A Viewer can only view the file, a Commenter can view and comment on the file, and an Editor can view, comment, and edit the file.</li>
- <li>Click on Send or Done. The people or groups that you invited will receive an email notification with a link to access the file.</li>
- </ol>
- <h3>How to Share Files Publicly or With Anyone Who Has the Link</h3>
- <p>If you want to share your files with anyone who has the link, without requiring them to sign in with a Google account, you can do so by creating a public link. Here's how:</p>
- <ol>
- <li>Select the file or folder that you want to share on Google Drive.</li>
- <li>Click on the Share button on the top right corner of the screen.</li>
- <li>A pop-up window will appear where you can click on Change next to Anyone with the link.</li>
- <li>A drop-down menu will appear where you can choose who can access the link. You can choose between Anyone on the internet or Anyone at [your organization]. You can also choose the level of access that they have: Viewer, Commenter, or Editor.</li>
- <li>Click on Done. You will see a link that you can copy and paste anywhere you want to share it. Anyone who has the link will be able to access the file without signing in.</li>
- </ol>
- <h3>How to Change or Revoke Sharing Permissions</h3>
- <p>If you want to change or revoke the sharing permissions for your files, you can do so at any time. Here's how:</p>
- <ol>
- <li>Select the file or folder that you want to change or revoke permissions for on Google Drive.</li>
- <li>Click on the Share button on the top right corner of the screen.</li>
- <li>A pop-up window will appear where you can see the list of people or groups that you have shared the file with. You can also see the level of access that they have.</li>
- <li>To change the level of access, click on the drop-down menu next to their name and choose a different option: Viewer, Commenter, or Editor.</li>
- <li>To revoke the sharing permission, click on the Remove button next to their name. They will no longer be able to access the file.</li>
- <li>To revoke the public link, click on Change next to Anyone with the link and choose Restricted. Only the people that you have invited by email will be able to access the file.</li>
- <li>Click on Done to save your changes.</li>
- </ol>
- <h2>How to Download Files from Google Drive</h2>
- <h3>How to Download a Single File or Folder</h3>
- <p>If you want to download a single file or folder from Google Drive to your computer or mobile device, you can do so by following these steps:</p>
- <ol>
- <li>Select the file or folder that you want to download on Google Drive.</li>
- <li>Click on the More actions button (three vertical dots) on the top right corner of the screen.</li>
- <li>Click on Download. The file or folder will start downloading to your device.</li>
- <li>You can also right-click on the file or folder and choose Download from the menu.</li>
- </ol>
- <h3>How to Download Multiple Files or Folders</h3>
- <p>If you want to download multiple files or folders from Google Drive to your computer or mobile device, you can do so by following these steps:</p>
- <ol>
- <li>Select the files or folders that you want to download on Google Drive. You can use the Shift or Ctrl keys to select multiple items.</li>
- <li>Right-click on any of the selected items and choose Download from the menu. The files or folders will be compressed into a ZIP file and start downloading to your device.</li>
- <li>You can also click on the More actions button (three vertical dots) on the top right corner of the screen and choose Download from there.</li>
- </ol>
- <h3>How to Download Files that are Locked or Restricted</h3>
- <p>Sometimes, you might encounter files that are locked or restricted by their owners. This means that you cannot view, comment, or edit them without their permission. However, you might still be able to download them if they have enabled that option. Here's how:</p>
- <ol>
- <li>Select the file that you want to download on Google Drive.</li>
- <li>Click on the Request access button on the top right corner of the screen.</li>
- <li>A pop-up window will appear where you can enter your email address and a message to request access from the owner of the file.</li>
- <li>Click on Send request. The owner of the file will receive an email notification with your request.</li>
- <li>If they approve your request, you will receive an email notification with a link to access and download the file.</li>
- </ol>
- <h2>How to Troubleshoot Google Drive Issues</h2>
- <h3>How to Fix Common Issues with Google Drive</h3>
- <p>Sometimes, you might face some issues with Google Drive, such as slow loading, syncing errors, missing files, etc. Here are some of the common issues and how to fix them:</p>
- <table border="1">
- <tr><th>Issue</th><th>Solution</th></tr>
- <tr><td>Google Drive is not loading or is slow</td><td>Check your internet connection and make sure it is stable and fast. Try reloading the page or restarting your browser. Clear your browser's cache and cookies. Disable any extensions or plugins that might interfere with Google Drive. Update your browser to the latest version.</td></tr>
- <tr><td>Google Drive is not syncing or is showing errors</td><td>Check your internet connection and make sure it is stable and fast. Try pausing and resuming the sync process. Check your storage space and make sure you have enough room for your files. Check your firewall and antivirus settings and make sure they are not blocking Google Drive. Update your Google Drive app to the latest version.</td></tr>
- <tr><td>Google Drive files are missing or deleted</td><td>Check your trash folder and see if your files are there. You can restore them by right-clicking and choosing Restore from trash. Check your activity panel and see if someone else has moved or deleted your files. You can undo their actions by clicking on Undo changes. Contact Google support and request a file recovery within 25 days of deletion.</td></tr>
- </table>
- <h3>How to Fix Error Messages and Corrupted Files</h3>
- <p>Sometimes, you might encounter error messages or corrupted files when trying to download files from Google Drive. Here are some of the common error messages and corrupted files and how to fix them:</p>
- <table border="1">
- <tr><th>Error Message</th><th>Solution</th></tr>
- <tr><td>Sorry, you can't view or download this file at this time.</td><td>This error message usually means that the file has exceeded its download limit. You can try again later or make a copy of the file in your own Google Drive and download it from there. To make a copy, open the file link, sign in with your Google account, click on the More actions button (three vertical dots), and choose Make a copy.</td></tr>
- <tr><td>Unable to access document. Please try again later.</td><td>This error message usually means that there is a temporary issue with Google Drive or the file itself. You can try reloading the page or restarting your browser. You can also check the Google Workspace Status Dashboard to see if there are any service disruptions or outages.</td></tr>
- <tr><td>The file is corrupt and cannot be opened.</td><td>This error message usually means that the file has been damaged or corrupted during the download process. You can try downloading the file again or using a different browser. You can also try using a file repair tool or software to fix the corrupted file.</td></tr>
- </table>
- <h3>How to Send Feedback and Report Bugs to Google</h3>
- <p>If you encounter any issues or bugs with Google Drive that are not covered by the solutions above, you can send feedback and report them to Google. This will help Google improve their service and fix any problems. Here's how:</p>
- <ol>
- <li>Click on the Help button (question mark icon) on the top right corner of the screen.</li>
- <li>Click on Send feedback.</li>
- <li>A pop-up window will appear where you can describe your issue or bug in detail. You can also include a screenshot if you want.</li>
- <li>Click on Send. Google will receive your feedback and work on resolving your issue or bug.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download files from Google Drive easily and quickly. We have also covered some of the benefits and features of using Google Drive, as well as how to share, troubleshoot, and send feedback on Google Drive.</p>
- <p>Google Drive is a powerful and convenient tool that can help you store, manage, and work with your files online. Whether you want to back up your data, work offline, or share your files with others, Google Drive can help you do it all.</p>
- <p>We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave them below. We would love to hear from you!</p>
- <h2>FAQs</h2>
- <h3>Q: How much storage space do I get with Google Drive?</h3>
- <p>A: You get 15 GB of free storage space with a Google account. You can upgrade to more storage with a Google One subscription.</p>
- <h3>Q: How can I access my files offline on Google Drive?</h3>
- <p>A: You can turn on offline access for your files on Google Drive. This will allow you to view and edit your files without an internet connection. Learn how to turn on offline access here.</p>
- <h3>Q: How can I sync my files across different devices with Google Drive?</h3>
- <p>A: You can use Drive for desktop, a tool that lets you sync your files between your computer and Google Drive. You can also use the Google Drive app on your mobile device to access your files. Learn how to sync your files here.</p>
- <h3>Q: How can I create and collaborate on documents, spreadsheets, and presentations with Google Drive?</h3>
- <p>A: You can use Google Docs, Sheets, and Slides, cloud-based apps that let you create and edit documents, spreadsheets, and presentations online. You can also share and comment on these files with others in real time. Learn how to use these apps here.</p>
- <h3>Q: How can I make a direct download link for my Google Drive files?</h3>
- <p>A: You can make a direct download link for your files by using a custom link that contains your file ID. This will bypass the web page that opens when you click a Google Drive file link and start the file download immediately. A minimal scripted sketch is shown below.</p>
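For readers comfortable with a little scripting, here is a minimal sketch of the direct-link trick from the last FAQ. It is an illustration, not an official Google API: it assumes the commonly used `https://drive.google.com/uc?export=download&id=FILE_ID` URL pattern and a share link that embeds the file ID, both of which Google may change, and large or unscannable files may still return an interstitial confirmation page instead of the file. The file ID in the example is hypothetical.

```python
import re

def direct_download_url(share_link: str) -> str:
    """Turn a Google Drive share link into a direct-download URL (unofficial pattern)."""
    # Share links usually look like .../file/d/FILE_ID/view or ...open?id=FILE_ID
    match = re.search(r"/d/([\w-]+)", share_link) or re.search(r"[?&]id=([\w-]+)", share_link)
    if not match:
        raise ValueError("no file ID found in link")
    return "https://drive.google.com/uc?export=download&id=" + match.group(1)

# Hypothetical file ID, for illustration only:
print(direct_download_url("https://drive.google.com/file/d/1AbCdEfGh/view?usp=sharing"))
# -> https://drive.google.com/uc?export=download&id=1AbCdEfGh
```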
spaces/1phancelerku/anime-remove-background/Archero Mod APK iOS Everything You Need to Know About the Game and the Hack.md DELETED
@@ -1,133 +0,0 @@
- 
- <h1>Archero Mod Apk Ios: A Guide to the Ultimate Action Game</h1>
- <p>If you are looking for a fun and challenging action game that will test your skills and reflexes, you should try <strong>Archero</strong>. This is a game where you play as a lone archer who has to fight against waves of enemies and obstacles in different worlds. You can also customize your hero with various skills and equipment to make him more powerful and survive longer.</p>
- <p>However, if you want to enjoy the game without any limitations or restrictions, you may want to use <strong>Archero mod apk ios</strong>. This is a modified version of the game that allows you to access all the features and content for free. You can also get unlimited coins, gems, energy, and other resources to upgrade your hero and unlock new items.</p>
- <p>In this article, we will explain what Archero is, how to download and install Archero mod apk ios, why you should use it, and some tips and tricks for playing the game. Let's get started!</p>
- <h2>What is Archero?</h2>
- <p>Archero is a popular action game developed by Habby, a Chinese studio that specializes in casual games. The game was released in 2019 for Android and iOS devices, and has since gained millions of fans around the world. The game has also received positive reviews from critics and users alike, who praised its addictive gameplay, beautiful graphics, and variety of content.</p>
- <h3>Gameplay and features</h3>
- <p>The gameplay of Archero is simple but challenging. You control your hero with a virtual joystick on the screen, and he will automatically shoot arrows at the nearest enemy when you stop moving. Your goal is to clear each level by defeating all the enemies and avoiding their attacks. You will also encounter different obstacles, such as spikes, traps, walls, and portals, which will make your journey more difficult.</p>
- <p>As you progress through the game, you will face stronger enemies and bosses that will require more strategy and skill to defeat. You will also be able to level up your hero and choose from various skills that will enhance his abilities. For example, you can increase his attack speed, damage, range, or critical rate. You can also get skills that will give him extra arrows, elemental effects, shields, or pets.</p>
- <p>The game features hundreds of levels across different worlds, each with its own theme and design. You can explore forests, deserts, dungeons, temples, caves, and more. The game also offers daily challenges and events that will reward you with coins, gems, energy, scrolls, chests, and other items. You can use these items to upgrade your equipment or buy new ones from the shop.</p>
- <h3>How to download and install Archero mod apk ios</h3>
- <p>If you want to play Archero on your iOS device with all the benefits of the modded version, you will need to follow these steps:</p>
- <ol>
- <li>Download the Archero mod apk ios file from a reliable source.</li>
- <li>Install a third-party app installer on your device, such as TutuApp or AppValley. These are apps that allow you to install modified or hacked apps on your iOS device without jailbreaking it.</li>
- <li>Open the app installer and search for Archero mod apk ios. You should see it in the list of available apps.</li>
- <li>Tap on the install button and wait for the installation process to finish.</li>
- <li>Once installed, you may need to trust the app developer in your device settings. To do this, go to Settings > General > Profiles & Device Management > [name of developer] > Trust.</li>
- <li>Now you can launch Archero mod apk ios from your home screen and enjoy the game with all the mod features.</li>
- </ol>
- <h2>Why use Archero mod apk ios?</h2>
- <p>You may be wondering why you should use Archero mod apk ios instead of playing the game normally. Well, there are some good reasons why you should consider using this tool, as well as some drawbacks that you should be aware of. Let's take a look at them.</p>
- <h3>Benefits of using Archero mod apk ios</h3>
- <ul>
- <li>You can get unlimited coins, gems, energy, and other resources that you can use to upgrade your hero and equipment. This will make your gameplay easier and more enjoyable.</li>
- <li>You can access all the features and content of the game for free, without having to spend any real money or watch any ads. You can also unlock all the worlds, levels, skills, and items that are otherwise locked or require premium currency.</li>
- <li>You can customize your hero with any skills and equipment that you want, without having to rely on random drops or choices. You can also change your skills and equipment anytime during the game, without losing any progress or resources.</li>
- <li>You can enjoy the game without any bugs, glitches, or errors that may affect your performance or experience. The mod apk is updated regularly to ensure its compatibility and functionality with the latest version of the game.</li>
- </ul>
- <h3>Risks and precautions of using Archero mod apk ios</h3>
- <p>However, using Archero mod apk ios also comes with some risks and precautions that you should be aware of. Here are some of them:</p>
- <ul>
- <li>You may violate the terms and conditions of the game developer and publisher, which may result in your account being banned or suspended. You may also lose your progress and data if this happens.</li>
- <li>You may expose your device to malware or viruses that may harm your system or steal your personal information. You should always download the mod apk from a trusted source and scan it with an antivirus before installing it.</li>
- <li>You may encounter some compatibility or stability issues with the mod apk, especially if you have an older device or a different version of iOS. You should always check the requirements and specifications of the mod apk before downloading it.</li>
- <li>You may lose some of the fun and challenge of the game by using the mod apk, as it may make the game too easy or boring for you. You should always use the mod apk responsibly and moderately, and not abuse its features or advantages.</li>
- </ul>
- <h2>Tips and tricks for playing Archero</h2>
- <p>Now that you know what Archero is and what are its pros and cons, you may want to learn some tips and tricks for playing the game. Here are some of them:</p>
- <h3>How to dodge enemy attacks</h3>
- <p>One of the most important skills that you need to master in Archero is dodging enemy attacks. This will help you avoid taking damage and losing health, which will affect your survival and performance. Here are some tips on how to dodge enemy attacks:</p>
- <ul>
- <li>Always keep moving and don't stay in one spot for too long. This will make you harder to hit by enemies and give you more opportunities to attack them.</li>
- <li>Learn the patterns and behaviors of different enemies and bosses. This will help you anticipate their movements and attacks, and react accordingly.</li>
- <li>Use obstacles and walls to your advantage. You can hide behind them or use them to block enemy projectiles. However, be careful not to trap yourself or get cornered by enemies.</li>
- <li>Use skills that will help you dodge enemy attacks, such as invincibility, dash, teleport, slow time, or freeze. These skills will give you a temporary edge over your enemies and allow you to escape from dangerous situations.</li>
- </ul>
- <h3>How to summon and deal with the devil</h3>
- <p>Another interesting feature of Archero is the devil, who is a mysterious character that will appear after you defeat a boss. The devil will offer you a deal: he will give you a powerful skill in exchange for some of your health. You can either accept or decline his offer. Here are some tips on how to summon and deal with the devil:</p>
- <ul>
- <li>To summon the devil, you need to defeat a boss without taking any damage. This means that you need to dodge all the boss's attacks and not get hit by any enemies or obstacles along the way.</li>
- <li>The skill that the devil offers you is random, but it is usually one of the best skills in the game. Some examples are multishot, ricochet, diagonal arrows, extra life, or death nova.</li>
- <li>The amount of health that the devil takes from you is also random, but it is usually around 20% to 40% of your maximum health. This means that you need to weigh the pros and cons of accepting his offer. If you have a lot of health or a healing skill, you may be able to afford the trade. However, if you have low health or no healing skill, you may want to decline his offer.</li>
- <li>The devil's offer is optional, and you can always choose to skip it and get a normal skill instead. However, if you skip the devil's offer, you will not be able to summon him again in the same run.</li>
- </ul>
- <h3>How to choose the best skills and equipment</h3>
- <p>Another crucial aspect of Archero is choosing the best skills and equipment for your hero. This will affect your damage, defense, speed, and overall performance in the game. Here are some tips on how to choose the best skills and equipment:</p>
- <ul>
- <li>Choose skills that complement your playstyle and preferences. For example, if you like to move around a lot, you may want to choose skills that increase your movement speed, dash distance, or invincibility duration. If you like to stay in one spot and shoot from afar, you may want to choose skills that increase your attack range, damage, or critical rate.</li>
- <li>Choose skills that synergize with each other and create powerful combinations. For example, multishot and ricochet are two skills that work well together, as they allow you to shoot multiple arrows that bounce off enemies and walls. Another example is diagonal arrows and piercing shot, which allow you to shoot arrows in four directions that go through enemies.</li>
- <li>Choose equipment that suits your hero and skills. For example, if you have a lot of skills that increase your arrow count or elemental effects, you may want to use a bow that has a high attack speed or damage. If you have a lot of skills that increase your defense or health, you may want to use an armor that has a high health or resistance.</li>
- <li>Upgrade your equipment regularly with scrolls and coins. This will improve their stats and make them more effective. You can also fuse equipment of the same type and rarity to create a higher rarity equipment with better stats.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Archero is a fun and challenging action game that will keep you entertained for hours. You can play as a lone archer who has to fight against hordes of enemies and obstacles in different worlds. You can also customize your hero with various skills and equipment to make him more powerful and survive longer.</p>
- <p>If you want to enjoy the game without any limitations or restrictions, you can use Archero mod apk ios. This is a modified version of the game that allows you to access all the features and content for free. You can also get unlimited coins, gems, energy, and other resources to upgrade your hero and unlock new items.</p>
- <p>However, using Archero mod apk ios also comes with some risks and precautions that you should be aware of. You may violate the terms and conditions of the game developer and publisher, which may result in your account being banned or suspended. You may also expose your device to malware or viruses that may harm your system or steal your personal information. You may also lose some of the fun and challenge of the game by using the mod apk, as it may make the game too easy or boring for you.</p>
- <p>Therefore, you should always use Archero mod apk ios responsibly and moderately, and not abuse its features or advantages. You should also download the mod apk from a trusted source and scan it with an antivirus before installing it. You should also check the requirements and specifications of the mod apk before downloading it.</p>
- <p>We hope this article has helped you understand what Archero is, how to download and install Archero mod apk ios, why you should use it, and some tips and tricks for playing the game. Have fun playing Archero!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Archero mod apk ios:</p>
- <h4>Q: Is Archero mod apk ios safe to use?</h4>
- <p>A: Archero mod apk ios is safe to use as long as you download it from a trusted source and scan it with an antivirus before installing it. However, you should always be careful when using modified or hacked apps on your device, as they may contain malware or viruses that may harm your system or steal your personal information.</p>
- <h4>Q: Is Archero mod apk ios legal to use?</h4>
- <p>A: Archero mod apk ios is not legal to use, as it violates the terms and conditions of the game developer and publisher. Using Archero mod apk ios may result in your account being banned or suspended by the game authorities. You may also face legal consequences if you are caught using Archero mod apk ios.</p>
- <h4>Q: How do I update Archero mod apk ios?</h4>
- <p>A: To update Archero mod apk ios, you need to download the latest version of the mod apk from the same source that you downloaded it from before. You can also check for updates from the app installer that you used to install the mod apk. You should always update the mod apk to ensure its compatibility and functionality with the latest version of the game.</p>
- <h4>Q: How do I uninstall Archero mod apk ios?</h4>
- <p>A: To uninstall Archero mod apk ios, you need to delete the app from your device. You can do this by long-pressing the app icon and tapping on the delete option. You can also delete the app from your device settings by going to Settings > General > iPhone Storage > Archero > Delete App.</p>
- <h4>Q: Can I play Archero mod apk ios online with other players?</h4>
- <p>A: Archero mod apk ios does not support online multiplayer mode, as it may cause conflicts or errors with the game servers. You can only play Archero mod apk ios offline with your device. However, you can still enjoy the game's features and content without any limitations or restrictions.</p>
spaces/1phancelerku/anime-remove-background/Download Aim King 8 Ball Pool APK and Become a Pro in No Time.md DELETED
@@ -1,136 +0,0 @@
- 
- <h1>Aim King 8 Ball Pool APK: A Guide to the Best Tool for 8 Ball Pool Players</h1>
- <p>If you are a fan of 8 ball pool, you might have heard of Aim King 8 Ball Pool APK, a tool that can help you improve your skills and win more games. But what is it exactly, and how does it work? In this article, we will answer these questions and more, and give you some tips and tricks on how to use it effectively. Let's get started!</p>
- <h2>What is Aim King 8 Ball Pool APK?</h2>
- <p>Aim King 8 Ball Pool APK is an app that you can download and install on your Android device, and use it while playing 8 ball pool. It is a guide tool that uses AI image recognition technology to display extended guidelines in real time, making your shots more accurate and precise. It also allows you to adjust the sensitivity and accuracy settings according to your preference. With Aim King 8 Ball Pool APK, you can become a master of 8 ball pool in no time!</p>
- <h3>Features of Aim King 8 Ball Pool APK</h3>
- <p>Some of the features that make Aim King 8 Ball Pool APK stand out from other similar tools are:</p>
- <ul>
- <li>It is compatible with all versions of 8 ball pool, including the latest one.</li>
- <li>It is easy to use, with a simple and user-friendly interface.</li>
- <li>It does not require root access or any other permissions.</li>
- <li>It does not interfere with the game performance or graphics.</li>
- <li>It does not contain any ads or malware.</li>
- </ul>
- <h3>How to Download and Install Aim King 8 Ball Pool APK</h3>
- <p>To download and install Aim King 8 Ball Pool APK on your Android device, follow these steps:</p>
- <ol>
- <li>Go to this link and click on the download button.</li>
- <li>Wait for the file to be downloaded on your device.</li>
- <li>Open the file manager app on your device and locate the downloaded file.</li>
- <li>Tap on the file and allow the installation from unknown sources if prompted.</li>
- <li>Wait for the installation to be completed.</li>
- <li>Launch the app and enjoy!</li>
- </ol>
- <h2>Why Use Aim King 8 Ball Pool APK?</h2>
- <p>You might be wondering why you should use Aim King 8 Ball Pool APK instead of playing the game normally. Well, there are some good reasons why you should consider using this tool, as well as some drawbacks that you should be aware of. Let's take a look at them.</p>
- <h3>Benefits of Using Aim King 8 Ball Pool APK</h3>
- <p>Some of the benefits that you can get from using Aim King 8 Ball Pool APK are:</p>
- <ul>
- <li>You can improve your skills and confidence in playing 8 ball pool.</li>
- <li>You can win more games and earn more coins and rewards.</li>
- <li>You can challenge your friends and other players online and show off your abilities.</li>
- <li>You can have more fun and enjoyment while playing the game.</li>
- </ul>
- <h3>Drawbacks of Using Aim King 8 Ball Pool APK</h3>
- <p>Some of the drawbacks that you should be aware of when using Aim King 8 Ball Pool APK are:</p>
- <ul>
- <li>You might lose the thrill and challenge of playing the game naturally.</li>
- <li>You might get addicted to using the tool and lose interest in the game.</li>
- <li>You might get detected and banned by the game developers if you use the tool excessively or carelessly.</li>
- <li>You might face ethical issues and criticism from other players who consider using the tool as cheating.</li>
- </ul>
- <h2>Tips and Tricks for Using Aim King 8 Ball Pool APK</h2>
- <p>Now that you know what Aim King 8 Ball Pool APK is and what are its pros and cons, you might want to know how to use it effectively and safely. Here are some tips and tricks that you can follow:</p>
- <h3>How to Use the Extended Guidelines Feature</h3>
- <p>The extended guidelines feature is the main function of Aim King 8 Ball Pool APK. It shows you the trajectory of your cue ball and the target ball, as well as the angle and distance of your shot. To use this feature, you need to:</p>
- <ol>
- <li>Open the app and grant it permission to access your screen.</li>
- <li>Open the game and start a match.</li>
- <li>Tap on the Aim King icon on your screen to activate the extended guidelines.</li>
- <li>Aim your shot using the guidelines and adjust your power and spin as needed.</li>
- <li>Tap on the Aim King icon again to deactivate the extended guidelines.</li>
- <li>Release your shot and watch the result.</li>
- </ol>
- <h3>How to Adjust the Sensitivity and Accuracy Settings</h3>
- <p>The sensitivity and accuracy settings allow you to customize the performance of Aim King 8 Ball Pool APK according to your preference. The sensitivity setting determines how responsive the app is to your touch, while the accuracy setting determines how precise the app is in calculating the guidelines. To adjust these settings, you need to:</p>
- <ol>
- <li>Open the app and tap on the settings icon on the top right corner.</li>
- <li>Slide the sensitivity bar to increase or decrease the sensitivity level.</li>
- <li>Slide the accuracy bar to increase or decrease the accuracy level.</li>
- <li>Tap on the save button to apply your changes.</li>
- </ol>
- <h3>How to Avoid Detection and Ban by the Game Developers</h3>
- <p>One of the risks of using Aim King 8 Ball Pool APK is getting detected and banned by the game developers, who might consider it as a violation of their terms of service. To avoid this, you need to:</p>
- <ul>
- <li>Use the tool sparingly and moderately, and not in every match or shot.</li>
- <li>Use the tool only in offline mode or in private matches with your friends.</li>
- <li>Do not brag or boast about using the tool in public chats or forums.</li>
- <li>Do not update the game or the tool unless you are sure that they are compatible.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Aim King 8 Ball Pool APK is a great tool for 8 ball pool players who want to improve their skills and win more games. It offers extended guidelines, sensitivity and accuracy settings, and other features that can enhance your gameplay. However, it also has some drawbacks, such as losing the challenge, getting addicted, getting banned, and facing ethical issues. Therefore, you should use it wisely and responsibly, and not rely on it too much. Remember, it is just a tool, not a magic wand!</p>
- <h3>Summary of the Main Points</h3>
- <p>In this article, we have discussed:</p>
- <ul>
- <li>What is Aim King 8 Ball Pool APK and how does it work?</li>
- <li>Why use Aim King 8 Ball Pool APK and what are its benefits and drawbacks?</li>
- <li>How to use Aim King 8 Ball Pool APK effectively and safely?</li>
- </ul>
- <h3>Call to Action for the Readers</h3>
- <p>If you are interested in trying out Aim King 8 Ball Pool APK, you can download it and follow the instructions we have provided. However, if you are not comfortable with using it or you prefer playing the game naturally, you can skip it and enjoy 8 ball pool without any tools. The choice is yours!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Aim King 8 Ball Pool APK:</p>
- <h4>Is Aim King 8 Ball Pool APK safe to use?</h4>
- <p>Aim King 8 Ball Pool APK is safe to use as long as you download it from a trusted source and follow our tips on how to avoid detection and ban by the game developers. However, you should always be careful when using any third-party app that modifies the game, as there is always a possibility of malware or virus infection, data theft, or account suspension.</p>
- <h4>Does Aim King 8 Ball Pool APK work on iOS devices?</h4>
- <p>No, Aim King 8 Ball Pool APK is only compatible with Android devices. If you have an iOS device, you will not be able to use this tool. However, there might be other similar tools that work on iOS devices, but we cannot guarantee their safety or effectiveness.</p>
- <h4>Can I use Aim King 8 Ball Pool APK with other 8 ball pool tools or mods?</h4>
- <p>We do not recommend using Aim King 8 Ball Pool APK with other 8 ball pool tools or mods, as they might conflict with each other and cause errors or glitches in the game. Moreover, using multiple tools or mods might increase the chance of getting detected and banned by the game developers. Therefore, you should use Aim King 8 Ball Pool APK alone and disable any other tools or mods that you have installed.</p>
- <h4>How can I contact the developer of Aim King 8 Ball Pool APK?</h4>
- <p>If you have any questions, feedback, or suggestions about Aim King 8 Ball Pool APK, you can contact the developer through their email address: [email protected]. They will try to respond to your queries as soon as possible.</p>
- <h4>Where can I find more information about Aim King 8 Ball Pool APK?</h4>
- <p>If you want to learn more about Aim King 8 Ball Pool APK, you can visit their official website. There, you can find more details about the app, such as its features, screenshots, videos, reviews, and updates. You can also join their community and interact with other users who use the app.</p>
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/backbones/iresnet.py DELETED
@@ -1,187 +0,0 @@
1
- import torch
2
- from torch import nn
-
- __all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
-
-
- def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
-     """3x3 convolution with padding"""
-     return nn.Conv2d(in_planes,
-                      out_planes,
-                      kernel_size=3,
-                      stride=stride,
-                      padding=dilation,
-                      groups=groups,
-                      bias=False,
-                      dilation=dilation)
-
-
- def conv1x1(in_planes, out_planes, stride=1):
-     """1x1 convolution"""
-     return nn.Conv2d(in_planes,
-                      out_planes,
-                      kernel_size=1,
-                      stride=stride,
-                      bias=False)
-
-
- class IBasicBlock(nn.Module):
-     expansion = 1
-
-     def __init__(self, inplanes, planes, stride=1, downsample=None,
-                  groups=1, base_width=64, dilation=1):
-         super(IBasicBlock, self).__init__()
-         if groups != 1 or base_width != 64:
-             raise ValueError('BasicBlock only supports groups=1 and base_width=64')
-         if dilation > 1:
-             raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
-         self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05)
-         self.conv1 = conv3x3(inplanes, planes)
-         self.bn2 = nn.BatchNorm2d(planes, eps=1e-05)
-         self.prelu = nn.PReLU(planes)
-         self.conv2 = conv3x3(planes, planes, stride)
-         self.bn3 = nn.BatchNorm2d(planes, eps=1e-05)
-         self.downsample = downsample
-         self.stride = stride
-
-     def forward(self, x):
-         identity = x
-         out = self.bn1(x)
-         out = self.conv1(out)
-         out = self.bn2(out)
-         out = self.prelu(out)
-         out = self.conv2(out)
-         out = self.bn3(out)
-         if self.downsample is not None:
-             identity = self.downsample(x)
-         out += identity
-         return out
-
-
- class IResNet(nn.Module):
-     fc_scale = 7 * 7
-
-     def __init__(self,
-                  block, layers, dropout=0, num_features=512, zero_init_residual=False,
-                  groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
-         super(IResNet, self).__init__()
-         self.fp16 = fp16
-         self.inplanes = 64
-         self.dilation = 1
-         if replace_stride_with_dilation is None:
-             replace_stride_with_dilation = [False, False, False]
-         if len(replace_stride_with_dilation) != 3:
-             raise ValueError("replace_stride_with_dilation should be None "
-                              "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
-         self.groups = groups
-         self.base_width = width_per_group
-         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
-         self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
-         self.prelu = nn.PReLU(self.inplanes)
-         self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
-         self.layer2 = self._make_layer(block,
-                                        128,
-                                        layers[1],
-                                        stride=2,
-                                        dilate=replace_stride_with_dilation[0])
-         self.layer3 = self._make_layer(block,
-                                        256,
-                                        layers[2],
-                                        stride=2,
-                                        dilate=replace_stride_with_dilation[1])
-         self.layer4 = self._make_layer(block,
-                                        512,
-                                        layers[3],
-                                        stride=2,
-                                        dilate=replace_stride_with_dilation[2])
-         self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05)
-         self.dropout = nn.Dropout(p=dropout, inplace=True)
-         self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
-         self.features = nn.BatchNorm1d(num_features, eps=1e-05)
-         nn.init.constant_(self.features.weight, 1.0)
-         self.features.weight.requires_grad = False
-
-         for m in self.modules():
-             if isinstance(m, nn.Conv2d):
-                 nn.init.normal_(m.weight, 0, 0.1)
-             elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
-                 nn.init.constant_(m.weight, 1)
-                 nn.init.constant_(m.bias, 0)
-
-         if zero_init_residual:
-             for m in self.modules():
-                 if isinstance(m, IBasicBlock):
-                     nn.init.constant_(m.bn2.weight, 0)
-
-     def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
-         downsample = None
-         previous_dilation = self.dilation
-         if dilate:
-             self.dilation *= stride
-             stride = 1
-         if stride != 1 or self.inplanes != planes * block.expansion:
-             downsample = nn.Sequential(
-                 conv1x1(self.inplanes, planes * block.expansion, stride),
-                 nn.BatchNorm2d(planes * block.expansion, eps=1e-05),
-             )
-         layers = []
-         layers.append(
-             block(self.inplanes, planes, stride, downsample, self.groups,
-                   self.base_width, previous_dilation))
-         self.inplanes = planes * block.expansion
-         for _ in range(1, blocks):
-             layers.append(
-                 block(self.inplanes,
-                       planes,
-                       groups=self.groups,
-                       base_width=self.base_width,
-                       dilation=self.dilation))
-
-         return nn.Sequential(*layers)
-
-     def forward(self, x):
-         with torch.cuda.amp.autocast(self.fp16):
-             x = self.conv1(x)
-             x = self.bn1(x)
-             x = self.prelu(x)
-             x = self.layer1(x)
-             x = self.layer2(x)
-             x = self.layer3(x)
-             x = self.layer4(x)
-             x = self.bn2(x)
-             x = torch.flatten(x, 1)
-             x = self.dropout(x)
-         x = self.fc(x.float() if self.fp16 else x)
-         x = self.features(x)
-         return x
-
-
- def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
-     model = IResNet(block, layers, **kwargs)
-     if pretrained:
-         raise ValueError()
-     return model
-
-
- def iresnet18(pretrained=False, progress=True, **kwargs):
-     return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
-                     progress, **kwargs)
-
-
- def iresnet34(pretrained=False, progress=True, **kwargs):
-     return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
-                     progress, **kwargs)
-
-
- def iresnet50(pretrained=False, progress=True, **kwargs):
-     return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
-                     progress, **kwargs)
-
-
- def iresnet100(pretrained=False, progress=True, **kwargs):
-     return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
-                     progress, **kwargs)
-
-
- def iresnet200(pretrained=False, progress=True, **kwargs):
-     return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
-                     progress, **kwargs)
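For reference, a minimal smoke test for this backbone (a sketch: it assumes the excerpt above is saved as `iresnet.py` with `import torch` at the top, which precedes this excerpt in the original file). ArcFace backbones expect 112x112 aligned face crops, which is where `fc_scale = 7 * 7` comes from (112 / 2^4 = 7):

```python
import torch

from iresnet import iresnet50  # hypothetical local module name for the file above

model = iresnet50(num_features=512).eval()
faces = torch.randn(4, 3, 112, 112)   # batch of aligned 112x112 face crops
with torch.no_grad():
    embeddings = model(faces)         # -> shape (4, 512)
print(embeddings.shape)
```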
 
spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/got.py DELETED
@@ -1,71 +0,0 @@
- import networkx as nx
- import pandas as pd
- from pyvis.network import Network
-
-
- def got_func(physics):
-     got_net = Network(height="600px", width="100%", font_color="black", heading='Game of Thrones Graph')
-
-     # set the physics layout of the network
-     got_net.barnes_hut()
-     got_data = pd.read_csv("stormofswords.csv")
-     sources = got_data['Source']
-     targets = got_data['Target']
-     weights = got_data['Weight']
-
-     edge_data = zip(sources, targets, weights)
-
-     for e in edge_data:
-         src = e[0]
-         dst = e[1]
-         w = e[2]
-
-         got_net.add_node(src, src, title=src)
-         got_net.add_node(dst, dst, title=dst)
-         got_net.add_edge(src, dst, value=w)
-
-     neighbor_map = got_net.get_adj_list()
-
-     # add neighbor data to node hover data
-     for node in got_net.nodes:
-         node["title"] += " Neighbors:<br>" + "<br>".join(neighbor_map[node["id"]])
-         node["value"] = len(neighbor_map[node["id"]])
-     if physics:
-         got_net.show_buttons(filter_=['physics'])
-     got_net.show("gameofthrones.html")
-
-
- def simple_func(physics):
-     nx_graph = nx.cycle_graph(10)
-     nx_graph.nodes[1]['title'] = 'Number 1'
-     nx_graph.nodes[1]['group'] = 1
-     nx_graph.nodes[3]['title'] = 'I belong to a different group!'
-     nx_graph.nodes[3]['group'] = 10
-     nx_graph.add_node(20, size=20, title='couple', group=2)
-     nx_graph.add_node(21, size=15, title='couple', group=2)
-     nx_graph.add_edge(20, 21, weight=5)
-     nx_graph.add_node(25, size=25, label='lonely', title='lonely node', group=3)
-
-     nt = Network("500px", "500px", notebook=True, heading='')
-     nt.from_nx(nx_graph)
-     if physics:
-         nt.show_buttons(filter_=['physics'])
-     nt.show('test.html')
-
-
- def karate_func(physics):
-     G = nx.karate_club_graph()
-
-     nt = Network("500px", "500px", notebook=True, heading='Zachary’s Karate Club graph')
-     nt.from_nx(G)
-     if physics:
-         nt.show_buttons(filter_=['physics'])
-     nt.show('karate.html')
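These helpers write standalone PyVis HTML files rather than returning figures; a Streamlit page would typically embed the output, roughly like this (a sketch — the module name `got` is hypothetical, and it assumes `stormofswords.csv` sits next to the script with `streamlit` and `pyvis` installed):

```python
import streamlit as st
import streamlit.components.v1 as components

from got import got_func  # hypothetical module name for the file above

physics = st.sidebar.checkbox('add physics interactivity?')
got_func(physics)

# got_func writes gameofthrones.html; embed that file in the page.
with open("gameofthrones.html", "r", encoding="utf-8") as f:
    components.html(f.read(), height=600)
```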
 
spaces/AIWaves/Software_Company/src/agents/Component/PromptComponent.py DELETED
@@ -1,133 +0,0 @@
- from abc import abstractmethod
-
-
- class PromptComponent:
-     def __init__(self):
-         pass
-
-     @abstractmethod
-     def get_prompt(self, agent):
-         pass
-
-
- class TaskComponent(PromptComponent):
-     def __init__(self, task):
-         super().__init__()
-         self.task = task
-
-     def get_prompt(self, agent):
-         return f"""The task you need to execute is: <task>{self.task}</task>.\n"""
-
-
- class OutputComponent(PromptComponent):
-     def __init__(self, output):
-         super().__init__()
-         self.output = output
-
-     def get_prompt(self, agent):
-         return f"""Please refer to the above to extract <{self.output}> and </{self.output}>, \
- do not perform additional output, please output in strict accordance with the above format!\n"""
-
-
- class SystemComponent(PromptComponent):
-     def __init__(self, system_prompt):
-         super().__init__()
-         self.system_prompt = system_prompt
-
-     def get_prompt(self, agent):
-         return self.system_prompt
-
-
- class LastComponent(PromptComponent):
-     def __init__(self, last_prompt):
-         super().__init__()
-         self.last_prompt = last_prompt
-
-     def get_prompt(self, agent):
-         return self.last_prompt
-
-
- class StyleComponent(PromptComponent):
-     """
-     Role and style component.
-     """
-
-     def __init__(self, role):
-         super().__init__()
-         self.role = role
-
-     def get_prompt(self, agent):
-         name = agent.name
-         style = agent.style
-         return f"""Now your role is:\n<role>{self.role}</role>, your name is:\n<name>{name}</name>. \
- You need to follow the output style:\n<style>{style}</style>.\n"""
-
-
- class RuleComponent(PromptComponent):
-     def __init__(self, rule):
-         super().__init__()
-         self.rule = rule
-
-     def get_prompt(self, agent):
-         return f"""The rule you need to follow is:\n<rule>{self.rule}</rule>.\n"""
-
-
- class DemonstrationComponent(PromptComponent):
-     """
-     Takes a list of example answers (demonstrations).
-     """
-
-     def __init__(self, demonstrations):
-         super().__init__()
-         self.demonstrations = demonstrations
-
-     def add_demonstration(self, demonstration):
-         self.demonstrations.append(demonstration)
-
-     def get_prompt(self, agent):
-         prompt = "Here are demonstrations you can refer to:\n<demonstrations>"
-         for demonstration in self.demonstrations:
-             prompt += "\n" + demonstration
-         prompt += "</demonstrations>\n"
-         return prompt
-
-
- class CoTComponent(PromptComponent):
-     """
-     Takes a list of chain-of-thought examples.
-     """
-
-     def __init__(self, demonstrations):
-         super().__init__()
-         self.demonstrations = demonstrations
-
-     def add_demonstration(self, demonstration):
-         self.demonstrations.append(demonstration)
-
-     def get_prompt(self, agent):
-         prompt = "You need to think in detail before outputting, the thinking case is as follows:\n<demonstrations>"
-         for demonstration in self.demonstrations:
-             prompt += "\n" + demonstration
-         prompt += "</demonstrations>\n"
-         return prompt
-
-
- class CustomizeComponent(PromptComponent):
-     """
-     Custom template component.
-     template (str) : e.g. "i am {name}" (named placeholders, since str.format(**kwargs) is used)
-     keywords (list): e.g. ["name"]
-     Example: if agent.environment.shared_memory["name"] == "Lilong", the component
-     fetches each keyword from the environment, fills the template, and returns "i am Lilong".
-     """
-
-     def __init__(self, template, keywords) -> None:
-         super().__init__()
-         self.template = template
-         self.keywords = keywords
-
-     def get_prompt(self, agent):
-         template_keyword = {}
-         for keyword in self.keywords:
-             current_keyword = agent.environment.shared_memory[keyword]
-             template_keyword[keyword] = current_keyword
-         return self.template.format(**template_keyword)
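A sketch of how these components compose into one prompt. The `SimpleNamespace` agent below is a hypothetical stand-in for the real agent class, exposing only what the components read (`name`, `style`, `environment.shared_memory`); the import path mirrors this file's location in the repo:

```python
from types import SimpleNamespace

from agents.Component.PromptComponent import (
    TaskComponent, RuleComponent, CustomizeComponent,
)

# Hypothetical minimal agent satisfying the components' expectations.
agent = SimpleNamespace(
    name="Alice",
    style="concise",
    environment=SimpleNamespace(shared_memory={"name": "Lilong"}),
)

prompt = "".join(
    c.get_prompt(agent)
    for c in [
        TaskComponent("summarize the design document"),
        RuleComponent("answer in English"),
        CustomizeComponent("i am {name}", ["name"]),
    ]
)
print(prompt)
```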
 
spaces/AP123/dreamgaussian/cam_utils.py DELETED
@@ -1,146 +0,0 @@
- import numpy as np
- from scipy.spatial.transform import Rotation as R
-
- import torch
-
-
- def dot(x, y):
-     if isinstance(x, np.ndarray):
-         return np.sum(x * y, -1, keepdims=True)
-     else:
-         return torch.sum(x * y, -1, keepdim=True)
-
-
- def length(x, eps=1e-20):
-     if isinstance(x, np.ndarray):
-         return np.sqrt(np.maximum(np.sum(x * x, axis=-1, keepdims=True), eps))
-     else:
-         return torch.sqrt(torch.clamp(dot(x, x), min=eps))
-
-
- def safe_normalize(x, eps=1e-20):
-     return x / length(x, eps)
-
-
- def look_at(campos, target, opengl=True):
-     # campos: [N, 3], camera/eye position
-     # target: [N, 3], object to look at
-     # return: [N, 3, 3], rotation matrix
-     if not opengl:
-         # camera forward aligns with -z
-         forward_vector = safe_normalize(target - campos)
-         up_vector = np.array([0, 1, 0], dtype=np.float32)
-         right_vector = safe_normalize(np.cross(forward_vector, up_vector))
-         up_vector = safe_normalize(np.cross(right_vector, forward_vector))
-     else:
-         # camera forward aligns with +z
-         forward_vector = safe_normalize(campos - target)
-         up_vector = np.array([0, 1, 0], dtype=np.float32)
-         right_vector = safe_normalize(np.cross(up_vector, forward_vector))
-         up_vector = safe_normalize(np.cross(forward_vector, right_vector))
-     R = np.stack([right_vector, up_vector, forward_vector], axis=1)
-     return R
-
-
- # elevation & azimuth to pose (cam2world) matrix
- def orbit_camera(elevation, azimuth, radius=1, is_degree=True, target=None, opengl=True):
-     # radius: scalar
-     # elevation: scalar, in (-90, 90), from +y to -y is (-90, 90)
-     # azimuth: scalar, in (-180, 180), from +z to +x is (0, 90)
-     # return: [4, 4], camera pose matrix
-     if is_degree:
-         elevation = np.deg2rad(elevation)
-         azimuth = np.deg2rad(azimuth)
-     x = radius * np.cos(elevation) * np.sin(azimuth)
-     y = - radius * np.sin(elevation)
-     z = radius * np.cos(elevation) * np.cos(azimuth)
-     if target is None:
-         target = np.zeros([3], dtype=np.float32)
-     campos = np.array([x, y, z]) + target  # [3]
-     T = np.eye(4, dtype=np.float32)
-     T[:3, :3] = look_at(campos, target, opengl)
-     T[:3, 3] = campos
-     return T
-
-
- class OrbitCamera:
-     def __init__(self, W, H, r=2, fovy=60, near=0.01, far=100):
-         self.W = W
-         self.H = H
-         self.radius = r  # camera distance from center
-         self.fovy = np.deg2rad(fovy)  # deg 2 rad
-         self.near = near
-         self.far = far
-         self.center = np.array([0, 0, 0], dtype=np.float32)  # look at this point
-         self.rot = R.from_matrix(np.eye(3))
-         self.up = np.array([0, 1, 0], dtype=np.float32)  # need to be normalized!
-
-     @property
-     def fovx(self):
-         return 2 * np.arctan(np.tan(self.fovy / 2) * self.W / self.H)
-
-     @property
-     def campos(self):
-         return self.pose[:3, 3]
-
-     # pose (c2w)
-     @property
-     def pose(self):
-         # first move camera to radius
-         res = np.eye(4, dtype=np.float32)
-         res[2, 3] = self.radius  # opengl convention...
-         # rotate
-         rot = np.eye(4, dtype=np.float32)
-         rot[:3, :3] = self.rot.as_matrix()
-         res = rot @ res
-         # translate
-         res[:3, 3] -= self.center
-         return res
-
-     # view (w2c)
-     @property
-     def view(self):
-         return np.linalg.inv(self.pose)
-
-     # projection (perspective)
-     @property
-     def perspective(self):
-         y = np.tan(self.fovy / 2)
-         aspect = self.W / self.H
-         return np.array(
-             [
-                 [1 / (y * aspect), 0, 0, 0],
-                 [0, -1 / y, 0, 0],
-                 [
-                     0,
-                     0,
-                     -(self.far + self.near) / (self.far - self.near),
-                     -(2 * self.far * self.near) / (self.far - self.near),
-                 ],
-                 [0, 0, -1, 0],
-             ],
-             dtype=np.float32,
-         )
-
-     # intrinsics
-     @property
-     def intrinsics(self):
-         focal = self.H / (2 * np.tan(self.fovy / 2))
-         return np.array([focal, focal, self.W // 2, self.H // 2], dtype=np.float32)
-
-     @property
-     def mvp(self):
-         return self.perspective @ np.linalg.inv(self.pose)  # [4, 4]
-
-     def orbit(self, dx, dy):
-         # rotate along camera up/side axis!
-         side = self.rot.as_matrix()[:3, 0]
-         rotvec_x = self.up * np.radians(-0.05 * dx)
-         rotvec_y = side * np.radians(-0.05 * dy)
-         self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
-
-     def scale(self, delta):
-         self.radius *= 1.1 ** (-delta)
-
-     def pan(self, dx, dy, dz=0):
-         # pan in camera coordinate system (careful on the sensitivity!)
-         self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([-dx, -dy, dz])
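A short sketch of how these utilities fit together: build a camera-to-world pose for a given viewpoint, then read a model-view-projection matrix off an `OrbitCamera`. It assumes the file above is importable as `cam_utils`; only numpy and scipy are needed at runtime:

```python
from cam_utils import OrbitCamera, orbit_camera

# Pose for a camera 30 degrees above the horizon, 45 degrees around the object, 2 units away.
pose = orbit_camera(elevation=30, azimuth=45, radius=2)  # (4, 4) cam2world matrix
print(pose[:3, 3])                                       # camera position in world space

cam = OrbitCamera(W=800, H=600, r=2, fovy=60)
cam.orbit(dx=10, dy=0)                                   # drag 10 px horizontally
mvp = cam.mvp                                            # (4, 4) = perspective @ world2cam
print(mvp.shape)
```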
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py DELETED
@@ -1,4 +0,0 @@
- _base_ = ['./resnet50_8xb32_in1k.py']
-
- # schedule settings
- optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic')
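This four-line config enables dynamic-loss-scale mixed precision on top of the base ResNet-50 recipe. A sketch of inspecting it programmatically (an assumption-laden example: it requires `mmengine` and the `_base_` file `resnet50_8xb32_in1k.py` sitting next to the config, and the local path is hypothetical):

```python
from mmengine.config import Config

# Hypothetical local copy of the deleted config file.
cfg = Config.fromfile('resnet50_8xb32-fp16-dynamic_in1k.py')
print(cfg.optim_wrapper)  # {'type': 'AmpOptimWrapper', 'loss_scale': 'dynamic'}
```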
 
spaces/Abhi5ingh/fashionsd/app.py DELETED
@@ -1,168 +0,0 @@
- from typing import Optional
-
- import cv2
- import streamlit as st
- from PIL import Image
- import os
- import tempfile
-
- from sdfile import PIPELINES, generate
-
- DEFAULT_PROMPT = "belted shirt black belted portrait-collar wrap blouse with black prints"
- DEFAULT_WIDTH, DEFAULT_HEIGHT = 512, 512
- OUTPUT_IMAGE_KEY = "output_img"
- LOADED_IMAGE_KEY = "loaded_img"
-
-
- def get_image(key: str) -> Optional[Image.Image]:
-     if key in st.session_state:
-         return st.session_state[key]
-     return None
-
-
- def set_image(key: str, img: Image.Image):
-     st.session_state[key] = img
-
-
- def prompt_and_generate_button(prefix, pipeline_name: PIPELINES, **kwargs):
-     prompt = st.text_area(
-         "Prompt",
-         value=DEFAULT_PROMPT,
-         key=f"{prefix}-prompt",
-     )
-     negative_prompt = st.text_area(
-         "Negative prompt",
-         value="",
-         key=f"{prefix}-negative_prompt",
-     )
-     col1, col2 = st.columns(2)
-     with col1:
-         steps = st.slider(
-             "Number of inference steps",
-             min_value=1,
-             max_value=200,
-             value=30,
-             key=f"{prefix}-inference-steps",
-         )
-     with col2:
-         guidance_scale = st.slider(
-             "Guidance scale",
-             min_value=0.0,
-             max_value=20.0,
-             value=7.5,
-             step=0.5,
-             key=f"{prefix}-guidance-scale",
-         )
-     enable_cpu_offload = st.checkbox(
-         "Enable CPU offload if you run out of memory",
-         key=f"{prefix}-cpu-offload",
-         value=False,
-     )
-
-     if st.button("Generate Image", key=f"{prefix}-btn"):
-         with st.spinner("Generating image ..."):
-             image = generate(
-                 prompt,
-                 pipeline_name,
-                 negative_prompt=negative_prompt,
-                 num_inference_steps=steps,
-                 guidance_scale=guidance_scale,
-                 enable_cpu_offload=enable_cpu_offload,
-                 **kwargs,
-             )
-             set_image(OUTPUT_IMAGE_KEY, image.copy())
-             st.image(image)
-
-
- def width_and_height_sliders(prefix):
-     col1, col2 = st.columns(2)
-     with col1:
-         width = st.slider(
-             "Width",
-             min_value=64,
-             max_value=1600,
-             step=16,
-             value=512,
-             key=f"{prefix}-width",
-         )
-     with col2:
-         height = st.slider(
-             "Height",
-             min_value=64,
-             max_value=1600,
-             step=16,
-             value=512,
-             key=f"{prefix}-height",
-         )
-     return width, height
-
-
- def image_uploader(prefix):
-     image = st.file_uploader("Image", ["jpg", "png"], key=f"{prefix}-uploader")
-     if image:
-         image = Image.open(image)
-         print(f"loaded input image of size ({image.width}, {image.height})")
-         return image
-
-     return get_image(LOADED_IMAGE_KEY)
-
-
- def sketching():
-     image = image_uploader("sketch2img")
-
-     if not image:
-         return None
-
-     with tempfile.TemporaryDirectory() as temp_dir:
-         temp_image_path = os.path.join(temp_dir, "uploaded_image.jpg")
-         image.save(temp_image_path)
-
-         image = cv2.imread(temp_image_path)
-         image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-         image_blur = cv2.GaussianBlur(image, (5, 5), 0)
-         sketch = cv2.adaptiveThreshold(image_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
-         sketch_pil = Image.fromarray(sketch)
-     return sketch_pil
-
-
- def txt2img_tab():
-     prefix = "txt2img"
-     width, height = width_and_height_sliders(prefix)
-     prompt_and_generate_button(prefix, "txt2img", width=width, height=height)
-
-
- def sketching_tab():
-     prefix = "sketch2img"
-     col1, col2 = st.columns(2)
-     with col1:
-         image = sketching()
-     with col2:
-         if image:
-             controlnet_conditioning_scale = st.slider(
-                 "Strength or dependence on the input sketch",
-                 min_value=0.0,
-                 max_value=1.0,
-                 value=0.5,
-                 step=0.05,
-                 key=f"{prefix}-controlnet_conditioning_scale",
-             )
-             prompt_and_generate_button(
-                 prefix,
-                 "sketch2img",
-                 image=image,
-                 controlnet_conditioning_scale=controlnet_conditioning_scale,
-             )
-
-
- def main():
-     st.set_page_config(layout="wide")
-     st.title("Fashion-SDX: Playground")
-
-     tab1, tab2 = st.tabs(
-         ["Text to image", "Sketch to image"]
-     )
-     with tab1:
-         txt2img_tab()
-     with tab2:
-         sketching_tab()
-
-     with st.sidebar:
-         st.header("Most Recent Output Image")
-         output_image = get_image(OUTPUT_IMAGE_KEY)
-         if output_image:
-             st.image(output_image)
-         else:
-             st.markdown("no output generated yet")
-
-
- if __name__ == "__main__":
-     main()
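The app imports `PIPELINES` and `generate` from a local `sdfile` module that is not part of this diff, so its actual contents are unknown. A hedged sketch of a minimal `generate` compatible with the call sites above, using the public `diffusers` API (the model id and the whole module body are assumptions, and only the txt2img path is sketched — sketch2img would add a ControlNet):

```python
from typing import Literal

import torch
from diffusers import StableDiffusionPipeline

PIPELINES = Literal["txt2img", "sketch2img"]


def generate(prompt, pipeline_name: PIPELINES, negative_prompt="",
             num_inference_steps=30, guidance_scale=7.5,
             enable_cpu_offload=False, **kwargs):
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    )
    if enable_cpu_offload:
        pipe.enable_model_cpu_offload()  # requires accelerate
    else:
        pipe = pipe.to("cuda")
    result = pipe(prompt, negative_prompt=negative_prompt,
                  num_inference_steps=num_inference_steps,
                  guidance_scale=guidance_scale,
                  width=kwargs.get("width", 512),
                  height=kwargs.get("height", 512))
    return result.images[0]
```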
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/hsladjustpipeline.js DELETED
@@ -1,2 +0,0 @@
- import HslAdjustPostFxPipeline from './shaders/hsladjust/HslAdjustPostFxPipeline.js';
- export default HslAdjustPostFxPipeline;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/IsInTouching.js DELETED
@@ -1,19 +0,0 @@
- import IsPointerInBounds from '../../../plugins/utils/input/IsPointerInBounds.js';
- import IsGameObject from '../../../plugins/utils/system/IsGameObject.js';
-
- var IsInTouching = function (pointer, gameObject) {
-     if (IsGameObject(pointer) || (typeof (pointer) === 'string')) {
-         gameObject = pointer;
-         pointer = undefined;
-     }
-
-     if (gameObject === undefined) {
-         gameObject = this;
-     } else if (typeof (gameObject) === 'string') {
-         gameObject = this.getElement(gameObject);
-     }
-
-     return IsPointerInBounds(gameObject, pointer);
- }
-
- export default IsInTouching;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/InjectProperties.js DELETED
@@ -1,32 +0,0 @@
- var InjectProperties = function (table) {
-     Object.defineProperty(table, 'childOY', {
-         configurable: true,
-         get: function () {
-             return table.tableOY;
-         },
-         set: function (value) {
-             table.tableOY = value;
-         }
-     });
-     Object.defineProperty(table, 'topChildOY', {
-         get: function () {
-             return table.topTableOY;
-         }
-     });
-     Object.defineProperty(table, 'bottomChildOY', {
-         get: function () {
-             return table.bottomTableOY;
-         }
-     });
-     Object.defineProperty(table, 'childVisibleHeight', {
-         get: function () {
-             return table.instHeight;
-         }
-     });
-     Object.defineProperty(table, 'childHeight', {
-         get: function () {
-             return table.tableHeight;
-         }
-     });
- };
-
- export default InjectProperties;
 
spaces/AlexWelcing/MusicLM/__init__.py DELETED
@@ -1,3 +0,0 @@
- from musiclm_pytorch.musiclm_pytorch import MuLaN, MuLaNEmbedQuantizer, MusicLM
-
- from musiclm_pytorch.musiclm_pytorch import AudioSpectrogramTransformer, TextTransformer
 
spaces/Ali-Omrani/CCR/app.py DELETED
@@ -1,105 +0,0 @@
- import numpy as np
- import pandas as pd
- import gradio as gr
- from sentence_transformers import SentenceTransformer, util
-
-
- def encode_column(model, filename, col_name):
-     df = pd.read_csv(filename)
-     df["embedding"] = list(model.encode(df[col_name]))
-     return df
-
-
- def item_level_ccr(data_encoded_df, questionnaire_encoded_df):
-     # stack the per-row vectors into 2-D arrays before computing cosine similarities
-     q_embeddings = np.stack(questionnaire_encoded_df.embedding.to_numpy())
-     d_embeddings = np.stack(data_encoded_df.embedding.to_numpy())
-     similarities = util.pytorch_cos_sim(d_embeddings, q_embeddings)
-     for i in range(1, len(questionnaire_encoded_df) + 1):
-         data_encoded_df["sim_item_{}".format(i)] = similarities[:, i - 1]
-     return data_encoded_df
-
-
- def ccr_wrapper(data_file, data_col, q_file, q_col, model='all-MiniLM-L6-v2'):
-     """
-     Writes a CSV containing the contents of data_file plus one additional
-     column of CCR values per question, and returns the path of that file.
-
-     Parameters:
-         data_file (str): path to the file containing user text
-         data_col (str): column that includes user text
-         q_file (str): path to the file containing questionnaires
-         q_col (str): column that includes questions
-         model (str): name of the SBERT model to use for CCR; see
-             https://www.sbert.net/docs/pretrained_models.html for the full list
-     """
-     try:
-         model = SentenceTransformer(model)
-     except Exception:
-         print("model name was not included, using all-MiniLM-L6-v2")
-         model = SentenceTransformer('all-MiniLM-L6-v2')
-
-     questionnaire_filename = q_file.name
-     data_filename = data_file.name
-
-     q_encoded_df = encode_column(model, questionnaire_filename, q_col)
-     data_encoded_df = encode_column(model, data_filename, data_col)
-     ccr_df = item_level_ccr(data_encoded_df, q_encoded_df)
-
-     ccr_df.to_csv("ccr_results.csv")
-     return "ccr_results.csv"
-
-
- def single_text_ccr(text, question):
-     model = SentenceTransformer('all-MiniLM-L6-v2')
-     text_embedding = model.encode(text)
-     question_embedding = model.encode(question)
-     return round(util.pytorch_cos_sim(text_embedding, question_embedding).item(), 3)
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("""<h1><center>Contextual Construct Representations</center></h1>
-     <h3><center>Ali Omrani and Mohammad Atari</center></h3>""")
-
-     gr.Markdown("""<br><h4>Play around with your items!</h4>""")
-
-     with gr.Row():
-         user_txt = gr.Textbox(label="Input Text", placeholder="Enter your desired text here ...")
-         question = gr.Textbox(label="Question", placeholder="Enter the question text here ...")
-
-     submit2 = gr.Button("Get CCR for this Text!")
-
-     submit2.click(single_text_ccr, inputs=[user_txt, question], outputs=gr.Textbox(label="CCR Value"))
-
-     gr.Markdown("""<br><h4>Or process a whole file!</h4>""")
-
-     with gr.Row():
-         model_name = gr.Dropdown(label="Choose the Model",
-                                  choices=["all-mpnet-base-v2", "multi-qa-mpnet-base-dot-v1", "distiluse-base-multilingual-cased-v2",
-                                           "distiluse-base-multilingual-cased-v1", "paraphrase-MiniLM-L3-v2", "paraphrase-multilingual-MiniLM-L12-v2",
-                                           "paraphrase-albert-small-v2", "paraphrase-multilingual-mpnet-base-v2", "multi-qa-MiniLM-L6-cos-v1",
-                                           "all-MiniLM-L6-v2", "multi-qa-distilbert-cos-v1", "all-MiniLM-L12-v2", "all-distilroberta-v1"])
-     with gr.Row():
-         with gr.Column():
-             user_data = gr.File(label="Participant Data File")
-             text_col = gr.Textbox(label="Text Column", placeholder="text column ... ")
-         with gr.Column():
-             questionnaire_data = gr.File(label="Questionnaire File")
-             q_col = gr.Textbox(label="Question Column", placeholder="questionnaire column ... ")
-
-     submit = gr.Button("Get CCR!")
-
-     outputs = gr.File()
-     submit.click(ccr_wrapper, inputs=[user_data, text_col, questionnaire_data, q_col, model_name], outputs=[outputs])
-
- demo.launch()
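For readers outside the Gradio UI, a small sketch of the underlying CCR computation on in-memory data (same model family as the app; the sentences and questionnaire items below are made up for illustration):

```python
import numpy as np
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('all-MiniLM-L6-v2')

texts = ["I always tell the truth.", "Rules are meant to be broken."]
items = ["It is important to me to be honest.", "I value obedience to authority."]

t = model.encode(texts)            # (2, 384) sentence embeddings
q = model.encode(items)            # (2, 384) questionnaire-item embeddings
sims = util.pytorch_cos_sim(t, q)  # (2, 2): one CCR score per text/item pair
print(np.round(sims.numpy(), 3))
```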
 
spaces/Alican/pixera/models/__init__.py DELETED
@@ -1,67 +0,0 @@
- """This package contains modules related to objective functions, optimizations, and network architectures.
-
- To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
- You need to implement the following five functions:
-     -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-     -- <set_input>: unpack data from dataset and apply preprocessing.
-     -- <forward>: produce intermediate results.
-     -- <optimize_parameters>: calculate loss, gradients, and update network weights.
-     -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
-
- In the function <__init__>, you need to define four lists:
-     -- self.loss_names (str list): specify the training losses that you want to plot and save.
-     -- self.model_names (str list): define networks used in our training.
-     -- self.visual_names (str list): specify the images that you want to display and save.
-     -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for a usage example.
-
- Now you can use the model class by specifying flag '--model dummy'.
- See our template model class 'template_model.py' for more details.
- """
-
- import importlib
- from models.base_model import BaseModel
-
-
- def find_model_using_name(model_name):
-     """Import the module "models/[model_name]_model.py".
-
-     In the file, the class called DatasetNameModel() will
-     be instantiated. It has to be a subclass of BaseModel,
-     and it is case-insensitive.
-     """
-     model_filename = "models." + model_name + "_model"
-     modellib = importlib.import_module(model_filename)
-     model = None
-     target_model_name = model_name.replace('_', '') + 'model'
-     for name, cls in modellib.__dict__.items():
-         if name.lower() == target_model_name.lower() \
-                 and issubclass(cls, BaseModel):
-             model = cls
-
-     if model is None:
-         print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
-         exit(1)  # exit with a non-zero status on this error path
-
-     return model
-
-
- def get_option_setter(model_name):
-     """Return the static method <modify_commandline_options> of the model class."""
-     model_class = find_model_using_name(model_name)
-     return model_class.modify_commandline_options
-
-
- def create_model(opt):
-     """Create a model given the option.
-
-     This function wraps the class CustomDatasetDataLoader.
-     This is the main interface between this package and 'train.py'/'test.py'.
-
-     Example:
-         >>> from models import create_model
-         >>> model = create_model(opt)
-     """
-     model = find_model_using_name(opt.model)
-     instance = model(opt)
-     print("model [%s] was created" % type(instance).__name__)
-     return instance
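A sketch of the by-name lookup this registry performs. It assumes it runs inside the pixera repo, where (as in the upstream CycleGAN codebase this file derives from) `models/cycle_gan_model.py` defines a `CycleGANModel`:

```python
from models import find_model_using_name, get_option_setter

# 'cycle_gan' resolves to models/cycle_gan_model.py -> class CycleGANModel
# (assuming that module exists in the repo).
model_class = find_model_using_name('cycle_gan')
print(model_class.__name__)

# get_option_setter returns the class's modify_commandline_options staticmethod,
# which train.py uses to extend the argument parser before building opt.
option_setter = get_option_setter('cycle_gan')
```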
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/prior_transformer.py DELETED
@@ -1,364 +0,0 @@
- from dataclasses import dataclass
- from typing import Dict, Optional, Union
-
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from ..configuration_utils import ConfigMixin, register_to_config
- from ..utils import BaseOutput
- from .attention import BasicTransformerBlock
- from .attention_processor import AttentionProcessor, AttnProcessor
- from .embeddings import TimestepEmbedding, Timesteps
- from .modeling_utils import ModelMixin
-
-
- @dataclass
- class PriorTransformerOutput(BaseOutput):
-     """
-     The output of [`PriorTransformer`].
-
-     Args:
-         predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
-             The predicted CLIP image embedding conditioned on the CLIP text embedding input.
-     """
-
-     predicted_image_embedding: torch.FloatTensor
-
-
- class PriorTransformer(ModelMixin, ConfigMixin):
-     """
-     A Prior Transformer model.
-
-     Parameters:
-         num_attention_heads (`int`, *optional*, defaults to 32): The number of heads to use for multi-head attention.
-         attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head.
-         num_layers (`int`, *optional*, defaults to 20): The number of layers of Transformer blocks to use.
-         embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `hidden_states`.
-         num_embeddings (`int`, *optional*, defaults to 77):
-             The number of embeddings of the model input `hidden_states`.
-         additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the
-             projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +
-             additional_embeddings`.
-         dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
-         time_embed_act_fn (`str`, *optional*, defaults to 'silu'):
-             The activation function to use to create timestep embeddings.
-         norm_in_type (`str`, *optional*, defaults to None): The normalization layer to apply on hidden states before
-             passing to Transformer blocks. Set it to `None` if normalization is not needed.
-         embedding_proj_norm_type (`str`, *optional*, defaults to None):
-             The normalization layer to apply on the input `proj_embedding`. Set it to `None` if normalization is not
-             needed.
-         encoder_hid_proj_type (`str`, *optional*, defaults to `linear`):
-             The projection layer to apply on the input `encoder_hidden_states`. Set it to `None` if
-             `encoder_hidden_states` is `None`.
-         added_emb_type (`str`, *optional*, defaults to `prd`): Additional embeddings to condition the model.
-             Choose from `prd` or `None`. If `prd` is chosen, a token indicating the (quantized) dot
-             product between the text embedding and image embedding is appended, as proposed in the unCLIP paper
-             https://arxiv.org/abs/2204.06125. If it is `None`, no additional embeddings will be added.
-         time_embed_dim (`int`, *optional*, defaults to None): The dimension of timestep embeddings.
-             If None, will be set to `num_attention_heads * attention_head_dim`.
-         embedding_proj_dim (`int`, *optional*, defaults to None):
-             The dimension of `proj_embedding`. If None, will be set to `embedding_dim`.
-         clip_embed_dim (`int`, *optional*, defaults to None):
-             The dimension of the output. If None, will be set to `embedding_dim`.
-     """
-
-     @register_to_config
-     def __init__(
-         self,
-         num_attention_heads: int = 32,
-         attention_head_dim: int = 64,
-         num_layers: int = 20,
-         embedding_dim: int = 768,
-         num_embeddings=77,
-         additional_embeddings=4,
-         dropout: float = 0.0,
-         time_embed_act_fn: str = "silu",
-         norm_in_type: Optional[str] = None,  # layer
-         embedding_proj_norm_type: Optional[str] = None,  # layer
-         encoder_hid_proj_type: Optional[str] = "linear",  # linear
-         added_emb_type: Optional[str] = "prd",  # prd
-         time_embed_dim: Optional[int] = None,
-         embedding_proj_dim: Optional[int] = None,
-         clip_embed_dim: Optional[int] = None,
-     ):
-         super().__init__()
-         self.num_attention_heads = num_attention_heads
-         self.attention_head_dim = attention_head_dim
-         inner_dim = num_attention_heads * attention_head_dim
-         self.additional_embeddings = additional_embeddings
-
-         time_embed_dim = time_embed_dim or inner_dim
-         embedding_proj_dim = embedding_proj_dim or embedding_dim
-         clip_embed_dim = clip_embed_dim or embedding_dim
-
-         self.time_proj = Timesteps(inner_dim, True, 0)
-         self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
-
-         self.proj_in = nn.Linear(embedding_dim, inner_dim)
-
-         if embedding_proj_norm_type is None:
-             self.embedding_proj_norm = None
-         elif embedding_proj_norm_type == "layer":
-             self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
-         else:
-             raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
-
-         self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
-
-         if encoder_hid_proj_type is None:
-             self.encoder_hidden_states_proj = None
-         elif encoder_hid_proj_type == "linear":
-             self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
-         else:
-             raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
-
-         self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
-
-         if added_emb_type == "prd":
-             self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
-         elif added_emb_type is None:
-             self.prd_embedding = None
-         else:
-             raise ValueError(
-                 f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
-             )
-
-         self.transformer_blocks = nn.ModuleList(
-             [
-                 BasicTransformerBlock(
-                     inner_dim,
-                     num_attention_heads,
-                     attention_head_dim,
-                     dropout=dropout,
-                     activation_fn="gelu",
-                     attention_bias=True,
-                 )
-                 for d in range(num_layers)
-             ]
-         )
-
-         if norm_in_type == "layer":
-             self.norm_in = nn.LayerNorm(inner_dim)
-         elif norm_in_type is None:
-             self.norm_in = None
-         else:
-             raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
-
-         self.norm_out = nn.LayerNorm(inner_dim)
-
-         self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
-
-         causal_attention_mask = torch.full(
-             [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
-         )
-         causal_attention_mask.triu_(1)
-         causal_attention_mask = causal_attention_mask[None, ...]
-         self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
-
-         self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
-         self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
-
-     @property
-     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
-     def attn_processors(self) -> Dict[str, AttentionProcessor]:
-         r"""
-         Returns:
-             `dict` of attention processors: A dictionary containing all attention processors used in the model,
-             indexed by their weight names.
-         """
-         # set recursively
-         processors = {}
-
-         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-             if hasattr(module, "set_processor"):
-                 processors[f"{name}.processor"] = module.processor
-
-             for sub_name, child in module.named_children():
-                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-             return processors
-
-         for name, module in self.named_children():
-             fn_recursive_add_processors(name, module, processors)
-
-         return processors
-
-     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-         r"""
-         Sets the attention processor to use to compute attention.
-
-         Parameters:
-             processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                 The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                 for **all** `Attention` layers.
-
-                 If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                 processor. This is strongly recommended when setting trainable attention processors.
-
-         """
-         count = len(self.attn_processors.keys())
-
-         if isinstance(processor, dict) and len(processor) != count:
-             raise ValueError(
-                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-             )
-
-         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-             if hasattr(module, "set_processor"):
-                 if not isinstance(processor, dict):
-                     module.set_processor(processor)
-                 else:
-                     module.set_processor(processor.pop(f"{name}.processor"))
-
-             for sub_name, child in module.named_children():
-                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-         for name, module in self.named_children():
-             fn_recursive_attn_processor(name, module, processor)
-
-     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
-     def set_default_attn_processor(self):
-         """
-         Disables custom attention processors and sets the default attention implementation.
-         """
-         self.set_attn_processor(AttnProcessor())
-
-     def forward(
-         self,
-         hidden_states,
-         timestep: Union[torch.Tensor, float, int],
-         proj_embedding: torch.FloatTensor,
-         encoder_hidden_states: Optional[torch.FloatTensor] = None,
-         attention_mask: Optional[torch.BoolTensor] = None,
-         return_dict: bool = True,
-     ):
-         """
-         The [`PriorTransformer`] forward method.
-
-         Args:
-             hidden_states (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
-                 The currently predicted image embeddings.
-             timestep (`torch.LongTensor`):
-                 Current denoising step.
-             proj_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
-                 Projected embedding vector the denoising process is conditioned on.
-             encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_embeddings, embedding_dim)`):
-                 Hidden states of the text embeddings the denoising process is conditioned on.
-             attention_mask (`torch.BoolTensor` of shape `(batch_size, num_embeddings)`):
-                 Text mask for the text embeddings.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~models.prior_transformer.PriorTransformerOutput`] instead of a plain
-                 tuple.
-
-         Returns:
-             [`~models.prior_transformer.PriorTransformerOutput`] or `tuple`:
-                 If return_dict is True, a [`~models.prior_transformer.PriorTransformerOutput`] is returned, otherwise a
-                 tuple is returned where the first element is the sample tensor.
-         """
-         batch_size = hidden_states.shape[0]
-
-         timesteps = timestep
-         if not torch.is_tensor(timesteps):
-             timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
-         elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
-             timesteps = timesteps[None].to(hidden_states.device)
-
-         # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
-         timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
-
-         timesteps_projected = self.time_proj(timesteps)
-
-         # timesteps does not contain any weights and will always return f32 tensors
-         # but time_embedding might be fp16, so we need to cast here.
-         timesteps_projected = timesteps_projected.to(dtype=self.dtype)
-         time_embeddings = self.time_embedding(timesteps_projected)
-
-         if self.embedding_proj_norm is not None:
-             proj_embedding = self.embedding_proj_norm(proj_embedding)
-
-         proj_embeddings = self.embedding_proj(proj_embedding)
-         if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
-             encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
-         elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
-             raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
-
-         hidden_states = self.proj_in(hidden_states)
-
-         positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
-
-         additional_embeds = []
-         additional_embeddings_len = 0
-
-         if encoder_hidden_states is not None:
-             additional_embeds.append(encoder_hidden_states)
-             additional_embeddings_len += encoder_hidden_states.shape[1]
-
-         if len(proj_embeddings.shape) == 2:
-             proj_embeddings = proj_embeddings[:, None, :]
-
-         if len(hidden_states.shape) == 2:
-             hidden_states = hidden_states[:, None, :]
-
-         additional_embeds = additional_embeds + [
-             proj_embeddings,
-             time_embeddings[:, None, :],
-             hidden_states,
-         ]
-
-         if self.prd_embedding is not None:
-             prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
-             additional_embeds.append(prd_embedding)
-
-         hidden_states = torch.cat(
-             additional_embeds,
-             dim=1,
-         )
-
-         # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
-         additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
-         if positional_embeddings.shape[1] < hidden_states.shape[1]:
-             positional_embeddings = F.pad(
-                 positional_embeddings,
-                 (
-                     0,
-                     0,
-                     additional_embeddings_len,
-                     self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
-                 ),
-                 value=0.0,
-             )
-
-         hidden_states = hidden_states + positional_embeddings
-
-         if attention_mask is not None:
-             attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
-             attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
-             attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
-             attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
-
-         if self.norm_in is not None:
-             hidden_states = self.norm_in(hidden_states)
-
-         for block in self.transformer_blocks:
-             hidden_states = block(hidden_states, attention_mask=attention_mask)
-
-         hidden_states = self.norm_out(hidden_states)
-
-         if self.prd_embedding is not None:
-             hidden_states = hidden_states[:, -1]
-         else:
-             hidden_states = hidden_states[:, additional_embeddings_len:]
-
-         predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
-
-         if not return_dict:
-             return (predicted_image_embedding,)
-
-         return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
-
-     def post_process_latents(self, prior_latents):
-         prior_latents = (prior_latents * self.clip_std) + self.clip_mean
-         return prior_latents
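A minimal forward-pass sketch with a deliberately tiny configuration (the sizes below are illustrative, not the Karlo/unCLIP defaults); `PriorTransformer` is exported from the `diffusers` top-level namespace:

```python
import torch
from diffusers import PriorTransformer

# Tiny config: inner_dim = 2 heads * 8 dims = 16; sequence = 4 text + 4 extra tokens.
prior = PriorTransformer(
    num_attention_heads=2, attention_head_dim=8, num_layers=2,
    embedding_dim=16, num_embeddings=4, additional_embeddings=4,
)
B = 2
out = prior(
    hidden_states=torch.randn(B, 16),             # current image-embedding estimate
    timestep=10,
    proj_embedding=torch.randn(B, 16),            # pooled text embedding
    encoder_hidden_states=torch.randn(B, 4, 16),  # per-token text embeddings
)
print(out.predicted_image_embedding.shape)        # torch.Size([2, 16])
```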
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_ddim.py DELETED
@@ -1,148 +0,0 @@
- import torch
-
- from diffusers import DDIMScheduler
-
- from .test_schedulers import SchedulerCommonTest
-
-
- class DDIMSchedulerTest(SchedulerCommonTest):
-     scheduler_classes = (DDIMScheduler,)
-     forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
-
-     def get_scheduler_config(self, **kwargs):
-         config = {
-             "num_train_timesteps": 1000,
-             "beta_start": 0.0001,
-             "beta_end": 0.02,
-             "beta_schedule": "linear",
-             "clip_sample": True,
-         }
-
-         config.update(**kwargs)
-         return config
-
-     def full_loop(self, **config):
-         scheduler_class = self.scheduler_classes[0]
-         scheduler_config = self.get_scheduler_config(**config)
-         scheduler = scheduler_class(**scheduler_config)
-
-         num_inference_steps, eta = 10, 0.0
-
-         model = self.dummy_model()
-         sample = self.dummy_sample_deter
-
-         scheduler.set_timesteps(num_inference_steps)
-
-         for t in scheduler.timesteps:
-             residual = model(sample, t)
-             sample = scheduler.step(residual, t, sample, eta).prev_sample
-
-         return sample
-
-     def test_timesteps(self):
-         for timesteps in [100, 500, 1000]:
-             self.check_over_configs(num_train_timesteps=timesteps)
-
-     def test_steps_offset(self):
-         for steps_offset in [0, 1]:
-             self.check_over_configs(steps_offset=steps_offset)
-
-         scheduler_class = self.scheduler_classes[0]
-         scheduler_config = self.get_scheduler_config(steps_offset=1)
-         scheduler = scheduler_class(**scheduler_config)
-         scheduler.set_timesteps(5)
-         assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
-
-     def test_betas(self):
-         for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
-             self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
-
-     def test_schedules(self):
-         for schedule in ["linear", "squaredcos_cap_v2"]:
-             self.check_over_configs(beta_schedule=schedule)
-
-     def test_prediction_type(self):
-         for prediction_type in ["epsilon", "v_prediction"]:
-             self.check_over_configs(prediction_type=prediction_type)
-
-     def test_clip_sample(self):
-         for clip_sample in [True, False]:
-             self.check_over_configs(clip_sample=clip_sample)
-
-     def test_timestep_spacing(self):
-         for timestep_spacing in ["trailing", "leading"]:
-             self.check_over_configs(timestep_spacing=timestep_spacing)
-
-     def test_rescale_betas_zero_snr(self):
-         for rescale_betas_zero_snr in [True, False]:
-             self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
-
-     def test_thresholding(self):
-         self.check_over_configs(thresholding=False)
-         for threshold in [0.5, 1.0, 2.0]:
-             for prediction_type in ["epsilon", "v_prediction"]:
-                 self.check_over_configs(
-                     thresholding=True,
-                     prediction_type=prediction_type,
-                     sample_max_value=threshold,
-                 )
-
-     def test_time_indices(self):
-         for t in [1, 10, 49]:
-             self.check_over_forward(time_step=t)
-
-     def test_inference_steps(self):
-         for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
-             self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
-
-     def test_eta(self):
-         for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
-             self.check_over_forward(time_step=t, eta=eta)
-
-     def test_variance(self):
-         scheduler_class = self.scheduler_classes[0]
-         scheduler_config = self.get_scheduler_config()
-         scheduler = scheduler_class(**scheduler_config)
-
-         assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
-         assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
-         assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
-         assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
-         assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
-         assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
-
-     def test_full_loop_no_noise(self):
-         sample = self.full_loop()
-
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 172.0067) < 1e-2
-         assert abs(result_mean.item() - 0.223967) < 1e-3
-
-     def test_full_loop_with_v_prediction(self):
-         sample = self.full_loop(prediction_type="v_prediction")
-
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 52.5302) < 1e-2
-         assert abs(result_mean.item() - 0.0684) < 1e-3
-
-     def test_full_loop_with_set_alpha_to_one(self):
-         # We specify different beta, so that the first alpha is 0.99
-         sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 149.8295) < 1e-2
-         assert abs(result_mean.item() - 0.1951) < 1e-3
-
-     def test_full_loop_with_no_set_alpha_to_one(self):
-         # We specify different beta, so that the first alpha is 0.99
-         sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
-         result_sum = torch.sum(torch.abs(sample))
-         result_mean = torch.mean(torch.abs(sample))
-
-         assert abs(result_sum.item() - 149.0784) < 1e-2
-         assert abs(result_mean.item() - 0.1941) < 1e-3
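The `full_loop` helper above is the essence of DDIM sampling. A standalone sketch of the same loop with a stand-in "model" (a real pipeline would call a UNet where the zero tensor is produced):

```python
import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for a UNet's predicted noise
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample
```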
 
spaces/Andy1621/uniformer_image_segmentation/configs/dnlnet/README.md DELETED
@@ -1,42 +0,0 @@
1
- # Disentangled Non-Local Neural Networks
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- This example is to reproduce ["Disentangled Non-Local Neural Networks"](https://arxiv.org/abs/2006.06668) for semantic segmentation. It is still in progress.
8
-
9
- ## Citation
10
-
11
- ```latex
12
- @misc{yin2020disentangled,
13
- title={Disentangled Non-Local Neural Networks},
14
- author={Minghao Yin and Zhuliang Yao and Yue Cao and Xiu Li and Zheng Zhang and Stephen Lin and Han Hu},
15
- year={2020},
16
- booktitle={ECCV}
17
- }
18
- ```
19
-
20
- ## Results and models (in progress)
21
-
22
- ### Cityscapes
23
-
24
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
25
- | ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
26
- | DNL | R-50-D8 | 512x1024 | 40000 | 7.3 | 2.56 | 78.61 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
27
- | DNL | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.96 | 78.31 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
28
- | DNL | R-50-D8 | 769x769 | 40000 | 9.2 | 1.50 | 78.44 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes-20200820_232206.log.json) |
29
- | DNL | R-101-D8 | 769x769 | 40000 | 12.6 | 1.02 | 76.39 | 77.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes-20200820_171256.log.json) |
30
- | DNL | R-50-D8 | 512x1024 | 80000 | - | - | 79.33 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
31
- | DNL | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
32
- | DNL | R-50-D8 | 769x769 | 80000 | - | - | 79.36 | 80.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes-20200820_011925.log.json) |
33
- | DNL | R-101-D8 | 769x769 | 80000 | - | - | 79.41 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes-20200821_051111.log.json) |
34
-
35
- ### ADE20K
36
-
37
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
38
- | ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
39
- | DNL | R-50-D8 | 512x512 | 80000 | 8.8 | 20.66 | 41.76 | 42.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k-20200826_183354.log.json) |
40
- | DNL | R-101-D8 | 512x512 | 80000 | 12.8 | 12.54 | 43.76 | 44.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k-20200826_183354.log.json) |
41
- | DNL | R-50-D8 | 512x512 | 160000 | - | - | 41.87 | 43.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k-20200826_183350.log.json) |
42
- | DNL | R-101-D8 | 512x512 | 160000 | - | - | 44.25 | 45.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k-20200826_183350.log.json) |
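
For reference, any config/checkpoint pair from the tables above can be loaded for inference through mmsegmentation's high-level API. A minimal sketch, assuming an mmseg 0.x installation with the config file and downloaded weights available locally (paths are illustrative):

from mmseg.apis import init_segmentor, inference_segmentor

# Any row of the tables works; this pair is the first ADE20K entry.
config_file = 'configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py'
checkpoint_file = 'dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth'

model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one HxW array of class ids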
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/flops_counter.py DELETED
@@ -1,599 +0,0 @@
1
- # Modified from flops-counter.pytorch by Vladislav Sovrasov
2
- # original repo: https://github.com/sovrasov/flops-counter.pytorch
3
-
4
- # MIT License
5
-
6
- # Copyright (c) 2018 Vladislav Sovrasov
7
-
8
- # Permission is hereby granted, free of charge, to any person obtaining a copy
9
- # of this software and associated documentation files (the "Software"), to deal
10
- # in the Software without restriction, including without limitation the rights
11
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
- # copies of the Software, and to permit persons to whom the Software is
13
- # furnished to do so, subject to the following conditions:
14
-
15
- # The above copyright notice and this permission notice shall be included in
16
- # all copies or substantial portions of the Software.
17
-
18
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
- # SOFTWARE.
25
-
26
- import sys
27
- from functools import partial
28
-
29
- import numpy as np
30
- import torch
31
- import torch.nn as nn
32
-
33
- import annotator.uniformer.mmcv as mmcv
34
-
35
-
36
- def get_model_complexity_info(model,
37
- input_shape,
38
- print_per_layer_stat=True,
39
- as_strings=True,
40
- input_constructor=None,
41
- flush=False,
42
- ost=sys.stdout):
43
- """Get complexity information of a model.
44
-
45
- This method can calculate FLOPs and parameter counts of a model with
46
- corresponding input shape. It can also print complexity information for
47
- each layer in a model.
48
-
49
- Supported layers are listed as below:
50
- - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
51
- - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
52
- ``nn.ReLU6``.
53
- - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
54
- ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
55
- ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
56
- ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
57
- ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
58
- - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
59
- ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
60
- ``nn.InstanceNorm2d``, ``nn.InstanceNorm3d``, ``nn.LayerNorm``.
61
- - Linear: ``nn.Linear``.
62
- - Deconvolution: ``nn.ConvTranspose2d``.
63
- - Upsample: ``nn.Upsample``.
64
-
65
- Args:
66
- model (nn.Module): The model for complexity calculation.
67
- input_shape (tuple): Input shape used for calculation.
68
- print_per_layer_stat (bool): Whether to print complexity information
69
- for each layer in a model. Default: True.
70
- as_strings (bool): Output FLOPs and params counts in a string form.
71
- Default: True.
72
- input_constructor (None | callable): If specified, it takes a callable
73
- method that generates input. Otherwise, it will generate a random
74
- tensor with input shape to calculate FLOPs. Default: None.
75
- flush (bool): same as that in :func:`print`. Default: False.
76
- ost (stream): same as ``file`` param in :func:`print`.
77
- Default: sys.stdout.
78
-
79
- Returns:
80
- tuple[float | str]: If ``as_strings`` is set to True, it will return
81
- FLOPs and parameter counts in a string format. Otherwise, it will
82
- return those in a float number format.
83
- """
84
- assert type(input_shape) is tuple
85
- assert len(input_shape) >= 1
86
- assert isinstance(model, nn.Module)
87
- flops_model = add_flops_counting_methods(model)
88
- flops_model.eval()
89
- flops_model.start_flops_count()
90
- if input_constructor:
91
- input = input_constructor(input_shape)
92
- _ = flops_model(**input)
93
- else:
94
- try:
95
- batch = torch.ones(()).new_empty(
96
- (1, *input_shape),
97
- dtype=next(flops_model.parameters()).dtype,
98
- device=next(flops_model.parameters()).device)
99
- except StopIteration:
100
- # Avoid StopIteration for models which have no parameters,
101
- # like `nn.ReLU()`, `nn.AvgPool2d()`, etc.
102
- batch = torch.ones(()).new_empty((1, *input_shape))
103
-
104
- _ = flops_model(batch)
105
-
106
- flops_count, params_count = flops_model.compute_average_flops_cost()
107
- if print_per_layer_stat:
108
- print_model_with_flops(
109
- flops_model, flops_count, params_count, ost=ost, flush=flush)
110
- flops_model.stop_flops_count()
111
-
112
- if as_strings:
113
- return flops_to_string(flops_count), params_to_string(params_count)
114
-
115
- return flops_count, params_count
116
-
117
-
118
- def flops_to_string(flops, units='GFLOPs', precision=2):
119
- """Convert FLOPs number into a string.
120
-
121
- Note that here we count one multiply-add as one FLOP.
122
-
123
- Args:
124
- flops (float): FLOPs number to be converted.
125
- units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
126
- 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
127
- choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
128
- precision (int): Digit number after the decimal point. Default: 2.
129
-
130
- Returns:
131
- str: The converted FLOPs number with units.
132
-
133
- Examples:
134
- >>> flops_to_string(1e9)
135
- '1.0 GFLOPs'
136
- >>> flops_to_string(2e5, 'MFLOPs')
137
- '0.2 MFLOPs'
138
- >>> flops_to_string(3e-9, None)
139
- '3e-09 FLOPs'
140
- """
141
- if units is None:
142
- if flops // 10**9 > 0:
143
- return str(round(flops / 10.**9, precision)) + ' GFLOPs'
144
- elif flops // 10**6 > 0:
145
- return str(round(flops / 10.**6, precision)) + ' MFLOPs'
146
- elif flops // 10**3 > 0:
147
- return str(round(flops / 10.**3, precision)) + ' KFLOPs'
148
- else:
149
- return str(flops) + ' FLOPs'
150
- else:
151
- if units == 'GFLOPs':
152
- return str(round(flops / 10.**9, precision)) + ' ' + units
153
- elif units == 'MFLOPs':
154
- return str(round(flops / 10.**6, precision)) + ' ' + units
155
- elif units == 'KFLOPs':
156
- return str(round(flops / 10.**3, precision)) + ' ' + units
157
- else:
158
- return str(flops) + ' FLOPs'
159
-
160
-
161
- def params_to_string(num_params, units=None, precision=2):
162
- """Convert parameter number into a string.
163
-
164
- Args:
165
- num_params (float): Parameter number to be converted.
166
- units (str | None): Converted parameter units. Options are None, 'M',
167
- 'K' and ''. If set to None, it will automatically choose the most
168
- suitable unit for Parameter number. Default: None.
169
- precision (int): Digit number after the decimal point. Default: 2.
170
-
171
- Returns:
172
- str: The converted parameter number with units.
173
-
174
- Examples:
175
- >>> params_to_string(1e9)
176
- '1000.0 M'
177
- >>> params_to_string(2e5)
178
- '200.0 k'
179
- >>> params_to_string(3e-9)
180
- '3e-09'
181
- """
182
- if units is None:
183
- if num_params // 10**6 > 0:
184
- return str(round(num_params / 10**6, precision)) + ' M'
185
- elif num_params // 10**3:
186
- return str(round(num_params / 10**3, precision)) + ' k'
187
- else:
188
- return str(num_params)
189
- else:
190
- if units == 'M':
191
- return str(round(num_params / 10.**6, precision)) + ' ' + units
192
- elif units == 'K':
193
- return str(round(num_params / 10.**3, precision)) + ' ' + units
194
- else:
195
- return str(num_params)
196
-
197
-
198
- def print_model_with_flops(model,
199
- total_flops,
200
- total_params,
201
- units='GFLOPs',
202
- precision=3,
203
- ost=sys.stdout,
204
- flush=False):
205
- """Print a model with FLOPs for each layer.
206
-
207
- Args:
208
- model (nn.Module): The model to be printed.
209
- total_flops (float): Total FLOPs of the model.
210
- total_params (float): Total parameter counts of the model.
211
- units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
212
- precision (int): Digit number after the decimal point. Default: 3.
213
- ost (stream): same as `file` param in :func:`print`.
214
- Default: sys.stdout.
215
- flush (bool): same as that in :func:`print`. Default: False.
216
-
217
- Example:
218
- >>> class ExampleModel(nn.Module):
219
-
220
- >>> def __init__(self):
221
- >>> super().__init__()
222
- >>> self.conv1 = nn.Conv2d(3, 8, 3)
223
- >>> self.conv2 = nn.Conv2d(8, 256, 3)
224
- >>> self.conv3 = nn.Conv2d(256, 8, 3)
225
- >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
226
- >>> self.flatten = nn.Flatten()
227
- >>> self.fc = nn.Linear(8, 1)
228
-
229
- >>> def forward(self, x):
230
- >>> x = self.conv1(x)
231
- >>> x = self.conv2(x)
232
- >>> x = self.conv3(x)
233
- >>> x = self.avg_pool(x)
234
- >>> x = self.flatten(x)
235
- >>> x = self.fc(x)
236
- >>> return x
237
-
238
- >>> model = ExampleModel()
239
- >>> x = (3, 16, 16)
240
- to print the complexity information state for each layer, you can use
241
- >>> get_model_complexity_info(model, x)
242
- or directly use
243
- >>> print_model_with_flops(model, 4579784.0, 37361)
244
- ExampleModel(
245
- 0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
246
- (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501
247
- (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
248
- (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
249
- (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
250
- (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
251
- (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
252
- )
253
- """
254
-
255
- def accumulate_params(self):
256
- if is_supported_instance(self):
257
- return self.__params__
258
- else:
259
- total = 0
260
- for m in self.children():
261
- total += m.accumulate_params()
262
- return total
263
-
264
- def accumulate_flops(self):
265
- if is_supported_instance(self):
266
- return self.__flops__ / model.__batch_counter__
267
- else:
268
- total = 0
269
- for m in self.children():
270
- total += m.accumulate_flops()
271
- return total
272
-
273
- def flops_repr(self):
274
- accumulated_num_params = self.accumulate_params()
275
- accumulated_flops_cost = self.accumulate_flops()
276
- return ', '.join([
277
- params_to_string(
278
- accumulated_num_params, units='M', precision=precision),
279
- '{:.3%} Params'.format(accumulated_num_params / total_params),
280
- flops_to_string(
281
- accumulated_flops_cost, units=units, precision=precision),
282
- '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
283
- self.original_extra_repr()
284
- ])
285
-
286
- def add_extra_repr(m):
287
- m.accumulate_flops = accumulate_flops.__get__(m)
288
- m.accumulate_params = accumulate_params.__get__(m)
289
- flops_extra_repr = flops_repr.__get__(m)
290
- if m.extra_repr != flops_extra_repr:
291
- m.original_extra_repr = m.extra_repr
292
- m.extra_repr = flops_extra_repr
293
- assert m.extra_repr != m.original_extra_repr
294
-
295
- def del_extra_repr(m):
296
- if hasattr(m, 'original_extra_repr'):
297
- m.extra_repr = m.original_extra_repr
298
- del m.original_extra_repr
299
- if hasattr(m, 'accumulate_flops'):
300
- del m.accumulate_flops
301
-
302
- model.apply(add_extra_repr)
303
- print(model, file=ost, flush=flush)
304
- model.apply(del_extra_repr)
305
-
306
-
307
- def get_model_parameters_number(model):
308
- """Calculate parameter number of a model.
309
-
310
- Args:
311
- model (nn.Module): The model for parameter number calculation.
312
-
313
- Returns:
314
- float: Parameter number of the model.
315
- """
316
- num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
317
- return num_params
318
-
319
-
320
- def add_flops_counting_methods(net_main_module):
321
- # adding additional methods to the existing module object,
322
- # this is done this way so that each function has access to self object
323
- net_main_module.start_flops_count = start_flops_count.__get__(
324
- net_main_module)
325
- net_main_module.stop_flops_count = stop_flops_count.__get__(
326
- net_main_module)
327
- net_main_module.reset_flops_count = reset_flops_count.__get__(
328
- net_main_module)
329
- net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501
330
- net_main_module)
331
-
332
- net_main_module.reset_flops_count()
333
-
334
- return net_main_module
335
-
336
-
337
- def compute_average_flops_cost(self):
338
- """Compute average FLOPs cost.
339
-
340
- A method to compute average FLOPs cost, which will be available after
341
- `add_flops_counting_methods()` is called on a desired net object.
342
-
343
- Returns:
344
- float: Current mean flops consumption per image.
345
- """
346
- batches_count = self.__batch_counter__
347
- flops_sum = 0
348
- for module in self.modules():
349
- if is_supported_instance(module):
350
- flops_sum += module.__flops__
351
- params_sum = get_model_parameters_number(self)
352
- return flops_sum / batches_count, params_sum
353
-
354
-
355
- def start_flops_count(self):
356
- """Activate the computation of mean flops consumption per image.
357
-
358
- A method to activate the computation of mean flops consumption per image,
359
- which will be available after ``add_flops_counting_methods()`` is called on
360
- a desired net object. It should be called before running the network.
361
- """
362
- add_batch_counter_hook_function(self)
363
-
364
- def add_flops_counter_hook_function(module):
365
- if is_supported_instance(module):
366
- if hasattr(module, '__flops_handle__'):
367
- return
368
-
369
- else:
370
- handle = module.register_forward_hook(
371
- get_modules_mapping()[type(module)])
372
-
373
- module.__flops_handle__ = handle
374
-
375
- self.apply(partial(add_flops_counter_hook_function))
376
-
377
-
378
- def stop_flops_count(self):
379
- """Stop computing the mean flops consumption per image.
380
-
381
- A method to stop computing the mean flops consumption per image, which will
382
- be available after ``add_flops_counting_methods()`` is called on a desired
383
- net object. It can be called to pause the computation whenever.
384
- """
385
- remove_batch_counter_hook_function(self)
386
- self.apply(remove_flops_counter_hook_function)
387
-
388
-
389
- def reset_flops_count(self):
390
- """Reset statistics computed so far.
391
-
392
- A method to reset computed statistics, which will be available after
393
- `add_flops_counting_methods()` is called on a desired net object.
394
- """
395
- add_batch_counter_variables_or_reset(self)
396
- self.apply(add_flops_counter_variable_or_reset)
397
-
398
-
399
- # ---- Internal functions
400
- def empty_flops_counter_hook(module, input, output):
401
- module.__flops__ += 0
402
-
403
-
404
- def upsample_flops_counter_hook(module, input, output):
405
- output_size = output[0]
406
- batch_size = output_size.shape[0]
407
- output_elements_count = batch_size
408
- for val in output_size.shape[1:]:
409
- output_elements_count *= val
410
- module.__flops__ += int(output_elements_count)
411
-
412
-
413
- def relu_flops_counter_hook(module, input, output):
414
- active_elements_count = output.numel()
415
- module.__flops__ += int(active_elements_count)
416
-
417
-
418
- def linear_flops_counter_hook(module, input, output):
419
- input = input[0]
420
- output_last_dim = output.shape[
421
- -1] # pytorch checks dimensions, so here we don't care much
422
- module.__flops__ += int(np.prod(input.shape) * output_last_dim)
423
-
424
-
425
- def pool_flops_counter_hook(module, input, output):
426
- input = input[0]
427
- module.__flops__ += int(np.prod(input.shape))
428
-
429
-
430
- def norm_flops_counter_hook(module, input, output):
431
- input = input[0]
432
-
433
- batch_flops = np.prod(input.shape)
434
- if (getattr(module, 'affine', False)
435
- or getattr(module, 'elementwise_affine', False)):
436
- batch_flops *= 2
437
- module.__flops__ += int(batch_flops)
438
-
439
-
440
- def deconv_flops_counter_hook(conv_module, input, output):
441
- # Can have multiple inputs, getting the first one
442
- input = input[0]
443
-
444
- batch_size = input.shape[0]
445
- input_height, input_width = input.shape[2:]
446
-
447
- kernel_height, kernel_width = conv_module.kernel_size
448
- in_channels = conv_module.in_channels
449
- out_channels = conv_module.out_channels
450
- groups = conv_module.groups
451
-
452
- filters_per_channel = out_channels // groups
453
- conv_per_position_flops = (
454
- kernel_height * kernel_width * in_channels * filters_per_channel)
455
-
456
- active_elements_count = batch_size * input_height * input_width
457
- overall_conv_flops = conv_per_position_flops * active_elements_count
458
- bias_flops = 0
459
- if conv_module.bias is not None:
460
- output_height, output_width = output.shape[2:]
461
- bias_flops = out_channels * batch_size * output_height * output_width
462
- overall_flops = overall_conv_flops + bias_flops
463
-
464
- conv_module.__flops__ += int(overall_flops)
465
-
466
-
467
- def conv_flops_counter_hook(conv_module, input, output):
468
- # Can have multiple inputs, getting the first one
469
- input = input[0]
470
-
471
- batch_size = input.shape[0]
472
- output_dims = list(output.shape[2:])
473
-
474
- kernel_dims = list(conv_module.kernel_size)
475
- in_channels = conv_module.in_channels
476
- out_channels = conv_module.out_channels
477
- groups = conv_module.groups
478
-
479
- filters_per_channel = out_channels // groups
480
- conv_per_position_flops = int(
481
- np.prod(kernel_dims)) * in_channels * filters_per_channel
482
-
483
- active_elements_count = batch_size * int(np.prod(output_dims))
484
-
485
- overall_conv_flops = conv_per_position_flops * active_elements_count
486
-
487
- bias_flops = 0
488
-
489
- if conv_module.bias is not None:
490
-
491
- bias_flops = out_channels * active_elements_count
492
-
493
- overall_flops = overall_conv_flops + bias_flops
494
-
495
- conv_module.__flops__ += int(overall_flops)
496
-
497
-
498
- def batch_counter_hook(module, input, output):
499
- batch_size = 1
500
- if len(input) > 0:
501
- # Can have multiple inputs, getting the first one
502
- input = input[0]
503
- batch_size = len(input)
504
- else:
506
- print('Warning! No positional inputs found for a module, '
507
- 'assuming batch size is 1.')
508
- module.__batch_counter__ += batch_size
509
-
510
-
511
- def add_batch_counter_variables_or_reset(module):
512
-
513
- module.__batch_counter__ = 0
514
-
515
-
516
- def add_batch_counter_hook_function(module):
517
- if hasattr(module, '__batch_counter_handle__'):
518
- return
519
-
520
- handle = module.register_forward_hook(batch_counter_hook)
521
- module.__batch_counter_handle__ = handle
522
-
523
-
524
- def remove_batch_counter_hook_function(module):
525
- if hasattr(module, '__batch_counter_handle__'):
526
- module.__batch_counter_handle__.remove()
527
- del module.__batch_counter_handle__
528
-
529
-
530
- def add_flops_counter_variable_or_reset(module):
531
- if is_supported_instance(module):
532
- if hasattr(module, '__flops__') or hasattr(module, '__params__'):
533
- print('Warning: variables __flops__ or __params__ are already '
534
- 'defined for the module ' + type(module).__name__ +
535
- '. ptflops can affect your code!')
536
- module.__flops__ = 0
537
- module.__params__ = get_model_parameters_number(module)
538
-
539
-
540
- def is_supported_instance(module):
541
- if type(module) in get_modules_mapping():
542
- return True
543
- return False
544
-
545
-
546
- def remove_flops_counter_hook_function(module):
547
- if is_supported_instance(module):
548
- if hasattr(module, '__flops_handle__'):
549
- module.__flops_handle__.remove()
550
- del module.__flops_handle__
551
-
552
-
553
- def get_modules_mapping():
554
- return {
555
- # convolutions
556
- nn.Conv1d: conv_flops_counter_hook,
557
- nn.Conv2d: conv_flops_counter_hook,
558
- mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
559
- nn.Conv3d: conv_flops_counter_hook,
560
- mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
561
- # activations
562
- nn.ReLU: relu_flops_counter_hook,
563
- nn.PReLU: relu_flops_counter_hook,
564
- nn.ELU: relu_flops_counter_hook,
565
- nn.LeakyReLU: relu_flops_counter_hook,
566
- nn.ReLU6: relu_flops_counter_hook,
567
- # poolings
568
- nn.MaxPool1d: pool_flops_counter_hook,
569
- nn.AvgPool1d: pool_flops_counter_hook,
570
- nn.AvgPool2d: pool_flops_counter_hook,
571
- nn.MaxPool2d: pool_flops_counter_hook,
572
- mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
573
- nn.MaxPool3d: pool_flops_counter_hook,
574
- mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
575
- nn.AvgPool3d: pool_flops_counter_hook,
576
- nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
577
- nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
578
- nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
579
- nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
580
- nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
581
- nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
582
- # normalizations
583
- nn.BatchNorm1d: norm_flops_counter_hook,
584
- nn.BatchNorm2d: norm_flops_counter_hook,
585
- nn.BatchNorm3d: norm_flops_counter_hook,
586
- nn.GroupNorm: norm_flops_counter_hook,
587
- nn.InstanceNorm1d: norm_flops_counter_hook,
588
- nn.InstanceNorm2d: norm_flops_counter_hook,
589
- nn.InstanceNorm3d: norm_flops_counter_hook,
590
- nn.LayerNorm: norm_flops_counter_hook,
591
- # FC
592
- nn.Linear: linear_flops_counter_hook,
593
- mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
594
- # Upscale
595
- nn.Upsample: upsample_flops_counter_hook,
596
- # Deconvolution
597
- nn.ConvTranspose2d: deconv_flops_counter_hook,
598
- mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
599
- }
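
Taken together, the typical entry point for this counter is `get_model_complexity_info`; a minimal usage sketch (the toy model below is illustrative, and the function is the one defined at the top of this file):

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),  # counted by conv_flops_counter_hook
    nn.ReLU(),                       # counted by relu_flops_counter_hook
    nn.AdaptiveAvgPool2d(1))         # counted by pool_flops_counter_hook
flops, params = get_model_complexity_info(
    model, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
print(flops, params)  # multiply-adds are counted as one FLOP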
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/image/photometric.py DELETED
@@ -1,428 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import cv2
3
- import numpy as np
4
-
5
- from ..utils import is_tuple_of
6
- from .colorspace import bgr2gray, gray2bgr
7
-
8
-
9
- def imnormalize(img, mean, std, to_rgb=True):
10
- """Normalize an image with mean and std.
11
-
12
- Args:
13
- img (ndarray): Image to be normalized.
14
- mean (ndarray): The mean to be used for normalize.
15
- std (ndarray): The std to be used for normalize.
16
- to_rgb (bool): Whether to convert to rgb.
17
-
18
- Returns:
19
- ndarray: The normalized image.
20
- """
21
- img = img.copy().astype(np.float32)
22
- return imnormalize_(img, mean, std, to_rgb)
23
-
24
-
25
- def imnormalize_(img, mean, std, to_rgb=True):
26
- """Inplace normalize an image with mean and std.
27
-
28
- Args:
29
- img (ndarray): Image to be normalized.
30
- mean (ndarray): The mean to be used for normalize.
31
- std (ndarray): The std to be used for normalize.
32
- to_rgb (bool): Whether to convert to rgb.
33
-
34
- Returns:
35
- ndarray: The normalized image.
36
- """
37
- # cv2 inplace normalization does not accept uint8
38
- assert img.dtype != np.uint8
39
- mean = np.float64(mean.reshape(1, -1))
40
- stdinv = 1 / np.float64(std.reshape(1, -1))
41
- if to_rgb:
42
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
43
- cv2.subtract(img, mean, img) # inplace
44
- cv2.multiply(img, stdinv, img) # inplace
45
- return img
46
-
47
-
48
- def imdenormalize(img, mean, std, to_bgr=True):
49
- assert img.dtype != np.uint8
50
- mean = mean.reshape(1, -1).astype(np.float64)
51
- std = std.reshape(1, -1).astype(np.float64)
52
- img = cv2.multiply(img, std) # make a copy
53
- cv2.add(img, mean, img) # inplace
54
- if to_bgr:
55
- cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace
56
- return img
57
-
58
-
59
- def iminvert(img):
60
- """Invert (negate) an image.
61
-
62
- Args:
63
- img (ndarray): Image to be inverted.
64
-
65
- Returns:
66
- ndarray: The inverted image.
67
- """
68
- return np.full_like(img, 255) - img
69
-
70
-
71
- def solarize(img, thr=128):
72
- """Solarize an image (invert all pixel values above a threshold)
73
-
74
- Args:
75
- img (ndarray): Image to be solarized.
76
- thr (int): Threshold for solarizing (0 - 255).
77
-
78
- Returns:
79
- ndarray: The solarized image.
80
- """
81
- img = np.where(img < thr, img, 255 - img)
82
- return img
83
-
84
-
85
- def posterize(img, bits):
86
- """Posterize an image (reduce the number of bits for each color channel)
87
-
88
- Args:
89
- img (ndarray): Image to be posterized.
90
- bits (int): Number of bits (1 to 8) to use for posterizing.
91
-
92
- Returns:
93
- ndarray: The posterized image.
94
- """
95
- shift = 8 - bits
96
- img = np.left_shift(np.right_shift(img, shift), shift)
97
- return img
98
-
99
-
100
- def adjust_color(img, alpha=1, beta=None, gamma=0):
101
- r"""It blends the source image and its gray image:
102
-
103
- .. math::
104
- output = img * alpha + gray\_img * beta + gamma
105
-
106
- Args:
107
- img (ndarray): The input source image.
108
- alpha (int | float): Weight for the source image. Default 1.
109
- beta (int | float): Weight for the converted gray image.
110
- If None, it's assigned the value (1 - `alpha`).
111
- gamma (int | float): Scalar added to each sum.
112
- Same as :func:`cv2.addWeighted`. Default 0.
113
-
114
- Returns:
115
- ndarray: Colored image which has the same size and dtype as input.
116
- """
117
- gray_img = bgr2gray(img)
118
- gray_img = np.tile(gray_img[..., None], [1, 1, 3])
119
- if beta is None:
120
- beta = 1 - alpha
121
- colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma)
122
- if not colored_img.dtype == np.uint8:
123
- # Note when the dtype of `img` is not the default `np.uint8`
124
- # (e.g. np.float32), the value in `colored_img` got from cv2
125
- # is not guaranteed to be in range [0, 255], so here clip
126
- # is needed.
127
- colored_img = np.clip(colored_img, 0, 255)
128
- return colored_img
129
-
130
-
131
- def imequalize(img):
132
- """Equalize the image histogram.
133
-
134
- This function applies a non-linear mapping to the input image,
135
- in order to create a uniform distribution of grayscale values
136
- in the output image.
137
-
138
- Args:
139
- img (ndarray): Image to be equalized.
140
-
141
- Returns:
142
- ndarray: The equalized image.
143
- """
144
-
145
- def _scale_channel(im, c):
146
- """Scale the data in the corresponding channel."""
147
- im = im[:, :, c]
148
- # Compute the histogram of the image channel.
149
- histo = np.histogram(im, 256, (0, 255))[0]
150
- # For computing the step, filter out the nonzeros.
151
- nonzero_histo = histo[histo > 0]
152
- step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
153
- if not step:
154
- lut = np.array(range(256))
155
- else:
156
- # Compute the cumulative sum, shifted by step // 2
157
- # and then normalized by step.
158
- lut = (np.cumsum(histo) + (step // 2)) // step
159
- # Shift lut, prepending with 0.
160
- lut = np.concatenate([[0], lut[:-1]], 0)
161
- # handle potential integer overflow
162
- lut[lut > 255] = 255
163
- # If step is zero, return the original image.
164
- # Otherwise, index from lut.
165
- return np.where(np.equal(step, 0), im, lut[im])
166
-
167
- # Scales each channel independently and then stacks
168
- # the result.
169
- s1 = _scale_channel(img, 0)
170
- s2 = _scale_channel(img, 1)
171
- s3 = _scale_channel(img, 2)
172
- equalized_img = np.stack([s1, s2, s3], axis=-1)
173
- return equalized_img.astype(img.dtype)
174
-
175
-
176
- def adjust_brightness(img, factor=1.):
177
- """Adjust image brightness.
178
-
179
- This function controls the brightness of an image. An
180
- enhancement factor of 0.0 gives a black image.
181
- A factor of 1.0 gives the original image. This function
182
- blends the source image and the degenerated black image:
183
-
184
- .. math::
185
- output = img * factor + degenerated * (1 - factor)
186
-
187
- Args:
188
- img (ndarray): Image to be brightened.
189
- factor (float): A value that controls the enhancement.
190
- Factor 1.0 returns the original image, lower
191
- factors mean less color (brightness, contrast,
192
- etc), and higher values more. Default 1.
193
-
194
- Returns:
195
- ndarray: The brightened image.
196
- """
197
- degenerated = np.zeros_like(img)
198
- # Note manually convert the dtype to np.float32, to
199
- # achieve as close results as PIL.ImageEnhance.Brightness.
200
- # Set beta=1-factor, and gamma=0
201
- brightened_img = cv2.addWeighted(
202
- img.astype(np.float32), factor, degenerated.astype(np.float32),
203
- 1 - factor, 0)
204
- brightened_img = np.clip(brightened_img, 0, 255)
205
- return brightened_img.astype(img.dtype)
206
-
207
-
208
- def adjust_contrast(img, factor=1.):
209
- """Adjust image contrast.
210
-
211
- This function controls the contrast of an image. An
212
- enhancement factor of 0.0 gives a solid grey
213
- image. A factor of 1.0 gives the original image. It
214
- blends the source image and the degenerated mean image:
215
-
216
- .. math::
217
- output = img * factor + degenerated * (1 - factor)
218
-
219
- Args:
220
- img (ndarray): Image to be contrasted. BGR order.
221
- factor (float): Same as :func:`mmcv.adjust_brightness`.
222
-
223
- Returns:
224
- ndarray: The contrasted image.
225
- """
226
- gray_img = bgr2gray(img)
227
- hist = np.histogram(gray_img, 256, (0, 255))[0]
228
- mean = round(np.sum(gray_img) / np.sum(hist))
229
- degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype)
230
- degenerated = gray2bgr(degenerated)
231
- contrasted_img = cv2.addWeighted(
232
- img.astype(np.float32), factor, degenerated.astype(np.float32),
233
- 1 - factor, 0)
234
- contrasted_img = np.clip(contrasted_img, 0, 255)
235
- return contrasted_img.astype(img.dtype)
236
-
237
-
238
- def auto_contrast(img, cutoff=0):
239
- """Auto adjust image contrast.
240
-
241
- This function maximizes (normalizes) image contrast by first removing the cutoff
242
- percent of the lightest and darkest pixels from the histogram and remapping
243
- the image so that the darkest pixel becomes black (0), and the lightest
244
- becomes white (255).
245
-
246
- Args:
247
- img (ndarray): Image to be contrasted. BGR order.
248
- cutoff (int | float | tuple): The cutoff percent of the lightest and
249
- darkest pixels to be removed. If given as tuple, it shall be
250
- (low, high). Otherwise, the single value will be used for both.
251
- Defaults to 0.
252
-
253
- Returns:
254
- ndarray: The contrasted image.
255
- """
256
-
257
- def _auto_contrast_channel(im, c, cutoff):
258
- im = im[:, :, c]
259
- # Compute the histogram of the image channel.
260
- histo = np.histogram(im, 256, (0, 255))[0]
261
- # Remove cut-off percent pixels from histo
262
- histo_sum = np.cumsum(histo)
263
- cut_low = histo_sum[-1] * cutoff[0] // 100
264
- cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100
265
- histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low
266
- histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0)
267
-
268
- # Compute mapping
269
- low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1]
270
- # If all the values have been cut off, return the origin img
271
- if low >= high:
272
- return im
273
- scale = 255.0 / (high - low)
274
- offset = -low * scale
275
- lut = np.array(range(256))
276
- lut = lut * scale + offset
277
- lut = np.clip(lut, 0, 255)
278
- return lut[im]
279
-
280
- if isinstance(cutoff, (int, float)):
281
- cutoff = (cutoff, cutoff)
282
- else:
283
- assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \
284
- f'float or tuple, but got {type(cutoff)} instead.'
285
- # Auto adjusts contrast for each channel independently and then stacks
286
- # the result.
287
- s1 = _auto_contrast_channel(img, 0, cutoff)
288
- s2 = _auto_contrast_channel(img, 1, cutoff)
289
- s3 = _auto_contrast_channel(img, 2, cutoff)
290
- contrasted_img = np.stack([s1, s2, s3], axis=-1)
291
- return contrasted_img.astype(img.dtype)
292
-
293
-
294
- def adjust_sharpness(img, factor=1., kernel=None):
295
- """Adjust image sharpness.
296
-
297
- This function controls the sharpness of an image. An
298
- enhancement factor of 0.0 gives a blurred image. A
299
- factor of 1.0 gives the original image. And a factor
300
- of 2.0 gives a sharpened image. It blends the source
301
- image and the degenerated mean image:
302
-
303
- .. math::
304
- output = img * factor + degenerated * (1 - factor)
305
-
306
- Args:
307
- img (ndarray): Image to be sharpened. BGR order.
308
- factor (float): Same as :func:`mmcv.adjust_brightness`.
309
- kernel (np.ndarray, optional): Filter kernel to be applied on the img
310
- to obtain the degenerated img. Defaults to None.
311
-
312
- Note:
313
- No value sanity check is enforced on the kernel set by users. So with
314
- an inappropriate kernel, the ``adjust_sharpness`` may fail to perform
315
- the function its name indicates but end up performing whatever
316
- transform determined by the kernel.
317
-
318
- Returns:
319
- ndarray: The sharpened image.
320
- """
321
-
322
- if kernel is None:
323
- # adopted from PIL.ImageFilter.SMOOTH
324
- kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
325
- assert isinstance(kernel, np.ndarray), \
326
- f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
327
- assert kernel.ndim == 2, \
328
- f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'
329
-
330
- degenerated = cv2.filter2D(img, -1, kernel)
331
- sharpened_img = cv2.addWeighted(
332
- img.astype(np.float32), factor, degenerated.astype(np.float32),
333
- 1 - factor, 0)
334
- sharpened_img = np.clip(sharpened_img, 0, 255)
335
- return sharpened_img.astype(img.dtype)
336
-
337
-
338
- def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
339
- """AlexNet-style PCA jitter.
340
-
341
- This data augmentation is proposed in `ImageNet Classification with Deep
342
- Convolutional Neural Networks
343
- <https://dl.acm.org/doi/pdf/10.1145/3065386>`_.
344
-
345
- Args:
346
- img (ndarray): Image whose lighting is to be adjusted. BGR order.
347
- eigval (ndarray): the eigenvalues of the covariance matrix of pixel
349
- values.
350
- eigvec (ndarray): the eigenvectors of the covariance matrix of pixel
351
- values.
351
- alphastd (float): The standard deviation for distribution of alpha.
352
- Defaults to 0.1.
353
- to_rgb (bool): Whether to convert img to rgb.
354
-
355
- Returns:
356
- ndarray: The adjusted image.
357
- """
358
- assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \
359
- f'eigval and eigvec should both be of type np.ndarray, got ' \
360
- f'{type(eigval)} and {type(eigvec)} instead.'
361
-
362
- assert eigval.ndim == 1 and eigvec.ndim == 2
363
- assert eigvec.shape == (3, eigval.shape[0])
364
- n_eigval = eigval.shape[0]
365
- assert isinstance(alphastd, float), 'alphastd should be of type float, ' \
366
- f'got {type(alphastd)} instead.'
367
-
368
- img = img.copy().astype(np.float32)
369
- if to_rgb:
370
- cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
371
-
372
- alpha = np.random.normal(0, alphastd, n_eigval)
373
- alter = eigvec \
374
- * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \
375
- * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval))
376
- alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape)
377
- img_adjusted = img + alter
378
- return img_adjusted
379
-
380
-
381
- def lut_transform(img, lut_table):
382
- """Transform array by look-up table.
383
-
384
- The function lut_transform fills the output array with values from the
385
- look-up table. Indices of the entries are taken from the input array.
386
-
387
- Args:
388
- img (ndarray): Image to be transformed.
389
- lut_table (ndarray): look-up table of 256 elements; in case of
390
- multi-channel input array, the table should either have a single
391
- channel (in this case the same table is used for all channels) or
392
- the same number of channels as in the input array.
393
-
394
- Returns:
395
- ndarray: The transformed image.
396
- """
397
- assert isinstance(img, np.ndarray)
398
- assert 0 <= np.min(img) and np.max(img) <= 255
399
- assert isinstance(lut_table, np.ndarray)
400
- assert lut_table.shape == (256, )
401
-
402
- return cv2.LUT(np.array(img, dtype=np.uint8), lut_table)
403
-
404
-
405
- def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
406
- """Use CLAHE method to process the image.
407
-
408
- See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
409
- Graphics Gems, 1994:474-485.` for more information.
410
-
411
- Args:
412
- img (ndarray): Image to be processed.
413
- clip_limit (float): Threshold for contrast limiting. Default: 40.0.
414
- tile_grid_size (tuple[int]): Size of grid for histogram equalization.
415
- Input image will be divided into equally sized rectangular tiles.
416
- It defines the number of tiles in row and column. Default: (8, 8).
417
-
418
- Returns:
419
- ndarray: The processed image.
420
- """
421
- assert isinstance(img, np.ndarray)
422
- assert img.ndim == 2
423
- assert isinstance(clip_limit, (float, int))
424
- assert is_tuple_of(tile_grid_size, int)
425
- assert len(tile_grid_size) == 2
426
-
427
- clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
428
- return clahe.apply(np.array(img, dtype=np.uint8))
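
A short sketch of how these helpers compose in practice (the dummy image and factor values are illustrative; the functions expect BGR channel order, and all are defined in this file):

import numpy as np

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # dummy BGR image
bright = adjust_brightness(img, factor=1.3)  # factor > 1.0 brightens
flat = adjust_contrast(bright, factor=0.8)   # blend toward the mean-gray image
eq = clahe(flat[..., 0])                     # clahe operates on a single 2D channel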
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/dataset_wrappers.py DELETED
@@ -1,50 +0,0 @@
1
- from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
2
-
3
- from .builder import DATASETS
4
-
5
-
6
- @DATASETS.register_module()
7
- class ConcatDataset(_ConcatDataset):
8
- """A wrapper of concatenated dataset.
9
-
10
- Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
11
- concat the group flag for image aspect ratio.
12
-
13
- Args:
14
- datasets (list[:obj:`Dataset`]): A list of datasets.
15
- """
16
-
17
- def __init__(self, datasets):
18
- super(ConcatDataset, self).__init__(datasets)
19
- self.CLASSES = datasets[0].CLASSES
20
- self.PALETTE = datasets[0].PALETTE
21
-
22
-
23
- @DATASETS.register_module()
24
- class RepeatDataset(object):
25
- """A wrapper of repeated dataset.
26
-
27
- The length of repeated dataset will be `times` larger than the original
28
- dataset. This is useful when the data loading time is long but the dataset
29
- is small. Using RepeatDataset can reduce the data loading time between
30
- epochs.
31
-
32
- Args:
33
- dataset (:obj:`Dataset`): The dataset to be repeated.
34
- times (int): Repeat times.
35
- """
36
-
37
- def __init__(self, dataset, times):
38
- self.dataset = dataset
39
- self.times = times
40
- self.CLASSES = dataset.CLASSES
41
- self.PALETTE = dataset.PALETTE
42
- self._ori_len = len(self.dataset)
43
-
44
- def __getitem__(self, idx):
45
- """Get item from original dataset."""
46
- return self.dataset[idx % self._ori_len]
47
-
48
- def __len__(self):
49
- """The length is multiplied by ``times``"""
50
- return self.times * self._ori_len
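
A minimal sketch of RepeatDataset in use; the tiny dataset below is a placeholder standing in for a real mmseg dataset with CLASSES/PALETTE metadata:

class TinyDataset:
    CLASSES = ('road', 'car')                # placeholder metadata
    PALETTE = [[128, 64, 128], [0, 0, 142]]
    def __getitem__(self, idx):
        return idx
    def __len__(self):
        return 10

wrapped = RepeatDataset(TinyDataset(), times=5)
assert len(wrapped) == 50
assert wrapped[23] == 3  # indices wrap around the original dataset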
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/markup.py DELETED
@@ -1,246 +0,0 @@
1
- import re
2
- from ast import literal_eval
3
- from operator import attrgetter
4
- from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
5
-
6
- from ._emoji_replace import _emoji_replace
7
- from .emoji import EmojiVariant
8
- from .errors import MarkupError
9
- from .style import Style
10
- from .text import Span, Text
11
-
12
- RE_TAGS = re.compile(
13
- r"""((\\*)\[([a-z#/@][^[]*?)])""",
14
- re.VERBOSE,
15
- )
16
-
17
- RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
18
-
19
-
20
- class Tag(NamedTuple):
21
- """A tag in console markup."""
22
-
23
- name: str
24
- """The tag name. e.g. 'bold'."""
25
- parameters: Optional[str]
26
- """Any additional parameters after the name."""
27
-
28
- def __str__(self) -> str:
29
- return (
30
- self.name if self.parameters is None else f"{self.name} {self.parameters}"
31
- )
32
-
33
- @property
34
- def markup(self) -> str:
35
- """Get the string representation of this tag."""
36
- return (
37
- f"[{self.name}]"
38
- if self.parameters is None
39
- else f"[{self.name}={self.parameters}]"
40
- )
41
-
42
-
43
- _ReStringMatch = Match[str] # regex match object
44
- _ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
45
- _EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
46
-
47
-
48
- def escape(
49
- markup: str,
50
- _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
51
- ) -> str:
52
- """Escapes text so that it won't be interpreted as markup.
53
-
54
- Args:
55
- markup (str): Content to be inserted into markup.
56
-
57
- Returns:
58
- str: Markup with square brackets escaped.
59
- """
60
-
61
- def escape_backslashes(match: Match[str]) -> str:
62
- """Called by re.sub replace matches."""
63
- backslashes, text = match.groups()
64
- return f"{backslashes}{backslashes}\\{text}"
65
-
66
- markup = _escape(escape_backslashes, markup)
67
- return markup
68
-
69
-
70
- def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
71
- """Parse markup in to an iterable of tuples of (position, text, tag).
72
-
73
- Args:
74
- markup (str): A string containing console markup
75
-
76
- """
77
- position = 0
78
- _divmod = divmod
79
- _Tag = Tag
80
- for match in RE_TAGS.finditer(markup):
81
- full_text, escapes, tag_text = match.groups()
82
- start, end = match.span()
83
- if start > position:
84
- yield start, markup[position:start], None
85
- if escapes:
86
- backslashes, escaped = _divmod(len(escapes), 2)
87
- if backslashes:
88
- # Literal backslashes
89
- yield start, "\\" * backslashes, None
90
- start += backslashes * 2
91
- if escaped:
92
- # Escape of tag
93
- yield start, full_text[len(escapes) :], None
94
- position = end
95
- continue
96
- text, equals, parameters = tag_text.partition("=")
97
- yield start, None, _Tag(text, parameters if equals else None)
98
- position = end
99
- if position < len(markup):
100
- yield position, markup[position:], None
101
-
102
-
103
- def render(
104
- markup: str,
105
- style: Union[str, Style] = "",
106
- emoji: bool = True,
107
- emoji_variant: Optional[EmojiVariant] = None,
108
- ) -> Text:
109
- """Render console markup in to a Text instance.
110
-
111
- Args:
112
- markup (str): A string containing console markup.
113
- emoji (bool, optional): Also render emoji code. Defaults to True.
114
-
115
- Raises:
116
- MarkupError: If there is a syntax error in the markup.
117
-
118
- Returns:
119
- Text: A Text instance.
120
- """
121
- emoji_replace = _emoji_replace
122
- if "[" not in markup:
123
- return Text(
124
- emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
125
- style=style,
126
- )
127
- text = Text(style=style)
128
- append = text.append
129
- normalize = Style.normalize
130
-
131
- style_stack: List[Tuple[int, Tag]] = []
132
- pop = style_stack.pop
133
-
134
- spans: List[Span] = []
135
- append_span = spans.append
136
-
137
- _Span = Span
138
- _Tag = Tag
139
-
140
- def pop_style(style_name: str) -> Tuple[int, Tag]:
141
- """Pop tag matching given style name."""
142
- for index, (_, tag) in enumerate(reversed(style_stack), 1):
143
- if tag.name == style_name:
144
- return pop(-index)
145
- raise KeyError(style_name)
146
-
147
- for position, plain_text, tag in _parse(markup):
148
- if plain_text is not None:
149
- # Handle escaped open brackets, where the bracket is not part of a tag.
150
- plain_text = plain_text.replace("\\[", "[")
151
- append(emoji_replace(plain_text) if emoji else plain_text)
152
- elif tag is not None:
153
- if tag.name.startswith("/"): # Closing tag
154
- style_name = tag.name[1:].strip()
155
-
156
- if style_name: # explicit close
157
- style_name = normalize(style_name)
158
- try:
159
- start, open_tag = pop_style(style_name)
160
- except KeyError:
161
- raise MarkupError(
162
- f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
163
- ) from None
164
- else: # implicit close
165
- try:
166
- start, open_tag = pop()
167
- except IndexError:
168
- raise MarkupError(
169
- f"closing tag '[/]' at position {position} has nothing to close"
170
- ) from None
171
-
172
- if open_tag.name.startswith("@"):
173
- if open_tag.parameters:
174
- handler_name = ""
175
- parameters = open_tag.parameters.strip()
176
- handler_match = RE_HANDLER.match(parameters)
177
- if handler_match is not None:
178
- handler_name, match_parameters = handler_match.groups()
179
- parameters = (
180
- "()" if match_parameters is None else match_parameters
181
- )
182
-
183
- try:
184
- meta_params = literal_eval(parameters)
185
- except SyntaxError as error:
186
- raise MarkupError(
187
- f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
188
- )
189
- except Exception as error:
190
- raise MarkupError(
191
- f"error parsing {open_tag.parameters!r}; {error}"
192
- ) from None
193
-
194
- if handler_name:
195
- meta_params = (
196
- handler_name,
197
- meta_params
198
- if isinstance(meta_params, tuple)
199
- else (meta_params,),
200
- )
201
-
202
- else:
203
- meta_params = ()
204
-
205
- append_span(
206
- _Span(
207
- start, len(text), Style(meta={open_tag.name: meta_params})
208
- )
209
- )
210
- else:
211
- append_span(_Span(start, len(text), str(open_tag)))
212
-
213
- else: # Opening tag
214
- normalized_tag = _Tag(normalize(tag.name), tag.parameters)
215
- style_stack.append((len(text), normalized_tag))
216
-
217
- text_length = len(text)
218
- while style_stack:
219
- start, tag = style_stack.pop()
220
- style = str(tag)
221
- if style:
222
- append_span(_Span(start, text_length, style))
223
-
224
- text.spans = sorted(spans[::-1], key=attrgetter("start"))
225
- return text
226
-
227
-
228
- if __name__ == "__main__": # pragma: no cover
229
-
230
- MARKUP = [
231
- "[red]Hello World[/red]",
232
- "[magenta]Hello [b]World[/b]",
233
- "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
234
- "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
235
- ":warning-emoji: [bold red blink] DANGER![/]",
236
- ]
237
-
238
- from pip._vendor.rich import print
239
- from pip._vendor.rich.table import Table
240
-
241
- grid = Table("Markup", "Result", padding=(0, 1))
242
-
243
- for markup in MARKUP:
244
- grid.add_row(Text(markup), markup)
245
-
246
- print(grid)
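
Outside the demo above, the most common standalone use of this module is `escape`, which protects untrusted text from being parsed as tags; a small sketch:

from pip._vendor.rich.markup import escape, render

user_input = '[bold]not a tag[/bold]'
safe = escape(user_input)            # -> '\[bold]not a tag\[/bold]'
text = render(f'[red]{safe}[/red]')  # only the outer [red] span is styled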
 
 
spaces/Audio-AGI/WavJourney/code_generator.py DELETED
@@ -1,188 +0,0 @@
1
- import os
2
- import json5
3
- import utils
4
-
5
-
6
- def check_json_script(data):
7
- foreground_mandatory_attrs_map = {
8
- 'music': ['vol', 'len', 'desc'],
9
- 'sound_effect': ['vol', 'len', 'desc'],
10
- 'speech': ['vol', 'text']
11
- }
12
- background_mandatory_attrs_map = {
13
- 'music': ['vol', 'desc'],
14
- 'sound_effect': ['vol', 'desc'],
15
- }
16
-
17
- def check_by_audio_type(audio, mandatory_attrs_map, audio_str):
18
- if audio['audio_type'] not in mandatory_attrs_map:
19
- raise ValueError(f'audio_type is not allowed in this layout, audio={audio_str}')
20
- for attr_name in mandatory_attrs_map[audio['audio_type']]:
21
- if attr_name not in audio:
22
- raise ValueError(f'{attr_name} does not exist, audio={audio_str}')
23
-
24
- # Check json's format
25
- for audio in data:
26
- audio_str = json5.dumps(audio, indent=None)
27
- if 'layout' not in audio:
28
- raise ValueError(f'layout missing, audio={audio_str}')
29
- elif 'audio_type' not in audio:
30
- raise ValueError(f'audio_type missing, audio={audio_str}')
31
- elif audio['layout'] == 'foreground':
32
- check_by_audio_type(audio, foreground_mandatory_attrs_map, audio_str)
33
- elif audio['layout'] == 'background':
34
- if 'id' not in audio:
35
- raise ValueError(f'id not in background audio, audio={audio_str}')
36
- if 'action' not in audio:
37
- raise ValueError(f'action not in background audio, audio={audio_str}')
38
- if audio['action'] == 'begin':
39
- check_by_audio_type(audio, background_mandatory_attrs_map, audio_str)
40
- else:
41
- if audio['action'] != 'end':
42
- raise ValueError(f'Unknown action, audio={audio_str}')
43
- else:
44
- raise ValueError(f'Unknown layout, audio={audio_str}')
45
- #except Exception as err:
46
- # sys.stderr.write(f'PARSING ERROR: {err}, audio={json5.dumps(audio, indent=None)}\n')
47
- # all_clear = False
48
-
49
-
50
- def collect_and_check_audio_data(data):
51
- fg_audio_id = 0
52
- fg_audios = []
53
- bg_audios = []
54
- # Collect all the foreground and background audio ids used to calculate background audio length later
55
- for audio in data:
56
- if audio['layout'] == 'foreground':
57
- audio['id'] = fg_audio_id
58
- fg_audios.append(audio)
59
- fg_audio_id += 1
60
- else: # background
61
- if audio['action'] == 'begin':
62
- audio['begin_fg_audio_id'] = fg_audio_id
63
- bg_audios.append(audio)
64
- else: # ends
65
- # find the background audio with this id, and update its 'end_fg_audio_id'
66
- for bg_audio in bg_audios:
67
- if bg_audio['id'] == audio['id'] and bg_audio['audio_type'] == audio['audio_type']:
68
- bg_audio['end_fg_audio_id'] = fg_audio_id
69
- break
70
-
71
- # check if all background audios are valid
72
- for bg_audio in bg_audios:
73
- if 'begin_fg_audio_id' not in bg_audio:
74
- raise ValueError(f'begin of background missing, audio={bg_audio}')
75
- elif 'end_fg_audio_id' not in bg_audio:
76
- raise ValueError(f'end of background missing, audio={bg_audio}')
77
-
78
- if bg_audio['begin_fg_audio_id'] > bg_audio['end_fg_audio_id']:
79
- raise ValueError(f'background audio ends before start, audio={bg_audio}')
80
- elif bg_audio['begin_fg_audio_id'] == bg_audio['end_fg_audio_id']:
81
- raise ValueError(f'background audio contains no foreground audio, audio={bg_audio}')
82
- #except Exception as err:
83
- # sys.stderr.write(f'ALIGNMENT ERROR: {err}, audio={bg_audio}\n')
84
- # return None, None
85
-
86
- return fg_audios, bg_audios
87
-
88
-
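
For context, a script that passes these checks looks like the following; the example is inferred from the mandatory-attribute maps above (JSON5, all values illustrative):

[
    {"layout": "background", "audio_type": "music", "id": 1, "action": "begin",
     "vol": -30, "desc": "calm piano"},
    {"layout": "foreground", "audio_type": "speech", "vol": -15,
     "character": "host", "text": "Welcome to the show."},
    {"layout": "foreground", "audio_type": "sound_effect", "vol": -20,
     "len": 2, "desc": "audience applause"},
    {"layout": "background", "audio_type": "music", "id": 1, "action": "end"}
]

check_json_script accepts this, and collect_and_check_audio_data pairs the background music (begin at foreground id 0, end at id 2).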
89
- class AudioCodeGenerator:
90
- def __init__(self):
91
- self.wav_counters = {
92
- 'bg_sound_effect': 0,
93
- 'bg_music': 0,
94
- 'idle': 0,
95
- 'fg_sound_effect': 0,
96
- 'fg_music': 0,
97
- 'fg_speech': 0,
98
- }
99
- self.code = ''
100
-
101
- def append_code(self, content):
102
- self.code = f'{self.code}{content}\n'
103
-
104
- def generate_code(self, fg_audios, bg_audios, output_path, result_filename):
105
- def get_wav_name(audio):
106
- audio_type = audio['audio_type']
107
- layout = 'fg' if audio['layout'] == 'foreground' else 'bg'
108
- wav_type = f'{layout}_{audio_type}' if layout else audio_type
109
- desc = audio['text'] if 'text' in audio else audio['desc']
110
- desc = utils.text_to_abbrev_prompt(desc)
111
- wav_filename = f'{wav_type}_{self.wav_counters[wav_type]}_{desc}.wav'
112
- self.wav_counters[wav_type] += 1
113
- return wav_filename
114
-
115
- header = f'''
116
- import os
117
- import sys
118
- import datetime
119
-
120
- from APIs import TTM, TTS, TTA, MIX, CAT, COMPUTE_LEN
121
-
122
-
123
- fg_audio_lens = []
124
- wav_path = \"{output_path.absolute()}/audio\"
125
- os.makedirs(wav_path, exist_ok=True)
126
-
127
- '''
128
- self.append_code(header)
129
-
130
- fg_audio_wavs = []
131
- for fg_audio in fg_audios:
132
- wav_name = get_wav_name(fg_audio)
133
- if fg_audio['audio_type'] == 'sound_effect':
134
- self.append_code(f'TTA(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
135
- elif fg_audio['audio_type'] == 'music':
136
- self.append_code(f'TTM(text=\"{fg_audio["desc"]}\", length={fg_audio["len"]}, volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
137
- elif fg_audio['audio_type'] == 'speech':
138
- npz_path = self.char_to_voice_map[fg_audio["character"]]["npz_path"]
139
- npz_full_path = os.path.abspath(npz_path) if os.path.exists(npz_path) else npz_path
140
- self.append_code(f'TTS(text=\"{fg_audio["text"]}\", speaker_id=\"{self.char_to_voice_map[fg_audio["character"]]["id"]}\", volume={fg_audio["vol"]}, out_wav=os.path.join(wav_path, \"{wav_name}\"), speaker_npz=\"{npz_full_path}\")')
141
- fg_audio_wavs.append(wav_name)
142
- self.append_code(f'fg_audio_lens.append(COMPUTE_LEN(os.path.join(wav_path, \"{wav_name}\")))\n')
143
-
144
- # cat all foreground audio together
145
- self.append_code(f'fg_audio_wavs = []')
146
- for wav_filename in fg_audio_wavs:
147
- self.append_code(f'fg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))')
148
- self.append_code(f'CAT(wavs=fg_audio_wavs, out_wav=os.path.join(wav_path, \"foreground.wav\"))')
149
-
150
- bg_audio_wavs = []
151
- self.append_code(f'\nbg_audio_offsets = []')
152
- for bg_audio in bg_audios:
153
- wav_name = get_wav_name(bg_audio)
154
- self.append_code(f'bg_audio_len = sum(fg_audio_lens[{bg_audio["begin_fg_audio_id"]}:{bg_audio["end_fg_audio_id"]}])')
155
- self.append_code(f'bg_audio_offset = sum(fg_audio_lens[:{bg_audio["begin_fg_audio_id"]}])')
156
- if bg_audio['audio_type'] == 'sound_effect':
157
- self.append_code(f'TTA(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
158
- elif bg_audio['audio_type'] == 'music':
159
- self.append_code(f'TTM(text=\"{bg_audio["desc"]}\", volume={bg_audio["vol"]}, length=bg_audio_len, out_wav=os.path.join(wav_path, \"{wav_name}\"))')
160
- else:
161
- raise ValueError()
162
- bg_audio_wavs.append(wav_name)
163
- self.append_code(f'bg_audio_offsets.append(bg_audio_offset)\n')
164
- self.append_code(f'bg_audio_wavs = []')
165
- for wav_filename in bg_audio_wavs:
166
- self.append_code(f'bg_audio_wavs.append(os.path.join(wav_path, \"{wav_filename}\"))')
167
-
168
- self.append_code(f'bg_audio_wav_offset_pairs = list(zip(bg_audio_wavs, bg_audio_offsets))')
169
- self.append_code(f'bg_audio_wav_offset_pairs.append((os.path.join(wav_path, \"foreground.wav\"), 0))')
170
- self.append_code(f'MIX(wavs=bg_audio_wav_offset_pairs, out_wav=os.path.join(wav_path, \"{result_filename}.wav\"))')
171
-
172
-
173
- def init_char_to_voice_map(self, filename):
174
- with open(filename, 'r') as file:
175
- self.char_to_voice_map = json5.load(file)
176
-
177
-
178
- def parse_and_generate(self, script_filename, char_to_voice_map_filename, output_path, result_filename='result'):
179
- self.code = ''
180
- self.init_char_to_voice_map(char_to_voice_map_filename)
181
-
182
- with open(script_filename, 'r') as file:
183
- data = json5.load(file)
184
-
185
- check_json_script(data)
186
- fg_audios, bg_audios = collect_and_check_audio_data(data)
187
- self.generate_code(fg_audios, bg_audios, output_path, result_filename)
188
- return self.code
 
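For orientation, here is a hypothetical script that would pass these checks end to end. This is a sketch only: every value is invented, and the schema is inferred from the two validators above.

# Hypothetical input for check_json_script / collect_and_check_audio_data;
# every value below is invented for illustration.
script = [
    {'layout': 'background', 'audio_type': 'music', 'id': 0, 'action': 'begin',
     'vol': -35, 'desc': 'calm piano'},
    {'layout': 'foreground', 'audio_type': 'speech', 'vol': -15,
     'text': 'Welcome to the show.', 'character': 'Host'},
    {'layout': 'foreground', 'audio_type': 'sound_effect', 'vol': -30,
     'len': 2, 'desc': 'audience applause'},
    {'layout': 'background', 'audio_type': 'music', 'id': 0, 'action': 'end'},
]

check_json_script(script)  # raises ValueError on malformed input
fg_audios, bg_audios = collect_and_check_audio_data(script)
# bg_audios[0] now has begin_fg_audio_id == 0 and end_fg_audio_id == 2,
# i.e. the background music spans foreground clips 0 and 1.
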
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/catalog.py DELETED
@@ -1,236 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import copy
- import logging
- import types
- from collections import UserDict
- from typing import List
-
- from detectron2.utils.logger import log_first_n
-
- __all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
-
-
- class _DatasetCatalog(UserDict):
-     """
-     A global dictionary that stores information about the datasets and how to obtain them.
-
-     It contains a mapping from strings
-     (which are names that identify a dataset, e.g. "coco_2014_train")
-     to a function which parses the dataset and returns the samples in the
-     format of `list[dict]`.
-
-     The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
-     if used with the data loader functionalities in `data/build.py`, `data/detection_transform.py`.
-
-     The purpose of having this catalog is to make it easy to choose
-     different datasets, by just using the strings in the config.
-     """
-
-     def register(self, name, func):
-         """
-         Args:
-             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-             func (callable): a callable which takes no arguments and returns a list of dicts.
-                 It must return the same results if called multiple times.
-         """
-         assert callable(func), "You must register a function with `DatasetCatalog.register`!"
-         assert name not in self, "Dataset '{}' is already registered!".format(name)
-         self[name] = func
-
-     def get(self, name):
-         """
-         Call the registered function and return its results.
-
-         Args:
-             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-
-         Returns:
-             list[dict]: dataset annotations.
-         """
-         try:
-             f = self[name]
-         except KeyError as e:
-             raise KeyError(
-                 "Dataset '{}' is not registered! Available datasets are: {}".format(
-                     name, ", ".join(list(self.keys()))
-                 )
-             ) from e
-         return f()
-
-     def list(self) -> List[str]:
-         """
-         List all registered datasets.
-
-         Returns:
-             list[str]
-         """
-         return list(self.keys())
-
-     def remove(self, name):
-         """
-         Alias of ``pop``.
-         """
-         self.pop(name)
-
-     def __str__(self):
-         return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
-
-     __repr__ = __str__
-
-
- DatasetCatalog = _DatasetCatalog()
- DatasetCatalog.__doc__ = (
-     _DatasetCatalog.__doc__
-     + """
-     .. automethod:: detectron2.data.catalog.DatasetCatalog.register
-     .. automethod:: detectron2.data.catalog.DatasetCatalog.get
- """
- )
-
-
- class Metadata(types.SimpleNamespace):
-     """
-     A class that supports simple attribute setter/getter.
-     It is intended for storing metadata of a dataset and make it accessible globally.
-
-     Examples:
-     ::
-         # somewhere when you load the data:
-         MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
-
-         # somewhere when you print statistics or visualize:
-         classes = MetadataCatalog.get("mydataset").thing_classes
-     """
-
-     # the name of the dataset
-     # set default to N/A so that `self.name` in the errors will not trigger getattr again
-     name: str = "N/A"
-
-     _RENAMED = {
-         "class_names": "thing_classes",
-         "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
-         "stuff_class_names": "stuff_classes",
-     }
-
-     def __getattr__(self, key):
-         if key in self._RENAMED:
-             log_first_n(
-                 logging.WARNING,
-                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
-                 n=10,
-             )
-             return getattr(self, self._RENAMED[key])
-
-         # "name" exists in every metadata
-         if len(self.__dict__) > 1:
-             raise AttributeError(
-                 "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
-                 "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
-             )
-         else:
-             raise AttributeError(
-                 f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
-                 "metadata is empty."
-             )
-
-     def __setattr__(self, key, val):
-         if key in self._RENAMED:
-             log_first_n(
-                 logging.WARNING,
-                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
-                 n=10,
-             )
-             setattr(self, self._RENAMED[key], val)
-
-         # Ensure that metadata of the same name stays consistent
-         try:
-             oldval = getattr(self, key)
-             assert oldval == val, (
-                 "Attribute '{}' in the metadata of '{}' cannot be set "
-                 "to a different value!\n{} != {}".format(key, self.name, oldval, val)
-             )
-         except AttributeError:
-             super().__setattr__(key, val)
-
-     def as_dict(self):
-         """
-         Returns all the metadata as a dict.
-         Note that modifications to the returned dict will not reflect on the Metadata object.
-         """
-         return copy.copy(self.__dict__)
-
-     def set(self, **kwargs):
-         """
-         Set multiple metadata with kwargs.
-         """
-         for k, v in kwargs.items():
-             setattr(self, k, v)
-         return self
-
-     def get(self, key, default=None):
-         """
-         Access an attribute and return its value if exists.
-         Otherwise return default.
-         """
-         try:
-             return getattr(self, key)
-         except AttributeError:
-             return default
-
-
- class _MetadataCatalog(UserDict):
-     """
-     MetadataCatalog is a global dictionary that provides access to
-     :class:`Metadata` of a given dataset.
-
-     The metadata associated with a certain name is a singleton: once created, the
-     metadata will stay alive and will be returned by future calls to ``get(name)``.
-
-     It's like global variables, so don't abuse it.
-     It's meant for storing knowledge that's constant and shared across the execution
-     of the program, e.g.: the class names in COCO.
-     """
-
-     def get(self, name):
-         """
-         Args:
-             name (str): name of a dataset (e.g. coco_2014_train).
-
-         Returns:
-             Metadata: The :class:`Metadata` instance associated with this name,
-             or create an empty one if none is available.
-         """
-         assert len(name)
-         r = super().get(name, None)
-         if r is None:
-             r = self[name] = Metadata(name=name)
-         return r
-
-     def list(self):
-         """
-         List all registered metadata.
-
-         Returns:
-             list[str]: keys (names of datasets) of all registered metadata
-         """
-         return list(self.keys())
-
-     def remove(self, name):
-         """
-         Alias of ``pop``.
-         """
-         self.pop(name)
-
-     def __str__(self):
-         return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
-
-     __repr__ = __str__
-
-
- MetadataCatalog = _MetadataCatalog()
- MetadataCatalog.__doc__ = (
-     _MetadataCatalog.__doc__
-     + """
-     .. automethod:: detectron2.data.catalog.MetadataCatalog.get
- """
- )
 
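For context, a minimal usage sketch of the catalog API this file provided, following the pattern shown in the docstrings above (the dataset name and contents are invented):

from detectron2.data import DatasetCatalog, MetadataCatalog

def get_my_dicts():
    # Must be deterministic: the same list on every call.
    return [{"file_name": "img_0.jpg", "image_id": 0, "annotations": []}]

DatasetCatalog.register("my_dataset_train", get_my_dicts)
MetadataCatalog.get("my_dataset_train").thing_classes = ["person", "dog"]

dicts = DatasetCatalog.get("my_dataset_train")  # calls get_my_dicts()
classes = MetadataCatalog.get("my_dataset_train").thing_classes
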
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/README.md DELETED
@@ -1,9 +0,0 @@
-
-
- ### Common Datasets
-
- The datasets implemented here do not need to load the data into the final format.
- They should provide the minimal data structure needed to use the dataset, so it can be very efficient.
-
- For example, for an image dataset, just provide the file names and labels, but don't read the images.
- Let the downstream decide how to read them.
 
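A sketch of the lazy-loading pattern this README describes (directory, file names, and labels are invented):

import os

def load_image_dataset(image_dir, labels):
    # Return only file names and labels; no pixel data is read here.
    return [
        {"file_name": os.path.join(image_dir, fname), "label": label}
        for fname, label in labels.items()
    ]

records = load_image_dataset("/data/images", {"cat_001.jpg": "cat"})
# Downstream code decides how (and whether) to decode each image.
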
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/server.py DELETED
@@ -1,124 +0,0 @@
- from flask import Flask, request, Response
- from io import BytesIO
- import torch
- from av import open as avopen
-
- import commons
- import utils
- from models import SynthesizerTrn
- from text.symbols import symbols
- from text import cleaned_text_to_sequence, get_bert
- from text.cleaner import clean_text
- from scipy.io import wavfile
-
- # Flask init
- app = Flask(__name__)
- app.config['JSON_AS_ASCII'] = False
-
-
- def get_text(text, language_str, hps):
-     norm_text, phone, tone, word2ph = clean_text(text, language_str)
-     print([f"{p}{t}" for p, t in zip(phone, tone)])
-     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-     if hps.data.add_blank:
-         phone = commons.intersperse(phone, 0)
-         tone = commons.intersperse(tone, 0)
-         language = commons.intersperse(language, 0)
-         for i in range(len(word2ph)):
-             word2ph[i] = word2ph[i] * 2
-         word2ph[0] += 1
-     bert = get_bert(norm_text, word2ph, language_str)
-
-     assert bert.shape[-1] == len(phone)
-
-     phone = torch.LongTensor(phone)
-     tone = torch.LongTensor(tone)
-     language = torch.LongTensor(language)
-
-     return bert, phone, tone, language
-
-
- def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid):
-     bert, phones, tones, lang_ids = get_text(text, "ZH", hps)
-     with torch.no_grad():
-         x_tst = phones.to(dev).unsqueeze(0)
-         tones = tones.to(dev).unsqueeze(0)
-         lang_ids = lang_ids.to(dev).unsqueeze(0)
-         bert = bert.to(dev).unsqueeze(0)
-         x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev)
-         speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev)
-         audio = net_g.infer(
-             x_tst, x_tst_lengths, speakers, tones, lang_ids, bert,
-             sdp_ratio=sdp_ratio, noise_scale=noise_scale,
-             noise_scale_w=noise_scale_w, length_scale=length_scale,
-         )[0][0, 0].data.cpu().float().numpy()
-     return audio
-
-
- def replace_punctuation(text, i=2):
-     punctuation = ",。?!"
-     for char in punctuation:
-         text = text.replace(char, char * i)
-     return text
-
-
- def wav2(i, o, format):
-     inp = avopen(i, 'rb')
-     out = avopen(o, 'wb', format=format)
-     if format == "ogg":
-         format = "libvorbis"
-
-     ostream = out.add_stream(format)
-
-     for frame in inp.decode(audio=0):
-         for p in ostream.encode(frame):
-             out.mux(p)
-
-     for p in ostream.encode(None):
-         out.mux(p)
-
-     out.close()
-     inp.close()
-
-
- # Load the generator
- hps = utils.get_hparams_from_file("./configs/config.json")
-
- dev = 'cuda'
- net_g = SynthesizerTrn(
-     len(symbols),
-     hps.data.filter_length // 2 + 1,
-     hps.train.segment_size // hps.data.hop_length,
-     n_speakers=hps.data.n_speakers,
-     **hps.model).to(dev)
- _ = net_g.eval()
-
- # _ = utils.load_checkpoint("logs/G_649000.pth", net_g, None, skip_optimizer=True)
- _ = utils.load_checkpoint("logs/dxl/G_21000.pth", net_g, None, skip_optimizer=True)
-
-
- @app.route("/", methods=['GET', 'POST'])
- def main():
-     if request.method != 'GET':
-         return "Only GET is supported"
-     try:
-         speaker = request.args.get('speaker')
-         text = request.args.get('text').replace("\n", "")  # strip newlines from the query text
-         sdp_ratio = float(request.args.get("sdp_ratio", 0.2))
-         noise = float(request.args.get("noise", 0.5))
-         noisew = float(request.args.get("noisew", 0.6))
-         length = float(request.args.get("length", 1.2))
-         if length >= 2:
-             return "Too big length"
-         if len(text) >= 200:
-             return "Too long text"
-         fmt = request.args.get("format", "wav")
-         if None in (speaker, text):
-             return "Missing Parameter"
-         if fmt not in ("mp3", "wav", "ogg"):
-             return "Invalid Format"
-     except Exception:
-         return "Invalid Parameter"
-
-     with torch.no_grad():
-         audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise,
-                       noise_scale_w=noisew, length_scale=length, sid=speaker)
-
-     with BytesIO() as wav:
-         wavfile.write(wav, hps.data.sampling_rate, audio)
-         torch.cuda.empty_cache()
-         if fmt == "wav":
-             return Response(wav.getvalue(), mimetype="audio/wav")
-         wav.seek(0, 0)
-         with BytesIO() as ofp:
-             wav2(wav, ofp, fmt)
-             return Response(
-                 ofp.getvalue(),
-                 mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg"
-             )
 
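For reference, a client-side sketch against this endpoint. This assumes the Flask app is served at http://localhost:5000, and the speaker name below is a placeholder that must exist in hps.data.spk2id:

import requests

resp = requests.get(
    "http://localhost:5000/",
    params={
        "speaker": "some_speaker",  # hypothetical; must be a key of hps.data.spk2id
        "text": "你好,世界",
        "sdp_ratio": 0.2,
        "noise": 0.5,
        "noisew": 0.6,
        "length": 1.2,
        "format": "wav",
    },
)
with open("out.wav", "wb") as f:
    f.write(resp.content)
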
spaces/Benson/text-generation/Examples/Descargar Gratis Gta 5 Mvil Apk Para Android.md DELETED
@@ -1,42 +0,0 @@
- <br />
- <h1>Subway Surfers Unlimited Hack 2023 APK Download: Everything You Need to Know</h1>
- <p>If you are a fan of endless runner games, you have probably heard of Subway Surfers. This popular game lets you surf the subway with your cool crew while dodging trains, obstacles, and the grumpy inspector. But what if you want to enjoy the game without worrying about coins, keys, power-ups, characters, and boards? That is where the Subway Surfers unlimited hack 2023 APK download comes in handy. In this article, we will tell you everything you need to know about this hack APK, including its features, benefits, risks, and how to use it. We will also give you some tips and tricks for playing Subway Surfers and take a look at the game's latest update in 2023. So let's get started!</p>
- <h2>What Is Subway Surfers?</h2>
- <p>Subway Surfers is a classic endless runner game created by SYBO Games in 2012. The game has been downloaded more than 1 billion times on the Google Play Store alone and has won many awards and accolades. It stars Jake, Tricky, Fresh, and other characters who surf subways around the world while escaping the inspector and his dog. The game features colorful, vivid HD graphics, hoverboard surfing, paint-powered jetpacks, lightning-fast swipe acrobatics, and more. It also has a World Tour mode where you can explore a different city every month and collect special rewards.</p>
- <h2>free download gta 5 mobile apk for android</h2><br /><p><b><b>DOWNLOAD</b> &bull; <a href="https://bltlly.com/2v6JEd">https://bltlly.com/2v6JEd</a></b></p><br /><br />
- <h2>Why Hack Subway Surfers?</h2>
-
- <p>Hacking Subway Surfers is not as easy as it seems. There are many risks and challenges involved. For one thing, hacking Subway Surfers can expose your device to viruses, malware, or other harmful software that can damage your data or compromise your security. For another, hacking Subway Surfers can get you banned from the game, or you may even face legal consequences if you violate the game's terms and conditions. Therefore, you should be careful and cautious when hacking Subway Surfers and only use trusted, reliable sources to download hack APK files.</p>
- <h3>The Best Subway Surfers Hack APK for 2023</h3>
- <p>If you are looking for the best Subway Surfers hack APK for 2023, you should check out the Subway Surfers Unlimited Hack 2023 APK Download. This is a modified version of the original game that offers unlimited coins, keys, power-ups, characters, and boards. You can use this hack APK to enjoy the game without any limitations or restrictions. You can buy whatever you want, upgrade your skills, unlock new content, complete missions, and boost your score. You can also surf the subway with any character or board you like and have fun with the game.</p>
- <h4>Features of the Subway Surfers Hack APK</h4>
-
- <h4>How to Download and Install the Subway Surfers Hack APK</h4>
- <p>If you want to download and install the Subway Surfers Hack APK on your device, you need to follow these simple steps: - Step 1: Go to [this link] and download the Subway Surfers Hack APK file to your device. - Step 2: Go to your device settings and enable unknown sources. This will allow you to install apps from sources other than the Google Play Store. - Step 3: Go to your file manager and locate the downloaded Subway Surfers Hack APK file. Tap it and install it on your device. - Step 4: Wait for the installation process to finish, then launch the game from the app drawer. - Step 5: Enjoy playing Subway Surfers with unlimited resources!</p>
- <h4>How to Use the Subway Surfers Hack APK</h4>
- <p>Using the Subway Surfers Hack APK is very easy and straightforward. Just follow these simple steps: - Step 1: Launch the game from the app drawer and wait for it to load. - Step 2: Tap the shop icon in the top-right corner of the screen and buy anything you want with unlimited coins and keys. - Step 3: Tap the character icon in the bottom-left corner of the screen and select any character you want with unlimited coins and keys. - Step 4: Tap the board icon in the bottom-right corner of the screen and select any board you want with unlimited coins and keys. - Step 5: Tap the play button at the bottom center of the screen and start surfing the subway with unlimited power-ups.</p>
- <h2>Tips and Tricks for Subway Surfers</h2>
-
- use different power-ups to maximize their effects and benefits. For example, you should use jetpacks when there are no overhead obstacles, magnets when there are lots of coins, score multipliers when you have a long run going, hoverboards when you are in danger of crashing, and so on. You should also combine different power-ups to create powerful combos that can boost your score and performance. - Upgrade your skills: One of the things you can do with your coins is upgrade your skills. These are abilities that can enhance your gameplay and make you a better surfer. There are four skills you can upgrade: jetpack, magnet, 2x multiplier, and super sneakers. Each skill has five levels that increase its duration and effectiveness. You should upgrade your skills regularly to enjoy their benefits and advantages.</p>
- <h2>Subway Surfers 2023 Update</h2>
- <p>Subway Surfers is a game that is constantly updated with new features and content. The latest Subway Surfers update is the 2023 update, which introduces a new city, character, outfit, and board. The 2023 update takes you to Tokyo, Japan, where you can surf the subway with Harumi, a cute Japanese girl who loves anime and cosplay. You can also unlock her Meow outfit and her Kitty Board, which has a special trail effect.</p>
- <p></p>
- <h3>What's New in the Subway Surfers 2023 Update?</h3>
-
- <h3>How to Download and Install the Subway Surfers 2023 Update?</h3>
- <p>If you want to download and install the Subway Surfers 2023 update on your device, you need to follow these simple steps: - Step 1: Go to the Google Play Store and search for Subway Surfers. If you already have the game installed, you will see an update button. Tap it and wait for the update to download and install. If you do not have the game installed, you will see an install button. Tap it and wait for the game to download and install. - Step 2: Once the game has been updated or installed, launch it from the app drawer and wait for it to load. - Step 3: Enjoy playing Subway Surfers with the new 2023 update!</p>
- <h2>Conclusion</h2>
- <p>Subway Surfers is a fun and addictive endless runner game that lets you surf the subway with your crew while dodging trains, obstacles, and the grumpy inspector. But if you want to enjoy the game without limitations or restrictions, you can use the Subway Surfers unlimited hack 2023 APK download. This hack APK gives you unlimited coins, keys, power-ups, characters, and boards that you can use to buy whatever you want, upgrade your skills, unlock new content, complete missions, and boost your score. You can also use this hack APK to play the latest 2023 update of Subway Surfers, which features a new city, character, outfit, and board. However, you should also be aware of the risks and challenges of hacking Subway Surfers, such as viruses, malware, bans, and legal issues. Therefore, you should be careful and cautious when hacking Subway Surfers and only use trusted, reliable sources to download hack APK files. We hope this article has helped you learn everything you need to know about the Subway Surfers unlimited hack 2023 APK download. Now go ahead and surf the subway with unlimited resources!</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions and answers about the Subway Surfers unlimited hack 2023 APK download:</p>
- <h4>Q: Is the Subway Surfers unlimited hack 2023 APK download safe to use?</h4>
- <p>A: The Subway Surfers unlimited hack 2023 APK download is safe to use as long as you download it from a trusted, reliable source. However, you should also watch out for viruses, malware, or other harmful software that can damage your device or compromise your security. You should also scan the hack APK file with an antivirus or anti-malware program before installing it on your device.</p>
- <h4>Q: Is the Subway Surfers unlimited hack 2023 APK download legal to use?</h4>
- <p>A: The Subway Surfers unlimited hack 2023 APK download is not legal to use, as it violates the game's terms and conditions. By using this hack APK, you are modifying the original game and accessing its resources without permission or authorization. This can get you banned from the game, or you may even face legal consequences if you are caught or reported. Therefore, use this hack APK at your own risk and discretion.</p>
- <h4>Q: How can I get more coins and keys in Subway Surfers?</h4>
- <p>A: There are several ways to get more coins and keys in Subway Surfers. You can collect them on the tracks or buy them with real money. You can also complete missions, achievements, daily challenges, and events, or watch ads to earn more coins and keys. However, if you want unlimited coins and keys in Subway Surfers, you can use the Subway Surfers unlimited hack 2023 APK download.</p>
- <h4>Q: How can I unlock all the characters and boards in Subway Surfers?</h4>
- <p>A: There are several ways to unlock all the characters and boards in Subway Surfers. You can buy them with coins or keys or find them on the tracks. You can also unlock them by completing certain missions, achievements, events, or collections. However, if you want to unlock all of them at once, you can use the Subway Surfers unlimited hack 2023 APK download.</p>
- <h4>Q: How can I update Subway Surfers to the latest version?</h4><br />
- <br />
- <br />
 
spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+page.server.ts DELETED
@@ -1,33 +0,0 @@
- import type { PageServerLoad } from "./$types";
- import { collections } from "$lib/server/database";
- import { ObjectId } from "mongodb";
- import { error } from "@sveltejs/kit";
-
- export const load: PageServerLoad = async (event) => {
-   // todo: add validation on params.id
-   const conversation = await collections.conversations.findOne({
-     _id: new ObjectId(event.params.id),
-     sessionId: event.locals.sessionId,
-   });
-
-   if (!conversation) {
-     const conversationExists =
-       (await collections.conversations.countDocuments({
-         _id: new ObjectId(event.params.id),
-       })) !== 0;
-
-     if (conversationExists) {
-       throw error(
-         403,
-         "You don't have access to this conversation. If someone gave you this link, ask them to use the 'share' feature instead."
-       );
-     }
-
-     throw error(404, "Conversation not found.");
-   }
-
-   return {
-     messages: conversation.messages,
-     title: conversation.title,
-   };
- };
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/standard.py DELETED
@@ -1,532 +0,0 @@
- """Standard retry behavior.
-
- This contains the default standard retry behavior.
- It provides consistent behavior with other AWS SDKs.
-
- The key base classes used for retries:
-
- * ``BaseRetryableChecker`` - Use to check a specific condition that
-   indicates a retry should happen.  This can include things like
-   max attempts, HTTP status code checks, error code checks etc.
- * ``BaseRetryBackoff`` - Use to determine how long we should backoff until
-   we retry a request.  This is the class that will implement delay such
-   as exponential backoff.
- * ``RetryPolicy`` - Main class that determines if a retry should
-   happen.  It can combine data from various ``BaseRetryableChecker``
-   instances to make a final call as to whether or not a retry should happen.
-   It then uses a ``BaseRetryBackoff`` to determine how long to delay.
- * ``RetryHandler`` - The bridge between botocore's event system
-   used by endpoint.py to manage retries and the interfaces defined
-   in this module.
-
- This allows us to define an API that has minimal coupling to the event
- based API used by botocore.
-
- """
- import logging
- import random
-
- from botocore.exceptions import (
-     ConnectionError,
-     ConnectTimeoutError,
-     HTTPClientError,
-     ReadTimeoutError,
- )
- from botocore.retries import quota, special
- from botocore.retries.base import BaseRetryableChecker, BaseRetryBackoff
-
- DEFAULT_MAX_ATTEMPTS = 3
- logger = logging.getLogger(__name__)
-
-
- def register_retry_handler(client, max_attempts=DEFAULT_MAX_ATTEMPTS):
-     retry_quota = RetryQuotaChecker(quota.RetryQuota())
-
-     service_id = client.meta.service_model.service_id
-     service_event_name = service_id.hyphenize()
-     client.meta.events.register(
-         f'after-call.{service_event_name}', retry_quota.release_retry_quota
-     )
-
-     handler = RetryHandler(
-         retry_policy=RetryPolicy(
-             retry_checker=StandardRetryConditions(max_attempts=max_attempts),
-             retry_backoff=ExponentialBackoff(),
-         ),
-         retry_event_adapter=RetryEventAdapter(),
-         retry_quota=retry_quota,
-     )
-
-     unique_id = 'retry-config-%s' % service_event_name
-     client.meta.events.register(
-         'needs-retry.%s' % service_event_name,
-         handler.needs_retry,
-         unique_id=unique_id,
-     )
-     return handler
-
-
- class RetryHandler:
-     """Bridge between botocore's event system and this module.
-
-     This class is intended to be hooked to botocore's event system
-     as an event handler.
-     """
-
-     def __init__(self, retry_policy, retry_event_adapter, retry_quota):
-         self._retry_policy = retry_policy
-         self._retry_event_adapter = retry_event_adapter
-         self._retry_quota = retry_quota
-
-     def needs_retry(self, **kwargs):
-         """Connect as a handler to the needs-retry event."""
-         retry_delay = None
-         context = self._retry_event_adapter.create_retry_context(**kwargs)
-         if self._retry_policy.should_retry(context):
-             # Before we can retry we need to ensure we have sufficient
-             # capacity in our retry quota.
-             if self._retry_quota.acquire_retry_quota(context):
-                 retry_delay = self._retry_policy.compute_retry_delay(context)
-                 logger.debug(
-                     "Retry needed, retrying request after delay of: %s",
-                     retry_delay,
-                 )
-             else:
-                 logger.debug(
-                     "Retry needed but retry quota reached, "
-                     "not retrying request."
-                 )
-         else:
-             logger.debug("Not retrying request.")
-         self._retry_event_adapter.adapt_retry_response_from_context(context)
-         return retry_delay
-
-
- class RetryEventAdapter:
-     """Adapter to existing retry interface used in the endpoints layer.
-
-     This existing interface for determining if a retry needs to happen
-     is event based and used in ``botocore.endpoint``.  The interface has
-     grown organically over the years and could use some cleanup.  This
-     adapter converts that interface into the interface used by the
-     new retry strategies.
-
-     """
-
-     def create_retry_context(self, **kwargs):
-         """Create context based on needs-retry kwargs."""
-         response = kwargs['response']
-         if response is None:
-             # If response is None it means that an exception was raised
-             # because we never received a response from the service.  This
-             # could be something like a ConnectionError we get from our
-             # http layer.
-             http_response = None
-             parsed_response = None
-         else:
-             http_response, parsed_response = response
-         # This provides isolation between the kwargs emitted in the
-         # needs-retry event, and what this module uses to check for
-         # retries.
-         context = RetryContext(
-             attempt_number=kwargs['attempts'],
-             operation_model=kwargs['operation'],
-             http_response=http_response,
-             parsed_response=parsed_response,
-             caught_exception=kwargs['caught_exception'],
-             request_context=kwargs['request_dict']['context'],
-         )
-         return context
-
-     def adapt_retry_response_from_context(self, context):
-         """Modify response back to user back from context."""
-         # This will mutate attributes that are returned back to the end
-         # user.  We do it this way so that all the various retry classes
-         # don't mutate any input parameters from the needs-retry event.
-         metadata = context.get_retry_metadata()
-         if context.parsed_response is not None:
-             context.parsed_response.setdefault('ResponseMetadata', {}).update(
-                 metadata
-             )
-
-
- # Implementation note: this is meant to encapsulate all the misc stuff
- # that gets sent in the needs-retry event.  This is mapped so that params
- # are more clear and explicit.
- class RetryContext:
-     """Normalize a response that we use to check if a retry should occur.
-
-     This class smoothes over the different types of responses we may get
-     from a service including:
-
-     * A modeled error response from the service that contains a service
-       code and error message.
-     * A raw HTTP response that doesn't contain service protocol specific
-       error keys.
-     * An exception received while attempting to retrieve a response.
-       This could be a ConnectionError we receive from our HTTP layer which
-       could represent that we weren't able to receive a response from
-       the service.
-
-     This class guarantees that at least one of the above attributes will be
-     non None.
-
-     This class is meant to provide a read-only view into the properties
-     associated with a possible retryable response.  None of the properties
-     are meant to be modified directly.
-
-     """
-
-     def __init__(
-         self,
-         attempt_number,
-         operation_model=None,
-         parsed_response=None,
-         http_response=None,
-         caught_exception=None,
-         request_context=None,
-     ):
-         # 1-based attempt number.
-         self.attempt_number = attempt_number
-         self.operation_model = operation_model
-         # This is the parsed response dictionary we get from parsing
-         # the HTTP response from the service.
-         self.parsed_response = parsed_response
-         # This is an instance of botocore.awsrequest.AWSResponse.
-         self.http_response = http_response
-         # This is a subclass of Exception that will be non None if
-         # an exception was raised when retrying to retrieve a response.
-         self.caught_exception = caught_exception
-         # This is the request context dictionary that's added to the
-         # request dict.  This is used to store any additional state
-         # about the request.  We use this for storing retry quota
-         # capacity.
-         if request_context is None:
-             request_context = {}
-         self.request_context = request_context
-         self._retry_metadata = {}
-
-     # These are misc helper methods to avoid duplication in the various
-     # checkers.
-     def get_error_code(self):
-         """Check if there was a parsed response with an error code.
-
-         If we could not find any error codes, ``None`` is returned.
-
-         """
-         if self.parsed_response is None:
-             return
-         error = self.parsed_response.get('Error', {})
-         if not isinstance(error, dict):
-             return
-         return error.get('Code')
-
-     def add_retry_metadata(self, **kwargs):
-         """Add key/value pairs to the retry metadata.
-
-         This allows any objects during the retry process to add
-         metadata about any checks/validations that happened.
-
-         This gets added to the response metadata in the retry handler.
-
-         """
-         self._retry_metadata.update(**kwargs)
-
-     def get_retry_metadata(self):
-         return self._retry_metadata.copy()
-
-
- class RetryPolicy:
-     def __init__(self, retry_checker, retry_backoff):
-         self._retry_checker = retry_checker
-         self._retry_backoff = retry_backoff
-
-     def should_retry(self, context):
-         return self._retry_checker.is_retryable(context)
-
-     def compute_retry_delay(self, context):
-         return self._retry_backoff.delay_amount(context)
-
-
- class ExponentialBackoff(BaseRetryBackoff):
-
-     _BASE = 2
-     _MAX_BACKOFF = 20
-
-     def __init__(self, max_backoff=20, random=random.random):
-         self._base = self._BASE
-         self._max_backoff = max_backoff
-         self._random = random
-
-     def delay_amount(self, context):
-         """Calculates delay based on exponential backoff.
-
-         This class implements truncated binary exponential backoff
-         with jitter::
-
-             t_i = min(rand(0, 1) * 2 ** attempt, MAX_BACKOFF)
-
-         where ``i`` is the request attempt (0 based).
-
-         """
-         # The context.attempt_number is a 1-based value, but we have
-         # to calculate the delay based on a 0-based value.  We
-         # want the first delay to just be ``rand(0, 1)``.
-         return min(
-             self._random() * (self._base ** (context.attempt_number - 1)),
-             self._max_backoff,
-         )
-
-
- class MaxAttemptsChecker(BaseRetryableChecker):
-     def __init__(self, max_attempts):
-         self._max_attempts = max_attempts
-
-     def is_retryable(self, context):
-         under_max_attempts = context.attempt_number < self._max_attempts
-         retries_context = context.request_context.get('retries')
-         if retries_context:
-             retries_context['max'] = max(
-                 retries_context.get('max', 0), self._max_attempts
-             )
-         if not under_max_attempts:
-             logger.debug("Max attempts of %s reached.", self._max_attempts)
-             context.add_retry_metadata(MaxAttemptsReached=True)
-         return under_max_attempts
-
-
- class TransientRetryableChecker(BaseRetryableChecker):
-     _TRANSIENT_ERROR_CODES = [
-         'RequestTimeout',
-         'RequestTimeoutException',
-         'PriorRequestNotComplete',
-     ]
-     _TRANSIENT_STATUS_CODES = [500, 502, 503, 504]
-     _TRANSIENT_EXCEPTION_CLS = (
-         ConnectionError,
-         HTTPClientError,
-     )
-
-     def __init__(
-         self,
-         transient_error_codes=None,
-         transient_status_codes=None,
-         transient_exception_cls=None,
-     ):
-         if transient_error_codes is None:
-             transient_error_codes = self._TRANSIENT_ERROR_CODES[:]
-         if transient_status_codes is None:
-             transient_status_codes = self._TRANSIENT_STATUS_CODES[:]
-         if transient_exception_cls is None:
-             transient_exception_cls = self._TRANSIENT_EXCEPTION_CLS
-         self._transient_error_codes = transient_error_codes
-         self._transient_status_codes = transient_status_codes
-         self._transient_exception_cls = transient_exception_cls
-
-     def is_retryable(self, context):
-         if context.get_error_code() in self._transient_error_codes:
-             return True
-         if context.http_response is not None:
-             if (
-                 context.http_response.status_code
-                 in self._transient_status_codes
-             ):
-                 return True
-         if context.caught_exception is not None:
-             return isinstance(
-                 context.caught_exception, self._transient_exception_cls
-             )
-         return False
-
-
- class ThrottledRetryableChecker(BaseRetryableChecker):
-     # This is the union of all error codes we've seen that represent
-     # a throttled error.
-     _THROTTLED_ERROR_CODES = [
-         'Throttling',
-         'ThrottlingException',
-         'ThrottledException',
-         'RequestThrottledException',
-         'TooManyRequestsException',
-         'ProvisionedThroughputExceededException',
-         'TransactionInProgressException',
-         'RequestLimitExceeded',
-         'BandwidthLimitExceeded',
-         'LimitExceededException',
-         'RequestThrottled',
-         'SlowDown',
-         'PriorRequestNotComplete',
-         'EC2ThrottledException',
-     ]
-
-     def __init__(self, throttled_error_codes=None):
-         if throttled_error_codes is None:
-             throttled_error_codes = self._THROTTLED_ERROR_CODES[:]
-         self._throttled_error_codes = throttled_error_codes
-
-     def is_retryable(self, context):
-         # Only the error code from a parsed service response is used
-         # to determine if the response is a throttled response.
-         return context.get_error_code() in self._throttled_error_codes
-
-
- class ModeledRetryableChecker(BaseRetryableChecker):
-     """Check if an error has been modeled as retryable."""
-
-     def __init__(self):
-         self._error_detector = ModeledRetryErrorDetector()
-
-     def is_retryable(self, context):
-         error_code = context.get_error_code()
-         if error_code is None:
-             return False
-         return self._error_detector.detect_error_type(context) is not None
-
-
- class ModeledRetryErrorDetector:
-     """Checks whether or not an error is a modeled retryable error."""
-
-     # These are the return values from the detect_error_type() method.
-     TRANSIENT_ERROR = 'TRANSIENT_ERROR'
-     THROTTLING_ERROR = 'THROTTLING_ERROR'
-     # This class is lower level than ModeledRetryableChecker, which
-     # implements BaseRetryableChecker.  This object allows you to distinguish
-     # between the various types of retryable errors.
-
-     def detect_error_type(self, context):
-         """Detect the error type associated with an error code and model.
-
-         This will either return:
-
-         * ``self.TRANSIENT_ERROR`` - If the error is a transient error
-         * ``self.THROTTLING_ERROR`` - If the error is a throttling error
-         * ``None`` - If the error is neither type of error.
-
-         """
-         error_code = context.get_error_code()
-         op_model = context.operation_model
-         if op_model is None or not op_model.error_shapes:
-             return
-         for shape in op_model.error_shapes:
-             if shape.metadata.get('retryable') is not None:
-                 # Check if this error code matches the shape.  This can
-                 # be either by name or by a modeled error code.
-                 error_code_to_check = (
-                     shape.metadata.get('error', {}).get('code') or shape.name
-                 )
-                 if error_code == error_code_to_check:
-                     if shape.metadata['retryable'].get('throttling'):
-                         return self.THROTTLING_ERROR
-                     return self.TRANSIENT_ERROR
-
-
- class ThrottlingErrorDetector:
-     def __init__(self, retry_event_adapter):
-         self._modeled_error_detector = ModeledRetryErrorDetector()
-         self._fixed_error_code_detector = ThrottledRetryableChecker()
-         self._retry_event_adapter = retry_event_adapter
-
-     # This expects the kwargs from needs-retry to be passed through.
-     def is_throttling_error(self, **kwargs):
-         context = self._retry_event_adapter.create_retry_context(**kwargs)
-         if self._fixed_error_code_detector.is_retryable(context):
-             return True
-         error_type = self._modeled_error_detector.detect_error_type(context)
-         return error_type == self._modeled_error_detector.THROTTLING_ERROR
-
-
- class StandardRetryConditions(BaseRetryableChecker):
-     """Concrete class that implements the standard retry policy checks.
-
-     Specifically:
-
-         not max_attempts and (transient or throttled or modeled_retry)
-
-     """
-
-     def __init__(self, max_attempts=DEFAULT_MAX_ATTEMPTS):
-         # Note: This class is for convenience so you can have the
-         # standard retry condition in a single class.
-         self._max_attempts_checker = MaxAttemptsChecker(max_attempts)
-         self._additional_checkers = OrRetryChecker(
-             [
-                 TransientRetryableChecker(),
-                 ThrottledRetryableChecker(),
-                 ModeledRetryableChecker(),
-                 OrRetryChecker(
-                     [
-                         special.RetryIDPCommunicationError(),
-                         special.RetryDDBChecksumError(),
-                     ]
-                 ),
-             ]
-         )
-
-     def is_retryable(self, context):
-         return self._max_attempts_checker.is_retryable(
-             context
-         ) and self._additional_checkers.is_retryable(context)
-
-
- class OrRetryChecker(BaseRetryableChecker):
-     def __init__(self, checkers):
-         self._checkers = checkers
-
-     def is_retryable(self, context):
-         return any(checker.is_retryable(context) for checker in self._checkers)
-
-
- class RetryQuotaChecker:
-     _RETRY_COST = 5
-     _NO_RETRY_INCREMENT = 1
-     _TIMEOUT_RETRY_REQUEST = 10
-     _TIMEOUT_EXCEPTIONS = (ConnectTimeoutError, ReadTimeoutError)
-
-     # Implementation note: We're not making this a BaseRetryableChecker
-     # because this isn't just a check if we can retry.  This also changes
-     # state so we have to be careful when/how we call this.  Making it
-     # a BaseRetryableChecker implies you can call .is_retryable(context)
-     # as many times as you want and not affect anything.
-
-     def __init__(self, quota):
-         self._quota = quota
-         # This tracks the last amount of quota acquired.
-         self._last_amount_acquired = None
-
-     def acquire_retry_quota(self, context):
-         if self._is_timeout_error(context):
-             capacity_amount = self._TIMEOUT_RETRY_REQUEST
-         else:
-             capacity_amount = self._RETRY_COST
-         success = self._quota.acquire(capacity_amount)
-         if success:
-             # We add the capacity amount to the request context so we know
-             # how much to release later.  The capacity amount can vary based
-             # on the error.
-             context.request_context['retry_quota_capacity'] = capacity_amount
-             return True
-         context.add_retry_metadata(RetryQuotaReached=True)
-         return False
-
-     def _is_timeout_error(self, context):
-         return isinstance(context.caught_exception, self._TIMEOUT_EXCEPTIONS)
-
-     # This is intended to be hooked up to ``after-call``.
-     def release_retry_quota(self, context, http_response, **kwargs):
-         # There are three possible options.
-         # 1. The HTTP response did not have a 2xx response.  In that case we
-         #    give no quota back.
-         # 2. The HTTP request was successful and was never retried.  In
-         #    that case we give _NO_RETRY_INCREMENT back.
-         # 3. The API call had retries, and we eventually receive an HTTP
-         #    response with a 2xx status code.  In that case we give back
-         #    whatever quota was associated with the last acquisition.
-         if http_response is None:
-             return
-         status_code = http_response.status_code
-         if 200 <= status_code < 300:
-             if 'retry_quota_capacity' not in context:
-                 self._quota.release(self._NO_RETRY_INCREMENT)
-             else:
-                 capacity_amount = context['retry_quota_capacity']
-                 self._quota.release(capacity_amount)
 
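For context, applications normally enable this module through client configuration rather than by calling register_retry_handler directly; a minimal sketch (the service and region are arbitrary):

import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
client = session.create_client(
    's3',
    region_name='us-east-1',
    config=Config(retries={'max_attempts': 3, 'mode': 'standard'}),
)
# With mode='standard', botocore wires up RetryHandler and
# StandardRetryConditions (as defined above) behind the scenes.
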
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel.py DELETED
@@ -1,37 +0,0 @@
- import logging
- import os
- from typing import Optional
-
- from pip._vendor.pyproject_hooks import BuildBackendHookCaller
-
- from pip._internal.utils.subprocess import runner_with_spinner_message
-
- logger = logging.getLogger(__name__)
-
-
- def build_wheel_pep517(
-     name: str,
-     backend: BuildBackendHookCaller,
-     metadata_directory: str,
-     tempd: str,
- ) -> Optional[str]:
-     """Build one InstallRequirement using the PEP 517 build process.
-
-     Returns path to wheel if successfully built. Otherwise, returns None.
-     """
-     assert metadata_directory is not None
-     try:
-         logger.debug("Destination directory: %s", tempd)
-
-         runner = runner_with_spinner_message(
-             f"Building wheel for {name} (pyproject.toml)"
-         )
-         with backend.subprocess_runner(runner):
-             wheel_name = backend.build_wheel(
-                 tempd,
-                 metadata_directory=metadata_directory,
-             )
-     except Exception:
-         logger.error("Failed building wheel for %s", name)
-         return None
-     return os.path.join(tempd, wheel_name)
 
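A rough sketch of how this helper could be driven. The paths, project name, and backend below are invented; inside pip itself the hook caller and metadata directory are constructed elsewhere in the build pipeline:

import tempfile

from pip._vendor.pyproject_hooks import BuildBackendHookCaller

# Hypothetical project that declares setuptools as its PEP 517 backend.
backend = BuildBackendHookCaller("/path/to/project", "setuptools.build_meta")
with tempfile.TemporaryDirectory() as tempd:
    wheel_path = build_wheel_pep517(
        name="myproject",
        backend=backend,
        metadata_directory="/path/to/metadata",  # produced earlier by the metadata hooks
        tempd=tempd,
    )
    print(wheel_path)  # e.g. <tempd>/myproject-0.1-py3-none-any.whl, or None on failure
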
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build.py DELETED
@@ -1,153 +0,0 @@
1
- """distutils.command.build
2
-
3
- Implements the Distutils 'build' command."""
4
-
5
- import sys
6
- import os
7
- from distutils.core import Command
8
- from distutils.errors import DistutilsOptionError
9
- from distutils.util import get_platform
10
-
11
-
12
- def show_compilers():
13
- from distutils.ccompiler import show_compilers
14
-
15
- show_compilers()
16
-
17
-
18
- class build(Command):
19
-
20
- description = "build everything needed to install"
21
-
22
- user_options = [
23
- ('build-base=', 'b', "base directory for build library"),
24
- ('build-purelib=', None, "build directory for platform-neutral distributions"),
25
- ('build-platlib=', None, "build directory for platform-specific distributions"),
26
- (
27
- 'build-lib=',
28
- None,
29
- "build directory for all distribution (defaults to either "
30
- + "build-purelib or build-platlib",
31
- ),
32
- ('build-scripts=', None, "build directory for scripts"),
33
- ('build-temp=', 't', "temporary build directory"),
34
- (
35
- 'plat-name=',
36
- 'p',
37
- "platform name to build for, if supported "
38
- "(default: %s)" % get_platform(),
39
- ),
40
- ('compiler=', 'c', "specify the compiler type"),
41
- ('parallel=', 'j', "number of parallel build jobs"),
42
- ('debug', 'g', "compile extensions and libraries with debugging information"),
43
-         ('force', 'f', "forcibly build everything (ignore file timestamps)"),
-         ('executable=', 'e', "specify final destination interpreter path (build.py)"),
-     ]
-
-     boolean_options = ['debug', 'force']
-
-     help_options = [
-         ('help-compiler', None, "list available compilers", show_compilers),
-     ]
-
-     def initialize_options(self):
-         self.build_base = 'build'
-         # these are decided only after 'build_base' has its final value
-         # (unless overridden by the user or client)
-         self.build_purelib = None
-         self.build_platlib = None
-         self.build_lib = None
-         self.build_temp = None
-         self.build_scripts = None
-         self.compiler = None
-         self.plat_name = None
-         self.debug = None
-         self.force = 0
-         self.executable = None
-         self.parallel = None
-
-     def finalize_options(self):  # noqa: C901
-         if self.plat_name is None:
-             self.plat_name = get_platform()
-         else:
-             # plat-name only supported for windows (other platforms are
-             # supported via ./configure flags, if at all). Avoid misleading
-             # other platforms.
-             if os.name != 'nt':
-                 raise DistutilsOptionError(
-                     "--plat-name only supported on Windows (try "
-                     "using './configure --help' on your platform)"
-                 )
-
-         plat_specifier = ".{}-{}".format(self.plat_name, sys.implementation.cache_tag)
-
-         # Make it so Python 2.x and Python 2.x with --with-pydebug don't
-         # share the same build directories. Doing so confuses the build
-         # process for C modules
-         if hasattr(sys, 'gettotalrefcount'):
-             plat_specifier += '-pydebug'
-
-         # 'build_purelib' and 'build_platlib' just default to 'lib' and
-         # 'lib.<plat>' under the base build directory. We only use one of
-         # them for a given distribution, though --
-         if self.build_purelib is None:
-             self.build_purelib = os.path.join(self.build_base, 'lib')
-         if self.build_platlib is None:
-             self.build_platlib = os.path.join(self.build_base, 'lib' + plat_specifier)
-
-         # 'build_lib' is the actual directory that we will use for this
-         # particular module distribution -- if user didn't supply it, pick
-         # one of 'build_purelib' or 'build_platlib'.
-         if self.build_lib is None:
-             if self.distribution.has_ext_modules():
-                 self.build_lib = self.build_platlib
-             else:
-                 self.build_lib = self.build_purelib
-
-         # 'build_temp' -- temporary directory for compiler turds,
-         # "build/temp.<plat>"
-         if self.build_temp is None:
-             self.build_temp = os.path.join(self.build_base, 'temp' + plat_specifier)
-         if self.build_scripts is None:
-             self.build_scripts = os.path.join(
-                 self.build_base, 'scripts-%d.%d' % sys.version_info[:2]
-             )
-
-         if self.executable is None and sys.executable:
-             self.executable = os.path.normpath(sys.executable)
-
-         if isinstance(self.parallel, str):
-             try:
-                 self.parallel = int(self.parallel)
-             except ValueError:
-                 raise DistutilsOptionError("parallel should be an integer")
-
-     def run(self):
-         # Run all relevant sub-commands. This will be some subset of:
-         #  - build_py      - pure Python modules
-         #  - build_clib    - standalone C libraries
-         #  - build_ext     - Python extensions
-         #  - build_scripts - (Python) scripts
-         for cmd_name in self.get_sub_commands():
-             self.run_command(cmd_name)
-
-     # -- Predicates for the sub-command list ---------------------------
-
-     def has_pure_modules(self):
-         return self.distribution.has_pure_modules()
-
-     def has_c_libraries(self):
-         return self.distribution.has_c_libraries()
-
-     def has_ext_modules(self):
-         return self.distribution.has_ext_modules()
-
-     def has_scripts(self):
-         return self.distribution.has_scripts()
-
-     sub_commands = [
-         ('build_py', has_pure_modules),
-         ('build_clib', has_c_libraries),
-         ('build_ext', has_ext_modules),
-         ('build_scripts', has_scripts),
-     ]
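
For context: `run()` above dispatches only the sub-commands whose predicate in `sub_commands` returns true for the current distribution. A minimal sketch of driving the command programmatically (the package name and module list are placeholders, not taken from this file):

    from distutils.core import setup

    # 'build' walks sub_commands; for a pure-Python module only build_py's
    # predicate is true, so just that builder runs, under a custom build base.
    setup(
        name='example',
        py_modules=['example'],
        script_args=['build', '--build-base', 'custom_build'],
    )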
spaces/CALM/Dashboard/streamlit_observable/frontend/src/Observable.tsx DELETED
@@ -1,161 +0,0 @@
- import React, { ReactNode } from "react"
- import {
-   withStreamlitConnection,
-   StreamlitComponentBase,
-   Streamlit,
- } from "./streamlit"
- import { Runtime, Inspector } from "@observablehq/runtime";
-
- class Observable extends StreamlitComponentBase<{}> {
-   public observeValue = {};
-   private notebookRef = React.createRef<HTMLDivElement>();
-   private runtime: any = null;
-   private main: any = null;
-
-   componentWillUnmount() {
-     this.runtime?.dispose();
-   }
-
-   // @ts-ignore
-   public componentDidUpdate(prevProps: any) {
-     const { args: prevArgs } = prevProps;
-     if (prevArgs.notebook !== this.props.args.notebook) {
-       // TODO handle new notebook
-     }
-     console.log('this.props.args.redefine: ', this.props.args.redefine);
-     if (this.main !== null) {
-       this.redefineCells(this.main, this.props.args.redefine);
-     }
-   }
-
-   async embedNotebook(notebook: string, targets: string[], observe: string[], hide: string[]) {
-     if (this.runtime) {
-       this.runtime.dispose();
-     }
-
-     console.log('Console says hi!');
-
-     const targetSet = new Set(targets);
-     const observeSet = new Set(observe);
-     const hideSet = new Set(hide);
-     this.runtime = new Runtime();
-     const { default: define } = await eval(`import("https://api.observablehq.com/${notebook}.js?v=3")`);
-
-     this.main = this.runtime.module(define, (name: string) => {
-       console.log('name: ', name);
-       console.log('observeSet.has(name): ', observeSet.has(name));
-       console.log('targetSet.has(name): ', targetSet.has(name));
-       if (observeSet.has(name) && !targetSet.has(name)) {
-         const observeValue = this.observeValue;
-
-         console.log('observeValue: ', observeValue);
-
-         return {
-           fulfilled: (value: any) => {
-             //@ts-ignore
-             observeValue[name] = value;
-             //@ts-ignore
-             Streamlit.setComponentValue(observeValue);
-           }
-         }
-       }
-       if (targetSet.size > 0 && !targetSet.has(name)) return;
-       if (hideSet.has(name)) return true;
-       const el = document.createElement('div');
-       this.notebookRef.current?.appendChild(el);
-
-       const i = new Inspector(el);
-       el.addEventListener('input', e => {
-         Streamlit.setFrameHeight();
-       })
-       return {
-         pending() {
-           i.pending();
-           Streamlit.setFrameHeight();
-         },
-         fulfilled(value: any) {
-           i.fulfilled(value);
-           Streamlit.setFrameHeight();
-         },
-         rejected(error: any) {
-           i.rejected(error);
-           Streamlit.setFrameHeight();
-         },
-       };
-     });
-     if (observeSet.size > 0) {
-       Promise.all(Array.from(observeSet).map(async name => [name, await this.main.value(name)])).then(initial => {
-         for (const [name, value] of initial) {
-           // @ts-ignore
-           this.observeValue[name] = value
-         };
-         Streamlit.setComponentValue(this.observeValue);
-       })
-     }
-   }
-
-   redefineCells(main: any, redefine = {}) {
-
-     console.log('Console says hi 2 !');
-
-     for (let cell in redefine) {
-       //@ts-ignore
-       main.redefine(cell, redefine[cell]);
-     }
-   }
-
-   componentDidMount() {
-     const { notebook, targets = [], observe = [], redefine = {}, hide = [] } = this.props.args;
-     Streamlit.setComponentValue(this.observeValue);
-     this.embedNotebook(notebook, targets, observe, hide).then(() => {
-       this.redefineCells(this.main, redefine);
-     });
-   }
-
-   public render = (): ReactNode => {
-
-     console.log('this.props.args.render_empty: ', this.props.args.render_empty);
-     if (this.props.args.render_empty) {
-       return (
-         <div>
-           <div style={{ padding: '9px 12px' }}>
-             <div ref={this.notebookRef}></div>
-           </div>
-           <div style={{ marginTop: '4px' }}>
-             <div>
-               <div style={{ textAlign: "left" }}>{this.props.args.name}</div>
-               <div style={{ textAlign: "right" }}>
-                 <a href={`https://observablehq.com/${this.props.args.notebook}`} style={{ color: '#666' }}></a>
-               </div>
-             </div>
-           </div>
-         </div>
-       )
-     }
-     return (
-       <div style={{ border: '1px solid gray', borderRadius: '4px' }}>
-         <div style={{ padding: '9px 12px' }}>
-           <div ref={this.notebookRef}></div>
-         </div>
-         <div style={{ marginTop: '4px' }}>
-           <div style={{
-             backgroundColor: '#ddd',
-             fontWeight: 700,
-             padding: ".25rem .5rem",
-             borderRadius: '0 0 4px 4px',
-             gridTemplateColumns: "auto auto",
-             display: "grid"
-           }}>
-             <div style={{ textAlign: "left" }}>{this.props.args.name}</div>
-             <div style={{ textAlign: "right" }}>
-               <a href={`https://observablehq.com/${this.props.args.notebook}`} style={{ color: '#666' }}></a>
-             </div>
-           </div>
-         </div>
-       </div>
-     )
-   }
- }
-
- export default withStreamlitConnection(Observable)
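
On the Python side, this component receives `notebook`, `targets`, `observe`, `redefine`, and `hide` through Streamlit's component args. A usage sketch, assuming the wrapper follows the usual `streamlit_observable` package API (the import path, function signature, and notebook ID are assumptions, not taken from this file):

    import streamlit as st
    from streamlit_observable import observable  # assumed wrapper module

    # Render the "chart" cell of a public notebook and stream the value of
    # the "data" cell back to Python whenever it changes.
    observers = observable(
        "Bar chart demo",
        notebook="@d3/bar-chart",
        targets=["chart"],
        observe=["data"],
    )
    st.write(observers)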
spaces/ChrisCaviar/ControlNet-v1-1/app_mlsd.py DELETED
@@ -1,113 +0,0 @@
- #!/usr/bin/env python
-
- import gradio as gr
-
- from utils import randomize_seed_fn
-
-
- def create_demo(process, max_images=12, default_num_images=3):
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column():
-                 image = gr.Image()
-                 prompt = gr.Textbox(label='Prompt')
-                 run_button = gr.Button('Run')
-                 with gr.Accordion('Advanced options', open=False):
-                     num_samples = gr.Slider(label='Number of images',
-                                             minimum=1,
-                                             maximum=max_images,
-                                             value=default_num_images,
-                                             step=1)
-                     image_resolution = gr.Slider(label='Image resolution',
-                                                  minimum=256,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=256)
-                     preprocess_resolution = gr.Slider(
-                         label='Preprocess resolution',
-                         minimum=128,
-                         maximum=512,
-                         value=512,
-                         step=1)
-                     mlsd_value_threshold = gr.Slider(
-                         label='Hough value threshold (MLSD)',
-                         minimum=0.01,
-                         maximum=2.0,
-                         value=0.1,
-                         step=0.01)
-                     mlsd_distance_threshold = gr.Slider(
-                         label='Hough distance threshold (MLSD)',
-                         minimum=0.01,
-                         maximum=20.0,
-                         value=0.1,
-                         step=0.01)
-                     num_steps = gr.Slider(label='Number of steps',
-                                           minimum=1,
-                                           maximum=100,
-                                           value=20,
-                                           step=1)
-                     guidance_scale = gr.Slider(label='Guidance scale',
-                                                minimum=0.1,
-                                                maximum=30.0,
-                                                value=9.0,
-                                                step=0.1)
-                     seed = gr.Slider(label='Seed',
-                                      minimum=0,
-                                      maximum=1000000,
-                                      step=1,
-                                      value=0,
-                                      randomize=True)
-                     randomize_seed = gr.Checkbox(label='Randomize seed',
-                                                  value=True)
-                     a_prompt = gr.Textbox(
-                         label='Additional prompt',
-                         value='best quality, extremely detailed')
-                     n_prompt = gr.Textbox(
-                         label='Negative prompt',
-                         value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                     )
-             with gr.Column():
-                 result = gr.Gallery(label='Output', show_label=False).style(
-                     columns=2, object_fit='scale-down')
-         inputs = [
-             image,
-             prompt,
-             a_prompt,
-             n_prompt,
-             num_samples,
-             image_resolution,
-             preprocess_resolution,
-             num_steps,
-             guidance_scale,
-             seed,
-             mlsd_value_threshold,
-             mlsd_distance_threshold,
-         ]
-         prompt.submit(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-         )
-         run_button.click(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-             api_name='mlsd',
-         )
-     return demo
-
-
- if __name__ == '__main__':
-     from model import Model
-     model = Model(task_name='MLSD')
-     demo = create_demo(model.process_mlsd)
-     demo.queue().launch()
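
Both event chains above run `randomize_seed_fn` first to refresh the seed slider, then call `process` with the full `inputs` list. A hypothetical stub with the matching 12-argument signature, handy for exercising the UI without loading the ControlNet model:

    def fake_process(image, prompt, a_prompt, n_prompt, num_samples,
                     image_resolution, preprocess_resolution, num_steps,
                     guidance_scale, seed, mlsd_value_threshold,
                     mlsd_distance_threshold):
        # Echo the input image instead of generating samples.
        return [image] * num_samples

    demo = create_demo(fake_process)
    demo.queue().launch()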
spaces/Cloudfeng/anime-remove-background/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Anime Remove Background
- emoji: 🪄🖼️
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 3.1.4
- app_file: app.py
- pinned: false
- license: apache-2.0
- duplicated_from: skytnt/anime-remove-background
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CofAI/chat.b4/client/css/global.css DELETED
@@ -1,70 +0,0 @@
- @import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap");
- * {
-     --font-1: "Inter", sans-serif;
-     --section-gap: 24px;
-     --border-radius-1: 8px;
-     margin: 0;
-     padding: 0;
-     box-sizing: border-box;
-     position: relative;
-     font-family: var(--font-1);
- }
-
- .theme-light {
-     --colour-1: #f5f5f5;
-     --colour-2: #000000;
-     --colour-3: #474747;
-     --colour-4: #949494;
-     --colour-5: #ebebeb;
-     --colour-6: #dadada;
-
-     --accent: #3a3a3a;
-     --blur-bg: #ffffff;
-     --blur-border: #dbdbdb;
-     --user-input: #282828;
-     --conversations: #666666;
- }
-
- .theme-dark {
-     --colour-1: #181818;
-     --colour-2: #ccc;
-     --colour-3: #dadada;
-     --colour-4: #f0f0f0;
-     --colour-5: #181818;
-     --colour-6: #242424;
-
-     --accent: #151718;
-     --blur-bg: #242627;
-     --blur-border: #242627;
-     --user-input: #f5f5f5;
-     --conversations: #555555;
- }
-
- html,
- body {
-     background: var(--colour-1);
-     color: var(--colour-3);
- }
-
- ol,
- ul {
-     padding-left: 20px;
- }
-
- .shown {
-     display: flex !important;
- }
-
- a:-webkit-any-link {
-     color: var(--accent);
- }
-
- pre {
-     white-space: pre-wrap;
- }
-
- @media screen and (max-height: 720px) {
-     :root {
-         --section-gap: 16px;
-     }
- }
spaces/CoreyMorris/MMLU-by-task-Leaderboard/README.md DELETED
@@ -1,9 +0,0 @@
- ---
- title: MMLU By Task Leaderboard
- emoji: 🏆
- sdk: streamlit
- sdk_version: 1.25.0
- app_file: app.py
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cpp4App/Cpp4App/CDM/result_processing/eval_classes.py DELETED
@@ -1,215 +0,0 @@
- import json
- import os.path
- import numpy as np
- import cv2
- from glob import glob
- from os.path import join as pjoin
- from tqdm import tqdm
-
- class_map = {'0': 'Button', '1': 'CheckBox', '2': 'Chronometer', '3': 'EditText', '4': 'ImageButton', '5': 'ImageView',
-              '6': 'ProgressBar', '7': 'RadioButton', '8': 'RatingBar', '9': 'SeekBar', '10': 'Spinner', '11': 'Switch',
-              '12': 'ToggleButton', '13': 'VideoView', '14': 'TextView'}
-
-
- def resize_label(bboxes, d_height, gt_height, bias=0):
-     bboxes_new = []
-     scale = gt_height / d_height
-     for bbox in bboxes:
-         bbox = [int(b * scale + bias) for b in bbox]
-         bboxes_new.append(bbox)
-     return bboxes_new
-
-
- def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):
-     board = org.copy()
-     for i in range(len(corners)):
-         board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
-     if show:
-         cv2.imshow('a', cv2.resize(board, (500, 1000)))
-         cv2.waitKey(0)
-     return board
-
-
- def load_detect_result_json(result_file_root, shrink=4):
-     def is_bottom_or_top(corner):
-         column_min, row_min, column_max, row_max = corner
-         if row_max < 36 or row_min > 725:
-             return True
-         return False
-
-     result_files = glob(pjoin(result_file_root, '*.json'))
-     compos_reform = {}
-     print('Loading %d detection results' % len(result_files))
-     for result_file in tqdm(result_files):
-         img_name = os.path.basename(result_file).split('.')[0]
-         compos = json.load(open(result_file, 'r'))['compos']
-         for compo in compos:
-             if compo['column_max'] - compo['column_min'] < 10 or compo['row_max'] - compo['row_min'] < 10:
-                 continue
-             if is_bottom_or_top((compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max'])):
-                 continue
-             if img_name not in compos_reform:
-                 compos_reform[img_name] = {'bboxes': [[compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink]],
-                                            'categories': [compo['category']]}
-             else:
-                 compos_reform[img_name]['bboxes'].append([compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink])
-                 compos_reform[img_name]['categories'].append(compo['category'])
-     return compos_reform
-
-
- def load_ground_truth_json(gt_file):
-     def get_img_by_id(img_id):
-         for image in images:
-             if image['id'] == img_id:
-                 return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])
-
-     def cvt_bbox(bbox):
-         '''
-         :param bbox: [x,y,width,height]
-         :return: [col_min, row_min, col_max, row_max]
-         '''
-         bbox = [int(b) for b in bbox]
-         return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
-
-     data = json.load(open(gt_file, 'r'))
-     images = data['images']
-     annots = data['annotations']
-     compos = {}
-     print('Loading %d ground truth' % len(annots))
-     for annot in tqdm(annots):
-         img_name, size = get_img_by_id(annot['image_id'])
-         if img_name not in compos:
-             compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [class_map[str(annot['category_id'])]], 'size': size}
-         else:
-             compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))
-             compos[img_name]['categories'].append(class_map[str(annot['category_id'])])
-     return compos
-
-
- def eval(detection, ground_truth, img_root, show=True, no_text=False, only_text=False):
-     def compo_filter(compos, flag):
-         if not no_text and not only_text:
-             return compos
-         compos_new = {'bboxes': [], 'categories': []}
-         for k, category in enumerate(compos['categories']):
-             if only_text:
-                 if flag == 'det' and category != 'TextView':
-                     continue
-                 if flag == 'gt' and category != 'TextView':
-                     continue
-             elif no_text:
-                 if flag == 'det' and category == 'TextView':
-                     continue
-                 if flag == 'gt' and category == 'TextView':
-                     continue
-             compos_new['bboxes'].append(compos['bboxes'][k])
-             compos_new['categories'].append(category)
-         return compos_new
-
-     def match(org, d_bbox, d_category, gt_compos, matched):
-         '''
-         :param matched: marks whether each ground truth component is still unmatched
-         :param d_bbox: [col_min, row_min, col_max, row_max]
-         :param gt_compos: ground truth components, with 'bboxes' and 'categories' lists
-         :return: Boolean: True if IoU is large enough or the detected box is contained by a ground truth box
-         '''
-         area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])
-         gt_bboxes = gt_compos['bboxes']
-         gt_categories = gt_compos['categories']
-         for i, gt_bbox in enumerate(gt_bboxes):
-             if matched[i] == 0:
-                 continue
-             area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])
-             col_min = max(d_bbox[0], gt_bbox[0])
-             row_min = max(d_bbox[1], gt_bbox[1])
-             col_max = min(d_bbox[2], gt_bbox[2])
-             row_max = min(d_bbox[3], gt_bbox[3])
-             # if not intersected, area intersection should be 0
-             w = max(0, col_max - col_min)
-             h = max(0, row_max - row_min)
-             area_inter = w * h
-             if area_inter == 0:
-                 continue
-             iod = area_inter / area_d
-             iou = area_inter / (area_d + area_gt - area_inter)
-             # if show:
-             #     cv2.putText(org, (str(round(iou, 2)) + ',' + str(round(iod, 2))), (d_bbox[0], d_bbox[1]),
-             #                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
-
-             if iou > 0.9 or iod == 1:
-                 if d_category == gt_categories[i]:
-                     matched[i] = 0
-                     return True
-         return False
-
-     amount = len(detection)
-     TP, FP, FN = 0, 0, 0
-     pres, recalls, f1s = [], [], []
-     for i, image_id in enumerate(detection):
-         TP_this, FP_this, FN_this = 0, 0, 0
-         img = cv2.imread(pjoin(img_root, image_id + '.jpg'))
-         d_compos = detection[image_id]
-         if image_id not in ground_truth:
-             continue
-         gt_compos = ground_truth[image_id]
-
-         org_height = gt_compos['size'][0]
-
-         d_compos = compo_filter(d_compos, 'det')
-         gt_compos = compo_filter(gt_compos, 'gt')
-
-         d_compos['bboxes'] = resize_label(d_compos['bboxes'], 800, org_height)
-         matched = np.ones(len(gt_compos['bboxes']), dtype=int)
-         for j, d_bbox in enumerate(d_compos['bboxes']):
-             if match(img, d_bbox, d_compos['categories'][j], gt_compos, matched):
-                 TP += 1
-                 TP_this += 1
-             else:
-                 FP += 1
-                 FP_this += 1
-         FN += sum(matched)
-         FN_this = sum(matched)
-
-         try:
-             pre_this = TP_this / (TP_this + FP_this)
-             recall_this = TP_this / (TP_this + FN_this)
-             f1_this = 2 * (pre_this * recall_this) / (pre_this + recall_this)
-         except ZeroDivisionError:
-             print('empty result for %s, skipped' % image_id)
-             continue
-
-         pres.append(pre_this)
-         recalls.append(recall_this)
-         f1s.append(f1_this)
-         if show:
-             print(image_id + '.jpg')
-             print('[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f' % (
-                 i, amount, TP_this, FP_this, FN_this, pre_this, recall_this))
-             # cv2.imshow('org', cv2.resize(img, (500, 1000)))
-             broad = draw_bounding_box(img, d_compos['bboxes'], color=(255, 0, 0), line=3)
-             draw_bounding_box(broad, gt_compos['bboxes'], color=(0, 0, 255), show=True, line=2)
-
-         if i % 200 == 0:
-             precision = TP / (TP + FP)
-             recall = TP / (TP + FN)
-             f1 = 2 * (precision * recall) / (precision + recall)
-             print(
-                 '[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
-
-     precision = TP / (TP + FP)
-     recall = TP / (TP + FN)
-     f1 = 2 * (precision * recall) / (precision + recall)
-     print('[%d/%d] TP:%d, FP:%d, FN:%d, Precision:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
-     # print("Average precision:%.4f; Average recall:%.3f" % (sum(pres)/len(pres), sum(recalls)/len(recalls)))
-
-     return pres, recalls, f1s
-
-
- no_text = True
- only_text = False
-
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\ip')
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\merge')
- detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\merge')
- # detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\ocr')
- gt = load_ground_truth_json('E:\\Mulong\\Datasets\\rico\\instances_test.json')
- eval(detect, gt, 'E:\\Mulong\\Datasets\\rico\\combined', show=False, no_text=no_text, only_text=only_text)
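
A quick worked example of the matching rule in `match()`: a detection counts as a true positive when IoU > 0.9, or when it lies entirely inside a ground-truth box (IoD == 1), and the categories agree. With toy boxes:

    d = [10, 10, 50, 50]   # detection [col_min, row_min, col_max, row_max]
    g = [8, 8, 52, 52]     # ground truth
    area_d = (d[2] - d[0]) * (d[3] - d[1])   # 1600
    area_g = (g[2] - g[0]) * (g[3] - g[1])   # 1936
    inter = (min(d[2], g[2]) - max(d[0], g[0])) * (min(d[3], g[3]) - max(d[1], g[1]))  # 1600
    iou = inter / (area_d + area_g - inter)  # ~0.826 -> fails the IoU test
    iod = inter / area_d                     # 1.0   -> matches via containment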
spaces/DaFujaTyping/hf-Chat-ui/src/lib/buildPrompt.ts DELETED
@@ -1,30 +0,0 @@
- import type { BackendModel } from "./server/models";
- import type { Message } from "./types/Message";
-
- /**
-  * Convert [{user: "assistant", content: "hi"}, {user: "user", content: "hello"}] to:
-  *
-  * <|assistant|>hi<|endoftext|><|prompter|>hello<|endoftext|><|assistant|>
-  */
- export function buildPrompt(
-   messages: Pick<Message, "from" | "content">[],
-   model: BackendModel
- ): string {
-   const prompt =
-     messages
-       .map(
-         (m) =>
-           (m.from === "user"
-             ? model.userMessageToken + m.content
-             : model.assistantMessageToken + m.content) +
-           (model.messageEndToken
-             ? m.content.endsWith(model.messageEndToken)
-               ? ""
-               : model.messageEndToken
-             : "")
-       )
-       .join("") + model.assistantMessageToken;
-
-   // Not super precise, but it's truncated in the model's backend anyway
-   return model.preprompt + prompt.split(" ").slice(-model.parameters.truncate).join(" ");
- }
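
The docstring's mapping, restated as a minimal Python sketch of the same joining logic (the token strings are made up; the real values come from the model config). Note the original also skips the end token when the content already ends with it, which this sketch omits:

    user_token, assistant_token, end_token = "<|prompter|>", "<|assistant|>", "<|endoftext|>"
    messages = [{"from": "assistant", "content": "hi"}, {"from": "user", "content": "hello"}]
    prompt = "".join(
        (user_token if m["from"] == "user" else assistant_token) + m["content"] + end_token
        for m in messages
    ) + assistant_token
    # -> "<|assistant|>hi<|endoftext|><|prompter|>hello<|endoftext|><|assistant|>"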
spaces/Dagfinn1962/prodia2/main.css DELETED
@@ -1,67 +0,0 @@
- body {
-     background: #FFFFFF;
-     width: 100%;
-     color: #FFFFFF;
-     padding: 20px;
-     border-radius: 10px;
-     border: 1px solid #1b0202;
- }
-
- gr.blocks {
-     background-color: #758bec;
-     width: 100%;
-     color: #FFFFFF;
- }
-
- h3 {
-     background-color: #758bec;
-     color: #FFFFFF;
-     text-align: center;
-     font-family: verdana;
-     font-size: 24px;
-     border: 1px solid #FFFFFF;
-     border-radius: 10px;
- }
-
- p {
-     font-family: verdana;
-     font-size: 14px;
- }
-
- label {
-     font-family: verdana;
-     color: #FFB76B;
-     font-weight: 700;
-     font-size: 14px;
-     border: 1px solid #000000;
- }
-
- gr.Textbox {
-     font-family: verdana;
-     background-color: #000000;
-     color: #FFFFFF;
-     font-weight: 700;
-     font-size: 14px;
-     border: 1px solid #FFFFFF;
-     border-radius: 6px;
- }
-
- gr.Button {
-     font-family: verdana;
-     background-color: #758bec;
-     color: #FFFFFF;
-     font-weight: 700;
-     font-size: 14px;
-     border: 1px solid #000000;
-     border-radius: 6px;
- }
-
- a, a:active, a:hover {
-     font-family: verdana;
-     color: #572430;
-     text-decoration: none;
-     font-weight: 700;
-     font-size: 14px;
- }
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/loss/led_loss.py DELETED
@@ -1,47 +0,0 @@
- """
- @Date: 2021/08/12
- @description:
- """
- import torch
- import torch.nn as nn
-
-
- class LEDLoss(nn.Module):
-     def __init__(self):
-         super().__init__()
-         self.loss = nn.L1Loss()
-
-     def forward(self, gt, dt):
-         camera_height = 1.6
-         gt_depth = gt['depth'] * camera_height
-
-         dt_ceil_depth = dt['ceil_depth'] * camera_height * gt['ratio']
-         dt_floor_depth = dt['depth'] * camera_height
-
-         ceil_loss = self.loss(gt_depth, dt_ceil_depth)
-         floor_loss = self.loss(gt_depth, dt_floor_depth)
-
-         loss = floor_loss + ceil_loss
-
-         return loss
-
-
- if __name__ == '__main__':
-     import numpy as np
-     from dataset.mp3d_dataset import MP3DDataset
-
-     mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train')
-     gt = mp3d_dataset.__getitem__(0)
-
-     gt['depth'] = torch.from_numpy(gt['depth'][np.newaxis])  # batch size is 1
-     gt['ratio'] = torch.from_numpy(gt['ratio'][np.newaxis])  # batch size is 1
-
-     dummy_dt = {
-         'depth': gt['depth'].clone(),
-         'ceil_depth': gt['depth'] / gt['ratio']
-     }
-     # dummy_dt['depth'][..., :20] *= 3  # introduce some difference
-
-     led_loss = LEDLoss()
-     loss = led_loss(gt, dummy_dt)
-     print(loss)
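
The demo above requires the MP3D dataset on disk. A self-contained variant with random tensors, runnable in this module's namespace (the shapes are illustrative, not taken from the dataset):

    import torch
    gt = {'depth': torch.rand(1, 256) + 0.5, 'ratio': torch.full((1, 1), 1.2)}
    dt = {'depth': gt['depth'].clone(), 'ceil_depth': gt['depth'] / gt['ratio']}
    print(LEDLoss()(gt, dt))  # ~0, since both heads reproduce the ground truth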
spaces/EPFL-VILAB/MultiMAE/multimae/multimae_utils.py DELETED
@@ -1,253 +0,0 @@
- # Copyright (c) EPFL VILAB.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
- # --------------------------------------------------------
- # Based on timm, DeiT, DINO, MoCo-v3, BEiT, MAE-priv and MAE code bases
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm
- # https://github.com/facebookresearch/deit
- # https://github.com/facebookresearch/dino
- # https://github.com/facebookresearch/moco-v3
- # https://github.com/microsoft/unilm/tree/master/beit
- # https://github.com/BUPT-PRIV/MAE-priv
- # https://github.com/facebookresearch/mae
- # --------------------------------------------------------
-
- import math
- import warnings
-
- import torch
- import torch.nn as nn
- from einops import rearrange
-
-
- def pair(t):
-     return t if isinstance(t, tuple) else (t, t)
-
-
- def build_2d_sincos_posemb(h, w, embed_dim=1024, temperature=10000.):
-     """Sine-cosine positional embeddings from MoCo-v3
-
-     Source: https://github.com/facebookresearch/moco-v3/blob/main/vits.py
-     """
-     grid_w = torch.arange(w, dtype=torch.float32)
-     grid_h = torch.arange(h, dtype=torch.float32)
-     grid_w, grid_h = torch.meshgrid(grid_w, grid_h)
-     assert embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
-     pos_dim = embed_dim // 4
-     omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
-     omega = 1. / (temperature ** omega)
-     out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega])
-     out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega])
-     pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :]
-     pos_emb = rearrange(pos_emb, 'b (h w) d -> b d h w', h=h, w=w, d=embed_dim)
-     return pos_emb
-
-
- def _no_grad_trunc_normal_(tensor, mean, std, a, b):
-     # Cut & paste from PyTorch official master until it's in a few official releases - RW
-     # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
-     def norm_cdf(x):
-         # Computes standard normal cumulative distribution function
-         return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
-     if (mean < a - 2 * std) or (mean > b + 2 * std):
-         warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
-                       "The distribution of values may be incorrect.",
-                       stacklevel=2)
-
-     with torch.no_grad():
-         # Values are generated by using a truncated uniform distribution and
-         # then using the inverse CDF for the normal distribution.
-         # Get upper and lower cdf values
-         l = norm_cdf((a - mean) / std)
-         u = norm_cdf((b - mean) / std)
-
-         # Uniformly fill tensor with values from [l, u], then translate to
-         # [2l-1, 2u-1].
-         tensor.uniform_(2 * l - 1, 2 * u - 1)
-
-         # Use inverse cdf transform for normal distribution to get truncated
-         # standard normal
-         tensor.erfinv_()
-
-         # Transform to proper mean, std
-         tensor.mul_(std * math.sqrt(2.))
-         tensor.add_(mean)
-
-         # Clamp to ensure it's in the proper range
-         tensor.clamp_(min=a, max=b)
-         return tensor
-
-
- def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
-     # type: (Tensor, float, float, float, float) -> Tensor
-     r"""Fills the input Tensor with values drawn from a truncated
-     normal distribution. The values are effectively drawn from the
-     normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
-     with values outside :math:`[a, b]` redrawn until they are within
-     the bounds. The method used for generating the random values works
-     best when :math:`a \leq \text{mean} \leq b`.
-     Args:
-         tensor: an n-dimensional `torch.Tensor`
-         mean: the mean of the normal distribution
-         std: the standard deviation of the normal distribution
-         a: the minimum cutoff value
-         b: the maximum cutoff value
-     Examples:
-         >>> w = torch.empty(3, 5)
-         >>> nn.init.trunc_normal_(w)
-     """
-     return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-
- def drop_path(x, drop_prob: float = 0., training: bool = False):
-     """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-     This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
-     the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
-     See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
-     changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
-     'survival rate' as the argument.
-     """
-     if drop_prob == 0. or not training:
-         return x
-     keep_prob = 1 - drop_prob
-     shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
-     random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
-     random_tensor.floor_()  # binarize
-     output = x.div(keep_prob) * random_tensor
-     return output
-
-
- class DropPath(nn.Module):
-     """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-     """
-
-     def __init__(self, drop_prob=None):
-         super(DropPath, self).__init__()
-         self.drop_prob = drop_prob
-
-     def forward(self, x):
-         return drop_path(x, self.drop_prob, self.training)
-
-     def extra_repr(self) -> str:
-         return 'p={}'.format(self.drop_prob)
-
-
- class Mlp(nn.Module):
-     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
-         super().__init__()
-         out_features = out_features or in_features
-         hidden_features = hidden_features or in_features
-         self.fc1 = nn.Linear(in_features, hidden_features)
-         self.act = act_layer()
-         self.fc2 = nn.Linear(hidden_features, out_features)
-         self.drop = nn.Dropout(drop)
-
-     def forward(self, x):
-         x = self.fc1(x)
-         x = self.act(x)
-         # x = self.drop(x)
-         # commented out here, following the original BERT implementation
-         x = self.fc2(x)
-         x = self.drop(x)
-         return x
-
-
- class Attention(nn.Module):
-     def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
-         super().__init__()
-         self.num_heads = num_heads
-         head_dim = dim // num_heads
-         self.scale = head_dim ** -0.5
-
-         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-         self.attn_drop = nn.Dropout(attn_drop)
-         self.proj = nn.Linear(dim, dim)
-         self.proj_drop = nn.Dropout(proj_drop)
-
-     def forward(self, x):
-         B, N, C = x.shape
-         qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
-
-         attn = (q @ k.transpose(-2, -1)) * self.scale
-         attn = attn.softmax(dim=-1)
-         attn = self.attn_drop(attn)
-
-         x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
-
-
- class CrossAttention(nn.Module):
-     def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
-         super().__init__()
-         self.num_heads = num_heads
-         head_dim = dim // num_heads
-         self.scale = head_dim ** -0.5
-
-         self.q = nn.Linear(dim, dim, bias=qkv_bias)
-         self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
-
-         self.attn_drop = nn.Dropout(attn_drop)
-         self.proj = nn.Linear(dim, dim)
-         self.proj_drop = nn.Dropout(proj_drop)
-
-     def forward(self, x, context):
-         B, N, C = x.shape
-         _, M, _ = context.shape
-
-         q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
-         kv = self.kv(context).reshape(B, M, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-         k, v = kv[0], kv[1]
-
-         attn = (q @ k.transpose(-2, -1)) * self.scale
-         attn = attn.softmax(dim=-1)
-         attn = self.attn_drop(attn)
-
-         x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
-         x = self.proj(x)
-         x = self.proj_drop(x)
-         return x
-
-
- class Block(nn.Module):
-
-     def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
-                  drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
-         super().__init__()
-         self.norm1 = norm_layer(dim)
-         self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
-         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-         self.norm2 = norm_layer(dim)
-         mlp_hidden_dim = int(dim * mlp_ratio)
-         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
-     def forward(self, x):
-         x = x + self.drop_path(self.attn(self.norm1(x)))
-         x = x + self.drop_path(self.mlp(self.norm2(x)))
-         return x
-
-
- class DecoderBlock(nn.Module):
-     def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
-                  drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
-         super().__init__()
-         self.norm1 = norm_layer(dim)
-         self.self_attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
-         self.cross_attn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
-         self.query_norm = norm_layer(dim)
-         self.context_norm = norm_layer(dim)
-         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-         self.norm2 = norm_layer(dim)
-         mlp_hidden_dim = int(dim * mlp_ratio)
-         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
-     def forward(self, x, context):
-         x = x + self.drop_path(self.self_attn(self.norm1(x)))
-         x = x + self.drop_path(self.cross_attn(self.query_norm(x), self.context_norm(context)))
-         x = x + self.drop_path(self.mlp(self.norm2(x)))
-         return x
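
A quick shape check, runnable in this module's namespace: a 2D sin-cos position embedding for a 14x14 patch grid, flattened to tokens and passed through one Transformer block (the grid size and width are illustrative):

    pos = build_2d_sincos_posemb(h=14, w=14, embed_dim=768)  # [1, 768, 14, 14]
    tokens = rearrange(pos, 'b d h w -> b (h w) d')          # [1, 196, 768]
    out = Block(dim=768, num_heads=12)(tokens)
    print(pos.shape, out.shape)  # torch.Size([1, 768, 14, 14]) torch.Size([1, 196, 768])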
spaces/EXPOSUREEE/Ai-Image-Enhancer/scripts/generate_meta_info.py DELETED
@@ -1,58 +0,0 @@
- import argparse
- import cv2
- import glob
- import os
-
-
- def main(args):
-     txt_file = open(args.meta_info, 'w')
-     for folder, root in zip(args.input, args.root):
-         img_paths = sorted(glob.glob(os.path.join(folder, '*')))
-         for img_path in img_paths:
-             status = True
-             if args.check:
-                 # read the image once for check, as some images may have errors
-                 try:
-                     img = cv2.imread(img_path)
-                 except (IOError, OSError) as error:
-                     print(f'Read {img_path} error: {error}')
-                     status = False
-                 else:
-                     if img is None:
-                         status = False
-                         print(f'Img is None: {img_path}')
-             if status:
-                 # get the relative path
-                 img_name = os.path.relpath(img_path, root)
-                 print(img_name)
-                 txt_file.write(f'{img_name}\n')
-     txt_file.close()
-
-
- if __name__ == '__main__':
-     """Generate meta info (txt file) for only Ground-Truth images.
-
-     It can also generate meta info from several folders into one txt file.
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         '--input',
-         nargs='+',
-         default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
-         help='Input folder, can be a list')
-     parser.add_argument(
-         '--root',
-         nargs='+',
-         default=['datasets/DF2K', 'datasets/DF2K'],
-         help='Folder root, should have the same length as input folders')
-     parser.add_argument(
-         '--meta_info',
-         type=str,
-         default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
-         help='txt path for meta info')
-     parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
-     args = parser.parse_args()
-
-     assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
-                                                f'{len(args.input)} and {len(args.root)}.')
-     os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
-
-     main(args)
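
A programmatic invocation sketch (the paths are placeholders; note that `main()` itself does not create the output directory, the `__main__` block does):

    from argparse import Namespace
    main(Namespace(
        input=['datasets/DF2K/DF2K_HR'],
        root=['datasets/DF2K'],
        meta_info='datasets/DF2K/meta_info/meta_info_DF2K_HR.txt',
        check=False,
    ))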
spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py DELETED
@@ -1,126 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from . import spec_utils
-
-
- class Conv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(Conv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nout,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 bias=False,
-             ),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
-
-     def __call__(self, x):
-         return self.conv(x)
-
-
- class SeperableConv2DBNActiv(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-         super(SeperableConv2DBNActiv, self).__init__()
-         self.conv = nn.Sequential(
-             nn.Conv2d(
-                 nin,
-                 nin,
-                 kernel_size=ksize,
-                 stride=stride,
-                 padding=pad,
-                 dilation=dilation,
-                 groups=nin,
-                 bias=False,
-             ),
-             nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-             nn.BatchNorm2d(nout),
-             activ(),
-         )
-
-     def __call__(self, x):
-         return self.conv(x)
-
-
- class Encoder(nn.Module):
-     def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-         super(Encoder, self).__init__()
-         self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
-     def __call__(self, x):
-         skip = self.conv1(x)
-         h = self.conv2(skip)
-
-         return h, skip
-
-
- class Decoder(nn.Module):
-     def __init__(
-         self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-     ):
-         super(Decoder, self).__init__()
-         self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-         self.dropout = nn.Dropout2d(0.1) if dropout else None
-
-     def __call__(self, x, skip=None):
-         x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-         if skip is not None:
-             skip = spec_utils.crop_center(skip, x)
-             x = torch.cat([x, skip], dim=1)
-         h = self.conv(x)
-
-         if self.dropout is not None:
-             h = self.dropout(h)
-
-         return h
-
-
- class ASPPModule(nn.Module):
-     def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
-         super(ASPPModule, self).__init__()
-         self.conv1 = nn.Sequential(
-             nn.AdaptiveAvgPool2d((1, None)),
-             Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-         )
-         self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-         self.conv3 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-         )
-         self.conv4 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-         )
-         self.conv5 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-         )
-         self.conv6 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-         )
-         self.conv7 = SeperableConv2DBNActiv(
-             nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-         )
-         self.bottleneck = nn.Sequential(
-             Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-         )
-
-     def forward(self, x):
-         _, _, h, w = x.size()
-         feat1 = F.interpolate(
-             self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-         )
-         feat2 = self.conv2(x)
-         feat3 = self.conv3(x)
-         feat4 = self.conv4(x)
-         feat5 = self.conv5(x)
-         feat6 = self.conv6(x)
-         feat7 = self.conv7(x)
-         out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
-         bottle = self.bottleneck(out)
-         return bottle
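
A shape check for the ASPP bottleneck, runnable in this module's namespace: channels go nin -> nout through the seven-branch concat, and the spatial size is preserved (the dummy input mimics a spectrogram batch):

    import torch
    aspp = ASPPModule(nin=64, nout=128)
    x = torch.randn(2, 64, 32, 128)
    print(aspp(x).shape)  # torch.Size([2, 128, 32, 128])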