parquet-converter committed
Commit de68c1d · 1 parent: b7db385

Update parquet files (step 39 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. spaces/1368565466ki/Satdia/text/cleaners.py +0 -475
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avidemux 2.7.5 x64 Multilingual Crack Edit Videos Like a Pro.md +0 -139
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md +0 -27
  4. spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md +0 -6
  5. spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md +0 -6
  6. spaces/1gistliPinn/ChatGPT4/Examples/Control Machete Comprendes Mendes Acapella Christmasxmass VERIFIED.md +0 -16
  7. spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md +0 -6
  8. spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md +0 -127
  9. spaces/52Hz/SRMNet_real_world_denoising/app.py +0 -37
  10. spaces/801artistry/RVC801/tools/torchgate/torchgate.py +0 -264
  11. spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/app.py +0 -3
  12. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py +0 -29
  13. spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/main.py +0 -378
  14. spaces/AIGC-Audio/AudioGPT/audio_to_text/inference_waveform.py +0 -102
  15. spaces/AIWaves/SOP_Generation-single/gen_utils.py +0 -43
  16. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py +0 -2861
  17. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/auto_aug.py +0 -96
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.js +0 -103
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/InsertEmptyColumn.js +0 -34
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/MoveCallbacks.js +0 -12
  21. spaces/AlexKorGKLT/webui-cpua/README.md +0 -14
  22. spaces/AlexWang/lama/models/ade20k/utils.py +0 -40
  23. spaces/AlexZou/Deploy_Restoration/app.py +0 -69
  24. spaces/Amrrs/QR-code-AI-art-generator/app.py +0 -285
  25. spaces/Andres99/Tune-A-Video-Training-UI/app_inference.py +0 -170
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py +0 -1058
  27. spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py +0 -2
  28. spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py +0 -22
  29. spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/backbone.py +0 -117
  30. spaces/Armandoliv/document_parser/app.py +0 -202
  31. spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio_dataset.py +0 -525
  32. spaces/Awiny/Image2Paragraph/models/segment_models/configs/ade20k_id2label.py +0 -153
  33. spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py +0 -122
  34. spaces/Benson/text-generation/Examples/Bitcoin Bit Generador De Bitcoin Apk.md +0 -63
  35. spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md +0 -116
  36. spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md +0 -78
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py +0 -132
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py +0 -92
  39. spaces/BlitzEsports/TextToImage/html2canvas.js +0 -0
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis_v0_5_categories.py +0 -0
  41. spaces/CVPR/GFPGAN-example/tests/test_utils.py +0 -43
  42. spaces/CVPR/LIVE/pybind11/tests/test_cmake_build/test.py +0 -6
  43. spaces/CVPR/LIVE/pybind11/tools/FindEigen3.cmake +0 -83
  44. spaces/CVPR/MonoScene/helpers.py +0 -336
  45. spaces/CVPR/Text2Human/Text2Human/train_parsing_gen.py +0 -136
  46. spaces/CVPR/regionclip-demo/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h +0 -115
  47. spaces/CazimirRoman/summarize-your-webpage-api-with-gradio/README.md +0 -12
  48. spaces/Chemsseddine/summarisation/app.py +0 -81
  49. spaces/CikeyQI/meme-api/meme_generator/memes/listen_music/__init__.py +0 -26
  50. spaces/CofAI/chat.b4/client/js/chat.js +0 -508
spaces/1368565466ki/Satdia/text/cleaners.py DELETED
@@ -1,475 +0,0 @@
- """ from https://github.com/keithito/tacotron """
-
- '''
- Cleaners are transformations that run over the input text at both training and eval time.
-
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
-   1. "english_cleaners" for English text
-   2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
-      the Unidecode library (https://pypi.python.org/pypi/Unidecode)
-   3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
-      the symbols in symbols.py to match your data).
- '''
-
- import re
- from unidecode import unidecode
- import pyopenjtalk
- from jamo import h2j, j2hcj
- from pypinyin import lazy_pinyin, BOPOMOFO
- import jieba, cn2an
-
-
- # This is a list of Korean classifiers preceded by pure Korean numerals.
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
- # Regular expression matching whitespace:
- _whitespace_re = re.compile(r'\s+')
-
- # Regular expression matching Japanese without punctuation marks:
- _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # Regular expression matching non-Japanese characters or punctuation marks:
- _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # List of (regular expression, replacement) pairs for abbreviations:
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('mrs', 'misess'),
-     ('mr', 'mister'),
-     ('dr', 'doctor'),
-     ('st', 'saint'),
-     ('co', 'company'),
-     ('jr', 'junior'),
-     ('maj', 'major'),
-     ('gen', 'general'),
-     ('drs', 'doctors'),
-     ('rev', 'reverend'),
-     ('lt', 'lieutenant'),
-     ('hon', 'honorable'),
-     ('sgt', 'sergeant'),
-     ('capt', 'captain'),
-     ('esq', 'esquire'),
-     ('ltd', 'limited'),
-     ('col', 'colonel'),
-     ('ft', 'fort'),
- ]]
-
- # List of (hangul, hangul divided) pairs:
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('ㄳ', 'ㄱㅅ'),
-     ('ㄵ', 'ㄴㅈ'),
-     ('ㄶ', 'ㄴㅎ'),
-     ('ㄺ', 'ㄹㄱ'),
-     ('ㄻ', 'ㄹㅁ'),
-     ('ㄼ', 'ㄹㅂ'),
-     ('ㄽ', 'ㄹㅅ'),
-     ('ㄾ', 'ㄹㅌ'),
-     ('ㄿ', 'ㄹㅍ'),
-     ('ㅀ', 'ㄹㅎ'),
-     ('ㅄ', 'ㅂㅅ'),
-     ('ㅘ', 'ㅗㅏ'),
-     ('ㅙ', 'ㅗㅐ'),
-     ('ㅚ', 'ㅗㅣ'),
-     ('ㅝ', 'ㅜㅓ'),
-     ('ㅞ', 'ㅜㅔ'),
-     ('ㅟ', 'ㅜㅣ'),
-     ('ㅢ', 'ㅡㅣ'),
-     ('ㅑ', 'ㅣㅏ'),
-     ('ㅒ', 'ㅣㅐ'),
-     ('ㅕ', 'ㅣㅓ'),
-     ('ㅖ', 'ㅣㅔ'),
-     ('ㅛ', 'ㅣㅗ'),
-     ('ㅠ', 'ㅣㅜ')
- ]]
-
- # List of (Latin alphabet, hangul) pairs:
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('a', '에이'),
-     ('b', '비'),
-     ('c', '시'),
-     ('d', '디'),
-     ('e', '이'),
-     ('f', '에프'),
-     ('g', '지'),
-     ('h', '에이치'),
-     ('i', '아이'),
-     ('j', '제이'),
-     ('k', '케이'),
-     ('l', '엘'),
-     ('m', '엠'),
-     ('n', '엔'),
-     ('o', '오'),
-     ('p', '피'),
-     ('q', '큐'),
-     ('r', '아르'),
-     ('s', '에스'),
-     ('t', '티'),
-     ('u', '유'),
-     ('v', '브이'),
-     ('w', '더블유'),
-     ('x', '엑스'),
-     ('y', '와이'),
-     ('z', '제트')
- ]]
-
- # List of (Latin alphabet, bopomofo) pairs:
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('a', 'ㄟˉ'),
-     ('b', 'ㄅㄧˋ'),
-     ('c', 'ㄙㄧˉ'),
-     ('d', 'ㄉㄧˋ'),
-     ('e', 'ㄧˋ'),
-     ('f', 'ㄝˊㄈㄨˋ'),
-     ('g', 'ㄐㄧˋ'),
-     ('h', 'ㄝˇㄑㄩˋ'),
-     ('i', 'ㄞˋ'),
-     ('j', 'ㄐㄟˋ'),
-     ('k', 'ㄎㄟˋ'),
-     ('l', 'ㄝˊㄛˋ'),
-     ('m', 'ㄝˊㄇㄨˋ'),
-     ('n', 'ㄣˉ'),
-     ('o', 'ㄡˉ'),
-     ('p', 'ㄆㄧˉ'),
-     ('q', 'ㄎㄧㄡˉ'),
-     ('r', 'ㄚˋ'),
-     ('s', 'ㄝˊㄙˋ'),
-     ('t', 'ㄊㄧˋ'),
-     ('u', 'ㄧㄡˉ'),
-     ('v', 'ㄨㄧˉ'),
-     ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
-     ('x', 'ㄝˉㄎㄨˋㄙˋ'),
-     ('y', 'ㄨㄞˋ'),
-     ('z', 'ㄗㄟˋ')
- ]]
-
-
- # List of (bopomofo, romaji) pairs:
- _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('ㄅㄛ', 'p⁼wo'),
-     ('ㄆㄛ', 'pʰwo'),
-     ('ㄇㄛ', 'mwo'),
-     ('ㄈㄛ', 'fwo'),
-     ('ㄅ', 'p⁼'),
-     ('ㄆ', 'pʰ'),
-     ('ㄇ', 'm'),
-     ('ㄈ', 'f'),
-     ('ㄉ', 't⁼'),
-     ('ㄊ', 'tʰ'),
-     ('ㄋ', 'n'),
-     ('ㄌ', 'l'),
-     ('ㄍ', 'k⁼'),
-     ('ㄎ', 'kʰ'),
-     ('ㄏ', 'h'),
-     ('ㄐ', 'ʧ⁼'),
-     ('ㄑ', 'ʧʰ'),
-     ('ㄒ', 'ʃ'),
-     ('ㄓ', 'ʦ`⁼'),
-     ('ㄔ', 'ʦ`ʰ'),
-     ('ㄕ', 's`'),
-     ('ㄖ', 'ɹ`'),
-     ('ㄗ', 'ʦ⁼'),
-     ('ㄘ', 'ʦʰ'),
-     ('ㄙ', 's'),
-     ('ㄚ', 'a'),
-     ('ㄛ', 'o'),
-     ('ㄜ', 'ə'),
-     ('ㄝ', 'e'),
-     ('ㄞ', 'ai'),
-     ('ㄟ', 'ei'),
-     ('ㄠ', 'au'),
-     ('ㄡ', 'ou'),
-     ('ㄧㄢ', 'yeNN'),
-     ('ㄢ', 'aNN'),
-     ('ㄧㄣ', 'iNN'),
-     ('ㄣ', 'əNN'),
-     ('ㄤ', 'aNg'),
-     ('ㄧㄥ', 'iNg'),
-     ('ㄨㄥ', 'uNg'),
-     ('ㄩㄥ', 'yuNg'),
-     ('ㄥ', 'əNg'),
-     ('ㄦ', 'əɻ'),
-     ('ㄧ', 'i'),
-     ('ㄨ', 'u'),
-     ('ㄩ', 'ɥ'),
-     ('ˉ', '→'),
-     ('ˊ', '↑'),
-     ('ˇ', '↓↑'),
-     ('ˋ', '↓'),
-     ('˙', ''),
-     (',', ','),
-     ('。', '.'),
-     ('!', '!'),
-     ('?', '?'),
-     ('—', '-')
- ]]
-
-
- def expand_abbreviations(text):
-     for regex, replacement in _abbreviations:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def lowercase(text):
-     return text.lower()
-
-
- def collapse_whitespace(text):
-     return re.sub(_whitespace_re, ' ', text)
-
-
- def convert_to_ascii(text):
-     return unidecode(text)
-
-
- def japanese_to_romaji_with_accent(text):
-     '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
-     sentences = re.split(_japanese_marks, text)
-     marks = re.findall(_japanese_marks, text)
-     text = ''
-     for i, sentence in enumerate(sentences):
-         if re.match(_japanese_characters, sentence):
-             if text!='':
-                 text+=' '
-             labels = pyopenjtalk.extract_fullcontext(sentence)
-             for n, label in enumerate(labels):
-                 phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
-                 if phoneme not in ['sil','pau']:
-                     text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
-                 else:
-                     continue
-                 n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
-                 a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
-                 a2 = int(re.search(r"\+(\d+)\+", label).group(1))
-                 a3 = int(re.search(r"\+(\d+)/", label).group(1))
-                 if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
-                     a2_next=-1
-                 else:
-                     a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
-                 # Accent phrase boundary
-                 if a3 == 1 and a2_next == 1:
-                     text += ' '
-                 # Falling
-                 elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
-                     text += '↓'
-                 # Rising
-                 elif a2 == 1 and a2_next == 2:
-                     text += '↑'
-         if i<len(marks):
-             text += unidecode(marks[i]).replace(' ','')
-     return text
-
-
- def latin_to_hangul(text):
-     for regex, replacement in _latin_to_hangul:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def divide_hangul(text):
-     for regex, replacement in _hangul_divided:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def hangul_number(num, sino=True):
-     '''Reference https://github.com/Kyubyong/g2pK'''
-     num = re.sub(',', '', num)
-
-     if num == '0':
-         return '영'
-     if not sino and num == '20':
-         return '스무'
-
-     digits = '123456789'
-     names = '일이삼사오육칠팔구'
-     digit2name = {d: n for d, n in zip(digits, names)}
-
-     modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
-     decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
-     digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
-     digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
-     spelledout = []
-     for i, digit in enumerate(num):
-         i = len(num) - i - 1
-         if sino:
-             if i == 0:
-                 name = digit2name.get(digit, '')
-             elif i == 1:
-                 name = digit2name.get(digit, '') + '십'
-                 name = name.replace('일십', '십')
-         else:
-             if i == 0:
-                 name = digit2mod.get(digit, '')
-             elif i == 1:
-                 name = digit2dec.get(digit, '')
-         if digit == '0':
-             if i % 4 == 0:
-                 last_three = spelledout[-min(3, len(spelledout)):]
-                 if ''.join(last_three) == '':
-                     spelledout.append('')
-                     continue
-             else:
-                 spelledout.append('')
-                 continue
-         if i == 2:
-             name = digit2name.get(digit, '') + '백'
-             name = name.replace('일백', '백')
-         elif i == 3:
-             name = digit2name.get(digit, '') + '천'
-             name = name.replace('일천', '천')
-         elif i == 4:
-             name = digit2name.get(digit, '') + '만'
-             name = name.replace('일만', '만')
-         elif i == 5:
-             name = digit2name.get(digit, '') + '십'
-             name = name.replace('일십', '십')
-         elif i == 6:
-             name = digit2name.get(digit, '') + '백'
-             name = name.replace('일백', '백')
-         elif i == 7:
-             name = digit2name.get(digit, '') + '천'
-             name = name.replace('일천', '천')
-         elif i == 8:
-             name = digit2name.get(digit, '') + '억'
-         elif i == 9:
-             name = digit2name.get(digit, '') + '십'
-         elif i == 10:
-             name = digit2name.get(digit, '') + '백'
-         elif i == 11:
-             name = digit2name.get(digit, '') + '천'
-         elif i == 12:
-             name = digit2name.get(digit, '') + '조'
-         elif i == 13:
-             name = digit2name.get(digit, '') + '십'
-         elif i == 14:
-             name = digit2name.get(digit, '') + '백'
-         elif i == 15:
-             name = digit2name.get(digit, '') + '천'
-         spelledout.append(name)
-     return ''.join(elem for elem in spelledout)
-
-
- def number_to_hangul(text):
-     '''Reference https://github.com/Kyubyong/g2pK'''
-     tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
-     for token in tokens:
-         num, classifier = token
-         if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
-             spelledout = hangul_number(num, sino=False)
-         else:
-             spelledout = hangul_number(num, sino=True)
-         text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
-     # digit by digit for remaining digits
-     digits = '0123456789'
-     names = '영일이삼사오육칠팔구'
-     for d, n in zip(digits, names):
-         text = text.replace(d, n)
-     return text
-
-
- def number_to_chinese(text):
-     numbers = re.findall(r'\d+(?:\.?\d+)?', text)
-     for number in numbers:
-         text = text.replace(number, cn2an.an2cn(number),1)
-     return text
-
-
- def chinese_to_bopomofo(text):
-     text=text.replace('、',',').replace(';',',').replace(':',',')
-     words=jieba.lcut(text,cut_all=False)
-     text=''
-     for word in words:
-         bopomofos=lazy_pinyin(word,BOPOMOFO)
-         if not re.search('[\u4e00-\u9fff]',word):
-             text+=word
-             continue
-         for i in range(len(bopomofos)):
-             if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
-                 bopomofos[i]+='ˉ'
-         if text!='':
-             text+=' '
-         text+=''.join(bopomofos)
-     return text
-
-
- def latin_to_bopomofo(text):
-     for regex, replacement in _latin_to_bopomofo:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def bopomofo_to_romaji(text):
-     for regex, replacement in _bopomofo_to_romaji:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def basic_cleaners(text):
-     '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
-     text = lowercase(text)
-     text = collapse_whitespace(text)
-     return text
-
-
- def transliteration_cleaners(text):
-     '''Pipeline for non-English text that transliterates to ASCII.'''
-     text = convert_to_ascii(text)
-     text = lowercase(text)
-     text = collapse_whitespace(text)
-     return text
-
-
- def japanese_cleaners(text):
-     text=japanese_to_romaji_with_accent(text)
-     if re.match('[A-Za-z]',text[-1]):
-         text += '.'
-     return text
-
-
- def japanese_cleaners2(text):
-     return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
-
-
- def korean_cleaners(text):
-     '''Pipeline for Korean text'''
-     text = latin_to_hangul(text)
-     text = number_to_hangul(text)
-     text = j2hcj(h2j(text))
-     text = divide_hangul(text)
-     if re.match('[\u3131-\u3163]',text[-1]):
-         text += '.'
-     return text
-
-
- def chinese_cleaners(text):
-     '''Pipeline for Chinese text'''
-     text=number_to_chinese(text)
-     text=chinese_to_bopomofo(text)
-     text=latin_to_bopomofo(text)
-     if re.match('[ˉˊˇˋ˙]',text[-1]):
-         text += '。'
-     return text
-
-
- def zh_ja_mixture_cleaners(text):
-     chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
-     japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
-     for chinese_text in chinese_texts:
-         cleaned_text=number_to_chinese(chinese_text[4:-4])
-         cleaned_text=chinese_to_bopomofo(cleaned_text)
-         cleaned_text=latin_to_bopomofo(cleaned_text)
-         cleaned_text=bopomofo_to_romaji(cleaned_text)
-         cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
-         cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
-         cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
-         cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
-         text = text.replace(chinese_text,cleaned_text+' ',1)
-     for japanese_text in japanese_texts:
-         cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
-         text = text.replace(japanese_text,cleaned_text+' ',1)
-     text=text[:-1]
-     if len(text) and re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
-         text += '.'
-     return text
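
For context on the module deleted above: in Keith Ito-style Tacotron/VITS codebases, cleaners such as these are looked up by name from the comma-delimited "cleaners" hyperparameter described in the module docstring. The dispatch sketch below is illustrative only and is not part of the deleted file; it assumes the module is importable as text.cleaners, and the clean_text helper name is hypothetical.

    # Illustrative sketch of how cleaners like those above are typically dispatched.
    from text import cleaners  # assumption: the deleted module's import path

    def clean_text(text, cleaner_names):
        # Apply each named cleaner in order, e.g. cleaner_names=['korean_cleaners'].
        for name in cleaner_names:
            cleaner = getattr(cleaners, name, None)
            if cleaner is None:
                raise ValueError('Unknown cleaner: %s' % name)
            text = cleaner(text)
        return text

    # Example: transliteration_cleaners ASCII-folds, lowercases, and collapses whitespace.
    print(clean_text('Hello   WORLD', ['transliteration_cleaners']))  # -> 'hello world'

Note that importing this module pulls in all of its language back ends at once (pyopenjtalk, jamo, pypinyin, jieba, cn2an), so even the ASCII-only cleaners require those packages to be installed.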
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Avidemux 2.7.5 x64 Multilingual Crack Edit Videos Like a Pro.md DELETED
@@ -1,139 +0,0 @@
- <br />
- <h1>Avidemux 2.7.5 x64 Multilingual Crack: A Free Video Editor for Windows</h1>
- <p>If you are looking for a free and easy-to-use video editor for Windows, you might want to check out Avidemux 2.7.5 x64 Multilingual Crack. This is an open-source video editor that can handle various video formats, codecs, filters, and encoding tasks.</p>
- <p>In this article, we will give you a brief overview of what Avidemux 2.7.5 x64 Multilingual Crack is, how to download and install it, how to use it for basic and advanced video editing tasks, why you should choose it over other video editors, and some tips and tricks for using it effectively.</p>
- <h2>Avidemux 2.7.5 x64 Multilingual crack</h2><br /><p><b><b>DOWNLOAD</b> &rarr; <a href="https://byltly.com/2uKxBn">https://byltly.com/2uKxBn</a></b></p><br /><br />
- <h2>What is Avidemux 2.7.5 x64 Multilingual Crack?</h2>
- <h3>A brief introduction to Avidemux and its features</h3>
- <p>Avidemux is an open-source video editor that was first released in 2000 by Mean (a French programmer). It is designed for simple cutting, filtering, and encoding tasks, but it also supports more complex features such as scripting, plugins, and command-line interface.</p>
- <p>Avidemux can work with various video formats such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H.264, H.265, VOB, TS, ASF, OGM, and more. It can also encode audio files into formats such as MP3, WMA, AC3, MP2, WAV, and OGG. You can use Avidemux to perform basic editing tasks such as removing unwanted parts of the video, resizing, cropping, flipping, or rotating the picture. You can also apply filters and effects to your videos such as color correction, noise reduction, sharpening, deinterlacing, subtitles, etc.</p>
- <h3>How to download and install Avidemux 2.7.5 x64 Multilingual Crack</h3>
- <p>Avidemux 2.7.5 x64 Multilingual Crack is the latest version of Avidemux that was released on August 31st 2019. It is compatible with Windows XP/Vista/7/8/8.1/10 operating systems. To download and install it on your PC, you can follow these steps:</p>
- <ol>
- <li>Go to <a href="https://sourceforge.net/projects/avidemux/files/avidemux/2.7.5/">this link</a> and click on the file named <code>Avidemux_2.7.5 VC++ 64bits.exe</code> . This will start downloading the setup file on your computer.</li>
- <li>Once the download is complete, double-click on the setup file and follow the instructions on the screen to install Avidemux 2. 7. 5 x64 Multilingual Crack on your PC.</li>
- <li>After the installation is done, you can launch Avidemux from the Start menu or the desktop shortcut.</li>
- </ol>
- <h3>How to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks</h3>
- <p>Using Avidemux 2. 7. 5 x64 Multilingual Crack for video editing tasks is quite simple and straightforward. Here are some basic steps that you can follow:</p>
- <ol>
- <li>Open Avidemux and click on the <code>File</code> menu and select <code>Open</code>. Browse your computer and select the video file that you want to edit.</li>
- <li>The video file will be loaded in the main window of Avidemux where you can see a preview of it on the left side and a timeline on the bottom side.</li>
- <li>To cut a part of the video that you don't want to keep, move the slider on the timeline to the start point of the part that you want to remove and press <code>[</code>
- on your keyboard to mark it as <code>A</code>. Then move the slider to the end point of the part that you want to remove and press <code>]</code>
- on your keyboard to mark it as <code>B</code>. Then click on the <code>Edit</code>
- menu and select <code>Delete</code>. This will delete the part between <code>A</code>
- and <code>B</code>.</li>
- <li>To resize or crop your video, click on the <code>Video</code>
- menu and select <code>Filters</code>. This will open a new window where you can see a list of filters that you can apply to your video.</li>
- <li>To resize your video, select <code>Transform</code>
- from the left panel and then select <code>Resize</code>
- from the right panel. This will open another window where you can enter the new width and height of your video in pixels or percentage.</li>
- <li>To crop your video, select <code>Crop</code>
- from the right panel under <code>Transform</code>. This will open another window where you can enter the number of pixels that you want to crop from each side of your video.</li>
- <li>To apply any filter or effect to your video such as color correction, noise reduction, sharpening etc., select them from the left panel under <code>Colors</code>, <code>Noise</code>, <code>Sharpness</code>, etc., respectively.</li>
- <li>To save your edited video file in a different format or codec than the original one, click on the drop-down menus under <code>Output Format</code>, <code>Video Output</code>, and <code>Audio Output</code>, respectively at the left side of the main window of Avidemux.</li>
- <li>Select the format or codec that you want for your output file from the available options such as AVI, FLV, MP4, Matroska, MPEG, MPEG-2, H. 264, H. 265, VOB, TS, ASF, OGM, etc., for the format; XviD, x264, x265, MPEG-4 ASP, MPEG-4 AVC, MPEG-4 HEVC, MPEG-1/2 Video, etc., for the video codec; MP3, WMA, AC3, MP2, WAV, OGG, etc., for the audio codec . You can also adjust the quality or bitrate of the output file by moving the slider under each drop-down menu.</li>
- <li>To save your edited video file on your computer, click on the <code>File</code>
- menu and select <code>Save</code>. Enter the name and location of your output file and click <code>Save</code>.</li>
- </ol>
- <h2>Why choose Avidemux 2. 7. 5 x64 Multilingual Crack over other video editors?</h2>
- <h3>The advantages of Avidemux 2. 7. 5 x64 Multilingual Crack</h3>
- <p>Avidemux 2. 7. 5 x64 Multilingual Crack has some advantages over other video editors that make it a good choice for simple video editing tasks:</p>
- <ul>
- <li>It is free and open-source software that does not require any license or subscription fee to use. You can download it from the official website or from other sources without any risk of malware or viruses.</li>
- <li>It is lightweight and fast software that does not consume much of your system resources or disk space. You can run it on older or low-end computers without any lag or crash.</li>
- <li>It is easy and intuitive software that has a simple graphical user interface with essential menus and controls. You can learn how to use it in a few minutes without any prior experience or training.</li>
- <li>It is versatile and flexible software that supports a wide range of video formats, codecs, filters, and encoding options. You can edit any video file that you have on your computer or device without any compatibility issues.</li>
- <li>It is powerful and advanced software that offers features such as scripting, plugins, and command-line interface for more complex video editing tasks. You can customize and automate your workflow with these features if you are a pro-level user.</li>
- </ul>
- <h3>The disadvantages of Avidemux 2.7.5 x64 Multilingual Crack</h3>
- <p>However, Avidemux 2.7.5 x64 Multilingual Crack also has some disadvantages that you should be aware of before choosing it as your video editor:</p>
- <ul>
- <li>It is not a full-featured video editor that can handle all kinds of video editing tasks. It lacks some features such as timeline editing, transitions, titles, audio editing, etc., that are available in other video editors.</li>
- <li>It is not a user-friendly video editor that has a modern and attractive user interface. It has a dated and plain user interface that may not appeal to some users who prefer a more stylish and elegant design.</li>
- <li>It is not a stable and reliable video editor that works flawlessly on all systems and platforms. It may have some bugs and glitches that can cause errors or crashes during the editing process.</li>
- <li>It is not a well-supported video editor that has a large and active community of users and developers. It may not have regular updates or improvements that can fix the issues or add new features to the software.</li>
- </ul>
- <h3>The comparison of Avidemux 2.7.5 x64 Multilingual Crack with other popular video editors</h3>
- <p>To help you decide whether Avidemux 2.7.5 x64 Multilingual Crack is the best video editor for you, here is a comparison table that shows how it stacks up against other popular video editors in terms of features, performance, price, and user ratings:</p>
- <p>Avidemux 2.7.5 x64 Multilingual + Portable free download<br />
- Avidemux 2.7.5 x64 Multilingual video editor<br />
- Avidemux 2.7.5 x64 Multilingual + crack torrent<br />
- Avidemux 2.7.5 x64 Multilingual for Windows 10<br />
- Avidemux 2.7.5 x64 Multilingual + Portable - ShareAppsCrack[^1^]<br />
- Avidemux 2.7.5 x64 Multilingual video encoder<br />
- Avidemux 2.7.5 x64 Multilingual + crack mega<br />
- Avidemux 2.7.5 x64 Multilingual for Windows 7<br />
- Avidemux 2.7.5 x64 Multilingual - FileWomen[^2^]<br />
- Avidemux 2.7.5 x64 Multilingual video cutter<br />
- Avidemux 2.7.5 x64 Multilingual + crack rapidshare<br />
- Avidemux 2.7.5 x64 Multilingual for Windows 8<br />
- AviDemux 2.7.5 (64-bit) - Neowin[^3^]<br />
- Avidemux 2.7.5 x64 Multilingual video filter<br />
- Avidemux 2.7.5 x64 Multilingual + crack depositfiles<br />
- Avidemux 2.7.5 x64 Multilingual for Windows Vista<br />
- Avidemux 2.7.5 X64 Multilingual Crack !FULL! - studiblog.net[^4^]<br />
- Avidemux 2.7.5 x64 Multilingual video format converter<br />
- Avidemux 2.7.5 x64 Multilingual + crack 4shared<br />
- Avidemux 2.7.5 x64 Multilingual for Windows XP<br />
- Avidemux 2.7.5 x64 Multilingual crack - prodacorim.weebly.com[^5^]<br />
- Avidemux 2.7.5 x64 Multilingual video frame rate editor<br />
- Avidemux 2.7.5 x64 Multilingual + crack serial key<br />
- Avidemux 2.7.5 x64 Multilingual for Mac OS X<br />
- How to install Avidemux 2.7.5 x64 Multilingual + crack<br />
- Avidemux 2.7.5 x64 Multilingual video processing tool<br />
- Avidemux 2.7.5 x64 Multilingual + crack license key<br />
- Avidemux 2.7.5 x64 Multilingual for Linux<br />
- How to use Avidemux 2.7.5 x64 Multilingual + crack<br />
- Avidemux 2.7.5 x64 Multilingual video decoding option<br />
- Avidemux 2.7.5 x64 Multilingual + crack activation code<br />
- Avidemux 2.7.5 x64 Multilingual for Android<br />
- How to uninstall Avidemux 2.7.5 x64 Multilingual + crack<br />
- Avidemux 2.7.5 x64 Multilingual video text editor<br />
- Avidemux 2.7.5 x64 Multilingual + crack patch<br />
- Avidemux 2.7.5 x64 Multilingual for iOS<br />
- How to update Avidemux 2.7.5 x64 Multilingual + crack<br />
- Avidemux 2.7.5 x64 Multilingual video black bar remover<br />
- Avidemux 2 .75x6Multilingua+crackkeygen</p>
- | Video Editor | Features | Performance | Price | User Ratings |
- | --- | --- | --- | --- | --- |
- | Avidemux 2.7.5 x64 Multilingual Crack | Cutting, filtering, encoding, scripting, plugins, command-line interface | Fast, lightweight, compatible | Free | 4/5 |
- | Adobe Premiere Pro | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing | Professional, powerful, seamless | $20.99/month | 4.5/5 |
- | Apple Final Cut Pro | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, Magnetic Timeline, Smart Conform, Proxy Workflow | Professional, powerful, seamless, optimized for Macs | $299 (one-time purchase) | 4.6/5 |
- | Cyberlink PowerDirector 365 | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, screen recording, motion tracking, 360-degree editing | Fast, powerful, smooth | $51.99/year or $4.33/month | 4.4/5 |
- | Wondershare Filmora X | Timeline editing, transitions, titles, audio editing, color grading, motion graphics, screen recording, motion tracking, keyframing, green screen | Easy-to-use, attractive, fun | $69.99 (one-time purchase) or $39.99/year or $7.99/month | 4.3/5 |
- <h2>Tips and tricks for using Avidemux 2.7.5 x64 Multilingual Crack effectively</h2>
- <h3>How to apply filters and effects to your videos</h3>
- <p>One of the main features of Avidemux 2.7.5 x64 Multilingual Crack is the ability to apply filters and effects to your videos to enhance their quality and appearance. Here are some tips and tricks for using filters and effects effectively:</p>
- <ul>
- <li>To apply a filter or effect to your video, click on the <code>Video</code> menu and select <code>Filters</code>. This will open a new window where you can see a list of filters that you can apply to your video.</li>
- <li>To add a filter or effect to your video, select it from the left panel and click on the <code>Add</code> button at the bottom right corner of the window. This will add the filter or effect to the right panel where you can see its name and settings.</li>
- <li>To adjust the settings of a filter or effect, select it from the right panel and click on the <code>Configure</code> button at the bottom right corner of the window. This will open another window where you can change the parameters of the filter or effect according to your preference.</li>
- <li>To preview the effect of a filter or effect on your video, click on the <code>Preview</code> button at the bottom left corner of the window. This will open another window where you can see how your video looks like with the filter or effect applied.</li>
- <li>To remove a filter or effect from your video, select it from the right panel and click on the <code>Remove</code> button at the bottom right corner of the window. This will remove function is the name of the function you want. For example, to load a video file, you can use app.load("filename"); where filename is the name of your video file.</li>
- <li>To run a script file, you can either use the <code>File</code> menu and select <code>Run Project</code>, or use the command-line option <code>--run</code> followed by the name of your script file. For example, to run a script file named script.js, you can use avidemux --run script.js.</li>
- <li>To debug a script file, you can use the <code>displayError</code> and <code>displayInfo</code> functions to show pop-up messages with error or information messages. For example, to show an error message that says "Something went wrong", you can use displayError("Something went wrong");.</li>
- <li>To find more examples of script files, you can check the official website of Avidemux or the online documentation. You can also look at the project files that are saved by Avidemux when you save your video editing settings.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>A summary of the main points of the article</h3>
- <p>In this article, we have learned how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks. We have covered the following topics:</p>
- <ul>
- <li>What is Avidemux 2.7.5 x64 Multilingual Crack and what are its features?</li>
- <li>How to download and install Avidemux 2.7.5 x64 Multilingual Crack on your PC?</li>
- <li>How to use Avidemux 2.7.5 x64 Multilingual Crack for basic and advanced video editing tasks such as cutting, filtering, encoding, and scripting?</li>
- <li>Why choose Avidemux 2.7.5 x64 Multilingual Crack over other video editors and what are its advantages and disadvantages?</li>
- <li>Tips and tricks for using Avidemux 2.7.5 x64 Multilingual Crack effectively and efficiently?</li>
- </ul>
- <p>We hope that this article has helped you understand how to use Avidemux 2.7.5 x64 Multilingual Crack for video editing tasks and that you have enjoyed reading it.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Avidemux 2.7.5 x64 Multilingual Crack and their answers:</p>
- <ol>
- <li><b>Is Avidemux 2.7.5 x64 Multilingual Crack safe to use?</b></li>
- <p>A: Yes, Avidemux 2.7.5 x64 Multilingual Crack is safe to use as long as you download it from the official website or from other trusted sources. It does not contain any malware or viruses that can harm your PC or your files.</p>
- <li><b>Is Avidemux 2.7.5 x64 Multilingual Crack compatible with Mac or Linux?</b></li>
- <p>A: No, Avidemux 2.7.5 x64 Multilingual Crack is only compatible with Windows operating systems such as Windows XP/Vista/7/8/8.1/10. However, there are other versions of Avidemux that are compatible with Mac or Linux such as Avidemux 2.6.x or Avidemux 2.5.x.</p>
- <li><b>Can I use Avidemux 2.7.5 x64 Multilingual Crack for professional video editing?</b></li>
- <p>A: No, Avidemux 2.7.5 x64 Multilingual Crack is not a professional video editor that can handle all kinds of video editing tasks such as timeline editing, transitions, titles, audio editing, color grading, motion graphics, multicam editing, VR editing, etc. It is designed for simple cutting, filtering, and encoding tasks only.</p>
- <li><b>What are some alternatives to Avidemux 2.7.5 x64 Multilingual Crack?</b></li>
- <p>A: Some alternatives to Avidemux 2.
- 7.5 x64 Multilingual Crack are Adobe Premiere Pro, Apple Final Cut Pro, Cyberlink PowerDirector 365, Wondershare Filmora X, and DaVinci Resolve. These are more professional and full-featured video editors that can handle more complex and creative video editing tasks. However, they are also more expensive and require more system resources and learning time than Avidemux 2.7.5 x64 Multilingual Crack.</li>
- <li><b>How can I learn more about Avidemux 2.7.5 x64 Multilingual Crack?</b></li>
- <p>A: You can learn more about Avidemux 2.7.5 x64 Multilingual Crack by visiting the official website of Avidemux or the online documentation. You can also watch some video tutorials on YouTube or read some user reviews on various websites.</p>
- </ol>
- </p> 0a6ba089eb<br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free DJ Software The Best Options for Every Skill Level and Budget.md DELETED
@@ -1,27 +0,0 @@
-
- ```html
- <h1>Is There Any Free DJ Software? The Best Options for Beginners and Pros</h1>
- <p>If you are looking for a way to mix music and create your own beats, you might be wondering: is there any free DJ software? The answer is yes, there are plenty of options available for both beginners and pros. In this article, we will review some of the best free DJ software that you can download and use right away.</p>
- <h2>What is DJ Software?</h2>
- <p>DJ software is a program that allows you to manipulate audio files and create mixes. You can use it to play music from your computer or external devices, adjust the tempo and pitch, apply effects and filters, loop and cue tracks, scratch and crossfade, and more. DJ software can also help you record and broadcast your mixes online.</p>
- <h2>is there any free dj software</h2><br /><p><b><b>Download File</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://byltly.com/2uKwl3">https://byltly.com/2uKwl3</a></b></p><br /><br />
- <h2>Why Use Free DJ Software?</h2>
- <p>Free DJ software can be a great way to start learning the basics of DJing without spending a lot of money. You can experiment with different features and techniques, practice your skills, and have fun. Free DJ software can also be useful for professional DJs who want to try out new software or have a backup option in case of emergencies.</p>
- <h2>What are the Best Free DJ Software?</h2>
- <p>There are many free DJ software available online, but not all of them are equally good. Some may have limited functionality, poor performance, or compatibility issues. To help you choose the best free DJ software for your needs, we have selected some of the most popular and reliable ones. Here they are:</p>
- <ul>
- <li><b>Mixxx</b>: Mixxx is one of the most popular free DJ software in the world. It has a user-friendly interface, powerful features, and supports a wide range of audio formats and hardware controllers. You can use it to mix up to four decks, sync tracks automatically, apply effects and EQs, record and stream your mixes, and more. Mixxx is also open-source, which means you can customize it to your liking or contribute to its development.</li>
- <li><b>VirtualDJ</b>: VirtualDJ is another well-known free DJ software that has been around for a long time. It has a sleek interface, advanced features, and supports various audio and video formats and hardware devices. You can use it to mix up to six decks, scratch and remix tracks, apply effects and transitions, create samples and loops, broadcast your mixes online, and more. VirtualDJ also has a large community of users who share tips, tutorials, plugins, skins, and more.</li>
- <li><b>DJ ProDecks</b>: DJ ProDecks is a simple but effective free DJ software that is designed for beginners. It has a minimalist interface, basic features, and supports MP3, WAV, OGG, WMA, AAC, FLA formats. You can use it to mix up to two decks, adjust the speed and pitch, apply effects and filters, loop and cue tracks, record your mixes, and more. DJ ProDecks also has a built-in browser that lets you access your music library easily.</li>
- </ul>
- <h2>How to Use Free DJ Software?</h2>
- <p>To use free DJ software, you will need a computer with enough memory and processing power, a sound card or audio interface, speakers or headphones, and optionally a MIDI controller or turntable. You will also need to download and install the software of your choice from its official website or a trusted source. Once you have everything set up, you can follow these basic steps:</p>
- <ol>
- <li>Launch the software and explore its interface. Familiarize yourself with the different buttons, knobs, sliders, menus, etc.</li>
- <li>Load your music files into the software. You can either drag and drop them from your computer or browse them from the software's library.</li>
- <li>Select the tracks you want to mix and assign them to different decks. You can also adjust their volume levels, EQs, etc.</li>
- <li>Start mixing by playing the tracks simultaneously or alternately. You can use the sync button to match their tempos automatically or manually adjust them using the pitch slider.</li>
- <li>Add some flair to your mix by applying effects such as reverb, delay, flanger, etc. You can also use the crossfader to blend the tracks smoothly</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Business Goals 1 Students Book Download.md DELETED
@@ -1,6 +0,0 @@
- <h2>Business Goals 1 Students Book Download</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://imgfil.com/2uy15b">https://imgfil.com/2uy15b</a></b></p><br /><br />
- <br />
- Business Goals 3 Students Book PDF Book. So keep your ... Make it a career goal in to learn a new skill you can apply to your job. ... Touchstone: Workbook 1. 1fdad05405<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/CRACKMathWorksMATLABR2018aCrackCrackzSoft UPDATED.md DELETED
@@ -1,6 +0,0 @@
- <h2>CRACKMathWorksMATLABR2018aCrackCrackzSoft</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://imgfil.com/2uy0ys">https://imgfil.com/2uy0ys</a></b></p><br /><br />
-
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Control Machete Comprendes Mendes Acapella Christmasxmass VERIFIED.md DELETED
@@ -1,16 +0,0 @@
- <br />
- <h1>How Control Machete's "Comprendes Mendes" Became a Christmas Classic</h1>
- <p>Control Machete was a Mexican hip hop group that emerged in the late 90s with a distinctive sound that blended rap, rock, and regional influences. Their debut album, Mucho Barato, was released in 1997 and featured their most famous song, "Comprendes Mendes".</p>
- <h2>Control Machete Comprendes Mendes Acapella Christmasxmass</h2><br /><p><b><b>Download File</b> &#128504; <a href="https://imgfil.com/2uy1ru">https://imgfil.com/2uy1ru</a></b></p><br /><br />
- <p>The song is a defiant anthem that challenges the listener to understand the reality of the streets and the culture of the group. The chorus repeats the phrase "¿Me comprendes Mendez?" (Do you understand me Mendez?), which is a reference to a popular Mexican TV show from the 80s called "¿Qué nos pasa?" (What's wrong with us?). The show featured a character named Mendez who was always confused and out of touch with the situations around him.</p>
- <p>The song became a hit not only in Mexico but also in other Latin American countries and even in the US, where it was featured in the soundtrack of the movie Amores Perros. The song also gained popularity among acapella groups, who found its catchy melody and rhythmic structure ideal for vocal arrangements. One of the most notable examples is the version by Vocal Sampling, a Cuban acapella group that recreates the sounds of instruments with their voices.</p>
- <p>But how did "Comprendes Mendes" become a Christmas song? Well, it turns out that the song has a hidden connection to the festive season. The lyrics mention several times the word "control", which is not only the name of the group but also a slang term for cocaine. In Mexico, cocaine is sometimes called "nieve" (snow), which is also a common symbol of Christmas. Moreover, the song samples a famous Christmas carol called "Noche de Paz" (Silent Night) at the beginning and at the end, creating a contrast between the peaceful melody and the aggressive rap.</p>
- <p></p>
- <p>Therefore, some fans of Control Machete have adopted "Comprendes Mendes" as a Christmas song, either as a joke or as a way of celebrating their identity and culture. The song has also been parodied and remixed by other artists, adding more elements related to Christmas, such as bells, sleighs, and Santa Claus. For example, there is a version by DJ Rasec that mixes "Comprendes Mendes" with "Jingle Bells" and another one by DJ Pelos that mixes it with "All I Want for Christmas Is You".</p>
- <p>So, whether you are looking for a different way to spice up your holiday playlist or you are just curious about this unusual phenomenon, you might want to check out Control Machete's "Comprendes Mendes" and its acapella and Christmas versions. You might be surprised by how much you enjoy this rap classic.</p>
-
- <p>Control Machete was formed in 1996 by three friends from Monterrey, Mexico: Fermín IV, Pato, and Toy Selectah. They were influenced by American rap groups like Cypress Hill and Public Enemy, as well as by Mexican rock bands like Café Tacuba and Caifanes. They also incorporated elements from their local culture, such as norteño music, slang, and humor.</p>
- <p>Their first album, Mucho Barato, was a success both critically and commercially. It sold more than 500,000 copies and received several awards and nominations. It also opened the doors for other Mexican rap artists to gain recognition and exposure. Control Machete continued to release two more albums: Artillería Pesada in 1999 and Uno, Dos: Bandera in 2003. However, in 2004, the group announced their separation due to creative differences and personal issues.</p>
- <p>Despite their breakup, Control Machete remains one of the most influential and respected rap groups in Mexico and Latin America. Their songs have been covered by other artists from different genres and have been used in movies, TV shows, video games, and commercials. Their legacy is also evident in the solo careers of their members, who have continued to produce music and collaborate with other artists.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Ek Tha Tiger Download 720p In Hindi.md DELETED
@@ -1,6 +0,0 @@
- <h2>Ek Tha Tiger download 720p in hindi</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash; <a href="https://imgfil.com/2uxZTi">https://imgfil.com/2uxZTi</a></b></p><br /><br />
- <br />
- Mahesh Babu's Caravan Becomes Talk Of The Town · Chaavu Kaburu Challaga Full Movie Leaked Online · Mosagallu Full Movie Leaked Online For Free Download · Featured. Bollywood; Television; Tamil; Telugu; Kannada; Malayalam. 1fdad05405<br />
- <br />
- <br />
- <p></p>
spaces/1phancelerku/anime-remove-background/Dream League Soccer 2019 MOD APKOBB - Enjoy All Players 100 for Free.md DELETED
@@ -1,127 +0,0 @@
- <br />
- <h1>Dream League Soccer 2019 Mod APK All Players 100: How to Download and Install</h1>
- <p>If you are a fan of soccer games, you might have heard of Dream League Soccer 2019, one of the most popular and realistic soccer games on Android. But did you know that you can play Dream League Soccer 2019 mod apk all players 100, which gives you access to all the players in the game with maximum ratings? In this article, we will show you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to enjoy this amazing game on your device.</p>
- <h2>What is Dream League Soccer 2019?</h2>
- <p>Dream League Soccer 2019 is a soccer game developed by First Touch Games, a studio that specializes in creating soccer games for mobile devices. Dream League Soccer 2019 allows you to create your own dream team, compete in various modes and leagues, and customize your stadium and kits. You can also play online with other players from around the world, or offline with friends using local Wi-Fi.</p>
- <h2>dream league soccer 2019 mod apk all players 100</h2><br /><p><b><b>DOWNLOAD</b> >>> <a href="https://jinyurl.com/2uNMz1">https://jinyurl.com/2uNMz1</a></b></p><br /><br />
- <h3>Features of Dream League Soccer 2019</h3>
- <p>Some of the features of Dream League Soccer 2019 are:</p>
- <ul>
- <li>Realistic gameplay and graphics, with smooth animations and physics.</li>
- <li>Over 3500 licensed players from FIFPro™, with authentic names, faces, and skills.</li>
- <li>6 divisions and 7 cup competitions to play in, plus the prestigious Dream League Online mode.</li>
- <li>Build your own stadium and show it off to your opponents.</li>
- <li>Customize your team's logo, kits, and manager.</li>
- <li>Sync your progress between devices using Google Play Cloud.</li>
- <li>Soundtrack featuring The Luka State, Sunset Sons, Vistas, and more.</li>
- </ul>
- <h3>Why play Dream League Soccer 2019 mod apk?</h3>
- <p>Dream League Soccer 2019 is already a great game, but if you want to make it even better, you can try playing Dream League Soccer 2019 mod apk. This is a modified version of the game that gives you some advantages over the original version. For example, with Dream League Soccer 2019 mod apk all players 100, you can have all the players in the game with maximum ratings. This means that you can create the ultimate dream team with any players you want, without worrying about their skills or attributes. You can also have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features. With Dream League Soccer 2019 mod apk all players 100, you can enjoy the game without any limitations or restrictions.</p>
- <p>dream league soccer 2019 mod apk unlimited money and players<br />
- dream league soccer 2019 mod apk download with all players unlocked<br />
- dream league soccer 2019 mod apk latest version with 100 rated players<br />
- dream league soccer 2019 mod apk obb file download for android<br />
- dream league soccer 2019 mod apk hack with all players maxed out<br />
- dream league soccer 2019 mod apk free download full version<br />
- dream league soccer 2019 mod apk unlimited coins and gems<br />
- dream league soccer 2019 mod apk offline mode with all players<br />
- dream league soccer 2019 mod apk no root required<br />
- dream league soccer 2019 mod apk mega mod with all features<br />
- dream league soccer 2019 mod apk data download for ios<br />
- dream league soccer 2019 mod apk unlimited player development<br />
- dream league soccer 2019 mod apk new update with all teams<br />
- dream league soccer 2019 mod apk best players in the world<br />
- dream league soccer 2019 mod apk easy installation guide<br />
- dream league soccer 2019 mod apk real madrid team with all players<br />
- dream league soccer 2019 mod apk unlimited everything unlocked<br />
- dream league soccer 2019 mod apk high graphics and sound quality<br />
- dream league soccer 2019 mod apk barcelona team with all players<br />
- dream league soccer 2019 mod apk online multiplayer mode with all players<br />
- dream league soccer 2019 mod apk liverpool team with all players<br />
- dream league soccer 2019 mod apk cheats and tricks for beginners<br />
- dream league soccer 2019 mod apk juventus team with all players<br />
- dream league soccer 2019 mod apk custom kits and logos for all teams<br />
- dream league soccer 2019 mod apk manchester city team with all players<br />
- dream league soccer 2019 mod apk unlimited stamina and energy for all players<br />
- dream league soccer 2019 mod apk psg team with all players<br />
- dream league soccer 2019 mod apk original game with all players modified<br />
- dream league soccer 2019 mod apk bayern munich team with all players<br />
- dream league soccer 2019 mod apk arsenal team with all players</p>
- <h2>How to download Dream League Soccer 2019 mod apk all players 100?</h2>
- <h3>Requirements for Dream League Soccer 2019 mod apk</h3>
- <p>Before you download and install Dream League Soccer 2019 mod apk all players 100, you need to make sure that your device meets the following requirements:</p>
- <ul>
- <li>Your device must have Android version 4.4 or higher.</li>
- <li>Your device must have at least 1 GB of RAM and free storage space.</li>
- <li>You must enable unknown sources in your device's settings. This will allow you to install apps from sources other than the Google Play Store.</li>
- </ul>
- <h3>Steps to download and install Dream League Soccer 2019 mod apk</h3>
- <p>Once you have checked the requirements, you can follow these steps to download and install Dream League Soccer 2019 mod apk all players 100:</p>
- <ol>
- <li>Download the Dream League Soccer 2019 mod apk file from a trusted source. You can use this link to download the file.</li>
- <li>Download the Dream League Soccer 2019 OBB file from the same source. You can use this link to download the file.</li>
- <li>Locate the downloaded files in your device's file manager and tap on them to install them. You may need to grant some permissions to the app.</li>
- <li>After installing the apk file, do not open the app yet. Instead, move the OBB file to the Android/OBB folder in your device's internal storage. If you don't have this folder, create it manually.</li>
- <li>Now you can open the app and enjoy Dream League Soccer 2019 mod apk all players 100.</li>
- </ol>
- <h2>How to play Dream League Soccer 2019 mod apk all players 100?</h2>
- <p>Playing Dream League Soccer 2019 mod apk all players 100 is similar to playing the original version, but with some differences. Here are some tips on how to play Dream League Soccer 2019 mod apk all players 100:</p>
- <h3>How to create your dream team</h3>
- <p>With Dream League Soccer 2019 mod apk all players 100, you can create your dream team with any players you want, regardless of their ratings or prices. You can also edit their attributes, positions, and skills as you wish. To create your dream team, follow these steps:</p>
- <ul>
- <li>Go to the Team Management menu and tap on the Transfer icon.</li>
- <li>Select any player you want from the list of available players. You can use the filters to narrow down your search by name, rating, position, or league.</li>
- <li>Tap on the Buy button to add the player to your team. You don't need to pay any coins or gems for the player.</li>
- <li>Repeat this process until you have filled your squad with your desired players.</li>
- <li>You can also go to the Player Development menu and tap on any player to edit their attributes, positions, and skills. You can increase or decrease their ratings as you like.</li>
- </ul>
- <h3>How to compete in different modes and leagues</h3>
- <p>Dream League Soccer 2019 mod apk all players 100 offers you various modes and leagues to play in, such as Career Mode, Dream League Online, Friendly Matches, and Cup Competitions. You can choose any mode or league you want and compete against different teams with different difficulties. To compete in different modes and leagues, follow these steps:</p>
- <ul>
- <li>Go to the Main Menu and tap on the Play icon.</li>
- <li>Select the mode or league you want to play in. You can see the details of each mode or league, such as the number of matches, the rewards, and the difficulty level.</li>
- <li>Select your team and your opponent's team. You can also customize your team's formation, tactics, and kits before starting the match.</li>
- <li>Tap on the Start Match button to begin playing. You can use the virtual buttons on the screen to control your players, pass, shoot, tackle, and perform other actions.</li>
- <li>Try to score more goals than your opponent and win the match. You can also pause the game and make substitutions or change tactics if needed.</li>
- <li>After finishing the match, you can see the match statistics, such as the scoreline, the possession, the shots, and the fouls. You can also see your progress in the mode or league you are playing in.</li>
- </ul>
- <h3>How to customize your stadium and kits</h3>
- <p>Dream League Soccer 2019 mod apk all players 100 allows you to customize your stadium and kits according to your preferences. You can change the name, color, design, and capacity of your stadium, as well as the logo, color, and design of your kits. To customize your stadium and kits, follow these steps:</p>
- <ul>
- <li>Go to the My Club menu and tap on the Stadium icon or the Kit icon.</li>
- <li>Select the option you want to customize, such as Stadium Name, Stadium Color, Stadium Design, or Stadium Capacity for the stadium, or Logo, Home Kit, Away Kit, or Third Kit for the kits.</li>
- <li>Use the sliders, buttons, or menus to change the features of your stadium or kits. You can see a preview of your changes on the screen.</li>
- <li>Tap on the Save button to confirm your changes. You can also tap on the Reset button to undo your changes.</li>
- </ul>
- <h2>Pros and cons of Dream League Soccer 2019 mod apk all players 100</h2>
- <p>Dream League Soccer 2019 mod apk all players 100 has its pros and cons, like any other game. Here are some of the pros and cons of playing Dream League Soccer 2019 mod apk all players 100:</p>
- <h3>Pros of Dream League Soccer 2019 mod apk</h3>
- <ul>
- <li>You can have all the players in the game with maximum ratings, which makes your team unbeatable and fun to play with.</li>
- <li>You can have unlimited coins and gems, which you can use to buy new players, upgrade your stadium, or unlock new features.</li>
- <li>You can customize your team's logo, kits, and manager as you like, without any restrictions or costs.</li>
- <li>You can enjoy the game without any ads or in-app purchases.</li>
- </ul>
- <h3>Cons of Dream League Soccer 2019 mod apk</h3>
- <ul>
- <li>The game may not be compatible with some devices or may crash or lag sometimes.</li>
- <li>The game may not be updated regularly or may not have the latest features or players from the original version.</li>
- <li>The game may not be fair or challenging for some players who prefer to play with the original rules and ratings.</li>
110
- <li>The game may not be safe or secure for your device or data, as it is not from an official source.</li>
111
- </ul>
112
- <h2>Conclusion</h2>
113
- <p>Dream League Soccer 2019 is a fantastic soccer game that lets you create your own dream team, compete in various modes and leagues, and customize your stadium and kits. But if you want to make it even more exciting and enjoyable, you can try playing Dream League Soccer 2019 mod apk all players 100, which gives you all the players in the game with maximum ratings, unlimited coins and gems, and more. In this article, we showed you how to download and install Dream League Soccer 2019 mod apk all players 100, and how to play it on your device. We hope you found this article helpful and informative. Now go ahead and enjoy Dream League Soccer 2019 mod apk all players 100!</p>
114
- <h2>FAQs</h2>
115
- <p>Here are some frequently asked questions about Dream League Soccer 2019 mod apk all players 100:</p>
116
- <h4>Q: Is Dream League Soccer 2019 mod apk all players 100 legal?</h4>
117
- <p>A: No, Dream League Soccer 2019 mod apk all players 100 is not legal, as it is a modified version of the original game that violates its terms and conditions. We do not endorse or promote the use of Dream League Soccer 2019 mod apk all players 100, and we are not responsible for any consequences that may arise from using it.</p>
118
- <h4>Q: Is Dream League Soccer 2019 mod apk all players 100 safe?</h4>
119
- <p>A: No, Dream League Soccer 2019 mod apk all players 100 is not safe, as it is not from an official source and may contain viruses or malware that can harm your device or data. We recommend that you download and install Dream League Soccer 2019 from the Google Play Store or other trusted sources.</p>
120
- <h4>Q: How can I update Dream League Soccer 2019 mod apk all players 100?</h4>
121
- <p>A: You cannot update Dream League Soccer 2019 mod apk all players 100 from the app itself, as it is not connected to the original server. You may need to download and install a new version of Dream League Soccer 2019 mod apk all players 100 from a different source if there is one available. However, we advise you to uninstall Dream League Soccer 2019 mod apk all players 100 and install the original version of Dream League Soccer 2019 instead.</p>
122
- <h4>Q: How can I play Dream League Soccer 2019 mod apk all players 100 online?</h4>
123
- <p>A: You cannot play Dream League Soccer 2019 mod apk all players 100 online with other players from around the world, as it is not compatible with the original server. You can only play offline with friends using local Wi-Fi. If you want to play online with other players, you need to play the original version of Dream League Soccer 2019.</p>
124
- <h4>Q: How can I get more coins and gems in Dream League Soccer 2019?</h4>
125
- <p>A: You can get more coins and gems in Dream League Soccer 2019 by playing matches, completing achievements, watching ads, or buying them with real money. You can also use some tricks and hacks to get more coins and gems, but we do not recommend that, as it may ruin the fun of the game or get you banned.</p> 401be4b1e0<br />
126
- <br />
127
- <br />
 
spaces/52Hz/SRMNet_real_world_denoising/app.py DELETED
@@ -1,37 +0,0 @@
- import os
- import gradio as gr
- from PIL import Image
- 
- 
- os.system(
-     'wget https://github.com/FanChiMao/SRMNet/releases/download/0.0/real_denoising_SRMNet.pth -P experiments/pretrained_models')
- 
- 
- def inference(img):
-     os.system('mkdir test')
-     #basewidth = 256
-     #wpercent = (basewidth / float(img.size[0]))
-     #hsize = int((float(img.size[1]) * float(wpercent)))
-     #img = img.resize((basewidth, hsize), Image.ANTIALIAS)
-     img.save("test/1.png", "PNG")
-     os.system(
-         'python main_test_SRMNet.py --input_dir test --weights experiments/pretrained_models/real_denoising_SRMNet.pth')
-     return 'result/1.png'
- 
- 
- title = "Selective Residual M-Net for Real-world Image Denoising"
- description = "Gradio demo for SRMNet. SRMNet has competitive performance results on two synthetic and two real-world noisy datasets in terms of quantitative metrics and visual quality. See the paper and project page for detailed results below. Here, we provide a demo for real-world image denoising. To use it, simply upload your image, or click one of the examples to load them. Reference from: https://huggingface.co/akhaliq"
- article = "<p style='text-align: center'><a href='https://ieeexplore.ieee.org/document/9909521' target='_blank'>Selective Residual M-Net</a> | <a href='https://github.com/FanChiMao/SRMNet' target='_blank'>Github Repo</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=52Hz_SRMNet_real_world_denoising' alt='visitor badge'></center>"
- 
- examples = [['Noise.png'], ['Noise2.png']]
- gr.Interface(
-     inference,
-     [gr.inputs.Image(type="pil", label="Input")],
-     gr.outputs.Image(type="filepath", label="Output"),
-     title=title,
-     description=description,
-     article=article,
-     allow_flagging=False,
-     allow_screenshot=False,
-     examples=examples
- ).launch(debug=True)
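
Note: the app above follows a save → shell-out → return-path pattern. A minimal sketch of the same flow using subprocess instead of os.system (the script name and flags are taken from the code above; the error handling is an assumption, not part of the original Space):

    import subprocess
    from pathlib import Path
    from PIL import Image

    def denoise(img: Image.Image) -> str:
        Path("test").mkdir(exist_ok=True)   # replaces os.system('mkdir test')
        img.save("test/1.png", "PNG")
        # Same CLI as above; check=True raises if the script fails instead of failing silently.
        subprocess.run(
            ["python", "main_test_SRMNet.py", "--input_dir", "test",
             "--weights", "experiments/pretrained_models/real_denoising_SRMNet.pth"],
            check=True,
        )
        return "result/1.png"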
 
spaces/801artistry/RVC801/tools/torchgate/torchgate.py DELETED
@@ -1,264 +0,0 @@
- import torch
- from torch.nn.functional import conv1d, conv2d
- from typing import Union, Optional
- from .utils import linspace, temperature_sigmoid, amp_to_db
- 
- 
- class TorchGate(torch.nn.Module):
-     """
-     A PyTorch module that applies a spectral gate to an input signal.
- 
-     Arguments:
-         sr {int} -- Sample rate of the input signal.
-         nonstationary {bool} -- Whether to use non-stationary or stationary masking (default: {False}).
-         n_std_thresh_stationary {float} -- Number of standard deviations above the mean at which to threshold noise
-                                            for stationary masking (default: {1.5}).
-         n_thresh_nonstationary {float} -- Multiplier over the smoothed magnitude spectrogram used as the threshold
-                                           for non-stationary masking (default: {1.3}).
-         temp_coeff_nonstationary {float} -- Temperature coefficient for non-stationary masking (default: {0.1}).
-         n_movemean_nonstationary {int} -- Number of samples for moving average smoothing in non-stationary masking
-                                           (default: {20}).
-         prop_decrease {float} -- Proportion to decrease signal by where the mask is zero (default: {1.0}).
-         n_fft {int} -- Size of FFT for STFT (default: {1024}).
-         win_length {[int]} -- Window length for STFT. If None, defaults to `n_fft` (default: {None}).
-         hop_length {[int]} -- Hop length for STFT. If None, defaults to `win_length` // 4 (default: {None}).
-         freq_mask_smooth_hz {float} -- Frequency smoothing width for mask (in Hz). If None, no smoothing is applied
-                                        (default: {500}).
-         time_mask_smooth_ms {float} -- Time smoothing width for mask (in ms). If None, no smoothing is applied
-                                        (default: {50}).
-     """
- 
-     @torch.no_grad()
-     def __init__(
-         self,
-         sr: int,
-         nonstationary: bool = False,
-         n_std_thresh_stationary: float = 1.5,
-         n_thresh_nonstationary: float = 1.3,
-         temp_coeff_nonstationary: float = 0.1,
-         n_movemean_nonstationary: int = 20,
-         prop_decrease: float = 1.0,
-         n_fft: int = 1024,
-         win_length: Optional[int] = None,
-         hop_length: Optional[int] = None,
-         freq_mask_smooth_hz: float = 500,
-         time_mask_smooth_ms: float = 50,
-     ):
-         super().__init__()
- 
-         # General Params
-         self.sr = sr
-         self.nonstationary = nonstationary
-         assert 0.0 <= prop_decrease <= 1.0
-         self.prop_decrease = prop_decrease
- 
-         # STFT Params
-         self.n_fft = n_fft
-         self.win_length = self.n_fft if win_length is None else win_length
-         self.hop_length = self.win_length // 4 if hop_length is None else hop_length
- 
-         # Stationary Params
-         self.n_std_thresh_stationary = n_std_thresh_stationary
- 
-         # Non-Stationary Params
-         self.temp_coeff_nonstationary = temp_coeff_nonstationary
-         self.n_movemean_nonstationary = n_movemean_nonstationary
-         self.n_thresh_nonstationary = n_thresh_nonstationary
- 
-         # Smooth Mask Params
-         self.freq_mask_smooth_hz = freq_mask_smooth_hz
-         self.time_mask_smooth_ms = time_mask_smooth_ms
-         self.register_buffer("smoothing_filter", self._generate_mask_smoothing_filter())
- 
-     @torch.no_grad()
-     def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]:
-         """
-         Generates the 2D convolution kernel used to smooth the spectral-gate mask.
- 
-         Returns:
-             smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter,
-             with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency
-             bins to smooth and n_grad_time is the number of time frames to smooth.
-             If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None.
-         """
-         if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None:
-             return None
- 
-         n_grad_freq = (
-             1
-             if self.freq_mask_smooth_hz is None
-             else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2)))
-         )
-         if n_grad_freq < 1:
-             raise ValueError(
-                 f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self.n_fft / 2)))} Hz"
-             )
- 
-         n_grad_time = (
-             1
-             if self.time_mask_smooth_ms is None
-             else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000))
-         )
-         if n_grad_time < 1:
-             raise ValueError(
-                 f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms"
-             )
- 
-         if n_grad_time == 1 and n_grad_freq == 1:
-             return None
- 
-         v_f = torch.cat(
-             [
-                 linspace(0, 1, n_grad_freq + 1, endpoint=False),
-                 linspace(1, 0, n_grad_freq + 2),
-             ]
-         )[1:-1]
-         v_t = torch.cat(
-             [
-                 linspace(0, 1, n_grad_time + 1, endpoint=False),
-                 linspace(1, 0, n_grad_time + 2),
-             ]
-         )[1:-1]
-         smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0)
- 
-         return smoothing_filter / smoothing_filter.sum()
- 
-     @torch.no_grad()
-     def _stationary_mask(
-         self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None
-     ) -> torch.Tensor:
-         """
-         Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram.
- 
-         Arguments:
-             X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram.
-             xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db.
- 
-         Returns:
-             sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold
-                 are set to 1, and the rest are set to 0.
-         """
-         if xn is not None:
-             XN = torch.stft(
-                 xn,
-                 n_fft=self.n_fft,
-                 hop_length=self.hop_length,
-                 win_length=self.win_length,
-                 return_complex=True,
-                 pad_mode="constant",
-                 center=True,
-                 window=torch.hann_window(self.win_length).to(xn.device),
-             )
- 
-             XN_db = amp_to_db(XN).to(dtype=X_db.dtype)
-         else:
-             XN_db = X_db
- 
-         # calculate mean and standard deviation over time for each frequency bin
-         std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1)
- 
-         # compute noise threshold
-         noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary
- 
-         # create binary mask by thresholding the spectrogram
-         sig_mask = X_db > noise_thresh.unsqueeze(2)
-         return sig_mask
- 
-     @torch.no_grad()
-     def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor:
-         """
-         Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram.
- 
-         Arguments:
-             X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram.
- 
-         Returns:
-             sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold
-                 are set to 1, and the rest are set to 0.
-         """
-         X_smoothed = (
-             conv1d(
-                 X_abs.reshape(-1, 1, X_abs.shape[-1]),
-                 torch.ones(
-                     self.n_movemean_nonstationary,
-                     dtype=X_abs.dtype,
-                     device=X_abs.device,
-                 ).view(1, 1, -1),
-                 padding="same",
-             ).view(X_abs.shape)
-             / self.n_movemean_nonstationary
-         )
- 
-         # Compute slowness ratio and apply temperature sigmoid
-         slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6)
-         sig_mask = temperature_sigmoid(
-             slowness_ratio, self.n_thresh_nonstationary, self.temp_coeff_nonstationary
-         )
- 
-         return sig_mask
- 
-     def forward(
-         self, x: torch.Tensor, xn: Optional[torch.Tensor] = None
-     ) -> torch.Tensor:
-         """
-         Apply the proposed algorithm to the input signal.
- 
-         Arguments:
-             x (torch.Tensor): The input audio signal, with shape (batch_size, signal_length).
-             xn (Optional[torch.Tensor]): The noise signal used for stationary noise reduction. If `None`, the input
-                 signal is used as the noise signal. Default: `None`.
- 
-         Returns:
-             torch.Tensor: The denoised audio signal, with the same shape as the input signal.
-         """
-         assert x.ndim == 2
-         if x.shape[-1] < self.win_length * 2:
-             raise Exception(f"x must be bigger than {self.win_length * 2}")
- 
-         assert xn is None or xn.ndim == 1 or xn.ndim == 2
-         if xn is not None and xn.shape[-1] < self.win_length * 2:
-             raise Exception(f"xn must be bigger than {self.win_length * 2}")
- 
-         # Compute short-time Fourier transform (STFT)
-         X = torch.stft(
-             x,
-             n_fft=self.n_fft,
-             hop_length=self.hop_length,
-             win_length=self.win_length,
-             return_complex=True,
-             pad_mode="constant",
-             center=True,
-             window=torch.hann_window(self.win_length).to(x.device),
-         )
- 
-         # Compute signal mask based on stationary or nonstationary assumptions
-         if self.nonstationary:
-             sig_mask = self._nonstationary_mask(X.abs())
-         else:
-             sig_mask = self._stationary_mask(amp_to_db(X), xn)
- 
-         # Propagate decrease in signal power
-         sig_mask = self.prop_decrease * (sig_mask * 1.0 - 1.0) + 1.0
- 
-         # Smooth signal mask with 2D convolution
-         if self.smoothing_filter is not None:
-             sig_mask = conv2d(
-                 sig_mask.unsqueeze(1),
-                 self.smoothing_filter.to(sig_mask.dtype),
-                 padding="same",
-             )
- 
-         # Apply signal mask to STFT magnitude and phase components
-         Y = X * sig_mask.squeeze(1)
- 
-         # Inverse STFT to obtain time-domain signal
-         y = torch.istft(
-             Y,
-             n_fft=self.n_fft,
-             hop_length=self.hop_length,
-             win_length=self.win_length,
-             center=True,
-             window=torch.hann_window(self.win_length).to(Y.device),
-         )
- 
-         return y.to(dtype=x.dtype)
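
Note: a typical use of the TorchGate module above, per its forward() contract (a float tensor of shape (batch, samples)); the sample rate and tensors here are placeholders:

    import torch

    tg = TorchGate(sr=16000, nonstationary=False)   # stationary spectral gating
    noisy = torch.randn(1, 32000)                   # 2 s of dummy audio, shape (batch, samples)
    noise_clip = torch.randn(1, 16000)              # optional noise-only reference for the threshold
    denoised = tg(noisy, xn=noise_clip)             # same shape and dtype as the input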
 
spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
- 
- gr.Interface.load("models/cardiffnlp/twitter-roberta-base-sentiment-latest").launch()
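
Note: gr.Interface.load proxies the hosted model; a roughly equivalent local call, assuming the transformers library is installed, would be:

    from transformers import pipeline

    clf = pipeline("sentiment-analysis",
                   model="cardiffnlp/twitter-roberta-base-sentiment-latest")
    print(clf("I love this!"))   # e.g. [{'label': 'positive', 'score': 0.98...}]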
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py DELETED
@@ -1,29 +0,0 @@
- import torch
- 
- 
- def squeeze(x, x_mask=None, n_sqz=2):
-     b, c, t = x.size()
- 
-     t = (t // n_sqz) * n_sqz
-     x = x[:, :, :t]
-     x_sqz = x.view(b, c, t // n_sqz, n_sqz)
-     x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
- 
-     if x_mask is not None:
-         x_mask = x_mask[:, :, n_sqz - 1::n_sqz]
-     else:
-         x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
-     return x_sqz * x_mask, x_mask
- 
- 
- def unsqueeze(x, x_mask=None, n_sqz=2):
-     b, c, t = x.size()
- 
-     x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
-     x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
- 
-     if x_mask is not None:
-         x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
-     else:
-         x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
-     return x_unsqz * x_mask, x_mask
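
Note: squeeze trades time resolution for channels and unsqueeze inverts it; a quick shape round-trip (random values, mask defaulted to all ones):

    import torch

    x = torch.randn(2, 4, 10)               # (batch, channels, time)
    y, y_mask = squeeze(x, n_sqz=2)         # y: (2, 8, 5)
    z, _ = unsqueeze(y, y_mask, n_sqz=2)    # z: (2, 4, 10), equal to x up to the trimmed tail
    assert z.shape == x.shape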
 
spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/pytorch/main.py DELETED
@@ -1,378 +0,0 @@
- import os
- import sys
- sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
- import numpy as np
- import argparse
- import time
- import logging
- 
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
- import torch.utils.data
- 
- from utilities import (create_folder, get_filename, create_logging, Mixup,
-     StatisticsContainer)
- from models import (PVT, PVT2, PVT_lr, PVT_nopretrain, PVT_2layer, Cnn14, Cnn14_no_specaug, Cnn14_no_dropout,
-     Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128,
-     Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19,
-     Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14,
-     Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128,
-     Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn6_Transformer, GLAM, GLAM2, GLAM3, Cnn4, EAT)
- #from models_test import (PVT_test)
- #from models1 import (PVT1)
- #from models_vig import (VIG, VIG2)
- #from models_vvt import (VVT)
- #from models2 import (MPVIT, MPVIT2)
- #from models_reshape import (PVT_reshape, PVT_tscam)
- #from models_swin import (Swin, Swin_nopretrain)
- #from models_swin2 import (Swin2)
- #from models_van import (Van, Van_tiny)
- #from models_focal import (Focal)
- #from models_cross import (Cross)
- #from models_cov import (Cov)
- #from models_cnn import (Cnn_light)
- #from models_twins import (Twins)
- #from models_cmt import (Cmt, Cmt1)
- #from models_shunted import (Shunted)
- #from models_quadtree import (Quadtree, Quadtree2, Quadtree_nopretrain)
- #from models_davit import (Davit_tscam, Davit, Davit_nopretrain)
- from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
-     do_mixup)
- from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler,
-     AlternateTrainSampler, EvaluateSampler, collate_fn)
- from evaluate import Evaluator
- import config
- from losses import get_loss_func
- 
- 
- def train(args):
-     """Train AudioSet tagging model.
- 
-     Args:
-       dataset_dir: str
-       workspace: str
-       data_type: 'balanced_train' | 'full_train'
-       window_size: int
-       hop_size: int
-       mel_bins: int
-       model_type: str
-       loss_type: 'clip_bce'
-       balanced: 'none' | 'balanced' | 'alternate'
-       augmentation: 'none' | 'mixup'
-       batch_size: int
-       learning_rate: float
-       resume_iteration: int
-       early_stop: int
-       accumulation_steps: int
-       cuda: bool
-     """
- 
-     # Arguments & parameters
-     workspace = args.workspace
-     data_type = args.data_type
-     sample_rate = args.sample_rate
-     window_size = args.window_size
-     hop_size = args.hop_size
-     mel_bins = args.mel_bins
-     fmin = args.fmin
-     fmax = args.fmax
-     model_type = args.model_type
-     loss_type = args.loss_type
-     balanced = args.balanced
-     augmentation = args.augmentation
-     batch_size = args.batch_size
-     learning_rate = args.learning_rate
-     resume_iteration = args.resume_iteration
-     early_stop = args.early_stop
-     device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
-     filename = args.filename
- 
-     num_workers = 8
-     clip_samples = config.clip_samples
-     classes_num = config.classes_num
-     loss_func = get_loss_func(loss_type)
- 
-     # Paths
-     black_list_csv = None
- 
-     train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
-         '{}.h5'.format(data_type))
- 
-     eval_bal_indexes_hdf5_path = os.path.join(workspace,
-         'hdf5s', 'indexes', 'balanced_train.h5')
- 
-     eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
-         'eval.h5')
- 
-     checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
-         'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
-         sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
-         'data_type={}'.format(data_type), model_type,
-         'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
-         'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
-     create_folder(checkpoints_dir)
- 
-     statistics_path = os.path.join(workspace, 'statistics', filename,
-         'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
-         sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
-         'data_type={}'.format(data_type), model_type,
-         'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
-         'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
-         'statistics.pkl')
-     create_folder(os.path.dirname(statistics_path))
- 
-     logs_dir = os.path.join(workspace, 'logs', filename,
-         'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
-         sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
-         'data_type={}'.format(data_type), model_type,
-         'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
-         'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
- 
-     create_logging(logs_dir, filemode='w')
-     logging.info(args)
- 
-     if 'cuda' in str(device):
-         logging.info('Using GPU.')
-         device = 'cuda'
-     else:
-         logging.info('Using CPU. Set --cuda flag to use GPU.')
-         device = 'cpu'
- 
-     # Model
-     Model = eval(model_type)
-     model = Model(sample_rate=sample_rate, window_size=window_size,
-         hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
-         classes_num=classes_num)
-     total = sum(p.numel() for p in model.parameters())
-     print("Total params: %.2fM" % (total/1e6))
-     logging.info("Total params: %.2fM" % (total/1e6))
-     #params_num = count_parameters(model)
-     # flops_num = count_flops(model, clip_samples)
-     #logging.info('Parameters num: {}'.format(params_num))
-     # logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))
- 
-     # Dataset will be used by DataLoader later. Dataset takes a meta as input
-     # and return a waveform and a target.
-     dataset = AudioSetDataset(sample_rate=sample_rate)
- 
-     # Train sampler
-     if balanced == 'none':
-         Sampler = TrainSampler
-     elif balanced == 'balanced':
-         Sampler = BalancedTrainSampler
-     elif balanced == 'alternate':
-         Sampler = AlternateTrainSampler
- 
-     train_sampler = Sampler(
-         indexes_hdf5_path=train_indexes_hdf5_path,
-         batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size,
-         black_list_csv=black_list_csv)
- 
-     # Evaluate sampler
-     eval_bal_sampler = EvaluateSampler(
-         indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size)
- 
-     eval_test_sampler = EvaluateSampler(
-         indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size)
- 
-     # Data loader
-     train_loader = torch.utils.data.DataLoader(dataset=dataset,
-         batch_sampler=train_sampler, collate_fn=collate_fn,
-         num_workers=num_workers, pin_memory=True)
- 
-     eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset,
-         batch_sampler=eval_bal_sampler, collate_fn=collate_fn,
-         num_workers=num_workers, pin_memory=True)
- 
-     eval_test_loader = torch.utils.data.DataLoader(dataset=dataset,
-         batch_sampler=eval_test_sampler, collate_fn=collate_fn,
-         num_workers=num_workers, pin_memory=True)
-     mix = 0.5
-     if 'mixup' in augmentation:
-         mixup_augmenter = Mixup(mixup_alpha=mix)
-         print(mix)
-         logging.info(mix)
- 
-     # Evaluator
-     evaluator = Evaluator(model=model)
- 
-     # Statistics
-     statistics_container = StatisticsContainer(statistics_path)
- 
-     # Optimizer
-     optimizer = optim.AdamW(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.05, amsgrad=True)
-     scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, min_lr=1e-06, verbose=True)
-     train_bgn_time = time.time()
- 
-     # Resume training
-     if resume_iteration > 0:
-         resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
-             'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
-             sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
-             'data_type={}'.format(data_type), model_type,
-             'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
-             'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
-             '{}_iterations.pth'.format(resume_iteration))
- 
-         logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
-         checkpoint = torch.load(resume_checkpoint_path)
-         model.load_state_dict(checkpoint['model'])
-         train_sampler.load_state_dict(checkpoint['sampler'])
-         statistics_container.load_state_dict(resume_iteration)
-         iteration = checkpoint['iteration']
- 
-     else:
-         iteration = 0
- 
-     # Parallel
-     print('GPU number: {}'.format(torch.cuda.device_count()))
-     model = torch.nn.DataParallel(model)
- 
-     if 'cuda' in str(device):
-         model.to(device)
- 
-     if resume_iteration:
-         optimizer.load_state_dict(checkpoint['optimizer'])
-         scheduler.load_state_dict(checkpoint['scheduler'])
-         print(optimizer.state_dict()['param_groups'][0]['lr'])
- 
-     time1 = time.time()
- 
-     for batch_data_dict in train_loader:
-         """batch_data_dict: {
-             'audio_name': (batch_size [*2 if mixup],),
-             'waveform': (batch_size [*2 if mixup], clip_samples),
-             'target': (batch_size [*2 if mixup], classes_num),
-             (ifexist) 'mixup_lambda': (batch_size * 2,)}
-         """
- 
-         # Evaluate
-         if (iteration % 2000 == 0 and iteration >= resume_iteration) or (iteration == 0):
-             train_fin_time = time.time()
- 
-             bal_statistics = evaluator.evaluate(eval_bal_loader)
-             test_statistics = evaluator.evaluate(eval_test_loader)
- 
-             logging.info('Validate bal mAP: {:.3f}'.format(
-                 np.mean(bal_statistics['average_precision'])))
- 
-             logging.info('Validate test mAP: {:.3f}'.format(
-                 np.mean(test_statistics['average_precision'])))
- 
-             statistics_container.append(iteration, bal_statistics, data_type='bal')
-             statistics_container.append(iteration, test_statistics, data_type='test')
-             statistics_container.dump()
- 
-             train_time = train_fin_time - train_bgn_time
-             validate_time = time.time() - train_fin_time
- 
-             logging.info(
-                 'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
-                 ''.format(iteration, train_time, validate_time))
- 
-             logging.info('------------------------------------')
- 
-             train_bgn_time = time.time()
- 
-         # Save model
-         if iteration % 2000 == 0:
-             checkpoint = {
-                 'iteration': iteration,
-                 'model': model.module.state_dict(),
-                 'sampler': train_sampler.state_dict(),
-                 'optimizer': optimizer.state_dict(),
-                 'scheduler': scheduler.state_dict()}
- 
-             checkpoint_path = os.path.join(
-                 checkpoints_dir, '{}_iterations.pth'.format(iteration))
- 
-             torch.save(checkpoint, checkpoint_path)
-             logging.info('Model saved to {}'.format(checkpoint_path))
- 
-         # Mixup lambda
-         if 'mixup' in augmentation:
-             batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
-                 batch_size=len(batch_data_dict['waveform']))
- 
-         # Move data to device
-         for key in batch_data_dict.keys():
-             batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
- 
-         # Forward
-         model.train()
- 
-         if 'mixup' in augmentation:
-             batch_output_dict = model(batch_data_dict['waveform'],
-                 batch_data_dict['mixup_lambda'])
-             """{'clipwise_output': (batch_size, classes_num), ...}"""
- 
-             batch_target_dict = {'target': do_mixup(batch_data_dict['target'],
-                 batch_data_dict['mixup_lambda'])}
-             """{'target': (batch_size, classes_num)}"""
-         else:
-             batch_output_dict = model(batch_data_dict['waveform'], None)
-             """{'clipwise_output': (batch_size, classes_num), ...}"""
- 
-             batch_target_dict = {'target': batch_data_dict['target']}
-             """{'target': (batch_size, classes_num)}"""
- 
-         # Loss
-         loss = loss_func(batch_output_dict, batch_target_dict)
-         # Backward
-         loss.backward()
- 
-         optimizer.step()
-         optimizer.zero_grad()
- 
-         if iteration % 10 == 0:
-             print(iteration, loss)
-             #print('--- Iteration: {}, train time: {:.3f} s / 10 iterations ---'\
-             #    .format(iteration, time.time() - time1))
-             #time1 = time.time()
- 
-         if iteration % 2000 == 0:
-             scheduler.step(np.mean(test_statistics['average_precision']))
-             print(optimizer.state_dict()['param_groups'][0]['lr'])
-             logging.info(optimizer.state_dict()['param_groups'][0]['lr'])
- 
-         # Stop learning
-         if iteration == early_stop:
-             break
- 
-         iteration += 1
- 
- 
- if __name__ == '__main__':
- 
-     parser = argparse.ArgumentParser(description='Example of parser. ')
-     subparsers = parser.add_subparsers(dest='mode')
- 
-     parser_train = subparsers.add_parser('train')
-     parser_train.add_argument('--workspace', type=str, required=True)
-     parser_train.add_argument('--data_type', type=str, default='full_train', choices=['balanced_train', 'full_train'])
-     parser_train.add_argument('--sample_rate', type=int, default=32000)
-     parser_train.add_argument('--window_size', type=int, default=1024)
-     parser_train.add_argument('--hop_size', type=int, default=320)
-     parser_train.add_argument('--mel_bins', type=int, default=64)
-     parser_train.add_argument('--fmin', type=int, default=50)
-     parser_train.add_argument('--fmax', type=int, default=14000)
-     parser_train.add_argument('--model_type', type=str, required=True)
-     parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
-     parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])
-     parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup'])
-     parser_train.add_argument('--batch_size', type=int, default=32)
-     parser_train.add_argument('--learning_rate', type=float, default=1e-3)
-     parser_train.add_argument('--resume_iteration', type=int, default=0)
-     parser_train.add_argument('--early_stop', type=int, default=1000000)
-     parser_train.add_argument('--cuda', action='store_true', default=False)
- 
-     args = parser.parse_args()
-     args.filename = get_filename(__file__)
- 
-     if args.mode == 'train':
-         train(args)
- 
-     else:
-         raise Exception('Error argument!')
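
Note: when augmentation includes 'mixup', the sampler above doubles the batch and the loop mixes consecutive clips via Mixup.get_lambda and do_mixup; the script would be launched with e.g. python main.py train --workspace WORK --model_type Cnn14 --cuda. A self-contained sketch of that pairing convention in plain PyTorch (an illustrative re-implementation, not the repo's utilities):

    import torch

    def mixup_pairs(x: torch.Tensor, alpha: float = 0.5):
        """Mix pairs (0,1), (2,3), ... of a doubled batch; returns the mixed half-batch and lambdas."""
        lam = torch.distributions.Beta(alpha, alpha).sample((x.shape[0] // 2,)).to(x.device)
        shape = (-1,) + (1,) * (x.dim() - 1)
        return lam.view(shape) * x[0::2] + (1 - lam.view(shape)) * x[1::2], lam

    waves = torch.randn(8, 32000)        # doubled batch of 1 s clips at 32 kHz
    mixed, lam = mixup_pairs(waves)      # (4, 32000) mixed waveforms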
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/inference_waveform.py DELETED
@@ -1,102 +0,0 @@
- import sys
- import os
- import librosa
- import numpy as np
- import torch
- import audio_to_text.captioning.models
- import audio_to_text.captioning.models.encoder
- import audio_to_text.captioning.models.decoder
- import audio_to_text.captioning.utils.train_util as train_util
- 
- 
- def load_model(config, checkpoint):
-     ckpt = torch.load(checkpoint, "cpu")
-     encoder_cfg = config["model"]["encoder"]
-     encoder = train_util.init_obj(
-         audio_to_text.captioning.models.encoder,
-         encoder_cfg
-     )
-     if "pretrained" in encoder_cfg:
-         pretrained = encoder_cfg["pretrained"]
-         train_util.load_pretrained_model(encoder,
-                                          pretrained,
-                                          sys.stdout.write)
-     decoder_cfg = config["model"]["decoder"]
-     if "vocab_size" not in decoder_cfg["args"]:
-         decoder_cfg["args"]["vocab_size"] = len(ckpt["vocabulary"])
-     decoder = train_util.init_obj(
-         audio_to_text.captioning.models.decoder,
-         decoder_cfg
-     )
-     if "word_embedding" in decoder_cfg:
-         decoder.load_word_embedding(**decoder_cfg["word_embedding"])
-     if "pretrained" in decoder_cfg:
-         pretrained = decoder_cfg["pretrained"]
-         train_util.load_pretrained_model(decoder,
-                                          pretrained,
-                                          sys.stdout.write)
-     model = train_util.init_obj(audio_to_text.captioning.models, config["model"],
-                                 encoder=encoder, decoder=decoder)
-     train_util.load_pretrained_model(model, ckpt)
-     model.eval()
-     return {
-         "model": model,
-         "vocabulary": ckpt["vocabulary"]
-     }
- 
- 
- def decode_caption(word_ids, vocabulary):
-     candidate = []
-     for word_id in word_ids:
-         word = vocabulary[word_id]
-         if word == "<end>":
-             break
-         elif word == "<start>":
-             continue
-         candidate.append(word)
-     candidate = " ".join(candidate)
-     return candidate
- 
- 
- class AudioCapModel(object):
-     def __init__(self, weight_dir, device='cpu'):
-         config = os.path.join(weight_dir, 'config.yaml')
-         self.config = train_util.parse_config_or_kwargs(config)
-         checkpoint = os.path.join(weight_dir, 'swa.pth')
-         resumed = load_model(self.config, checkpoint)
-         model = resumed["model"]
-         self.vocabulary = resumed["vocabulary"]
-         self.model = model.to(device)
-         self.device = device
- 
-     def caption(self, audio_list):
-         if isinstance(audio_list, np.ndarray):
-             audio_list = [audio_list]
-         elif isinstance(audio_list, str):
-             audio_list = [librosa.load(audio_list, sr=32000)[0]]
- 
-         captions = []
-         for wav in audio_list:
-             inputwav = torch.as_tensor(wav).float().unsqueeze(0).to(self.device)
-             wav_len = torch.LongTensor([len(wav)])
-             input_dict = {
-                 "mode": "inference",
-                 "wav": inputwav,
-                 "wav_len": wav_len,
-                 "specaug": False,
-                 "sample_method": "beam",
-             }
-             print(input_dict)
-             out_dict = self.model(input_dict)
-             caption_batch = [decode_caption(seq, self.vocabulary) for seq in \
-                              out_dict["seq"].cpu().numpy()]
-             captions.extend(caption_batch)
-         return captions
- 
- 
-     def __call__(self, audio_list):
-         return self.caption(audio_list)
- 
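
Note: typical use of the AudioCapModel class above; the weight directory is a placeholder and must contain the config.yaml and swa.pth files that __init__ loads:

    model = AudioCapModel("pretrained/audiocaption", device="cpu")
    captions = model("example.wav")   # a str path is loaded at 32 kHz; ndarrays are accepted too
    print(captions[0])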
 
spaces/AIWaves/SOP_Generation-single/gen_utils.py DELETED
@@ -1,43 +0,0 @@
- def get_agents(design_states):
-     final_agents = {}
-     role = design_states[0]["role"]
-     style = design_states[0]["style"]
-     agent_name = "_".join(role.split(" "))
-     final_agents[agent_name] = {"style":style,"roles":{}}
-     final_agents["User"] = {"style":"","roles":{}}
-     for design_state in design_states:
-         final_agents[agent_name]["roles"][design_state["state_name"]] = agent_name
-         final_agents["User"]["roles"][design_state["state_name"]] = "User"
-     return final_agents
- 
- def get_relations(design_states):
-     relations = {}
-     n = len(design_states)
-     for i in range(n):
-         relations[design_states[i]["state_name"]] = {}
-         relations[design_states[i]["state_name"]]["0"] = design_states[i]["state_name"]
-         relations[design_states[i]["state_name"]]["1"] = design_states[i+1]["state_name"] if i!=n-1 else "end_state"
-     return relations
- 
- 
- def gen_states(design_states):
-     states = {"end_state":{
-         "agent_states":{}
-     }}
-     for design_state in design_states:
-         state_name = design_state["state_name"]
-         role = design_state["role"]
-         agent_name = "_".join(role.split(" "))
-         states[state_name] = {"controller":{"controller_type": "order", "max_chat_nums" : 1000,"judge_system_prompt":design_state["judge"],"judge_last_prompt":"Please contact the above to extract <end> and </end>. Do not perform additional output. Please strictly follow the above format for output! Remember, please strictly follow the above format for output!"}}
-         states[state_name]["agent_states"] = {
-             agent_name : {
-                 "role" : {"role" : role},
-                 "task" : {"task" : design_state["task"]},
-                 "rule" : {"rule" : design_state["rule"]}
-             },
-             "User" : {
-             }
-         }
- 
-     return states
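
Note: all three helpers above consume the same design_states list; a minimal example of the expected shape (field names inferred from the lookups above, values are placeholders):

    design_states = [
        {
            "state_name": "gather_requirements",
            "role": "customer service agent",
            "style": "friendly",
            "task": "collect the user's requirements",
            "rule": "ask one question at a time",
            "judge": "judge prompt deciding when this state is finished",
        },
        # one dict per state, in execution order
    ]

    agents = get_agents(design_states)        # {'customer_service_agent': ..., 'User': ...}
    relations = get_relations(design_states)  # each state's "1" edge points to the next state
    states = gen_states(design_states)        # per-state controller plus agent_states config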
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py DELETED
@@ -1,2861 +0,0 @@
1
- default_scope = 'mmpose'
2
- default_hooks = dict(
3
- timer=dict(type='IterTimerHook'),
4
- logger=dict(type='LoggerHook', interval=50),
5
- param_scheduler=dict(type='ParamSchedulerHook'),
6
- checkpoint=dict(
7
- type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
8
- sampler_seed=dict(type='DistSamplerSeedHook'),
9
- visualization=dict(type='PoseVisualizationHook', enable=False))
10
- custom_hooks = [dict(type='SyncBuffersHook')]
11
- env_cfg = dict(
12
- cudnn_benchmark=False,
13
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
14
- dist_cfg=dict(backend='nccl'))
15
- vis_backends = [dict(type='LocalVisBackend')]
16
- visualizer = dict(
17
- type='PoseLocalVisualizer',
18
- vis_backends=[dict(type='LocalVisBackend'),
19
- dict(type='WandbVisBackend')],
20
- name='visualizer')
21
- log_processor = dict(
22
- type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
23
- log_level = 'INFO'
24
- load_from = None
25
- resume = False
26
- backend_args = dict(backend='local')
27
- train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10)
28
- val_cfg = dict()
29
- test_cfg = dict()
30
- colors = dict(
31
- sss=[255, 128, 0],
32
- lss=[255, 0, 128],
33
- sso=[128, 0, 255],
34
- lso=[0, 128, 255],
35
- vest=[0, 128, 128],
36
- sling=[0, 0, 128],
37
- shorts=[128, 128, 128],
38
- trousers=[128, 0, 128],
39
- skirt=[64, 128, 128],
40
- ssd=[64, 64, 128],
41
- lsd=[128, 64, 0],
42
- vd=[128, 64, 255],
43
- sd=[128, 64, 0])
44
- dataset_info = dict(
45
- dataset_name='deepfashion2',
46
- paper_info=dict(
47
- author=
48
- 'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
49
- title=
50
- 'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
51
- container=
52
- 'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
53
- year='2019',
54
- homepage='https://github.com/switchablenorms/DeepFashion2'),
55
- keypoint_info=dict({
56
- 0:
57
- dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
58
- 1:
59
- dict(
60
- name='sss_kpt2',
61
- id=1,
62
- color=[255, 128, 0],
63
- type='',
64
- swap='sss_kpt6'),
65
- 2:
66
- dict(
67
- name='sss_kpt3',
68
- id=2,
69
- color=[255, 128, 0],
70
- type='',
71
- swap='sss_kpt5'),
72
- 3:
73
- dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
74
- 4:
75
- dict(
76
- name='sss_kpt5',
77
- id=4,
78
- color=[255, 128, 0],
79
- type='',
80
- swap='sss_kpt3'),
81
- 5:
82
- dict(
83
- name='sss_kpt6',
84
- id=5,
85
- color=[255, 128, 0],
86
- type='',
87
- swap='sss_kpt2'),
88
- 6:
89
- dict(
90
- name='sss_kpt7',
91
- id=6,
92
- color=[255, 128, 0],
93
- type='',
94
- swap='sss_kpt25'),
95
- 7:
96
- dict(
97
- name='sss_kpt8',
98
- id=7,
99
- color=[255, 128, 0],
100
- type='',
101
- swap='sss_kpt24'),
102
- 8:
103
- dict(
104
- name='sss_kpt9',
105
- id=8,
106
- color=[255, 128, 0],
107
- type='',
108
- swap='sss_kpt23'),
109
- 9:
110
- dict(
111
- name='sss_kpt10',
112
- id=9,
113
- color=[255, 128, 0],
114
- type='',
115
- swap='sss_kpt22'),
116
- 10:
117
- dict(
118
- name='sss_kpt11',
119
- id=10,
120
- color=[255, 128, 0],
121
- type='',
122
- swap='sss_kpt21'),
123
- 11:
124
- dict(
125
- name='sss_kpt12',
126
- id=11,
127
- color=[255, 128, 0],
128
- type='',
129
- swap='sss_kpt20'),
130
- 12:
131
- dict(
132
- name='sss_kpt13',
133
- id=12,
134
- color=[255, 128, 0],
135
- type='',
136
- swap='sss_kpt19'),
137
- 13:
138
- dict(
139
- name='sss_kpt14',
140
- id=13,
141
- color=[255, 128, 0],
142
- type='',
143
- swap='sss_kpt18'),
144
- 14:
145
- dict(
146
- name='sss_kpt15',
147
- id=14,
148
- color=[255, 128, 0],
149
- type='',
150
- swap='sss_kpt17'),
151
- 15:
152
- dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
153
- 16:
154
- dict(
155
- name='sss_kpt17',
156
- id=16,
157
- color=[255, 128, 0],
158
- type='',
159
- swap='sss_kpt15'),
160
- 17:
161
- dict(
162
- name='sss_kpt18',
163
- id=17,
164
- color=[255, 128, 0],
165
- type='',
166
- swap='sss_kpt14'),
167
- 18:
168
- dict(
169
- name='sss_kpt19',
170
- id=18,
171
- color=[255, 128, 0],
172
- type='',
173
- swap='sss_kpt13'),
174
- 19:
175
- dict(
176
- name='sss_kpt20',
177
- id=19,
178
- color=[255, 128, 0],
179
- type='',
180
- swap='sss_kpt12'),
181
- 20:
182
- dict(
183
- name='sss_kpt21',
184
- id=20,
185
- color=[255, 128, 0],
186
- type='',
187
- swap='sss_kpt11'),
188
- 21:
189
- dict(
190
- name='sss_kpt22',
191
- id=21,
192
- color=[255, 128, 0],
193
- type='',
194
- swap='sss_kpt10'),
195
- 22:
196
- dict(
197
- name='sss_kpt23',
198
- id=22,
199
- color=[255, 128, 0],
200
- type='',
201
- swap='sss_kpt9'),
202
- 23:
203
- dict(
204
- name='sss_kpt24',
205
- id=23,
206
- color=[255, 128, 0],
207
- type='',
208
- swap='sss_kpt8'),
209
- 24:
210
- dict(
211
- name='sss_kpt25',
212
- id=24,
213
- color=[255, 128, 0],
214
- type='',
215
- swap='sss_kpt7'),
216
- 25:
217
- dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
218
- 26:
219
- dict(
220
- name='lss_kpt2',
221
- id=26,
222
- color=[255, 0, 128],
223
- type='',
224
- swap='lss_kpt6'),
225
- 27:
226
- dict(
227
- name='lss_kpt3',
228
- id=27,
229
- color=[255, 0, 128],
230
- type='',
231
- swap='lss_kpt5'),
232
- 28:
233
- dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
234
- 29:
235
- dict(
236
- name='lss_kpt5',
237
- id=29,
238
- color=[255, 0, 128],
239
- type='',
240
- swap='lss_kpt3'),
241
- 30:
242
- dict(
243
- name='lss_kpt6',
244
- id=30,
245
- color=[255, 0, 128],
246
- type='',
247
- swap='lss_kpt2'),
248
- 31:
249
- dict(
250
- name='lss_kpt7',
251
- id=31,
252
- color=[255, 0, 128],
253
- type='',
254
- swap='lss_kpt33'),
255
- 32:
256
- dict(
257
- name='lss_kpt8',
258
- id=32,
259
- color=[255, 0, 128],
260
- type='',
261
- swap='lss_kpt32'),
262
- 33:
263
- dict(
264
- name='lss_kpt9',
265
- id=33,
266
- color=[255, 0, 128],
267
- type='',
268
- swap='lss_kpt31'),
269
- 34:
270
- dict(
271
- name='lss_kpt10',
272
- id=34,
273
- color=[255, 0, 128],
274
- type='',
275
- swap='lss_kpt30'),
276
- 35:
277
- dict(
278
- name='lss_kpt11',
279
- id=35,
280
- color=[255, 0, 128],
281
- type='',
282
- swap='lss_kpt29'),
283
- 36:
284
- dict(
285
- name='lss_kpt12',
286
- id=36,
287
- color=[255, 0, 128],
288
- type='',
289
- swap='lss_kpt28'),
290
- 37:
291
- dict(
292
- name='lss_kpt13',
293
- id=37,
294
- color=[255, 0, 128],
295
- type='',
296
- swap='lss_kpt27'),
297
- 38:
298
- dict(
299
- name='lss_kpt14',
300
- id=38,
301
- color=[255, 0, 128],
302
- type='',
303
- swap='lss_kpt26'),
304
- 39:
305
- dict(
306
- name='lss_kpt15',
307
- id=39,
308
- color=[255, 0, 128],
309
- type='',
310
- swap='lss_kpt25'),
311
- 40:
312
- dict(
313
- name='lss_kpt16',
314
- id=40,
315
- color=[255, 0, 128],
316
- type='',
317
- swap='lss_kpt24'),
318
- 41:
319
- dict(
320
- name='lss_kpt17',
321
- id=41,
322
- color=[255, 0, 128],
323
- type='',
324
- swap='lss_kpt23'),
325
- 42:
326
- dict(
327
- name='lss_kpt18',
328
- id=42,
329
- color=[255, 0, 128],
330
- type='',
331
- swap='lss_kpt22'),
332
- 43:
333
- dict(
334
- name='lss_kpt19',
335
- id=43,
336
- color=[255, 0, 128],
337
- type='',
338
- swap='lss_kpt21'),
339
- 44:
340
- dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
341
- 45:
342
- dict(
343
- name='lss_kpt21',
344
- id=45,
345
- color=[255, 0, 128],
346
- type='',
347
- swap='lss_kpt19'),
348
- 46:
349
- dict(
350
- name='lss_kpt22',
351
- id=46,
352
- color=[255, 0, 128],
353
- type='',
354
- swap='lss_kpt18'),
355
- 47:
356
- dict(
357
- name='lss_kpt23',
358
- id=47,
359
- color=[255, 0, 128],
360
- type='',
361
- swap='lss_kpt17'),
362
- 48:
363
- dict(
364
- name='lss_kpt24',
365
- id=48,
366
- color=[255, 0, 128],
367
- type='',
368
- swap='lss_kpt16'),
369
- 49:
370
- dict(
371
- name='lss_kpt25',
372
- id=49,
373
- color=[255, 0, 128],
374
- type='',
375
- swap='lss_kpt15'),
376
- 50:
377
- dict(
378
- name='lss_kpt26',
379
- id=50,
380
- color=[255, 0, 128],
381
- type='',
382
- swap='lss_kpt14'),
383
- 51:
384
- dict(
385
- name='lss_kpt27',
386
- id=51,
387
- color=[255, 0, 128],
388
- type='',
389
- swap='lss_kpt13'),
390
- 52:
391
- dict(
392
- name='lss_kpt28',
393
- id=52,
394
- color=[255, 0, 128],
395
- type='',
396
- swap='lss_kpt12'),
397
- 53:
398
- dict(
399
- name='lss_kpt29',
400
- id=53,
401
- color=[255, 0, 128],
402
- type='',
403
- swap='lss_kpt11'),
404
- 54:
405
- dict(
406
- name='lss_kpt30',
407
- id=54,
408
- color=[255, 0, 128],
409
- type='',
410
- swap='lss_kpt10'),
411
- 55:
412
- dict(
413
- name='lss_kpt31',
414
- id=55,
415
- color=[255, 0, 128],
416
- type='',
417
- swap='lss_kpt9'),
418
- 56:
419
- dict(
420
- name='lss_kpt32',
421
- id=56,
422
- color=[255, 0, 128],
423
- type='',
424
- swap='lss_kpt8'),
425
- 57:
426
- dict(
427
- name='lss_kpt33',
428
- id=57,
429
- color=[255, 0, 128],
430
- type='',
431
- swap='lss_kpt7'),
432
- 58:
433
- dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
434
- 59:
435
- dict(
436
- name='sso_kpt2',
437
- id=59,
438
- color=[128, 0, 255],
439
- type='',
440
- swap='sso_kpt26'),
441
- 60:
442
- dict(
443
- name='sso_kpt3',
444
- id=60,
445
- color=[128, 0, 255],
446
- type='',
447
- swap='sso_kpt5'),
448
- 61:
449
- dict(
450
- name='sso_kpt4',
451
- id=61,
452
- color=[128, 0, 255],
453
- type='',
454
- swap='sso_kpt6'),
455
- 62:
456
- dict(
457
- name='sso_kpt5',
458
- id=62,
459
- color=[128, 0, 255],
460
- type='',
461
- swap='sso_kpt3'),
462
- 63:
463
- dict(
464
- name='sso_kpt6',
465
- id=63,
466
- color=[128, 0, 255],
467
- type='',
468
- swap='sso_kpt4'),
469
- 64:
470
- dict(
471
- name='sso_kpt7',
472
- id=64,
473
- color=[128, 0, 255],
474
- type='',
475
- swap='sso_kpt25'),
476
- 65:
477
- dict(
478
- name='sso_kpt8',
479
- id=65,
480
- color=[128, 0, 255],
481
- type='',
482
- swap='sso_kpt24'),
483
- 66:
484
- dict(
485
- name='sso_kpt9',
486
- id=66,
487
- color=[128, 0, 255],
488
- type='',
489
- swap='sso_kpt23'),
490
- 67:
491
- dict(
492
- name='sso_kpt10',
493
- id=67,
494
- color=[128, 0, 255],
495
- type='',
496
- swap='sso_kpt22'),
497
- 68:
498
- dict(
499
- name='sso_kpt11',
500
- id=68,
501
- color=[128, 0, 255],
502
- type='',
503
- swap='sso_kpt21'),
504
- 69:
505
- dict(
506
- name='sso_kpt12',
507
- id=69,
508
- color=[128, 0, 255],
509
- type='',
510
- swap='sso_kpt20'),
511
- 70:
512
- dict(
513
- name='sso_kpt13',
514
- id=70,
515
- color=[128, 0, 255],
516
- type='',
517
- swap='sso_kpt19'),
518
- 71:
519
- dict(
520
- name='sso_kpt14',
521
- id=71,
522
- color=[128, 0, 255],
523
- type='',
524
- swap='sso_kpt18'),
525
- 72:
526
- dict(
527
- name='sso_kpt15',
528
- id=72,
529
- color=[128, 0, 255],
530
- type='',
531
- swap='sso_kpt17'),
532
- 73:
533
- dict(
534
- name='sso_kpt16',
535
- id=73,
536
- color=[128, 0, 255],
537
- type='',
538
- swap='sso_kpt29'),
539
- 74:
540
- dict(
541
- name='sso_kpt17',
542
- id=74,
543
- color=[128, 0, 255],
544
- type='',
545
- swap='sso_kpt15'),
546
- 75:
547
- dict(
548
- name='sso_kpt18',
549
- id=75,
550
- color=[128, 0, 255],
551
- type='',
552
- swap='sso_kpt14'),
553
- 76:
554
- dict(
555
- name='sso_kpt19',
556
-         id=76,
-         color=[128, 0, 255],
-         type='',
-         swap='sso_kpt13'),
-     77: dict(name='sso_kpt20', id=77, color=[128, 0, 255], type='', swap='sso_kpt12'),
-     78: dict(name='sso_kpt21', id=78, color=[128, 0, 255], type='', swap='sso_kpt11'),
-     79: dict(name='sso_kpt22', id=79, color=[128, 0, 255], type='', swap='sso_kpt10'),
-     80: dict(name='sso_kpt23', id=80, color=[128, 0, 255], type='', swap='sso_kpt9'),
-     81: dict(name='sso_kpt24', id=81, color=[128, 0, 255], type='', swap='sso_kpt8'),
-     82: dict(name='sso_kpt25', id=82, color=[128, 0, 255], type='', swap='sso_kpt7'),
-     83: dict(name='sso_kpt26', id=83, color=[128, 0, 255], type='', swap='sso_kpt2'),
-     84: dict(name='sso_kpt27', id=84, color=[128, 0, 255], type='', swap='sso_kpt30'),
-     85: dict(name='sso_kpt28', id=85, color=[128, 0, 255], type='', swap='sso_kpt31'),
-     86: dict(name='sso_kpt29', id=86, color=[128, 0, 255], type='', swap='sso_kpt16'),
-     87: dict(name='sso_kpt30', id=87, color=[128, 0, 255], type='', swap='sso_kpt27'),
-     88: dict(name='sso_kpt31', id=88, color=[128, 0, 255], type='', swap='sso_kpt28'),
-     89: dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
-     90: dict(name='lso_kpt2', id=90, color=[0, 128, 255], type='', swap='lso_kpt6'),
-     91: dict(name='lso_kpt3', id=91, color=[0, 128, 255], type='', swap='lso_kpt5'),
-     92: dict(name='lso_kpt4', id=92, color=[0, 128, 255], type='', swap='lso_kpt34'),
-     93: dict(name='lso_kpt5', id=93, color=[0, 128, 255], type='', swap='lso_kpt3'),
-     94: dict(name='lso_kpt6', id=94, color=[0, 128, 255], type='', swap='lso_kpt2'),
-     95: dict(name='lso_kpt7', id=95, color=[0, 128, 255], type='', swap='lso_kpt33'),
-     96: dict(name='lso_kpt8', id=96, color=[0, 128, 255], type='', swap='lso_kpt32'),
-     97: dict(name='lso_kpt9', id=97, color=[0, 128, 255], type='', swap='lso_kpt31'),
-     98: dict(name='lso_kpt10', id=98, color=[0, 128, 255], type='', swap='lso_kpt30'),
-     99: dict(name='lso_kpt11', id=99, color=[0, 128, 255], type='', swap='lso_kpt29'),
-     100: dict(name='lso_kpt12', id=100, color=[0, 128, 255], type='', swap='lso_kpt28'),
-     101: dict(name='lso_kpt13', id=101, color=[0, 128, 255], type='', swap='lso_kpt27'),
-     102: dict(name='lso_kpt14', id=102, color=[0, 128, 255], type='', swap='lso_kpt26'),
-     103: dict(name='lso_kpt15', id=103, color=[0, 128, 255], type='', swap='lso_kpt25'),
-     104: dict(name='lso_kpt16', id=104, color=[0, 128, 255], type='', swap='lso_kpt24'),
-     105: dict(name='lso_kpt17', id=105, color=[0, 128, 255], type='', swap='lso_kpt23'),
-     106: dict(name='lso_kpt18', id=106, color=[0, 128, 255], type='', swap='lso_kpt22'),
-     107: dict(name='lso_kpt19', id=107, color=[0, 128, 255], type='', swap='lso_kpt21'),
-     108: dict(name='lso_kpt20', id=108, color=[0, 128, 255], type='', swap='lso_kpt37'),
-     109: dict(name='lso_kpt21', id=109, color=[0, 128, 255], type='', swap='lso_kpt19'),
-     110: dict(name='lso_kpt22', id=110, color=[0, 128, 255], type='', swap='lso_kpt18'),
-     111: dict(name='lso_kpt23', id=111, color=[0, 128, 255], type='', swap='lso_kpt17'),
-     112: dict(name='lso_kpt24', id=112, color=[0, 128, 255], type='', swap='lso_kpt16'),
-     113: dict(name='lso_kpt25', id=113, color=[0, 128, 255], type='', swap='lso_kpt15'),
-     114: dict(name='lso_kpt26', id=114, color=[0, 128, 255], type='', swap='lso_kpt14'),
-     115: dict(name='lso_kpt27', id=115, color=[0, 128, 255], type='', swap='lso_kpt13'),
-     116: dict(name='lso_kpt28', id=116, color=[0, 128, 255], type='', swap='lso_kpt12'),
-     117: dict(name='lso_kpt29', id=117, color=[0, 128, 255], type='', swap='lso_kpt11'),
-     118: dict(name='lso_kpt30', id=118, color=[0, 128, 255], type='', swap='lso_kpt10'),
-     119: dict(name='lso_kpt31', id=119, color=[0, 128, 255], type='', swap='lso_kpt9'),
-     120: dict(name='lso_kpt32', id=120, color=[0, 128, 255], type='', swap='lso_kpt8'),
-     121: dict(name='lso_kpt33', id=121, color=[0, 128, 255], type='', swap='lso_kpt7'),
-     122: dict(name='lso_kpt34', id=122, color=[0, 128, 255], type='', swap='lso_kpt4'),
-     123: dict(name='lso_kpt35', id=123, color=[0, 128, 255], type='', swap='lso_kpt38'),
-     124: dict(name='lso_kpt36', id=124, color=[0, 128, 255], type='', swap='lso_kpt39'),
-     125: dict(name='lso_kpt37', id=125, color=[0, 128, 255], type='', swap='lso_kpt20'),
-     126: dict(name='lso_kpt38', id=126, color=[0, 128, 255], type='', swap='lso_kpt35'),
-     127: dict(name='lso_kpt39', id=127, color=[0, 128, 255], type='', swap='lso_kpt36'),
-     128: dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
-     129: dict(name='vest_kpt2', id=129, color=[0, 128, 128], type='', swap='vest_kpt6'),
-     130: dict(name='vest_kpt3', id=130, color=[0, 128, 128], type='', swap='vest_kpt5'),
-     131: dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
-     132: dict(name='vest_kpt5', id=132, color=[0, 128, 128], type='', swap='vest_kpt3'),
-     133: dict(name='vest_kpt6', id=133, color=[0, 128, 128], type='', swap='vest_kpt2'),
-     134: dict(name='vest_kpt7', id=134, color=[0, 128, 128], type='', swap='vest_kpt15'),
-     135: dict(name='vest_kpt8', id=135, color=[0, 128, 128], type='', swap='vest_kpt14'),
-     136: dict(name='vest_kpt9', id=136, color=[0, 128, 128], type='', swap='vest_kpt13'),
-     137: dict(name='vest_kpt10', id=137, color=[0, 128, 128], type='', swap='vest_kpt12'),
-     138: dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
-     139: dict(name='vest_kpt12', id=139, color=[0, 128, 128], type='', swap='vest_kpt10'),
-     140: dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
-     141: dict(name='vest_kpt14', id=141, color=[0, 128, 128], type='', swap='vest_kpt8'),
-     142: dict(name='vest_kpt15', id=142, color=[0, 128, 128], type='', swap='vest_kpt7'),
-     143: dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
-     144: dict(name='sling_kpt2', id=144, color=[0, 0, 128], type='', swap='sling_kpt6'),
-     145: dict(name='sling_kpt3', id=145, color=[0, 0, 128], type='', swap='sling_kpt5'),
-     146: dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
-     147: dict(name='sling_kpt5', id=147, color=[0, 0, 128], type='', swap='sling_kpt3'),
-     148: dict(name='sling_kpt6', id=148, color=[0, 0, 128], type='', swap='sling_kpt2'),
-     149: dict(name='sling_kpt7', id=149, color=[0, 0, 128], type='', swap='sling_kpt15'),
-     150: dict(name='sling_kpt8', id=150, color=[0, 0, 128], type='', swap='sling_kpt14'),
-     151: dict(name='sling_kpt9', id=151, color=[0, 0, 128], type='', swap='sling_kpt13'),
-     152: dict(name='sling_kpt10', id=152, color=[0, 0, 128], type='', swap='sling_kpt12'),
-     153: dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
-     154: dict(name='sling_kpt12', id=154, color=[0, 0, 128], type='', swap='sling_kpt10'),
-     155: dict(name='sling_kpt13', id=155, color=[0, 0, 128], type='', swap='sling_kpt9'),
-     156: dict(name='sling_kpt14', id=156, color=[0, 0, 128], type='', swap='sling_kpt8'),
-     157: dict(name='sling_kpt15', id=157, color=[0, 0, 128], type='', swap='sling_kpt7'),
-     158: dict(name='shorts_kpt1', id=158, color=[128, 128, 128], type='', swap='shorts_kpt3'),
-     159: dict(name='shorts_kpt2', id=159, color=[128, 128, 128], type='', swap=''),
-     160: dict(name='shorts_kpt3', id=160, color=[128, 128, 128], type='', swap='shorts_kpt1'),
-     161: dict(name='shorts_kpt4', id=161, color=[128, 128, 128], type='', swap='shorts_kpt10'),
-     162: dict(name='shorts_kpt5', id=162, color=[128, 128, 128], type='', swap='shorts_kpt9'),
-     163: dict(name='shorts_kpt6', id=163, color=[128, 128, 128], type='', swap='shorts_kpt8'),
-     164: dict(name='shorts_kpt7', id=164, color=[128, 128, 128], type='', swap=''),
-     165: dict(name='shorts_kpt8', id=165, color=[128, 128, 128], type='', swap='shorts_kpt6'),
-     166: dict(name='shorts_kpt9', id=166, color=[128, 128, 128], type='', swap='shorts_kpt5'),
-     167: dict(name='shorts_kpt10', id=167, color=[128, 128, 128], type='', swap='shorts_kpt4'),
-     168: dict(name='trousers_kpt1', id=168, color=[128, 0, 128], type='', swap='trousers_kpt3'),
-     169: dict(name='trousers_kpt2', id=169, color=[128, 0, 128], type='', swap=''),
-     170: dict(name='trousers_kpt3', id=170, color=[128, 0, 128], type='', swap='trousers_kpt1'),
-     171: dict(name='trousers_kpt4', id=171, color=[128, 0, 128], type='', swap='trousers_kpt14'),
-     172: dict(name='trousers_kpt5', id=172, color=[128, 0, 128], type='', swap='trousers_kpt13'),
-     173: dict(name='trousers_kpt6', id=173, color=[128, 0, 128], type='', swap='trousers_kpt12'),
-     174: dict(name='trousers_kpt7', id=174, color=[128, 0, 128], type='', swap='trousers_kpt11'),
-     175: dict(name='trousers_kpt8', id=175, color=[128, 0, 128], type='', swap='trousers_kpt10'),
-     176: dict(name='trousers_kpt9', id=176, color=[128, 0, 128], type='', swap=''),
-     177: dict(name='trousers_kpt10', id=177, color=[128, 0, 128], type='', swap='trousers_kpt8'),
-     178: dict(name='trousers_kpt11', id=178, color=[128, 0, 128], type='', swap='trousers_kpt7'),
-     179: dict(name='trousers_kpt12', id=179, color=[128, 0, 128], type='', swap='trousers_kpt6'),
-     180: dict(name='trousers_kpt13', id=180, color=[128, 0, 128], type='', swap='trousers_kpt5'),
-     181: dict(name='trousers_kpt14', id=181, color=[128, 0, 128], type='', swap='trousers_kpt4'),
-     182: dict(name='skirt_kpt1', id=182, color=[64, 128, 128], type='', swap='skirt_kpt3'),
-     183: dict(name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
-     184: dict(name='skirt_kpt3', id=184, color=[64, 128, 128], type='', swap='skirt_kpt1'),
-     185: dict(name='skirt_kpt4', id=185, color=[64, 128, 128], type='', swap='skirt_kpt8'),
-     186: dict(name='skirt_kpt5', id=186, color=[64, 128, 128], type='', swap='skirt_kpt7'),
-     187: dict(name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
-     188: dict(name='skirt_kpt7', id=188, color=[64, 128, 128], type='', swap='skirt_kpt5'),
-     189: dict(name='skirt_kpt8', id=189, color=[64, 128, 128], type='', swap='skirt_kpt4'),
-     190: dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
-     191: dict(name='ssd_kpt2', id=191, color=[64, 64, 128], type='', swap='ssd_kpt6'),
-     192: dict(name='ssd_kpt3', id=192, color=[64, 64, 128], type='', swap='ssd_kpt5'),
-     193: dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
-     194: dict(name='ssd_kpt5', id=194, color=[64, 64, 128], type='', swap='ssd_kpt3'),
-     195: dict(name='ssd_kpt6', id=195, color=[64, 64, 128], type='', swap='ssd_kpt2'),
-     196: dict(name='ssd_kpt7', id=196, color=[64, 64, 128], type='', swap='ssd_kpt29'),
-     197: dict(name='ssd_kpt8', id=197, color=[64, 64, 128], type='', swap='ssd_kpt28'),
-     198: dict(name='ssd_kpt9', id=198, color=[64, 64, 128], type='', swap='ssd_kpt27'),
-     199: dict(name='ssd_kpt10', id=199, color=[64, 64, 128], type='', swap='ssd_kpt26'),
-     200: dict(name='ssd_kpt11', id=200, color=[64, 64, 128], type='', swap='ssd_kpt25'),
-     201: dict(name='ssd_kpt12', id=201, color=[64, 64, 128], type='', swap='ssd_kpt24'),
-     202: dict(name='ssd_kpt13', id=202, color=[64, 64, 128], type='', swap='ssd_kpt23'),
-     203: dict(name='ssd_kpt14', id=203, color=[64, 64, 128], type='', swap='ssd_kpt22'),
-     204: dict(name='ssd_kpt15', id=204, color=[64, 64, 128], type='', swap='ssd_kpt21'),
-     205: dict(name='ssd_kpt16', id=205, color=[64, 64, 128], type='', swap='ssd_kpt20'),
-     206: dict(name='ssd_kpt17', id=206, color=[64, 64, 128], type='', swap='ssd_kpt19'),
-     207: dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
-     208: dict(name='ssd_kpt19', id=208, color=[64, 64, 128], type='', swap='ssd_kpt17'),
-     209: dict(name='ssd_kpt20', id=209, color=[64, 64, 128], type='', swap='ssd_kpt16'),
-     210: dict(name='ssd_kpt21', id=210, color=[64, 64, 128], type='', swap='ssd_kpt15'),
-     211: dict(name='ssd_kpt22', id=211, color=[64, 64, 128], type='', swap='ssd_kpt14'),
-     212: dict(name='ssd_kpt23', id=212, color=[64, 64, 128], type='', swap='ssd_kpt13'),
-     213: dict(name='ssd_kpt24', id=213, color=[64, 64, 128], type='', swap='ssd_kpt12'),
-     214: dict(name='ssd_kpt25', id=214, color=[64, 64, 128], type='', swap='ssd_kpt11'),
-     215: dict(name='ssd_kpt26', id=215, color=[64, 64, 128], type='', swap='ssd_kpt10'),
-     216: dict(name='ssd_kpt27', id=216, color=[64, 64, 128], type='', swap='ssd_kpt9'),
-     217: dict(name='ssd_kpt28', id=217, color=[64, 64, 128], type='', swap='ssd_kpt8'),
-     218: dict(name='ssd_kpt29', id=218, color=[64, 64, 128], type='', swap='ssd_kpt7'),
-     219: dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
-     220: dict(name='lsd_kpt2', id=220, color=[128, 64, 0], type='', swap='lsd_kpt6'),
-     221: dict(name='lsd_kpt3', id=221, color=[128, 64, 0], type='', swap='lsd_kpt5'),
-     222: dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
-     223: dict(name='lsd_kpt5', id=223, color=[128, 64, 0], type='', swap='lsd_kpt3'),
-     224: dict(name='lsd_kpt6', id=224, color=[128, 64, 0], type='', swap='lsd_kpt2'),
-     225: dict(name='lsd_kpt7', id=225, color=[128, 64, 0], type='', swap='lsd_kpt37'),
-     226: dict(name='lsd_kpt8', id=226, color=[128, 64, 0], type='', swap='lsd_kpt36'),
-     227: dict(name='lsd_kpt9', id=227, color=[128, 64, 0], type='', swap='lsd_kpt35'),
-     228: dict(name='lsd_kpt10', id=228, color=[128, 64, 0], type='', swap='lsd_kpt34'),
-     229: dict(name='lsd_kpt11', id=229, color=[128, 64, 0], type='', swap='lsd_kpt33'),
-     230: dict(name='lsd_kpt12', id=230, color=[128, 64, 0], type='', swap='lsd_kpt32'),
-     231: dict(name='lsd_kpt13', id=231, color=[128, 64, 0], type='', swap='lsd_kpt31'),
-     232: dict(name='lsd_kpt14', id=232, color=[128, 64, 0], type='', swap='lsd_kpt30'),
-     233: dict(name='lsd_kpt15', id=233, color=[128, 64, 0], type='', swap='lsd_kpt29'),
-     234: dict(name='lsd_kpt16', id=234, color=[128, 64, 0], type='', swap='lsd_kpt28'),
-     235: dict(name='lsd_kpt17', id=235, color=[128, 64, 0], type='', swap='lsd_kpt27'),
-     236: dict(name='lsd_kpt18', id=236, color=[128, 64, 0], type='', swap='lsd_kpt26'),
-     237: dict(name='lsd_kpt19', id=237, color=[128, 64, 0], type='', swap='lsd_kpt25'),
-     238: dict(name='lsd_kpt20', id=238, color=[128, 64, 0], type='', swap='lsd_kpt24'),
-     239: dict(name='lsd_kpt21', id=239, color=[128, 64, 0], type='', swap='lsd_kpt23'),
-     240: dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
-     241: dict(name='lsd_kpt23', id=241, color=[128, 64, 0], type='', swap='lsd_kpt21'),
-     242: dict(name='lsd_kpt24', id=242, color=[128, 64, 0], type='', swap='lsd_kpt20'),
-     243: dict(name='lsd_kpt25', id=243, color=[128, 64, 0], type='', swap='lsd_kpt19'),
-     244: dict(name='lsd_kpt26', id=244, color=[128, 64, 0], type='', swap='lsd_kpt18'),
-     245: dict(name='lsd_kpt27', id=245, color=[128, 64, 0], type='', swap='lsd_kpt17'),
-     246: dict(name='lsd_kpt28', id=246, color=[128, 64, 0], type='', swap='lsd_kpt16'),
-     247: dict(name='lsd_kpt29', id=247, color=[128, 64, 0], type='', swap='lsd_kpt15'),
-     248: dict(name='lsd_kpt30', id=248, color=[128, 64, 0], type='', swap='lsd_kpt14'),
-     249: dict(name='lsd_kpt31', id=249, color=[128, 64, 0], type='', swap='lsd_kpt13'),
-     250: dict(name='lsd_kpt32', id=250, color=[128, 64, 0], type='', swap='lsd_kpt12'),
-     251: dict(name='lsd_kpt33', id=251, color=[128, 64, 0], type='', swap='lsd_kpt11'),
-     252: dict(name='lsd_kpt34', id=252, color=[128, 64, 0], type='', swap='lsd_kpt10'),
-     253: dict(name='lsd_kpt35', id=253, color=[128, 64, 0], type='', swap='lsd_kpt9'),
-     254: dict(name='lsd_kpt36', id=254, color=[128, 64, 0], type='', swap='lsd_kpt8'),
-     255: dict(name='lsd_kpt37', id=255, color=[128, 64, 0], type='', swap='lsd_kpt7'),
-     256: dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
-     257: dict(name='vd_kpt2', id=257, color=[128, 64, 255], type='', swap='vd_kpt6'),
-     258: dict(name='vd_kpt3', id=258, color=[128, 64, 255], type='', swap='vd_kpt5'),
-     259: dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
-     260: dict(name='vd_kpt5', id=260, color=[128, 64, 255], type='', swap='vd_kpt3'),
-     261: dict(name='vd_kpt6', id=261, color=[128, 64, 255], type='', swap='vd_kpt2'),
-     262: dict(name='vd_kpt7', id=262, color=[128, 64, 255], type='', swap='vd_kpt19'),
-     263: dict(name='vd_kpt8', id=263, color=[128, 64, 255], type='', swap='vd_kpt18'),
-     264: dict(name='vd_kpt9', id=264, color=[128, 64, 255], type='', swap='vd_kpt17'),
-     265: dict(name='vd_kpt10', id=265, color=[128, 64, 255], type='', swap='vd_kpt16'),
-     266: dict(name='vd_kpt11', id=266, color=[128, 64, 255], type='', swap='vd_kpt15'),
-     267: dict(name='vd_kpt12', id=267, color=[128, 64, 255], type='', swap='vd_kpt14'),
-     268: dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
-     269: dict(name='vd_kpt14', id=269, color=[128, 64, 255], type='', swap='vd_kpt12'),
-     270: dict(name='vd_kpt15', id=270, color=[128, 64, 255], type='', swap='vd_kpt11'),
-     271: dict(name='vd_kpt16', id=271, color=[128, 64, 255], type='', swap='vd_kpt10'),
-     272: dict(name='vd_kpt17', id=272, color=[128, 64, 255], type='', swap='vd_kpt9'),
-     273: dict(name='vd_kpt18', id=273, color=[128, 64, 255], type='', swap='vd_kpt8'),
-     274: dict(name='vd_kpt19', id=274, color=[128, 64, 255], type='', swap='vd_kpt7'),
-     275: dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
-     276: dict(name='sd_kpt2', id=276, color=[128, 64, 0], type='', swap='sd_kpt6'),
-     277: dict(name='sd_kpt3', id=277, color=[128, 64, 0], type='', swap='sd_kpt5'),
-     278: dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
-     279: dict(name='sd_kpt5', id=279, color=[128, 64, 0], type='', swap='sd_kpt3'),
-     280: dict(name='sd_kpt6', id=280, color=[128, 64, 0], type='', swap='sd_kpt2'),
-     281: dict(name='sd_kpt7', id=281, color=[128, 64, 0], type='', swap='sd_kpt19'),
-     282: dict(name='sd_kpt8', id=282, color=[128, 64, 0], type='', swap='sd_kpt18'),
-     283: dict(name='sd_kpt9', id=283, color=[128, 64, 0], type='', swap='sd_kpt17'),
-     284: dict(name='sd_kpt10', id=284, color=[128, 64, 0], type='', swap='sd_kpt16'),
-     285: dict(name='sd_kpt11', id=285, color=[128, 64, 0], type='', swap='sd_kpt15'),
-     286: dict(name='sd_kpt12', id=286, color=[128, 64, 0], type='', swap='sd_kpt14'),
-     287: dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
-     288: dict(name='sd_kpt14', id=288, color=[128, 64, 0], type='', swap='sd_kpt12'),
-     289: dict(name='sd_kpt15', id=289, color=[128, 64, 0], type='', swap='sd_kpt11'),
-     290: dict(name='sd_kpt16', id=290, color=[128, 64, 0], type='', swap='sd_kpt10'),
-     291: dict(name='sd_kpt17', id=291, color=[128, 64, 0], type='', swap='sd_kpt9'),
-     292: dict(name='sd_kpt18', id=292, color=[128, 64, 0], type='', swap='sd_kpt8'),
-     293: dict(name='sd_kpt19', id=293, color=[128, 64, 0], type='', swap='sd_kpt7')
-     }),
-     skeleton_info=dict({
-         0: dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
-         1: dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
-         2: dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
-         3: dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
-         4: dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
-         5: dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
-         6: dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
-         7: dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
-         8: dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
-         9: dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
-         10: dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
-         11: dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
-         12: dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
-         13: dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
-         14: dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
-         15: dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
-         16: dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
-         17: dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
-         18: dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
-         19: dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
-         20: dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
-         21: dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
-         22: dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
-         23: dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
-         24: dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
-         25: dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
-         26: dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
-         27: dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
-         28: dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
-         29: dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
-         30: dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
-         31: dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
-         32: dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
-         33: dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
-         34: dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
-         35: dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
-         36: dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
-         37: dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
-         38: dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
-         39: dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
-         40: dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
-         41: dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
-         42: dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
-         43: dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
-         44: dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
-         45: dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
-         46: dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
-         47: dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
-         48: dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
-         49: dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
-         50: dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
-         51: dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
-         52: dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
-         53: dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
-         54: dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
-         55: dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
-         56: dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
-         57: dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
-         58: dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
-         59: dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
-         60: dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
-         61: dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
-         62: dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
-         63: dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
-         64: dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
-         65: dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
-         66: dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
-         67: dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
-         68: dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
-         69: dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
-         70: dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
-         71: dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
-         72: dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
-         73: dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
-         74: dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
-         75: dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
-         76: dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
-         77: dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
-         78: dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
-         79: dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
-         80: dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
-         81: dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
-         82: dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
-         83: dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
-         84: dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
-         85: dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
-         86: dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
-         87: dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
-         88: dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
-         89: dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
-         90: dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
-         91: dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
-         92: dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
-         93: dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
-         94: dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
-         95: dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
-         96: dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
-         97: dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
-         98: dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
-         99: dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
-         100: dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
-         101: dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
-         102: dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
-         103: dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
-         104: dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
-         105: dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
-         106: dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
-         107: dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
-         108: dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
-         109: dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
-         110: dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
-         111: dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
-         112: dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
-         113: dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
-         114: dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
-         115: dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
-         116: dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
-         117: dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
-         118: dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
-         119: dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
-         120: dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
-         121: dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
-         122: dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
-         123: dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
-         124: dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
-         125: dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
-         126: dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
-         127: dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
-         128: dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
-         129: dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
-         130: dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
-         131: dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
-         132: dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
-         133: dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
-         134: dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
-         135: dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
-         136: dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
-         137: dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
-         138: dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
-         139: dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
-         140: dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
-         141: dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
-         142: dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
-         143: dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
-         144: dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
-         145: dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
-         146: dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
-         147: dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
-         148: dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
-         149: dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
-         150: dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
-         151: dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
-         152: dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
-         153: dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
-         154: dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
-         155: dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
-         156: dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
-         157: dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
-         158: dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
-         159: dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
-         160: dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
-         161: dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
-         162: dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
-         163: dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
-         164: dict(link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128, 128]),
-         165: dict(link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128, 128]),
-         166: dict(link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128, 128]),
-         167: dict(link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128, 128]),
-         168: dict(link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128, 128]),
-         169: dict(link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128, 128]),
-         170: dict(link=('shorts_kpt9', 'shorts_kpt10'), id=170, color=[128, 128, 128]),
-         171: dict(link=('shorts_kpt10', 'shorts_kpt3'), id=171, color=[128, 128, 128]),
-         172: dict(link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128, 128]),
-         173: dict(link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128, 128]),
-         174: dict(link=('trousers_kpt1', 'trousers_kpt4'), id=174, color=[128, 0, 128]),
-         175: dict(link=('trousers_kpt4', 'trousers_kpt5'), id=175, color=[128, 0, 128]),
-         176: dict(link=('trousers_kpt5', 'trousers_kpt6'), id=176, color=[128, 0, 128]),
-         177: dict(link=('trousers_kpt6', 'trousers_kpt7'), id=177, color=[128, 0, 128]),
-         178: dict(link=('trousers_kpt7', 'trousers_kpt8'), id=178, color=[128, 0, 128]),
-         179: dict(link=('trousers_kpt8', 'trousers_kpt9'), id=179, color=[128, 0, 128]),
-         180: dict(link=('trousers_kpt9', 'trousers_kpt10'), id=180, color=[128, 0, 128]),
-         181: dict(link=('trousers_kpt10', 'trousers_kpt11'), id=181, color=[128, 0, 128]),
-         182: dict(link=('trousers_kpt11', 'trousers_kpt12'), id=182, color=[128, 0, 128]),
-         183: dict(link=('trousers_kpt12', 'trousers_kpt13'), id=183, color=[128, 0, 128]),
-         184: dict(link=('trousers_kpt13', 'trousers_kpt14'), id=184, color=[128, 0, 128]),
-         185: dict(link=('trousers_kpt14', 'trousers_kpt3'), id=185, color=[128, 0, 128]),
-         186: dict(link=('trousers_kpt3', 'trousers_kpt2'), id=186, color=[128, 0, 128]),
-         187: dict(link=('trousers_kpt2', 'trousers_kpt1'), id=187, color=[128, 0, 128]),
-         188: dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
-         189: dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
-         190: dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
-         191: dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
-         192: dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
-         193: dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
-         194: dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
-         195: dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
-         196: dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
-         197: dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
-         198: dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
-         199: dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
-         200: dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
-         201: dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
-         202: dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
-         203: dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
-         204: dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
-         205: dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
-         206: dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
-         207: dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
-         208: dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
-         209: dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
-         210: dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
-         211: dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
-         212: dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
-         213: dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
-         214: dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
-         215: dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
-         216: dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
-         217: dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
-         218: dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
-         219: dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
-         220: dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
-         221: dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
-         222: dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
-         223: dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
-         224: dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
-         225: dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
-         226: dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
-         227: dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
-         228: dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
-         229: dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
-         230: dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
-         231: dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
-         232: dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
-         233: dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
-         234: dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
-         235: dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
-         236: dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
-         237: dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
-         238: dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
-         239: dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
-         240: dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
-         241: dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
-         242: dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
-         243: dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
-         244: dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
-         245: dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
-         246: dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
-         247: dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
-         248: dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
-         249: dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
-         250: dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
-         251: dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
-         252: dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
-         253: dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
-         254: dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
-         255: dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
-         256: dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
-         257: dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
-         258: dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
-         259: dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
-         260: dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
-         261: dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
-         262: dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
-         263: dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
-         264: dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
-         265: dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
-         266: dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
-         267: dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
-         268: dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
-         269: dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
-         270: dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
-         271: dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
-         272: dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
-         273: dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
-         274: dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
-         275: dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
-         276: dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
-         277: dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
-         278: dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
-         279: dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
-         280: dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
-         281: dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
-         282: dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
-         283: dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
-         284: dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
-         285: dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
-         286: dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
-         287: dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
-         288: dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
-         289: dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
-         290: dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
-         291: dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
-         292: dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
-         293: dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
-         294: dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
-         295: dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
-         296: dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
-         297: dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
-         298: dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
-         299: dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
-         300: dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
-         301: dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
-         302: dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
-         303: dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
-     }),
-     joint_weights=[1.0] * 294,
-     sigmas=[])
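Each keypoint's `swap` field names its horizontally mirrored counterpart (an empty string marks a point on the garment's symmetry axis), and MMPose's `RandomFlip` relies on this metadata to reorder keypoint channels when it mirrors an image. A minimal sketch of that mechanism, assuming the enclosing literal is bound to a `dataset_info` variable as is usual in these dataset configs:

    import numpy as np

    def build_flip_indices(keypoint_info):
        """For every keypoint id, find the id of its mirrored counterpart."""
        name_to_id = {v['name']: k for k, v in keypoint_info.items()}
        return [
            name_to_id[v['swap']] if v['swap'] else k
            for k, v in sorted(keypoint_info.items())
        ]

    def flip_keypoints(keypoints, flip_indices, img_width):
        """Mirror (N, K, 2) keypoint coordinates and swap left/right channels."""
        flipped = keypoints[:, flip_indices].copy()
        flipped[..., 0] = img_width - 1 - flipped[..., 0]
        return flipped

    # flip_indices = build_flip_indices(dataset_info['keypoint_info'])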
- param_scheduler = [
-     dict(
-         type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
-     dict(
-         type='MultiStepLR',
-         begin=0,
-         end=150,
-         milestones=[100, 130],
-         gamma=0.1,
-         by_epoch=True)
- ]
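The two schedulers compose: `LinearLR` warms the rate up from 0.1% of its base value over the first 500 iterations, and `MultiStepLR` then cuts it tenfold at epochs 100 and 130 of the 150-epoch run. A rough sketch of the resulting curve; the iterations-per-epoch value is a placeholder, not taken from the config:

    base_lr = 5e-4  # matches the Adam optimizer defined just below

    def lr_at(epoch, iteration, iters_per_epoch=1000):  # placeholder epoch length
        step = epoch * iters_per_epoch + iteration
        # LinearLR: ramp the factor from 0.001 to 1.0 over the first 500 steps
        factor = 0.001 + (1.0 - 0.001) * min(step, 500) / 500
        # MultiStepLR: multiply by gamma=0.1 for every milestone already passed
        factor *= 0.1 ** sum(epoch >= m for m in (100, 130))
        return base_lr * factor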
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
- auto_scale_lr = dict(base_batch_size=512)
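`auto_scale_lr` implements the linear scaling rule: when enabled, the runner rescales the optimizer's lr by the ratio of the actual global batch size to `base_batch_size`. With the 4 GPUs x 64 samples suggested by the `4xb64` config name, that halves the rate:

    num_gpus = 4                              # assumption taken from the '4xb64' name
    global_batch = num_gpus * 64              # per-GPU batch_size from train_dataloader
    scaled_lr = 0.0005 * global_batch / 512   # -> 0.00025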
- dataset_type = 'DeepFashion2Dataset'
- data_mode = 'topdown'
- data_root = 'data/deepfashion2/'
- codec = dict(
-     type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
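The `MSRAHeatmap` codec renders each keypoint as a 2-D Gaussian on a 48x64 grid, i.e. at one quarter of the 192x256 (width x height) input resolution. A self-contained sketch of that encoding, simplified to a plain stride-4 coordinate mapping:

    import numpy as np

    def encode_keypoint(x, y, heatmap_size=(48, 64), stride=4, sigma=2):
        """Gaussian target for one keypoint at input-space coords (x, y)."""
        w, h = heatmap_size
        cx, cy = x / stride, y / stride          # 192x256 input -> 48x64 grid
        xs, ys = np.meshgrid(np.arange(w), np.arange(h))
        return np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))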
- train_pipeline = [
-     dict(type='LoadImage'),
-     dict(type='GetBBoxCenterScale'),
-     dict(type='RandomFlip', direction='horizontal'),
-     dict(
-         type='RandomBBoxTransform',
-         shift_prob=0,
-         rotate_factor=60,
-         scale_factor=(0.75, 1.25)),
-     dict(type='TopdownAffine', input_size=(192, 256)),
-     dict(
-         type='GenerateTarget',
-         encoder=dict(
-             type='MSRAHeatmap',
-             input_size=(192, 256),
-             heatmap_size=(48, 64),
-             sigma=2)),
-     dict(type='PackPoseInputs')
- ]
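Order matters in this list: the flip and bbox jitter operate in original image space, `TopdownAffine` then warps the crop to 192x256, and `GenerateTarget` encodes heatmaps from the already-transformed keypoints. Conceptually the data loader just folds a results dict through the transforms, roughly:

    def run_pipeline(results, transforms):
        # Each transform reads and writes keys of the shared `results` dict
        # (image, bbox, keypoints, ...); later steps see earlier edits.
        for transform in transforms:
            results = transform(results)
        return results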
- val_pipeline = [
-     dict(type='LoadImage', backend_args=dict(backend='local')),
-     dict(type='GetBBoxCenterScale'),
-     dict(type='TopdownAffine', input_size=(192, 256)),
-     dict(type='PackPoseInputs')
- ]
- train_dataloader = dict(
-     batch_size=64,
-     num_workers=6,
-     persistent_workers=True,
-     sampler=dict(type='DefaultSampler', shuffle=True),
-     dataset=dict(
-         type='DeepFashion2Dataset',
-         data_root='data/deepfashion2/',
-         data_mode='topdown',
-         ann_file='train/deepfashion2_vest_dress.json',
-         data_prefix=dict(img='train/image/'),
-         pipeline=[
-             dict(type='LoadImage'),
-             dict(type='GetBBoxCenterScale'),
-             dict(type='RandomFlip', direction='horizontal'),
-             dict(
-                 type='RandomBBoxTransform',
-                 shift_prob=0,
-                 rotate_factor=60,
-                 scale_factor=(0.75, 1.25)),
-             dict(type='TopdownAffine', input_size=(192, 256)),
-             dict(
-                 type='GenerateTarget',
-                 encoder=dict(
-                     type='MSRAHeatmap',
-                     input_size=(192, 256),
-                     heatmap_size=(48, 64),
-                     sigma=2)),
-             dict(type='PackPoseInputs')
-         ]))
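The inline `pipeline` here repeats `train_pipeline` verbatim because this is a fully expanded config dump; a hand-written config would normally reference the variables defined earlier instead:

    # Equivalent, non-duplicated form of the dataloader above (sketch):
    train_dataloader = dict(
        batch_size=64,
        num_workers=6,
        persistent_workers=True,
        sampler=dict(type='DefaultSampler', shuffle=True),
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            data_mode=data_mode,
            ann_file='train/deepfashion2_vest_dress.json',
            data_prefix=dict(img='train/image/'),
            pipeline=train_pipeline))  # reuse the list defined earlier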
- val_dataloader = dict(
-     batch_size=32,
-     num_workers=6,
-     persistent_workers=True,
-     drop_last=False,
-     sampler=dict(type='DefaultSampler', shuffle=False),
-     dataset=dict(
-         type='DeepFashion2Dataset',
-         data_root='data/deepfashion2/',
-         data_mode='topdown',
-         ann_file='validation/deepfashion2_vest_dress.json',
-         data_prefix=dict(img='validation/image/'),
-         test_mode=True,
-         pipeline=[
-             dict(type='LoadImage', backend_args=dict(backend='local')),
-             dict(type='GetBBoxCenterScale'),
-             dict(type='TopdownAffine', input_size=(192, 256)),
-             dict(type='PackPoseInputs')
-         ]))
- test_dataloader = dict(
-     batch_size=32,
-     num_workers=6,
-     persistent_workers=True,
-     drop_last=False,
-     sampler=dict(type='DefaultSampler', shuffle=False),
-     dataset=dict(
-         type='DeepFashion2Dataset',
-         data_root='data/deepfashion2/',
-         data_mode='topdown',
-         ann_file='validation/deepfashion2_vest_dress.json',
-         data_prefix=dict(img='validation/image/'),
-         test_mode=True,
-         pipeline=[
-             dict(type='LoadImage', backend_args=dict(backend='local')),
-             dict(type='GetBBoxCenterScale'),
-             dict(type='TopdownAffine', input_size=(192, 256)),
-             dict(type='PackPoseInputs')
-         ]))
- channel_cfg = dict(
-     num_output_channels=294,
-     dataset_joints=294,
-     dataset_channel=[list(range(294))],
-     inference_channel=list(range(294)))
- model = dict(
2829
- type='TopdownPoseEstimator',
2830
- data_preprocessor=dict(
2831
- type='PoseDataPreprocessor',
2832
- mean=[123.675, 116.28, 103.53],
2833
- std=[58.395, 57.12, 57.375],
2834
- bgr_to_rgb=True),
2835
- backbone=dict(
2836
- type='ResNet',
2837
- depth=50,
2838
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
2839
- head=dict(
2840
- type='HeatmapHead',
2841
- in_channels=2048,
2842
- out_channels=294,
2843
- loss=dict(type='KeypointMSELoss', use_target_weight=True),
2844
- decoder=dict(
2845
- type='MSRAHeatmap',
2846
- input_size=(192, 256),
2847
- heatmap_size=(48, 64),
2848
- sigma=2)),
2849
- test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
2850
- val_evaluator = [
2851
- dict(type='PCKAccuracy', thr=0.2),
2852
- dict(type='AUC'),
2853
- dict(type='EPE')
2854
- ]
2855
- test_evaluator = [
2856
- dict(type='PCKAccuracy', thr=0.2),
2857
- dict(type='AUC'),
2858
- dict(type='EPE')
2859
- ]
2860
- launcher = 'pytorch'
2861
- work_dir = './work_dirs/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192'
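The block above is the tail of a dumped MMPose config (data pipelines, dataloaders, a 294-channel ResNet-50 heatmap head, and PCK/AUC/EPE evaluators). As a minimal sketch of how such a dump is loaded and inspected, assuming `mmengine` is installed and the file has been saved locally under the hypothetical name below:

```python
# Sketch: load a dumped MMEngine-style config and read a few fields.
# The file path is a placeholder for a local copy of the config above.
from mmengine.config import Config

cfg = Config.fromfile(
    'td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192.py')

print(cfg.train_dataloader.batch_size)  # 64
print(cfg.model.head.out_channels)      # 294 heatmap channels
print(cfg.work_dir)
```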
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/datasets/pipelines/auto_aug.py DELETED
@@ -1,96 +0,0 @@
- # Policy for ImageNet, refers to
- # https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py
- policy_imagenet = [
-     [
-         dict(type='Posterize', bits=4, prob=0.4),
-         dict(type='Rotate', angle=30., prob=0.6)
-     ],
-     [
-         dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
-         dict(type='AutoContrast', prob=0.6)
-     ],
-     [dict(type='Equalize', prob=0.8),
-      dict(type='Equalize', prob=0.6)],
-     [
-         dict(type='Posterize', bits=5, prob=0.6),
-         dict(type='Posterize', bits=5, prob=0.6)
-     ],
-     [
-         dict(type='Equalize', prob=0.4),
-         dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
-     ],
-     [
-         dict(type='Equalize', prob=0.4),
-         dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)
-     ],
-     [
-         dict(type='Solarize', thr=256 / 9 * 6, prob=0.6),
-         dict(type='Equalize', prob=0.6)
-     ],
-     [dict(type='Posterize', bits=6, prob=0.8),
-      dict(type='Equalize', prob=1.)],
-     [
-         dict(type='Rotate', angle=10., prob=0.2),
-         dict(type='Solarize', thr=256 / 9, prob=0.6)
-     ],
-     [
-         dict(type='Equalize', prob=0.6),
-         dict(type='Posterize', bits=5, prob=0.4)
-     ],
-     [
-         dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
-         dict(type='ColorTransform', magnitude=0., prob=0.4)
-     ],
-     [
-         dict(type='Rotate', angle=30., prob=0.4),
-         dict(type='Equalize', prob=0.6)
-     ],
-     [dict(type='Equalize', prob=0.0),
-      dict(type='Equalize', prob=0.8)],
-     [dict(type='Invert', prob=0.6),
-      dict(type='Equalize', prob=1.)],
-     [
-         dict(type='ColorTransform', magnitude=0.4, prob=0.6),
-         dict(type='Contrast', magnitude=0.8, prob=1.)
-     ],
-     [
-         dict(type='Rotate', angle=30 / 9 * 8, prob=0.8),
-         dict(type='ColorTransform', magnitude=0.2, prob=1.)
-     ],
-     [
-         dict(type='ColorTransform', magnitude=0.8, prob=0.8),
-         dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)
-     ],
-     [
-         dict(type='Sharpness', magnitude=0.7, prob=0.4),
-         dict(type='Invert', prob=0.6)
-     ],
-     [
-         dict(
-             type='Shear',
-             magnitude=0.3 / 9 * 5,
-             prob=0.6,
-             direction='horizontal'),
-         dict(type='Equalize', prob=1.)
-     ],
-     [
-         dict(type='ColorTransform', magnitude=0., prob=0.4),
-         dict(type='Equalize', prob=0.6)
-     ],
-     [
-         dict(type='Equalize', prob=0.4),
-         dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)
-     ],
-     [
-         dict(type='Solarize', thr=256 / 9 * 4, prob=0.6),
-         dict(type='AutoContrast', prob=0.6)
-     ],
-     [dict(type='Invert', prob=0.6),
-      dict(type='Equalize', prob=1.)],
-     [
-         dict(type='ColorTransform', magnitude=0.4, prob=0.6),
-         dict(type='Contrast', magnitude=0.8, prob=1.)
-     ],
-     [dict(type='Equalize', prob=0.8),
-      dict(type='Equalize', prob=0.6)],
- ]
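For context, a policy list like `policy_imagenet` is not used on its own; it is referenced from a dataset pipeline as the `policies` argument of an AutoAugment transform. A rough sketch of that wiring in an mmpretrain-style config follows; the import path is hypothetical (real configs usually pull the file in through the `_base_` mechanism) and the transform names vary across mmpretrain versions:

```python
# Sketch: plugging the policy list into a training pipeline.
# The import path is a placeholder; in practice the policy file is
# inherited via _base_ rather than imported.
from pipelines.auto_aug import policy_imagenet  # hypothetical import

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    # Each element of policy_imagenet is one sub-policy: a pair of
    # transforms applied together with their own probabilities.
    dict(type='AutoAugment', policies=policy_imagenet),
    dict(type='PackInputs'),
]
```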
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/ConfirmDialog.js DELETED
@@ -1,103 +0,0 @@
- import Dialog from '../dialog/Dialog.js';
- import Methods from './methods/Methods.js';
- import RegisterEvents from './methods/RegisterEvents.js';
- import DeepClone from '../../../plugins/utils/object/DeepClone.js';
- import CreateBackground from '../utils/build/CreateBackground.js';
- import CreateLabel from '../utils/build/CreateLabel.js';
- import CreateContent from './methods/CreateContent.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
- import HasValue from '../../../plugins/utils/object/HasValue.js';
- import TextArea from '../textarea/TextArea.js';
-
- const GetValue = Phaser.Utils.Objects.GetValue;
-
- class ConfirmDialog extends Dialog {
-     constructor(scene, config, creators) {
-         config = (config) ? DeepClone(config) : {};
-
-         if (creators === undefined) {
-             creators = {};
-         }
-
-         var createBackground = GetValue(creators, 'background', CreateBackground);
-         if (createBackground) {
-             config.background = createBackground(scene, config.background);
-         } else {
-             delete config.background;
-         }
-
-         config.title = CreateLabel(scene, config.title, creators.title);
-
-         config.content = CreateContent(scene, config.content, creators.content);
-         if (config.content instanceof TextArea) {
-             if (HasValue(config, 'height') && !HasValue(config, 'proportion.content')) {
-                 SetValue(config, 'proportion.content', 1);
-             }
-         }
-
-         var defaultButtonConfig = config.button;
-         var buttonAConfig = config.buttonA || defaultButtonConfig;
-         var buttonBConfig = config.buttonB || defaultButtonConfig;
-         var buttonMode = config.buttonMode;
-         if (buttonMode === undefined) {
-             buttonMode = (!!buttonAConfig && !!buttonBConfig) ? 2 :
-                 (!!buttonAConfig) ? 1 :
-                     0;
-         }
-
-         var defaultButtonCreator = creators.button;
-         var buttonACreators = creators.buttonA || defaultButtonCreator;
-         var buttonBCreators = creators.buttonB || defaultButtonCreator;
-         switch (buttonMode) {
-             case 2:
-                 config.actions = [
-                     CreateLabel(scene, buttonAConfig, buttonACreators),
-                     CreateLabel(scene, buttonBConfig, buttonBCreators),
-                 ]
-                 break;
-
-             case 1:
-                 config.actions = [
-                     CreateLabel(scene, buttonAConfig, buttonACreators),
-                 ]
-                 break;
-
-             case 0:
-                 break;
-
-             default:
-                 config.actions = [];
-                 break;
-         }
-
-         var defaultChoiceConfig = config.choice;
-         if (defaultChoiceConfig) {
-             config.choices = [];
-         }
-
-         super(scene, config);
-         this.type = 'rexConfirmDialog';
-
-         this.buttonMode = buttonMode;
-
-         this.defaultActionConfig = defaultButtonConfig;
-         this.defaultActionButtonCreator = defaultButtonCreator;
-
-         this.defaultChoiceConfig = defaultChoiceConfig;
-         this.defaultChoiceCreator = creators.choice;
-
-         var buttons = this.childrenMap.actions;
-         this.addChildrenMap('buttonA', (buttons) ? buttons[0] : null);
-         this.addChildrenMap('buttonB', (buttons) ? buttons[1] : null);
-
-         // Interactive
-         RegisterEvents.call(this);
-     }
- }
-
- Object.assign(
-     ConfirmDialog.prototype,
-     Methods
- )
-
- export default ConfirmDialog;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/InsertEmptyColumn.js DELETED
@@ -1,34 +0,0 @@
- var InsertEmptyColumn = function (colIndex, proportion, space) {
-     if (proportion === undefined) {
-         proportion = this.columnProportions[0] || 0;
-     }
-     if (space === undefined) {
-         space = this.space.column[0] || 0;
-     }
-
-     this.columnCount += 1;
-     this.gridCount += this.rowCount;
-
-     for (var i = this.rowCount - 1; i >= 0; i--) {
-         var insertIndex = (i * this.columnCount) + colIndex;
-         this.sizerChildren.splice(insertIndex, 0, null);
-     }
-
-     this.columnProportions.push(proportion);
-
-     this.columnWidth.length += 1;  // this.columnWidth will be recalculated when layout()
-
-     this.space.column.splice(colIndex, 0, space);
-
-     return this;
- }
-
- var AddEmptyColumn = function (proportion, space) {
-     InsertEmptyColumn.call(this, this.columnCount, proportion, space);
-     return this;
- }
-
- export {
-     InsertEmptyColumn,
-     AddEmptyColumn
- };
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sides/defaultcallbacks/MoveCallbacks.js DELETED
@@ -1,12 +0,0 @@
- var GetCallback = function (duration, ease) {
-     return function (child, key, sides, reset) {
-         if (key !== 'panel') {
-             sides.moveChild(child, ((reset) ? 0 : duration), ease);
-         }
-     }
- }
-
- export default {
-     show: GetCallback,
-     hide: GetCallback
- }
 
spaces/AlexKorGKLT/webui-cpua/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Stable Diffusion Webui on Cpu
- emoji: 🏃
- colorFrom: pink
- colorTo: purple
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- python_version: 3.10.6
- duplicated_from: DMTuit/webui-cpu
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlexWang/lama/models/ade20k/utils.py DELETED
@@ -1,40 +0,0 @@
- """Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""
-
- import os
- import sys
-
- import numpy as np
- import torch
-
- try:
-     from urllib import urlretrieve
- except ImportError:
-     from urllib.request import urlretrieve
-
-
- def load_url(url, model_dir='./pretrained', map_location=None):
-     if not os.path.exists(model_dir):
-         os.makedirs(model_dir)
-     filename = url.split('/')[-1]
-     cached_file = os.path.join(model_dir, filename)
-     if not os.path.exists(cached_file):
-         sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
-         urlretrieve(url, cached_file)
-     return torch.load(cached_file, map_location=map_location)
-
-
- def color_encode(labelmap, colors, mode='RGB'):
-     labelmap = labelmap.astype('int')
-     labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
-                             dtype=np.uint8)
-     for label in np.unique(labelmap):
-         if label < 0:
-             continue
-         labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
-             np.tile(colors[label],
-                     (labelmap.shape[0], labelmap.shape[1], 1))
-
-     if mode == 'BGR':
-         return labelmap_rgb[:, :, ::-1]
-     else:
-         return labelmap_rgb
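`color_encode` above paints each label id with its palette color by masked accumulation. A self-contained sketch of the same logic on a toy 2x2 label map, inlined so it runs without the deleted module:

```python
# Sketch: the color_encode logic on a tiny label map.
import numpy as np

labelmap = np.array([[0, 1], [1, 0]])                          # two classes
colors = np.array([[255, 0, 0], [0, 0, 255]], dtype=np.uint8)  # red, blue

rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3), dtype=np.uint8)
for label in np.unique(labelmap):
    # boolean mask broadcast against the tiled per-class color
    rgb += (labelmap == label)[:, :, np.newaxis] * np.tile(
        colors[label], (labelmap.shape[0], labelmap.shape[1], 1))

print(rgb[0, 0], rgb[0, 1])  # -> [255   0   0] [  0   0 255]
```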
 
spaces/AlexZou/Deploy_Restoration/app.py DELETED
@@ -1,69 +0,0 @@
- import gradio as gr
- import os
-
- def inference(image, task):
-     if not os.path.exists('tmp'):
-         os.system('mkdir tmp')
-     image.save("tmp/lq_image.png", "PNG")
-
-     if task == 'Dehazing':
-         os.system("python Dehazing.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/Haze4k.tjm")
-
-     if task == 'LLIE':
-         os.system("python Lowlight.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/Lowlight.pth")
-
-     if task == 'SuperResolutionx2':
-         os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx2.pth --scale 2")
-
-     if task == 'SuperResolutionx3':
-         os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx3.pth --scale 3")
-
-     if task == 'SuperResolutionx4':
-         os.system("python SuperResolution.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/SRx4.pth --scale 4")
-
-     if task == 'Underwater':
-         os.system("python Underwater.py --test_path ./tmp/lq_image.png --save_path ./tmp/ --pk_path model_zoo/underwater.pth")
-
-     return 'tmp/output.png'
-
- title = "Intelligent Restoration of Complex Underwater Images Based on Optical Propagation Modeling<br><font size=3>Hongxia Gao</font><br><font size=3>South China University of Technology</font><br>"
- description = "Overview: building on years of theoretical work on image restoration methods grounded in optical imaging models, the project team starts from the physical modeling of light propagation and pursues the following: an optical imaging model that captures the main degradation characteristics of complex water bodies, such as absorption and forward/backward scattering; adaptive intelligent restoration based on that optical model together with low-rank constraints; and, building on computer vision theory and the optical imaging model, deep-learning restoration models that remove multiple degradation effects. On top of this earlier restoration research, follow-up work studies both traditional and deep-learning restoration methods for hazy, low-light, and low-resolution scenes."
- article = "Related publications:<br>[1] Ye Cai, Hongxia Gao*, Shicheng Niu, Tian Qi. A multi-stage restoration method for degraded images with light scattering and absorption. Proceedings of the 26th International Conference on Pattern Recognition (ICPR 2022).<br>[2] Ye Cai, Lan Luo, Hongxia Gao*, Shicheng Niu, Weipeng Yang, Tian Qi, Guoheng Liang. Haze Removal Using a Hybrid Convolutional Sparse Representation Model. The 14th International Conference on Digital Image Processing (ICDIP).<br>[3] Hongxia Gao, Zhanhong Chen, Binyang Huang*, Jiahe Chen, Zhifu Li. Image Super Resolution Based on Conditional Generative Adversarial Network. IET Image Processing, 2020, 14(13): 3006-3013 (SCI indexed - 000595800300006).<br>[4] Weipeng Yang, Hongxia Gao, Shasha Huang, Shicheng Niu, Hongsheng Chen, Guoheng Liang. Low-light Image Enhancement under Mixed Noise Model with Tensor Representation. CAAI International Conference on Artificial Intelligence (CICAI)."
- #description = "Gradio demo for <b>NAFNet: Nonlinear Activation Free Network for Image Restoration</b>. NAFNet achieves state-of-the-art performance on three tasks: image denoising, image deblurring and stereo image super-resolution (SR). See the paper and project page for detailed results below. Here, we provide a demo for image denoise and deblur. To use it, simply upload your image, or click one of the examples to load them. Inference needs some time since this demo uses CPU."
- #article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2204.04676' target='_blank'>Simple Baselines for Image Restoration</a> | <a href='https://arxiv.org/abs/2204.08714' target='_blank'>NAFSSR: Stereo Image Super-Resolution Using NAFNet</a> | <a href='https://github.com/megvii-research/NAFNet' target='_blank'> Github Repo</a></p>"
-
-
- examples = [['demo/underwater.jpg', 'Underwater'],
-             ['demo/low.jpg', 'LLIE'],
-             ['demo/dehaze.jpg', 'Dehazing'],
-             ['demo/sr.png', 'SuperResolutionx2']]
-
-
- #examples = [['demo/low.jpg', 'LLIE']]
-
- '''
- iface = gr.Interface(
-     inference,
-     [gr.inputs.Image(type="pil", label="Input"),
-      gr.inputs.Radio(["LLIE"], default="LLIE", label='task'),],
-     gr.outputs.Image(type="file", label="Output"),
-     title=title,
-     description=description,
-     article=article,
-     enable_queue=True,
-     examples=examples
- )
- iface.launch(debug=True,enable_queue=True)
- '''
- iface = gr.Interface(
-     inference,
-     [gr.inputs.Image(type="pil", label="Input"),
-      gr.inputs.Radio(["Underwater", "LLIE", "Dehazing", "SuperResolutionx2", "SuperResolutionx3", "SuperResolutionx4"], default="Underwater", label='task'),],
-     gr.outputs.Image(type="file", label="Output"),
-     title=title,
-     description=description,
-     article=article,
-     enable_queue=True,
-     examples=examples
- )
- iface.launch(debug=True,enable_queue=True)
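The dispatcher above shells out with `os.system` and fixed command strings. As a sketch of the same pattern using `subprocess.run`, which fails loudly instead of silently when a worker script errors (the script names and checkpoint paths are the ones hard-coded above and only exist inside that Space):

```python
# Sketch: the same task dispatch with subprocess.run instead of os.system.
import subprocess

TASKS = {
    'Dehazing': ('Dehazing.py', 'model_zoo/Haze4k.tjm'),
    'LLIE': ('Lowlight.py', 'model_zoo/Lowlight.pth'),
    'Underwater': ('Underwater.py', 'model_zoo/underwater.pth'),
}

def run_task(task: str) -> None:
    script, ckpt = TASKS[task]
    subprocess.run(
        ['python', script,
         '--test_path', './tmp/lq_image.png',
         '--save_path', './tmp/',
         '--pk_path', ckpt],
        check=True)  # raise CalledProcessError if the worker script fails
```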
 
spaces/Amrrs/QR-code-AI-art-generator/app.py DELETED
@@ -1,285 +0,0 @@
- import torch
- import gradio as gr
- from PIL import Image
- import qrcode
- from pathlib import Path
- from multiprocessing import cpu_count
- import requests
- import io
- import os
- from PIL import Image
-
- from diffusers import (
-     StableDiffusionPipeline,
-     StableDiffusionControlNetImg2ImgPipeline,
-     ControlNetModel,
-     DDIMScheduler,
-     DPMSolverMultistepScheduler,
-     DEISMultistepScheduler,
-     HeunDiscreteScheduler,
-     EulerDiscreteScheduler,
- )
-
- qrcode_generator = qrcode.QRCode(
-     version=1,
-     error_correction=qrcode.ERROR_CORRECT_H,
-     box_size=10,
-     border=4,
- )
-
- controlnet = ControlNetModel.from_pretrained(
-     "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
- )
-
- pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-     "runwayml/stable-diffusion-v1-5",
-     controlnet=controlnet,
-     safety_checker=None,
-     torch_dtype=torch.float16,
- ).to("cuda")
- pipe.enable_xformers_memory_efficient_attention()
-
-
- def resize_for_condition_image(input_image: Image.Image, resolution: int):
-     input_image = input_image.convert("RGB")
-     W, H = input_image.size
-     k = float(resolution) / min(H, W)
-     H *= k
-     W *= k
-     H = int(round(H / 64.0)) * 64
-     W = int(round(W / 64.0)) * 64
-     img = input_image.resize((W, H), resample=Image.LANCZOS)
-     return img
-
-
- SAMPLER_MAP = {
-     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
-     "DPM++ Karras": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True),
-     "Heun": lambda config: HeunDiscreteScheduler.from_config(config),
-     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
-     "DDIM": lambda config: DDIMScheduler.from_config(config),
-     "DEIS": lambda config: DEISMultistepScheduler.from_config(config),
- }
-
-
- def inference(
-     qr_code_content: str,
-     prompt: str,
-     negative_prompt: str,
-     guidance_scale: float = 10.0,
-     controlnet_conditioning_scale: float = 2.0,
-     strength: float = 0.8,
-     seed: int = -1,
-     init_image: Image.Image | None = None,
-     qrcode_image: Image.Image | None = None,
-     use_qr_code_as_init_image = True,
-     sampler = "DPM++ Karras SDE",
- ):
-     if prompt is None or prompt == "":
-         raise gr.Error("Prompt is required")
-
-     if qrcode_image is None and qr_code_content == "":
-         raise gr.Error("QR Code Image or QR Code Content is required")
-
-     pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)
-
-     generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
-
-     if qr_code_content != "" or qrcode_image.size == (1, 1):
-         print("Generating QR Code from content")
-         qr = qrcode.QRCode(
-             version=1,
-             error_correction=qrcode.constants.ERROR_CORRECT_H,
-             box_size=10,
-             border=4,
-         )
-         qr.add_data(qr_code_content)
-         qr.make(fit=True)
-
-         qrcode_image = qr.make_image(fill_color="black", back_color="white")
-         qrcode_image = resize_for_condition_image(qrcode_image, 768)
-     else:
-         print("Using QR Code Image")
-         qrcode_image = resize_for_condition_image(qrcode_image, 768)
-
-     # hack due to gradio examples
-     init_image = qrcode_image
-
-     out = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         image=qrcode_image,
-         control_image=qrcode_image,  # type: ignore
-         width=768,  # type: ignore
-         height=768,  # type: ignore
-         guidance_scale=float(guidance_scale),
-         controlnet_conditioning_scale=float(controlnet_conditioning_scale),  # type: ignore
-         generator=generator,
-         strength=float(strength),
-         num_inference_steps=40,
-     )
-     return out.images[0]  # type: ignore
-
-
- with gr.Blocks() as blocks:
-     gr.Markdown(
-         """
- # QR Code AI Art Generator
-
- ## 💡 How to generate beautiful QR codes
-
- We use the QR code image as the initial image **and** the control image, which allows you to generate
- QR Codes that blend in **very naturally** with your provided prompt.
- The strength parameter defines how much noise is added to your QR code and the noisy QR code is then guided towards both your prompt and the QR code image via Controlnet.
- Use a high strength value between 0.8 and 0.95 and choose a conditioning scale between 0.6 and 2.0.
- This mode arguably achieves the most aesthetically appealing QR code images, but also requires more tuning of the controlnet conditioning scale and the strength value. If the generated image
- looks way too much like the original QR code, make sure to gently increase the *strength* value and reduce the *conditioning* scale. Also check out the examples below.
-
- model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
-
- <a href="https://huggingface.co/spaces/huggingface-projects/QR-code-AI-art-generator?duplicate=true" style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
- <img style="margin-bottom: 0em;display: inline;margin-top: -.25em;" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> for no queue on your own hardware.</p>
- """
-     )
-
-     with gr.Row():
-         with gr.Column():
-             qr_code_content = gr.Textbox(
-                 label="QR Code Content",
-                 info="QR Code Content or URL",
-                 value="",
-             )
-             with gr.Accordion(label="QR Code Image (Optional)", open=False):
-                 qr_code_image = gr.Image(
-                     label="QR Code Image (Optional). Leave blank to automatically generate QR code",
-                     type="pil",
-                 )
-
-             prompt = gr.Textbox(
-                 label="Prompt",
-                 info="Prompt that guides the generation towards",
-             )
-             negative_prompt = gr.Textbox(
-                 label="Negative Prompt",
-                 value="ugly, disfigured, low quality, blurry, nsfw",
-             )
-             use_qr_code_as_init_image = gr.Checkbox(label="Use QR code as init image", value=True, interactive=False, info="Whether init image should be QR code. Unclick to pass init image or generate init image with Stable Diffusion 2.1")
-
-             with gr.Accordion(label="Init Images (Optional)", open=False, visible=False) as init_image_acc:
-                 init_image = gr.Image(label="Init Image (Optional). Leave blank to generate image with SD 2.1", type="pil")
-
-             with gr.Accordion(
-                 label="Params: The generated QR Code functionality is largely influenced by the parameters detailed below",
-                 open=True,
-             ):
-                 controlnet_conditioning_scale = gr.Slider(
-                     minimum=0.0,
-                     maximum=5.0,
-                     step=0.01,
-                     value=1.1,
-                     label="Controlnet Conditioning Scale",
-                 )
-                 strength = gr.Slider(
-                     minimum=0.0, maximum=1.0, step=0.01, value=0.9, label="Strength"
-                 )
-                 guidance_scale = gr.Slider(
-                     minimum=0.0,
-                     maximum=50.0,
-                     step=0.25,
-                     value=7.5,
-                     label="Guidance Scale",
-                 )
-                 sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="DPM++ Karras SDE")
-                 seed = gr.Slider(
-                     minimum=-1,
-                     maximum=9999999999,
-                     step=1,
-                     value=2313123,
-                     label="Seed",
-                     randomize=True,
-                 )
-             with gr.Row():
-                 run_btn = gr.Button("Run")
-         with gr.Column():
-             result_image = gr.Image(label="Result Image")
-     run_btn.click(
-         inference,
-         inputs=[
-             qr_code_content,
-             prompt,
-             negative_prompt,
-             guidance_scale,
-             controlnet_conditioning_scale,
-             strength,
-             seed,
-             init_image,
-             qr_code_image,
-             use_qr_code_as_init_image,
-             sampler,
-         ],
-         outputs=[result_image],
-     )
-
-     gr.Examples(
-         examples=[
-             [
-                 "https://huggingface.co/",
-                 "A sky view of a colorful lakes and rivers flowing through the desert",
-                 "ugly, disfigured, low quality, blurry, nsfw",
-                 7.5,
-                 1.3,
-                 0.9,
-                 5392011833,
-                 None,
-                 None,
-                 True,
-                 "DPM++ Karras SDE",
-             ],
-             [
-                 "https://huggingface.co/",
-                 "Bright sunshine coming through the cracks of a wet, cave wall of big rocks",
-                 "ugly, disfigured, low quality, blurry, nsfw",
-                 7.5,
-                 1.11,
-                 0.9,
-                 2523992465,
-                 None,
-                 None,
-                 True,
-                 "DPM++ Karras SDE",
-             ],
-             [
-                 "https://huggingface.co/",
-                 "Sky view of highly aesthetic, ancient greek thermal baths in beautiful nature",
-                 "ugly, disfigured, low quality, blurry, nsfw",
-                 7.5,
-                 1.5,
-                 0.9,
-                 2523992465,
-                 None,
-                 None,
-                 True,
-                 "DPM++ Karras SDE",
-             ],
-         ],
-         fn=inference,
-         inputs=[
-             qr_code_content,
-             prompt,
-             negative_prompt,
-             guidance_scale,
-             controlnet_conditioning_scale,
-             strength,
-             seed,
-             init_image,
-             qr_code_image,
-             use_qr_code_as_init_image,
-             sampler,
-         ],
-         outputs=[result_image],
-         cache_examples=True,
-     )
-
- blocks.queue(concurrency_count=1, max_size=20)
- blocks.launch(share=bool(os.environ.get("SHARE", False)))
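The `resize_for_condition_image` helper above scales the image's short side up to `resolution` and then snaps both sides to multiples of 64, the granularity the diffusion pipeline expects. A standalone sketch of just that arithmetic:

```python
# Sketch: the dimension math from resize_for_condition_image, inlined.
def condition_size(w: int, h: int, resolution: int = 768) -> tuple[int, int]:
    k = resolution / min(w, h)        # scale so the short side hits resolution
    w, h = w * k, h * k
    # round each side to the nearest multiple of 64
    return int(round(w / 64.0)) * 64, int(round(h / 64.0)) * 64

print(condition_size(600, 400))  # (1152, 768): 400 -> 768, 600 -> 1152
```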
 
spaces/Andres99/Tune-A-Video-Training-UI/app_inference.py DELETED
@@ -1,170 +0,0 @@
- #!/usr/bin/env python
-
- from __future__ import annotations
-
- import enum
-
- import gradio as gr
- from huggingface_hub import HfApi
-
- from constants import MODEL_LIBRARY_ORG_NAME, UploadTarget
- from inference import InferencePipeline
- from utils import find_exp_dirs
-
-
- class ModelSource(enum.Enum):
-     HUB_LIB = UploadTarget.MODEL_LIBRARY.value
-     LOCAL = 'Local'
-
-
- class InferenceUtil:
-     def __init__(self, hf_token: str | None):
-         self.hf_token = hf_token
-
-     def load_hub_model_list(self) -> dict:
-         api = HfApi(token=self.hf_token)
-         choices = [
-             info.modelId
-             for info in api.list_models(author=MODEL_LIBRARY_ORG_NAME)
-         ]
-         return gr.update(choices=choices,
-                          value=choices[0] if choices else None)
-
-     @staticmethod
-     def load_local_model_list() -> dict:
-         choices = find_exp_dirs()
-         return gr.update(choices=choices,
-                          value=choices[0] if choices else None)
-
-     def reload_model_list(self, model_source: str) -> dict:
-         if model_source == ModelSource.HUB_LIB.value:
-             return self.load_hub_model_list()
-         elif model_source == ModelSource.LOCAL.value:
-             return self.load_local_model_list()
-         else:
-             raise ValueError
-
-     def load_model_info(self, model_id: str) -> tuple[str, str]:
-         try:
-             card = InferencePipeline.get_model_card(model_id, self.hf_token)
-         except Exception:
-             return '', ''
-         base_model = getattr(card.data, 'base_model', '')
-         training_prompt = getattr(card.data, 'training_prompt', '')
-         return base_model, training_prompt
-
-     def reload_model_list_and_update_model_info(
-             self, model_source: str) -> tuple[dict, str, str]:
-         model_list_update = self.reload_model_list(model_source)
-         model_list = model_list_update['choices']
-         model_info = self.load_model_info(model_list[0] if model_list else '')
-         return model_list_update, *model_info
-
-
- def create_inference_demo(pipe: InferencePipeline,
-                           hf_token: str | None = None) -> gr.Blocks:
-     app = InferenceUtil(hf_token)
-
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column():
-                 with gr.Box():
-                     model_source = gr.Radio(
-                         label='Model Source',
-                         choices=[_.value for _ in ModelSource],
-                         value=ModelSource.HUB_LIB.value)
-                     reload_button = gr.Button('Reload Model List')
-                     model_id = gr.Dropdown(label='Model ID',
-                                            choices=None,
-                                            value=None)
-                     with gr.Accordion(
-                             label=
-                             'Model info (Base model and prompt used for training)',
-                             open=False):
-                         with gr.Row():
-                             base_model_used_for_training = gr.Text(
-                                 label='Base model', interactive=False)
-                             prompt_used_for_training = gr.Text(
-                                 label='Training prompt', interactive=False)
-                 prompt = gr.Textbox(
-                     label='Prompt',
-                     max_lines=1,
-                     placeholder='Example: "A panda is surfing"')
-                 video_length = gr.Slider(label='Video length',
-                                          minimum=4,
-                                          maximum=12,
-                                          step=1,
-                                          value=8)
-                 fps = gr.Slider(label='FPS',
-                                 minimum=1,
-                                 maximum=12,
-                                 step=1,
-                                 value=1)
-                 seed = gr.Slider(label='Seed',
-                                  minimum=0,
-                                  maximum=100000,
-                                  step=1,
-                                  value=0)
-                 with gr.Accordion('Other Parameters', open=False):
-                     num_steps = gr.Slider(label='Number of Steps',
-                                           minimum=0,
-                                           maximum=100,
-                                           step=1,
-                                           value=50)
-                     guidance_scale = gr.Slider(label='CFG Scale',
-                                                minimum=0,
-                                                maximum=50,
-                                                step=0.1,
-                                                value=7.5)
-
-                 run_button = gr.Button('Generate')
-
-                 gr.Markdown('''
-                     - After training, you can press the "Reload Model List" button to load your trained model names.
-                     - It takes a few minutes to download the model the first time.
-                     - Expected time to generate an 8-frame video: 70 seconds with T4, 24 seconds with A10G (10 seconds with A100).
-                     ''')
-             with gr.Column():
-                 result = gr.Video(label='Result')
-
-         model_source.change(fn=app.reload_model_list_and_update_model_info,
-                             inputs=model_source,
-                             outputs=[
-                                 model_id,
-                                 base_model_used_for_training,
-                                 prompt_used_for_training,
-                             ])
-         reload_button.click(fn=app.reload_model_list_and_update_model_info,
-                             inputs=model_source,
-                             outputs=[
-                                 model_id,
-                                 base_model_used_for_training,
-                                 prompt_used_for_training,
-                             ])
-         model_id.change(fn=app.load_model_info,
-                         inputs=model_id,
-                         outputs=[
-                             base_model_used_for_training,
-                             prompt_used_for_training,
-                         ])
-         inputs = [
-             model_id,
-             prompt,
-             video_length,
-             fps,
-             seed,
-             num_steps,
-             guidance_scale,
-         ]
-         prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
-         run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
-     return demo
-
-
- if __name__ == '__main__':
-     import os
-
-     hf_token = os.getenv('HF_TOKEN')
-     pipe = InferencePipeline(hf_token)
-     demo = create_inference_demo(pipe, hf_token)
-     demo.queue(max_size=10).launch(share=False)
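`load_hub_model_list` above is a thin wrapper over `HfApi.list_models`. A minimal standalone sketch of that query follows; the organization name is an assumption, since `MODEL_LIBRARY_ORG_NAME` is defined in a `constants` module not shown here, and `info.modelId` mirrors the attribute the file itself uses:

```python
# Sketch: the hub query behind load_hub_model_list(), runnable on its own.
from huggingface_hub import HfApi

api = HfApi()  # anonymous access is enough for public models

# "Tune-A-Video-library" is a placeholder for the organization this UI
# uploads trained models to.
model_ids = [info.modelId
             for info in api.list_models(author='Tune-A-Video-library')]
print(model_ids[:5])
```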
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py DELETED
@@ -1,1058 +0,0 @@
1
- import html
2
- import inspect
3
- import re
4
- import urllib.parse as ul
5
- from typing import Any, Callable, Dict, List, Optional, Union
6
-
7
- import numpy as np
8
- import PIL
9
- import torch
10
- import torch.nn.functional as F
11
- from transformers import CLIPImageProcessor, T5EncoderModel, T5Tokenizer
12
-
13
- from ...loaders import LoraLoaderMixin
14
- from ...models import UNet2DConditionModel
15
- from ...schedulers import DDPMScheduler
16
- from ...utils import (
17
- BACKENDS_MAPPING,
18
- PIL_INTERPOLATION,
19
- is_accelerate_available,
20
- is_accelerate_version,
21
- is_bs4_available,
22
- is_ftfy_available,
23
- logging,
24
- randn_tensor,
25
- replace_example_docstring,
26
- )
27
- from ..pipeline_utils import DiffusionPipeline
28
- from . import IFPipelineOutput
29
- from .safety_checker import IFSafetyChecker
30
- from .watermark import IFWatermarker
31
-
32
-
33
- if is_bs4_available():
34
- from bs4 import BeautifulSoup
35
-
36
- if is_ftfy_available():
37
- import ftfy
38
-
39
-
40
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
41
-
42
-
43
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.resize
44
- def resize(images: PIL.Image.Image, img_size: int) -> PIL.Image.Image:
45
- w, h = images.size
46
-
47
- coef = w / h
48
-
49
- w, h = img_size, img_size
50
-
51
- if coef >= 1:
52
- w = int(round(img_size / 8 * coef) * 8)
53
- else:
54
- h = int(round(img_size / 8 / coef) * 8)
55
-
56
- images = images.resize((w, h), resample=PIL_INTERPOLATION["bicubic"], reducing_gap=None)
57
-
58
- return images
59
-
60
-
61
- EXAMPLE_DOC_STRING = """
62
- Examples:
63
- ```py
64
- >>> from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline, DiffusionPipeline
65
- >>> from diffusers.utils import pt_to_pil
66
- >>> import torch
67
- >>> from PIL import Image
68
- >>> import requests
69
- >>> from io import BytesIO
70
-
71
- >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
72
- >>> response = requests.get(url)
73
- >>> original_image = Image.open(BytesIO(response.content)).convert("RGB")
74
- >>> original_image = original_image.resize((768, 512))
75
-
76
- >>> pipe = IFImg2ImgPipeline.from_pretrained(
77
- ... "DeepFloyd/IF-I-XL-v1.0",
78
- ... variant="fp16",
79
- ... torch_dtype=torch.float16,
80
- ... )
81
- >>> pipe.enable_model_cpu_offload()
82
-
83
- >>> prompt = "A fantasy landscape in style minecraft"
84
- >>> prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
85
-
86
- >>> image = pipe(
87
- ... image=original_image,
88
- ... prompt_embeds=prompt_embeds,
89
- ... negative_prompt_embeds=negative_embeds,
90
- ... output_type="pt",
91
- ... ).images
92
-
93
- >>> # save intermediate image
94
- >>> pil_image = pt_to_pil(image)
95
- >>> pil_image[0].save("./if_stage_I.png")
96
-
97
- >>> super_res_1_pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
98
- ... "DeepFloyd/IF-II-L-v1.0",
99
- ... text_encoder=None,
100
- ... variant="fp16",
101
- ... torch_dtype=torch.float16,
102
- ... )
103
- >>> super_res_1_pipe.enable_model_cpu_offload()
104
-
105
- >>> image = super_res_1_pipe(
106
- ... image=image,
107
- ... original_image=original_image,
108
- ... prompt_embeds=prompt_embeds,
109
- ... negative_prompt_embeds=negative_embeds,
110
- ... ).images
111
- >>> image[0].save("./if_stage_II.png")
112
- ```
113
- """
114
-
115
-
116
- class IFImg2ImgSuperResolutionPipeline(DiffusionPipeline, LoraLoaderMixin):
117
- tokenizer: T5Tokenizer
118
- text_encoder: T5EncoderModel
119
-
120
- unet: UNet2DConditionModel
121
- scheduler: DDPMScheduler
122
- image_noising_scheduler: DDPMScheduler
123
-
124
- feature_extractor: Optional[CLIPImageProcessor]
125
- safety_checker: Optional[IFSafetyChecker]
126
-
127
- watermarker: Optional[IFWatermarker]
128
-
129
- bad_punct_regex = re.compile(
130
- r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}"
131
- ) # noqa
132
-
133
- _optional_components = ["tokenizer", "text_encoder", "safety_checker", "feature_extractor"]
134
-
135
- def __init__(
136
- self,
137
- tokenizer: T5Tokenizer,
138
- text_encoder: T5EncoderModel,
139
- unet: UNet2DConditionModel,
140
- scheduler: DDPMScheduler,
141
- image_noising_scheduler: DDPMScheduler,
142
- safety_checker: Optional[IFSafetyChecker],
143
- feature_extractor: Optional[CLIPImageProcessor],
144
- watermarker: Optional[IFWatermarker],
145
- requires_safety_checker: bool = True,
146
- ):
147
- super().__init__()
148
-
149
- if safety_checker is None and requires_safety_checker:
150
- logger.warning(
151
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
152
- " that you abide to the conditions of the IF license and do not expose unfiltered"
153
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
154
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
155
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
156
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
157
- )
158
-
159
- if safety_checker is not None and feature_extractor is None:
160
- raise ValueError(
161
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
162
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
163
- )
164
-
165
- if unet.config.in_channels != 6:
166
- logger.warn(
167
- "It seems like you have loaded a checkpoint that shall not be used for super resolution from {unet.config._name_or_path} as it accepts {unet.config.in_channels} input channels instead of 6. Please make sure to pass a super resolution checkpoint as the `'unet'`: IFSuperResolutionPipeline.from_pretrained(unet=super_resolution_unet, ...)`."
168
- )
169
-
170
- self.register_modules(
171
- tokenizer=tokenizer,
172
- text_encoder=text_encoder,
173
- unet=unet,
174
- scheduler=scheduler,
175
- image_noising_scheduler=image_noising_scheduler,
176
- safety_checker=safety_checker,
177
- feature_extractor=feature_extractor,
178
- watermarker=watermarker,
179
- )
180
- self.register_to_config(requires_safety_checker=requires_safety_checker)
181
-
182
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.enable_model_cpu_offload
183
- def enable_model_cpu_offload(self, gpu_id=0):
184
- r"""
185
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
186
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
187
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
188
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
189
- """
190
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
191
- from accelerate import cpu_offload_with_hook
192
- else:
193
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
194
-
195
- device = torch.device(f"cuda:{gpu_id}")
196
-
197
- if self.device.type != "cpu":
198
- self.to("cpu", silence_dtype_warnings=True)
199
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
200
-
201
- hook = None
202
-
203
- if self.text_encoder is not None:
204
- _, hook = cpu_offload_with_hook(self.text_encoder, device, prev_module_hook=hook)
205
-
206
- # Accelerate will move the next model to the device _before_ calling the offload hook of the
207
- # previous model. This will cause both models to be present on the device at the same time.
208
- # IF uses T5 for its text encoder which is really large. We can manually call the offload
209
- # hook for the text encoder to ensure it's moved to the cpu before the unet is moved to
210
- # the GPU.
211
- self.text_encoder_offload_hook = hook
212
-
213
- _, hook = cpu_offload_with_hook(self.unet, device, prev_module_hook=hook)
214
-
215
- # if the safety checker isn't called, `unet_offload_hook` will have to be called to manually offload the unet
216
- self.unet_offload_hook = hook
217
-
218
- if self.safety_checker is not None:
219
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
220
-
221
- # We'll offload the last model manually.
222
- self.final_offload_hook = hook
223
-
224
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.remove_all_hooks
225
- def remove_all_hooks(self):
226
- if is_accelerate_available():
227
- from accelerate.hooks import remove_hook_from_module
228
- else:
229
- raise ImportError("Please install accelerate via `pip install accelerate`")
230
-
231
- for model in [self.text_encoder, self.unet, self.safety_checker]:
232
- if model is not None:
233
- remove_hook_from_module(model, recurse=True)
234
-
235
- self.unet_offload_hook = None
236
- self.text_encoder_offload_hook = None
237
- self.final_offload_hook = None
238
-
239
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
240
- def _text_preprocessing(self, text, clean_caption=False):
241
- if clean_caption and not is_bs4_available():
242
- logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
243
- logger.warn("Setting `clean_caption` to False...")
244
- clean_caption = False
245
-
246
- if clean_caption and not is_ftfy_available():
247
- logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
248
- logger.warn("Setting `clean_caption` to False...")
249
- clean_caption = False
250
-
251
- if not isinstance(text, (tuple, list)):
252
- text = [text]
253
-
254
- def process(text: str):
255
- if clean_caption:
256
- text = self._clean_caption(text)
257
- text = self._clean_caption(text)
258
- else:
259
- text = text.lower().strip()
260
- return text
261
-
262
- return [process(t) for t in text]
263
-
264
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
265
- def _clean_caption(self, caption):
266
- caption = str(caption)
267
- caption = ul.unquote_plus(caption)
268
- caption = caption.strip().lower()
269
- caption = re.sub("<person>", "person", caption)
270
- # urls:
271
- caption = re.sub(
272
- r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
273
- "",
274
- caption,
275
- ) # regex for urls
276
- caption = re.sub(
277
- r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
278
- "",
279
- caption,
280
- ) # regex for urls
281
- # html:
282
- caption = BeautifulSoup(caption, features="html.parser").text
283
-
284
- # @<nickname>
285
- caption = re.sub(r"@[\w\d]+\b", "", caption)
286
-
287
- # 31C0—31EF CJK Strokes
288
- # 31F0—31FF Katakana Phonetic Extensions
289
- # 3200—32FF Enclosed CJK Letters and Months
290
- # 3300—33FF CJK Compatibility
291
- # 3400—4DBF CJK Unified Ideographs Extension A
292
- # 4DC0—4DFF Yijing Hexagram Symbols
293
- # 4E00—9FFF CJK Unified Ideographs
294
- caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
295
- caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
296
- caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
297
- caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
298
- caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
299
- caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
300
- caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
301
- #######################################################
302
-
303
- # все виды тире / all types of dash --> "-"
304
- caption = re.sub(
305
- r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
306
- "-",
307
- caption,
308
- )
309
-
310
- # кавычки к одному стандарту
311
- caption = re.sub(r"[`´«»“”¨]", '"', caption)
312
- caption = re.sub(r"[‘’]", "'", caption)
313
-
314
- # &quot;
315
- caption = re.sub(r"&quot;?", "", caption)
316
- # &amp
317
- caption = re.sub(r"&amp", "", caption)
318
-
319
- # ip adresses:
320
- caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
321
-
322
- # article ids:
323
- caption = re.sub(r"\d:\d\d\s+$", "", caption)
324
-
325
- # \n
326
- caption = re.sub(r"\\n", " ", caption)
327
-
328
- # "#123"
329
- caption = re.sub(r"#\d{1,3}\b", "", caption)
330
- # "#12345.."
331
- caption = re.sub(r"#\d{5,}\b", "", caption)
332
- # "123456.."
333
- caption = re.sub(r"\b\d{6,}\b", "", caption)
334
- # filenames:
335
- caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
336
-
337
- #
338
- caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
339
- caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
340
-
341
- caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
342
- caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
343
-
344
- # this-is-my-cute-cat / this_is_my_cute_cat
345
- regex2 = re.compile(r"(?:\-|\_)")
346
- if len(re.findall(regex2, caption)) > 3:
347
- caption = re.sub(regex2, " ", caption)
348
-
349
- caption = ftfy.fix_text(caption)
350
- caption = html.unescape(html.unescape(caption))
351
-
352
- caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
353
- caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
354
- caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
355
-
356
- caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
357
- caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
358
- caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
359
- caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
360
- caption = re.sub(r"\bpage\s+\d+\b", "", caption)
361
-
362
- caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
363
-
364
- caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
365
-
366
- caption = re.sub(r"\b\s+\:\s+", r": ", caption)
367
- caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
368
- caption = re.sub(r"\s+", " ", caption)
369
-
370
- caption.strip()
371
-
372
- caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
373
- caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
374
- caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
375
- caption = re.sub(r"^\.\S+$", "", caption)
376
-
377
- return caption.strip()
378
-
379
- @torch.no_grad()
380
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.encode_prompt
381
- def encode_prompt(
382
- self,
383
- prompt,
384
- do_classifier_free_guidance=True,
385
- num_images_per_prompt=1,
386
- device=None,
387
- negative_prompt=None,
388
- prompt_embeds: Optional[torch.FloatTensor] = None,
389
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
390
- clean_caption: bool = False,
391
- ):
392
- r"""
393
- Encodes the prompt into text encoder hidden states.
394
-
395
- Args:
396
- prompt (`str` or `List[str]`, *optional*):
397
- prompt to be encoded
398
- device: (`torch.device`, *optional*):
399
- torch device to place the resulting embeddings on
400
- num_images_per_prompt (`int`, *optional*, defaults to 1):
401
- number of images that should be generated per prompt
402
- do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
403
- whether to use classifier free guidance or not
404
- negative_prompt (`str` or `List[str]`, *optional*):
405
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
406
- `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
407
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
408
- prompt_embeds (`torch.FloatTensor`, *optional*):
409
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
410
- provided, text embeddings will be generated from `prompt` input argument.
411
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
412
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
413
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
414
- argument.
415
- """
416
- if prompt is not None and negative_prompt is not None:
417
- if type(prompt) is not type(negative_prompt):
418
- raise TypeError(
419
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
420
- f" {type(prompt)}."
421
- )
422
-
423
- if device is None:
424
- device = self._execution_device
425
-
426
- if prompt is not None and isinstance(prompt, str):
427
- batch_size = 1
428
- elif prompt is not None and isinstance(prompt, list):
429
- batch_size = len(prompt)
430
- else:
431
- batch_size = prompt_embeds.shape[0]
432
-
433
- # while T5 can handle much longer input sequences than 77, the text encoder was trained with a max length of 77 for IF
434
- max_length = 77
435
-
436
- if prompt_embeds is None:
437
- prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
438
- text_inputs = self.tokenizer(
439
- prompt,
440
- padding="max_length",
441
- max_length=max_length,
442
- truncation=True,
443
- add_special_tokens=True,
444
- return_tensors="pt",
445
- )
446
- text_input_ids = text_inputs.input_ids
447
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
448
-
449
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
450
- text_input_ids, untruncated_ids
451
- ):
452
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
453
- logger.warning(
454
- "The following part of your input was truncated because CLIP can only handle sequences up to"
455
- f" {max_length} tokens: {removed_text}"
456
- )
457
-
458
- attention_mask = text_inputs.attention_mask.to(device)
459
-
460
- prompt_embeds = self.text_encoder(
461
- text_input_ids.to(device),
462
- attention_mask=attention_mask,
463
- )
464
- prompt_embeds = prompt_embeds[0]
465
-
466
- if self.text_encoder is not None:
467
- dtype = self.text_encoder.dtype
468
- elif self.unet is not None:
469
- dtype = self.unet.dtype
470
- else:
471
- dtype = None
472
-
473
- prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
474
-
475
- bs_embed, seq_len, _ = prompt_embeds.shape
476
- # duplicate text embeddings for each generation per prompt, using mps friendly method
477
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
478
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
479
-
480
- # get unconditional embeddings for classifier free guidance
481
- if do_classifier_free_guidance and negative_prompt_embeds is None:
482
- uncond_tokens: List[str]
483
- if negative_prompt is None:
484
- uncond_tokens = [""] * batch_size
485
- elif isinstance(negative_prompt, str):
486
- uncond_tokens = [negative_prompt]
487
- elif batch_size != len(negative_prompt):
488
- raise ValueError(
489
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
490
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
491
- " the batch size of `prompt`."
492
- )
493
- else:
494
- uncond_tokens = negative_prompt
495
-
496
- uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
497
- max_length = prompt_embeds.shape[1]
498
- uncond_input = self.tokenizer(
499
- uncond_tokens,
500
- padding="max_length",
501
- max_length=max_length,
502
- truncation=True,
503
- return_attention_mask=True,
504
- add_special_tokens=True,
505
- return_tensors="pt",
506
- )
507
- attention_mask = uncond_input.attention_mask.to(device)
508
-
509
- negative_prompt_embeds = self.text_encoder(
510
- uncond_input.input_ids.to(device),
511
- attention_mask=attention_mask,
512
- )
513
- negative_prompt_embeds = negative_prompt_embeds[0]
514
-
515
- if do_classifier_free_guidance:
516
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
517
- seq_len = negative_prompt_embeds.shape[1]
518
-
519
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
520
-
521
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
522
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
523
-
524
- # For classifier free guidance, we need to do two forward passes.
525
- # Here we concatenate the unconditional and text embeddings into a single batch
526
- # to avoid doing two forward passes
527
- else:
528
- negative_prompt_embeds = None
529
-
530
- return prompt_embeds, negative_prompt_embeds
531
-
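A quick shape check for the repeat/view duplication used in encode_prompt above (pure tensor arithmetic; the 4096 hidden size is an assumption matching T5-XXL, not something read from this file):

import torch

bs, seq_len, dim, n_img = 2, 77, 4096, 3  # batch, tokens, hidden size, images per prompt
emb = torch.randn(bs, seq_len, dim)
emb = emb.repeat(1, n_img, 1).view(bs * n_img, seq_len, -1)  # mps-friendly repeat_interleave
print(emb.shape)  # torch.Size([6, 77, 4096])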
532
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.run_safety_checker
533
- def run_safety_checker(self, image, device, dtype):
534
- if self.safety_checker is not None:
535
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
536
- image, nsfw_detected, watermark_detected = self.safety_checker(
537
- images=image,
538
- clip_input=safety_checker_input.pixel_values.to(dtype=dtype),
539
- )
540
- else:
541
- nsfw_detected = None
542
- watermark_detected = None
543
-
544
- if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
545
- self.unet_offload_hook.offload()
546
-
547
- return image, nsfw_detected, watermark_detected
548
-
549
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline.prepare_extra_step_kwargs
550
- def prepare_extra_step_kwargs(self, generator, eta):
551
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
552
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
553
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
554
- # and should be between [0, 1]
555
-
556
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
557
- extra_step_kwargs = {}
558
- if accepts_eta:
559
- extra_step_kwargs["eta"] = eta
560
-
561
- # check if the scheduler accepts generator
562
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
563
- if accepts_generator:
564
- extra_step_kwargs["generator"] = generator
565
- return extra_step_kwargs
566
-
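The `inspect.signature` probe in prepare_extra_step_kwargs, shown in isolation (generic Python, nothing pipeline-specific):

import inspect

def step(sample, t, eta=0.0, generator=None):
    return sample

accepts_eta = "eta" in set(inspect.signature(step).parameters.keys())
print(accepts_eta)  # True, so an {"eta": ...} kwarg would be forwarded to this scheduler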
567
- def check_inputs(
568
- self,
569
- prompt,
570
- image,
571
- original_image,
572
- batch_size,
573
- callback_steps,
574
- negative_prompt=None,
575
- prompt_embeds=None,
576
- negative_prompt_embeds=None,
577
- ):
578
- if (callback_steps is None) or (
579
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
580
- ):
581
- raise ValueError(
582
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
583
- f" {type(callback_steps)}."
584
- )
585
-
586
- if prompt is not None and prompt_embeds is not None:
587
- raise ValueError(
588
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
589
- " only forward one of the two."
590
- )
591
- elif prompt is None and prompt_embeds is None:
592
- raise ValueError(
593
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
594
- )
595
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
596
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
597
-
598
- if negative_prompt is not None and negative_prompt_embeds is not None:
599
- raise ValueError(
600
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
601
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
602
- )
603
-
604
- if prompt_embeds is not None and negative_prompt_embeds is not None:
605
- if prompt_embeds.shape != negative_prompt_embeds.shape:
606
- raise ValueError(
607
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
608
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
609
- f" {negative_prompt_embeds.shape}."
610
- )
611
-
612
- # image
613
-
614
- if isinstance(image, list):
615
- check_image_type = image[0]
616
- else:
617
- check_image_type = image
618
-
619
- if (
620
- not isinstance(check_image_type, torch.Tensor)
621
- and not isinstance(check_image_type, PIL.Image.Image)
622
- and not isinstance(check_image_type, np.ndarray)
623
- ):
624
- raise ValueError(
625
- "`image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
626
- f" {type(check_image_type)}"
627
- )
628
-
629
- if isinstance(image, list):
630
- image_batch_size = len(image)
631
- elif isinstance(image, torch.Tensor):
632
- image_batch_size = image.shape[0]
633
- elif isinstance(image, PIL.Image.Image):
634
- image_batch_size = 1
635
- elif isinstance(image, np.ndarray):
636
- image_batch_size = image.shape[0]
637
- else:
638
- assert False
639
-
640
- if batch_size != image_batch_size:
641
- raise ValueError(f"image batch size: {image_batch_size} must be same as prompt batch size {batch_size}")
642
-
643
- # original_image
644
-
645
- if isinstance(original_image, list):
646
- check_image_type = original_image[0]
647
- else:
648
- check_image_type = original_image
649
-
650
- if (
651
- not isinstance(check_image_type, torch.Tensor)
652
- and not isinstance(check_image_type, PIL.Image.Image)
653
- and not isinstance(check_image_type, np.ndarray)
654
- ):
655
- raise ValueError(
656
- "`original_image` has to be of type `torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, or List[...] but is"
657
- f" {type(check_image_type)}"
658
- )
659
-
660
- if isinstance(original_image, list):
661
- image_batch_size = len(original_image)
662
- elif isinstance(original_image, torch.Tensor):
663
- image_batch_size = original_image.shape[0]
664
- elif isinstance(original_image, PIL.Image.Image):
665
- image_batch_size = 1
666
- elif isinstance(original_image, np.ndarray):
667
- image_batch_size = original_image.shape[0]
668
- else:
669
- assert False
670
-
671
- if batch_size != image_batch_size:
672
- raise ValueError(
673
- f"original_image batch size: {image_batch_size} must be same as prompt batch size {batch_size}"
674
- )
675
-
676
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image
677
- def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor:
678
- if not isinstance(image, list):
679
- image = [image]
680
-
681
- def numpy_to_pt(images):
682
- if images.ndim == 3:
683
- images = images[..., None]
684
-
685
- images = torch.from_numpy(images.transpose(0, 3, 1, 2))
686
- return images
687
-
688
- if isinstance(image[0], PIL.Image.Image):
689
- new_image = []
690
-
691
- for image_ in image:
692
- image_ = image_.convert("RGB")
693
- image_ = resize(image_, self.unet.sample_size)
694
- image_ = np.array(image_)
695
- image_ = image_.astype(np.float32)
696
- image_ = image_ / 127.5 - 1
697
- new_image.append(image_)
698
-
699
- image = new_image
700
-
701
- image = np.stack(image, axis=0) # to np
702
- image = numpy_to_pt(image) # to pt
703
-
704
- elif isinstance(image[0], np.ndarray):
705
- image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
706
- image = numpy_to_pt(image)
707
-
708
- elif isinstance(image[0], torch.Tensor):
709
- image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
710
-
711
- return image
712
-
713
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_superresolution.IFSuperResolutionPipeline.preprocess_image
714
- def preprocess_image(self, image: PIL.Image.Image, num_images_per_prompt, device) -> torch.Tensor:
715
- if not isinstance(image, torch.Tensor) and not isinstance(image, list):
716
- image = [image]
717
-
718
- if isinstance(image[0], PIL.Image.Image):
719
- image = [np.array(i).astype(np.float32) / 127.5 - 1.0 for i in image]
720
-
721
- image = np.stack(image, axis=0) # to np
722
- image = torch.from_numpy(image.transpose(0, 3, 1, 2))
723
- elif isinstance(image[0], np.ndarray):
724
- image = np.stack(image, axis=0) # to np
725
- if image.ndim == 5:
726
- image = image[0]
727
-
728
- image = torch.from_numpy(image.transpose(0, 3, 1, 2))
729
- elif isinstance(image, list) and isinstance(image[0], torch.Tensor):
730
- dims = image[0].ndim
731
-
732
- if dims == 3:
733
- image = torch.stack(image, dim=0)
734
- elif dims == 4:
735
- image = torch.concat(image, dim=0)
736
- else:
737
- raise ValueError(f"Image must have 3 or 4 dimensions, instead got {dims}")
738
-
739
- image = image.to(device=device, dtype=self.unet.dtype)
740
-
741
- image = image.repeat_interleave(num_images_per_prompt, dim=0)
742
-
743
- return image
744
-
745
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.get_timesteps
746
- def get_timesteps(self, num_inference_steps, strength):
747
- # get the original timestep using init_timestep
748
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
749
-
750
- t_start = max(num_inference_steps - init_timestep, 0)
751
- timesteps = self.scheduler.timesteps[t_start:]
752
-
753
- return timesteps, num_inference_steps - t_start
754
-
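Worked numbers for the strength-to-timesteps arithmetic in get_timesteps above (plain arithmetic, no scheduler required):

num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
print(num_inference_steps - t_start)  # 40: the last 80% of the schedule is actually run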
755
- # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.prepare_intermediate_images
756
- def prepare_intermediate_images(
757
- self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None
758
- ):
759
- _, channels, height, width = image.shape
760
-
761
- batch_size = batch_size * num_images_per_prompt
762
-
763
- shape = (batch_size, channels, height, width)
764
-
765
- if isinstance(generator, list) and len(generator) != batch_size:
766
- raise ValueError(
767
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
768
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
769
- )
770
-
771
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
772
-
773
- image = image.repeat_interleave(num_images_per_prompt, dim=0)
774
- image = self.scheduler.add_noise(image, noise, timestep)
775
-
776
- return image
777
-
778
- @torch.no_grad()
779
- @replace_example_docstring(EXAMPLE_DOC_STRING)
780
- def __call__(
781
- self,
782
- image: Union[PIL.Image.Image, np.ndarray, torch.FloatTensor],
783
- original_image: Union[
784
- PIL.Image.Image, torch.Tensor, np.ndarray, List[PIL.Image.Image], List[torch.Tensor], List[np.ndarray]
785
- ] = None,
786
- strength: float = 0.8,
787
- prompt: Union[str, List[str]] = None,
788
- num_inference_steps: int = 50,
789
- timesteps: List[int] = None,
790
- guidance_scale: float = 4.0,
791
- negative_prompt: Optional[Union[str, List[str]]] = None,
792
- num_images_per_prompt: Optional[int] = 1,
793
- eta: float = 0.0,
794
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
795
- prompt_embeds: Optional[torch.FloatTensor] = None,
796
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
797
- output_type: Optional[str] = "pil",
798
- return_dict: bool = True,
799
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
800
- callback_steps: int = 1,
801
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
802
- noise_level: int = 250,
803
- clean_caption: bool = True,
804
- ):
805
- """
806
- Function invoked when calling the pipeline for generation.
807
-
808
- Args:
809
- image (`torch.FloatTensor` or `PIL.Image.Image`):
810
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
811
- process.
812
- original_image (`torch.FloatTensor` or `PIL.Image.Image`):
813
- The original image that `image` was varied from.
814
- strength (`float`, *optional*, defaults to 0.8):
815
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
816
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
817
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
818
- be maximum and the denoising process will run for the full number of iterations specified in
819
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
820
- prompt (`str` or `List[str]`, *optional*):
821
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
822
- instead.
823
- num_inference_steps (`int`, *optional*, defaults to 50):
824
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
825
- expense of slower inference.
826
- timesteps (`List[int]`, *optional*):
827
- Custom timesteps to use for the denoising process. If not defined, equally spaced `num_inference_steps`
828
- timesteps are used. Must be in descending order.
829
- guidance_scale (`float`, *optional*, defaults to 4.0):
830
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
831
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
832
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
833
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
834
- usually at the expense of lower image quality.
835
- negative_prompt (`str` or `List[str]`, *optional*):
836
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
837
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
838
- less than `1`).
839
- num_images_per_prompt (`int`, *optional*, defaults to 1):
840
- The number of images to generate per prompt.
841
- eta (`float`, *optional*, defaults to 0.0):
842
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
843
- [`schedulers.DDIMScheduler`], will be ignored for others.
844
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
845
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
846
- to make generation deterministic.
847
- prompt_embeds (`torch.FloatTensor`, *optional*):
848
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
849
- provided, text embeddings will be generated from `prompt` input argument.
850
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
851
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
852
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
853
- argument.
854
- output_type (`str`, *optional*, defaults to `"pil"`):
855
- The output format of the generated image. Choose between
856
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
857
- return_dict (`bool`, *optional*, defaults to `True`):
858
- Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
859
- callback (`Callable`, *optional*):
860
- A function that will be called every `callback_steps` steps during inference. The function will be
861
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
862
- callback_steps (`int`, *optional*, defaults to 1):
863
- The frequency at which the `callback` function will be called. If not specified, the callback will be
864
- called at every step.
865
- cross_attention_kwargs (`dict`, *optional*):
866
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
867
- `self.processor` in
868
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
869
- noise_level (`int`, *optional*, defaults to 250):
870
- The amount of noise to add to the upscaled image. Must be in the range `[0, 1000)`
871
- clean_caption (`bool`, *optional*, defaults to `True`):
872
- Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
873
- be installed. If the dependencies are not installed, the embeddings will be created from the raw
874
- prompt.
875
-
876
- Examples:
877
-
878
- Returns:
879
- [`~pipelines.stable_diffusion.IFPipelineOutput`] or `tuple`:
880
- [`~pipelines.stable_diffusion.IFPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
881
- returning a tuple, the first element is a list with the generated images, and the second element is a list
882
- of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
883
- or watermarked content, according to the `safety_checker`.
884
- """
885
- # 1. Check inputs. Raise error if not correct
886
- if prompt is not None and isinstance(prompt, str):
887
- batch_size = 1
888
- elif prompt is not None and isinstance(prompt, list):
889
- batch_size = len(prompt)
890
- else:
891
- batch_size = prompt_embeds.shape[0]
892
-
893
- self.check_inputs(
894
- prompt,
895
- image,
896
- original_image,
897
- batch_size,
898
- callback_steps,
899
- negative_prompt,
900
- prompt_embeds,
901
- negative_prompt_embeds,
902
- )
903
-
904
- # 2. Define call parameters
905
-
906
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
907
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
908
- # corresponds to doing no classifier free guidance.
909
- do_classifier_free_guidance = guidance_scale > 1.0
910
-
911
- device = self._execution_device
912
-
913
- # 3. Encode input prompt
914
- prompt_embeds, negative_prompt_embeds = self.encode_prompt(
915
- prompt,
916
- do_classifier_free_guidance,
917
- num_images_per_prompt=num_images_per_prompt,
918
- device=device,
919
- negative_prompt=negative_prompt,
920
- prompt_embeds=prompt_embeds,
921
- negative_prompt_embeds=negative_prompt_embeds,
922
- clean_caption=clean_caption,
923
- )
924
-
925
- if do_classifier_free_guidance:
926
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
927
-
928
- dtype = prompt_embeds.dtype
929
-
930
- # 4. Prepare timesteps
931
- if timesteps is not None:
932
- self.scheduler.set_timesteps(timesteps=timesteps, device=device)
933
- timesteps = self.scheduler.timesteps
934
- num_inference_steps = len(timesteps)
935
- else:
936
- self.scheduler.set_timesteps(num_inference_steps, device=device)
937
- timesteps = self.scheduler.timesteps
938
-
939
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
940
-
941
- # 5. prepare original image
942
- original_image = self.preprocess_original_image(original_image)
943
- original_image = original_image.to(device=device, dtype=dtype)
944
-
945
- # 6. Prepare intermediate images
946
- noise_timestep = timesteps[0:1]
947
- noise_timestep = noise_timestep.repeat(batch_size * num_images_per_prompt)
948
-
949
- intermediate_images = self.prepare_intermediate_images(
950
- original_image,
951
- noise_timestep,
952
- batch_size,
953
- num_images_per_prompt,
954
- dtype,
955
- device,
956
- generator,
957
- )
958
-
959
- # 7. Prepare upscaled image and noise level
960
- _, _, height, width = original_image.shape
961
-
962
- image = self.preprocess_image(image, num_images_per_prompt, device)
963
-
964
- upscaled = F.interpolate(image, (height, width), mode="bilinear", align_corners=True)
965
-
966
- noise_level = torch.tensor([noise_level] * upscaled.shape[0], device=upscaled.device)
967
- noise = randn_tensor(upscaled.shape, generator=generator, device=upscaled.device, dtype=upscaled.dtype)
968
- upscaled = self.image_noising_scheduler.add_noise(upscaled, noise, timesteps=noise_level)
969
-
970
- if do_classifier_free_guidance:
971
- noise_level = torch.cat([noise_level] * 2)
972
-
973
- # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
974
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
975
-
976
- # HACK: see comment in `enable_model_cpu_offload`
977
- if hasattr(self, "text_encoder_offload_hook") and self.text_encoder_offload_hook is not None:
978
- self.text_encoder_offload_hook.offload()
979
-
980
- # 9. Denoising loop
981
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
982
- with self.progress_bar(total=num_inference_steps) as progress_bar:
983
- for i, t in enumerate(timesteps):
984
- model_input = torch.cat([intermediate_images, upscaled], dim=1)
985
-
986
- model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
987
- model_input = self.scheduler.scale_model_input(model_input, t)
988
-
989
- # predict the noise residual
990
- noise_pred = self.unet(
991
- model_input,
992
- t,
993
- encoder_hidden_states=prompt_embeds,
994
- class_labels=noise_level,
995
- cross_attention_kwargs=cross_attention_kwargs,
996
- return_dict=False,
997
- )[0]
998
-
999
- # perform guidance
1000
- if do_classifier_free_guidance:
1001
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1002
- noise_pred_uncond, _ = noise_pred_uncond.split(model_input.shape[1] // 2, dim=1)
1003
- noise_pred_text, predicted_variance = noise_pred_text.split(model_input.shape[1] // 2, dim=1)
1004
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1005
- noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
1006
-
1007
- if self.scheduler.config.variance_type not in ["learned", "learned_range"]:
1008
- noise_pred, _ = noise_pred.split(intermediate_images.shape[1], dim=1)
1009
-
1010
- # compute the previous noisy sample x_t -> x_t-1
1011
- intermediate_images = self.scheduler.step(
1012
- noise_pred, t, intermediate_images, **extra_step_kwargs, return_dict=False
1013
- )[0]
1014
-
1015
- # call the callback, if provided
1016
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1017
- progress_bar.update()
1018
- if callback is not None and i % callback_steps == 0:
1019
- callback(i, t, intermediate_images)
1020
-
1021
- image = intermediate_images
1022
-
1023
- if output_type == "pil":
1024
- # 10. Post-processing
1025
- image = (image / 2 + 0.5).clamp(0, 1)
1026
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1027
-
1028
- # 11. Run safety checker
1029
- image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
1030
-
1031
- # 12. Convert to PIL
1032
- image = self.numpy_to_pil(image)
1033
-
1034
- # 13. Apply watermark
1035
- if self.watermarker is not None:
1036
- self.watermarker.apply_watermark(image, self.unet.config.sample_size)
1037
- elif output_type == "pt":
1038
- nsfw_detected = None
1039
- watermark_detected = None
1040
-
1041
- if hasattr(self, "unet_offload_hook") and self.unet_offload_hook is not None:
1042
- self.unet_offload_hook.offload()
1043
- else:
1044
- # 10. Post-processing
1045
- image = (image / 2 + 0.5).clamp(0, 1)
1046
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1047
-
1048
- # 11. Run safety checker
1049
- image, nsfw_detected, watermark_detected = self.run_safety_checker(image, device, prompt_embeds.dtype)
1050
-
1051
- # Offload last model to CPU
1052
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1053
- self.final_offload_hook.offload()
1054
-
1055
- if not return_dict:
1056
- return (image, nsfw_detected, watermark_detected)
1057
-
1058
- return IFPipelineOutput(images=image, nsfw_detected=nsfw_detected, watermark_detected=watermark_detected)
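For context, a minimal sketch of how this deleted pipeline is typically driven as the second stage of DeepFloyd IF, following the diffusers documentation pattern (the checkpoint IDs are gated on the Hub and the input URL is a placeholder):

import torch
from diffusers import IFImg2ImgPipeline, IFImg2ImgSuperResolutionPipeline
from diffusers.utils import load_image

stage_1 = IFImg2ImgPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_2 = IFImg2ImgSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", text_encoder=None, variant="fp16", torch_dtype=torch.float16)

original_image = load_image("https://example.com/input.png")  # placeholder URL
prompt_embeds, negative_embeds = stage_1.encode_prompt("a fantasy landscape")

image = stage_1(image=original_image, prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_embeds, output_type="pt").images
image = stage_2(image=image, original_image=original_image,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_embeds).images[0]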
spaces/Andy1621/uniformer_image_detection/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py DELETED
@@ -1,2 +0,0 @@
1
- _base_ = './fovea_r50_fpn_4x4_1x_coco.py'
2
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
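These two lines are the entire config: everything else is inherited from the ResNet-50 base via `_base_`. A hypothetical variant along the same lines, swapping in a ResNeXt-101 backbone (a sketch, not a config that ships with the repo):

_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))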
spaces/Andy1621/uniformer_image_detection/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py DELETED
@@ -1,22 +0,0 @@
1
- _base_ = './gfl_r50_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 22])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
5
- # multi-scale training
6
- img_norm_cfg = dict(
7
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
8
- train_pipeline = [
9
- dict(type='LoadImageFromFile'),
10
- dict(type='LoadAnnotations', with_bbox=True),
11
- dict(
12
- type='Resize',
13
- img_scale=[(1333, 480), (1333, 800)],
14
- multiscale_mode='range',
15
- keep_ratio=True),
16
- dict(type='RandomFlip', flip_ratio=0.5),
17
- dict(type='Normalize', **img_norm_cfg),
18
- dict(type='Pad', size_divisor=32),
19
- dict(type='DefaultFormatBundle'),
20
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
21
- ]
22
- data = dict(train=dict(pipeline=train_pipeline))
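Rough intuition for `multiscale_mode='range'` above: with both `img_scale` entries sharing the long edge 1333, each training image's short edge is drawn uniformly between 480 and 800. A standalone sketch of that sampling (not mmdet's actual implementation):

import random

def sample_train_scale(img_scale=((1333, 480), (1333, 800))):
    long_edge = img_scale[0][0]                                    # fixed cap, 1333
    short_edge = random.randint(img_scale[0][1], img_scale[1][1])  # uniform in [480, 800]
    return (long_edge, short_edge)

print(sample_train_scale())  # e.g. (1333, 641)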
spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/backbone.py DELETED
@@ -1,117 +0,0 @@
1
- import torch.nn as nn
2
-
3
- from .trident_conv import MultiScaleTridentConv
4
-
5
-
6
- class ResidualBlock(nn.Module):
7
- def __init__(self, in_planes, planes, norm_layer=nn.InstanceNorm2d, stride=1, dilation=1,
8
- ):
9
- super(ResidualBlock, self).__init__()
10
-
11
- self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
12
- dilation=dilation, padding=dilation, stride=stride, bias=False)
13
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
14
- dilation=dilation, padding=dilation, bias=False)
15
- self.relu = nn.ReLU(inplace=True)
16
-
17
- self.norm1 = norm_layer(planes)
18
- self.norm2 = norm_layer(planes)
19
- if stride != 1 or in_planes != planes:
20
- self.norm3 = norm_layer(planes)
21
-
22
- if stride == 1 and in_planes == planes:
23
- self.downsample = None
24
- else:
25
- self.downsample = nn.Sequential(
26
- nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
27
-
28
- def forward(self, x):
29
- y = x
30
- y = self.relu(self.norm1(self.conv1(y)))
31
- y = self.relu(self.norm2(self.conv2(y)))
32
-
33
- if self.downsample is not None:
34
- x = self.downsample(x)
35
-
36
- return self.relu(x + y)
37
-
38
-
39
- class CNNEncoder(nn.Module):
40
- def __init__(self, output_dim=128,
41
- norm_layer=nn.InstanceNorm2d,
42
- num_output_scales=1,
43
- **kwargs,
44
- ):
45
- super(CNNEncoder, self).__init__()
46
- self.num_branch = num_output_scales
47
-
48
- feature_dims = [64, 96, 128]
49
-
50
- self.conv1 = nn.Conv2d(3, feature_dims[0], kernel_size=7, stride=2, padding=3, bias=False) # 1/2
51
- self.norm1 = norm_layer(feature_dims[0])
52
- self.relu1 = nn.ReLU(inplace=True)
53
-
54
- self.in_planes = feature_dims[0]
55
- self.layer1 = self._make_layer(feature_dims[0], stride=1, norm_layer=norm_layer) # 1/2
56
- self.layer2 = self._make_layer(feature_dims[1], stride=2, norm_layer=norm_layer) # 1/4
57
-
58
- # highest resolution 1/4 or 1/8
59
- stride = 2 if num_output_scales == 1 else 1
60
- self.layer3 = self._make_layer(feature_dims[2], stride=stride,
61
- norm_layer=norm_layer,
62
- ) # 1/4 or 1/8
63
-
64
- self.conv2 = nn.Conv2d(feature_dims[2], output_dim, 1, 1, 0)
65
-
66
- if self.num_branch > 1:
67
- if self.num_branch == 4:
68
- strides = (1, 2, 4, 8)
69
- elif self.num_branch == 3:
70
- strides = (1, 2, 4)
71
- elif self.num_branch == 2:
72
- strides = (1, 2)
73
- else:
74
- raise ValueError
75
-
76
- self.trident_conv = MultiScaleTridentConv(output_dim, output_dim,
77
- kernel_size=3,
78
- strides=strides,
79
- paddings=1,
80
- num_branch=self.num_branch,
81
- )
82
-
83
- for m in self.modules():
84
- if isinstance(m, nn.Conv2d):
85
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
86
- elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
87
- if m.weight is not None:
88
- nn.init.constant_(m.weight, 1)
89
- if m.bias is not None:
90
- nn.init.constant_(m.bias, 0)
91
-
92
- def _make_layer(self, dim, stride=1, dilation=1, norm_layer=nn.InstanceNorm2d):
93
- layer1 = ResidualBlock(self.in_planes, dim, norm_layer=norm_layer, stride=stride, dilation=dilation)
94
- layer2 = ResidualBlock(dim, dim, norm_layer=norm_layer, stride=1, dilation=dilation)
95
-
96
- layers = (layer1, layer2)
97
-
98
- self.in_planes = dim
99
- return nn.Sequential(*layers)
100
-
101
- def forward(self, x):
102
- x = self.conv1(x)
103
- x = self.norm1(x)
104
- x = self.relu1(x)
105
-
106
- x = self.layer1(x) # 1/2
107
- x = self.layer2(x) # 1/4
108
- x = self.layer3(x) # 1/8 or 1/4
109
-
110
- x = self.conv2(x)
111
-
112
- if self.num_branch > 1:
113
- out = self.trident_conv([x] * self.num_branch) # high to low res
114
- else:
115
- out = [x]
116
-
117
- return out
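A quick smoke test for the encoder above (assumes the file is importable as `gmflow.backbone` alongside its `trident_conv` sibling):

import torch
from gmflow.backbone import CNNEncoder

encoder = CNNEncoder(output_dim=128, num_output_scales=2)
feats = encoder(torch.randn(1, 3, 256, 256))
print([tuple(f.shape) for f in feats])  # [(1, 128, 64, 64), (1, 128, 32, 32)], 1/4 and 1/8 res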
spaces/Armandoliv/document_parser/app.py DELETED
@@ -1,202 +0,0 @@
1
- import os
3
- os.system('pip install "detectron2@git+https://github.com/facebookresearch/[email protected]#egg=detectron2"')
4
-
5
- import io
6
- import pandas as pd
7
- import numpy as np
8
- import gradio as gr
9
-
10
- ## for plotting
11
- import matplotlib.pyplot as plt
12
-
13
- ## for ocr
14
- import pdf2image
15
- import cv2
16
- import layoutparser as lp
17
-
18
- from docx import Document
19
- from docx.shared import Inches
20
-
21
-
22
- def parse_doc(dic):
23
- for k,v in dic.items():
24
- if "Title" in k:
25
- print('\x1b[1;31m'+ v +'\x1b[0m')
26
- elif "Figure" in k:
27
- plt.figure(figsize=(10,5))
28
- plt.imshow(v)
29
- plt.show()
30
- else:
31
- print(v)
32
- print(" ")
33
-
34
-
35
- def to_image(filename):
36
- doc = pdf2image.convert_from_path(filename, dpi=350, last_page=1)
37
- # Save imgs
38
- folder = "doc"
39
- if folder not in os.listdir():
40
- os.makedirs(folder)
41
-
42
- p = 1
43
- for page in doc:
44
- image_name = "page_"+str(p)+".jpg"
45
- page.save(os.path.join(folder, image_name), "JPEG")
46
- p = p+1
47
-
48
- return doc
49
-
50
-
51
-
52
- def detect(doc):
53
- # General
54
- model = lp.Detectron2LayoutModel("lp://PubLayNet/mask_rcnn_X_101_32x8d_FPN_3x/config",
55
- extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
56
- label_map={0:"Text", 1:"Title", 2:"List", 3:"Table", 4:"Figure"})
57
- ## turn img into array
58
- img = np.asarray(doc[0])
59
-
60
- ## predict
61
- detected = model.detect(img)
62
-
63
-
64
- return img, detected
65
-
66
-
67
- # sort detected
68
- def split_page(img, detected, n, axis):
69
- ## length of the page along the split axis: image width for "x", height for "y"
- length = len(img[0]) if axis == "x" else len(img)
- new_detected, start = [], 0
70
- for s in range(1, n + 1):
71
- end = length / n * s
72
- section = lp.Interval(start=start, end=end, axis=axis).put_on_canvas(img)
73
- filter_detected = detected.filter_by(section, center=True)._blocks
74
- new_detected = new_detected + filter_detected
75
- start = end
76
- return lp.Layout([block.set(id=idx) for idx, block in enumerate(new_detected)])
77
-
78
-
79
-
80
- def get_detected(img, detected):
81
- n_cols, n_rows = 1, 1
82
-
83
- ## if single page just sort based on y
84
- if (n_cols == 1) and (n_rows == 1):
85
- new_detected = detected.sort(key=lambda x: x.coordinates[1])
86
- detected = lp.Layout([block.set(id=idx) for idx,block in enumerate(new_detected)])
87
-
88
- ## if multi columns sort by x,y
89
- elif (n_cols > 1) and (n_rows == 1):
90
- detected = split_page(img, n_cols, axis="x")
91
-
92
- ## if multi rows sort by y,x
93
- elif (n_rows > 1) and (n_cols == 1):
94
- detected = split_page(img, n_rows, axis="y")
95
-
96
- ## if multi columns-rows
97
- else:
98
- pass
99
-
100
- return detected
101
-
102
-
103
- def predict_elements(img, detected)->dict:
104
- model = lp.TesseractAgent(languages='eng')
105
- dic_predicted = {}
106
-
107
- for block in [block for block in detected if block.type in ["Title","Text", "List"]]:
108
- ## segmentation
109
- segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img)
110
- ## extraction
111
- extracted = model.detect(segmented)
112
- ## save
113
- dic_predicted[str(block.id)+"-"+block.type] = extracted.replace('\n',' ').strip()
114
-
115
- for block in [block for block in detected if block.type == "Figure"]:
116
- ## segmentation
117
- segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img)
118
- ## save
119
- dic_predicted[str(block.id)+"-"+block.type] = segmented
120
-
121
-
122
- for block in [block for block in detected if block.type == "Table"]:
123
- ## segmentation
124
- segmented = block.pad(left=15, right=15, top=5, bottom=5).crop_image(img)
125
- ## extraction
126
- extracted = model.detect(segmented)
127
- ## save
128
- dic_predicted[str(block.id)+"-"+block.type] = pd.read_csv( io.StringIO(extracted) )
129
-
130
-
131
- return dic_predicted
132
-
133
- def gen_doc(dic_predicted:dict):
134
- document = Document()
135
-
136
- for k,v in dic_predicted.items():
137
-
138
- if "Figure" in k:
139
- cv2.imwrite(f'{k}.jpg', dic_predicted[k])
140
- document.add_picture(f'{k}.jpg', width=Inches(3))
141
-
142
- elif "Table" in k:
143
- table = document.add_table(rows=v.shape[0], cols=v.shape[1])
144
- hdr_cells = table.rows[0].cells
145
- for idx, col in enumerate(v.columns):
146
- hdr_cells[idx].text = col
147
- for c in v.iterrows():
148
- row_cells = table.add_row().cells  ## one new row per dataframe row, not one per non-empty cell
149
- for idx, col in enumerate(v.columns):
150
- try:
151
- if len(str(c[1][col]).strip()) > 0:
152
- row_cells[idx].text = str(c[1][col])
153
- except Exception:
154
- continue
156
-
157
- else:
158
- document.add_paragraph(str(v))
159
-
160
- document.save('demo.docx')
161
-
162
-
163
- def main_convert(filename):
164
- print(filename.name)
165
- doc = to_image(filename.name)
166
-
167
- img, detected = detect(doc)
168
-
169
- n_detected = get_detected(img, detected)
170
-
171
- dic_predicted = predict_elements(img, n_detected)
172
-
173
- gen_doc(dic_predicted)
174
-
175
- im_out = lp.draw_box(img, detected, box_width=5, box_alpha=0.2, show_element_type=True)
176
- dict_out = {}
177
- for k,v in dic_predicted.items():
178
- if "figure" not in k.lower():
179
- dict_out[k] = dic_predicted[k]
180
-
181
- return 'demo.docx', im_out, dict_out
182
-
183
-
184
- inputs = [gr.File(type='file', label="Original PDF File")]
185
- outputs = [gr.File(label="Converted DOC File"), gr.Image(type="pil", label="Detected Image"), gr.JSON()]
186
-
187
- title = "A Document AI parser"
188
- description = "This demo uses AI Models to detect text, titles, tables, figures and lists as well as table cells from an Scanned document.\nBased on the layout it determines reading order and generates an MS-DOC file to Download."
189
-
190
-
191
- demo = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description,
192
- css= """.gr-button-primary { background: -webkit-linear-gradient(
193
- 90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764;
194
- background: linear-gradient(
195
- 90deg, #355764 0%, #55a8a1 100% ) !important;
196
- background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important;
197
- background: -webkit-linear-gradient(
198
- 90deg, #355764 0%, #55a8a1 100% ) !important;
199
- color:white !important}"""
200
- )
201
-
202
- demo.launch()
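Note the app also depends on system binaries: `pdf2image` shells out to poppler and `lp.TesseractAgent` needs a tesseract install. A minimal preflight check (a sketch; installing the packages themselves is distro-specific):

import shutil

for tool in ("pdftoppm", "tesseract"):
    if shutil.which(tool) is None:
        raise RuntimeError(f"required system binary not found on PATH: {tool}")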
spaces/Arnx/MusicGenXvAKN/audiocraft/data/audio_dataset.py DELETED
@@ -1,525 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import argparse
8
- import copy
9
- from concurrent.futures import ThreadPoolExecutor, Future
10
- from dataclasses import dataclass, fields
11
- from contextlib import ExitStack
12
- import gzip
13
- import json
14
- import logging
15
- import os
16
- from pathlib import Path
17
- import random
18
- import sys
19
- import typing as tp
20
-
21
- import torch
22
- import torch.nn.functional as F
23
-
24
- from .audio import audio_read, audio_info
25
- from .audio_utils import convert_audio
26
- from .zip import PathInZip
27
-
28
- try:
29
- import dora
30
- except ImportError:
31
- dora = None # type: ignore
32
-
33
-
34
- @dataclass(order=True)
35
- class BaseInfo:
36
-
37
- @classmethod
38
- def _dict2fields(cls, dictionary: dict):
39
- return {
40
- field.name: dictionary[field.name]
41
- for field in fields(cls) if field.name in dictionary
42
- }
43
-
44
- @classmethod
45
- def from_dict(cls, dictionary: dict):
46
- _dictionary = cls._dict2fields(dictionary)
47
- return cls(**_dictionary)
48
-
49
- def to_dict(self):
50
- return {
51
- field.name: self.__getattribute__(field.name)
52
- for field in fields(self)
53
- }
54
-
55
-
56
- @dataclass(order=True)
57
- class AudioMeta(BaseInfo):
58
- path: str
59
- duration: float
60
- sample_rate: int
61
- amplitude: tp.Optional[float] = None
62
- weight: tp.Optional[float] = None
63
- # info_path is used to load additional information about the audio file that is stored in zip files.
64
- info_path: tp.Optional[PathInZip] = None
65
-
66
- @classmethod
67
- def from_dict(cls, dictionary: dict):
68
- base = cls._dict2fields(dictionary)
69
- if 'info_path' in base and base['info_path'] is not None:
70
- base['info_path'] = PathInZip(base['info_path'])
71
- return cls(**base)
72
-
73
- def to_dict(self):
74
- d = super().to_dict()
75
- if d['info_path'] is not None:
76
- d['info_path'] = str(d['info_path'])
77
- return d
78
-
79
-
80
- @dataclass(order=True)
81
- class SegmentInfo(BaseInfo):
82
- meta: AudioMeta
83
- seek_time: float
84
- n_frames: int # actual number of frames without padding
85
- total_frames: int # total number of frames, padding included
86
- sample_rate: int # actual sample rate
87
-
88
-
89
- DEFAULT_EXTS = ['.wav', '.mp3', '.flac', '.ogg', '.m4a']
90
-
91
- logger = logging.getLogger(__name__)
92
-
93
-
94
- def _get_audio_meta(file_path: str, minimal: bool = True) -> AudioMeta:
95
- """AudioMeta from a path to an audio file.
96
-
97
- Args:
98
- file_path (str): Resolved path of valid audio file.
99
- minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
100
- Returns:
101
- AudioMeta: Audio file path and its metadata.
102
- """
103
- info = audio_info(file_path)
104
- amplitude: tp.Optional[float] = None
105
- if not minimal:
106
- wav, sr = audio_read(file_path)
107
- amplitude = wav.abs().max().item()
108
- return AudioMeta(file_path, info.duration, info.sample_rate, amplitude)
109
-
110
-
111
- def _resolve_audio_meta(m: AudioMeta, fast: bool = True) -> AudioMeta:
112
- """If Dora is available as a dependency, try to resolve potential relative paths
113
- in list of AudioMeta. This method is expected to be used when loading meta from file.
114
-
115
- Args:
116
- m (AudioMeta): Audio meta to resolve.
117
- fast (bool): If True, uses a really fast check for determining if a file is already absolute or not.
118
- Only valid on Linux/Mac.
119
- Returns:
120
- AudioMeta: Audio meta with resolved path.
121
- """
122
- def is_abs(m):
123
- if fast:
124
- return str(m)[0] == '/'
125
- else:
126
- return os.path.isabs(str(m))
127
-
128
- if not dora:
129
- return m
130
-
131
- if not is_abs(m.path):
132
- m.path = dora.git_save.to_absolute_path(m.path)
133
- if m.info_path is not None and not is_abs(m.info_path.zip_path):
134
- m.info_path.zip_path = dora.git_save.to_absolute_path(m.info_path.zip_path)
135
- return m
136
-
137
-
138
- def find_audio_files(path: tp.Union[Path, str],
139
- exts: tp.List[str] = DEFAULT_EXTS,
140
- resolve: bool = True,
141
- minimal: bool = True,
142
- progress: bool = False,
143
- workers: int = 0) -> tp.List[AudioMeta]:
144
- """Build a list of AudioMeta from a given path,
145
- collecting relevant audio files and fetching meta info.
146
-
147
- Args:
148
- path (str or Path): Path to folder containing audio files.
149
- exts (list of str): List of file extensions to consider for audio files.
150
- minimal (bool): Whether to only load the minimal set of metadata (takes longer if not).
151
- progress (bool): Whether to log progress on audio files collection.
152
- workers (int): number of parallel workers, if 0, use only the current thread.
153
- Returns:
154
- List[AudioMeta]: List of audio file paths and their metadata.
155
- """
156
- audio_files = []
157
- futures: tp.List[Future] = []
158
- pool: tp.Optional[ThreadPoolExecutor] = None
159
- with ExitStack() as stack:
160
- if workers > 0:
161
- pool = ThreadPoolExecutor(workers)
162
- stack.enter_context(pool)
163
-
164
- if progress:
165
- print("Finding audio files...")
166
- for root, folders, files in os.walk(path, followlinks=True):
167
- for file in files:
168
- full_path = Path(root) / file
169
- if full_path.suffix.lower() in exts:
170
- audio_files.append(full_path)
171
- if pool is not None:
172
- futures.append(pool.submit(_get_audio_meta, str(audio_files[-1]), minimal))
173
- if progress:
174
- print(format(len(audio_files), " 8d"), end='\r', file=sys.stderr)
175
-
176
- if progress:
177
- print("Getting audio metadata...")
178
- meta: tp.List[AudioMeta] = []
179
- for idx, file_path in enumerate(audio_files):
180
- try:
181
- if pool is None:
182
- m = _get_audio_meta(str(file_path), minimal)
183
- else:
184
- m = futures[idx].result()
185
- if resolve:
186
- m = _resolve_audio_meta(m)
187
- except Exception as err:
188
- print("Error with", str(file_path), err, file=sys.stderr)
189
- continue
190
- meta.append(m)
191
- if progress:
192
- print(format((1 + idx) / len(audio_files), " 3.1%"), end='\r', file=sys.stderr)
193
- meta.sort()
194
- return meta
195
-
196
-
197
- def load_audio_meta(path: tp.Union[str, Path],
198
- resolve: bool = True, fast: bool = True) -> tp.List[AudioMeta]:
199
- """Load list of AudioMeta from an optionally compressed json file.
200
-
201
- Args:
202
- path (str or Path): Path to JSON file.
203
- resolve (bool): Whether to resolve the path from AudioMeta (default=True).
204
- fast (bool): activates some tricks to make things faster.
205
- Returns:
206
- List[AudioMeta]: List of audio file paths and their metadata.
207
- """
208
- open_fn = gzip.open if str(path).lower().endswith('.gz') else open
209
- with open_fn(path, 'rb') as fp: # type: ignore
210
- lines = fp.readlines()
211
- meta = []
212
- for line in lines:
213
- d = json.loads(line)
214
- m = AudioMeta.from_dict(d)
215
- if resolve:
216
- m = _resolve_audio_meta(m, fast=fast)
217
- meta.append(m)
218
- return meta
219
-
220
-
221
- def save_audio_meta(path: tp.Union[str, Path], meta: tp.List[AudioMeta]):
222
- """Save the audio metadata to the file pointer as json.
223
-
224
- Args:
225
- path (str or Path): Path to JSON file.
226
- meta (list of AudioMeta): List of audio meta to save.
227
- """
228
- Path(path).parent.mkdir(exist_ok=True, parents=True)
229
- open_fn = gzip.open if str(path).lower().endswith('.gz') else open
230
- with open_fn(path, 'wb') as fp: # type: ignore
231
- for m in meta:
232
- json_str = json.dumps(m.to_dict()) + '\n'
233
- json_bytes = json_str.encode('utf-8')
234
- fp.write(json_bytes)
235
-
236
-
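Round-trip sketch for the three helpers above, using this module's own functions (the directory path is hypothetical):

meta = find_audio_files("/data/music", minimal=True)
save_audio_meta("/data/music/data.jsonl.gz", meta)
reloaded = load_audio_meta("/data/music/data.jsonl.gz")
assert len(reloaded) == len(meta)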
237
- class AudioDataset:
238
- """Base audio dataset.
239
-
240
- The dataset takes a list of AudioMeta and create a dataset composed of segments of audio
241
- and potentially additional information, by creating random segments from the list of audio
242
- files referenced in the metadata and applying minimal data pre-processing such as resampling,
243
- mixing of channels, padding, etc.
244
-
245
- If no segment_duration value is provided, the AudioDataset will return the full wav for each
246
- audio file. Otherwise, it will randomly sample audio files and create a segment of the specified
247
- duration, applying padding if required.
248
-
249
- By default, only the torch Tensor corresponding to the waveform is returned. Setting return_info=True
250
- allows to return a tuple containing the torch Tensor and additional metadata on the segment and the
251
- original audio meta.
252
-
253
- Args:
254
- meta (tp.List[AudioMeta]): List of audio files metadata.
255
- segment_duration (float): Optional segment duration of audio to load.
256
- If not specified, the dataset will load the full audio segment from the file.
257
- shuffle (bool): Set to `True` to have the data reshuffled at every epoch.
258
- sample_rate (int): Target sample rate of the loaded audio samples.
259
- channels (int): Target number of channels of the loaded audio samples.
260
- sample_on_duration (bool): Set to `True` to sample segments with probability
261
- dependent on audio file duration. This is only used if `segment_duration` is provided.
262
- sample_on_weight (bool): Set to `True` to sample segments using the `weight` entry of
263
- `AudioMeta`. If `sample_on_duration` is also True, the actual weight will be the product
264
- of the file duration and file weight. This is only used if `segment_duration` is provided.
265
- min_segment_ratio (float): Minimum segment ratio to use when the audio file
266
- is shorter than the desired segment.
267
- max_read_retry (int): Maximum number of retries to sample an audio segment from the dataset.
268
- return_info (bool): Whether to return the wav only or return wav along with segment info and metadata.
269
- min_audio_duration (tp.Optional[float], optional): Minimum audio file duration, in seconds, if provided
270
- audio shorter than this will be filtered out.
271
- max_audio_duration (tp.Optional[float], optional): Maximal audio file duration in seconds, if provided
272
- audio longer than this will be filtered out.
273
- """
274
- def __init__(self,
275
- meta: tp.List[AudioMeta],
276
- segment_duration: tp.Optional[float] = None,
277
- shuffle: bool = True,
278
- num_samples: int = 10_000,
279
- sample_rate: int = 48_000,
280
- channels: int = 2,
281
- pad: bool = True,
282
- sample_on_duration: bool = True,
283
- sample_on_weight: bool = True,
284
- min_segment_ratio: float = 0.5,
285
- max_read_retry: int = 10,
286
- return_info: bool = False,
287
- min_audio_duration: tp.Optional[float] = None,
288
- max_audio_duration: tp.Optional[float] = None
289
- ):
290
- assert len(meta) > 0, 'No audio meta provided to AudioDataset. Please check loading of audio meta.'
291
- assert segment_duration is None or segment_duration > 0
292
- assert segment_duration is None or min_segment_ratio >= 0
293
- logging.debug(f'sample_on_duration: {sample_on_duration}')
294
- logging.debug(f'sample_on_weight: {sample_on_weight}')
295
- logging.debug(f'pad: {pad}')
296
- logging.debug(f'min_segment_ratio: {min_segment_ratio}')
297
-
298
- self.segment_duration = segment_duration
299
- self.min_segment_ratio = min_segment_ratio
300
- self.max_audio_duration = max_audio_duration
301
- self.min_audio_duration = min_audio_duration
302
- if self.min_audio_duration is not None and self.max_audio_duration is not None:
303
- assert self.min_audio_duration <= self.max_audio_duration
304
- self.meta: tp.List[AudioMeta] = self._filter_duration(meta)
305
- assert len(self.meta) # Fail fast if all data has been filtered.
306
- self.total_duration = sum(d.duration for d in self.meta)
307
-
308
- if segment_duration is None:
309
- num_samples = len(self.meta)
310
- self.num_samples = num_samples
311
- self.shuffle = shuffle
312
- self.sample_rate = sample_rate
313
- self.channels = channels
314
- self.pad = pad
315
- self.sample_on_weight = sample_on_weight
316
- self.sample_on_duration = sample_on_duration
317
- self.sampling_probabilities = self._get_sampling_probabilities()
318
- self.max_read_retry = max_read_retry
319
- self.return_info = return_info
320
-
321
- def __len__(self):
322
- return self.num_samples
323
-
324
- def _get_sampling_probabilities(self, normalized: bool = True):
325
- """Return the sampling probabilities for each file inside `self.meta`.
326
- """
327
- scores: tp.List[float] = []
328
- for file_meta in self.meta:
329
- score = 1.
330
- if self.sample_on_weight and file_meta.weight is not None:
331
- score *= file_meta.weight
332
- if self.sample_on_duration:
333
- score *= file_meta.duration
334
- scores.append(score)
335
- probabilities = torch.tensor(scores)
336
- if normalized:
337
- probabilities /= probabilities.sum()
338
- return probabilities
339
-
340
- def sample_file(self, rng: torch.Generator) -> AudioMeta:
341
- """Sample a given file from `self.meta`. Can be overriden in subclasses.
342
- This is only called if `segment_duration` is not None.
343
-
344
- You must use the provided random number generator `rng` for reproducibility.
345
- """
346
- if not self.sample_on_weight and not self.sample_on_duration:
347
- file_index = int(torch.randint(len(self.sampling_probabilities), (1,), generator=rng).item())
348
- else:
349
- file_index = int(torch.multinomial(self.sampling_probabilities, 1, generator=rng).item())
350
-
351
- return self.meta[file_index]
352
-
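The weighted draw in `sample_file` in isolation, as a sketch of `torch.multinomial` over normalized duration/weight scores:

import torch

probs = torch.tensor([10.0, 30.0, 60.0])  # e.g. per-file duration * weight
probs = probs / probs.sum()
rng = torch.Generator().manual_seed(0)
idx = int(torch.multinomial(probs, 1, generator=rng).item())
print(idx)  # the 60-weight file is drawn ~60% of the time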
353
- def __getitem__(self, index: int) -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, SegmentInfo]]:
354
- if self.segment_duration is None:
355
- file_meta = self.meta[index]
356
- out, sr = audio_read(file_meta.path)
357
- out = convert_audio(out, sr, self.sample_rate, self.channels)
358
- n_frames = out.shape[-1]
359
- segment_info = SegmentInfo(file_meta, seek_time=0., n_frames=n_frames, total_frames=n_frames,
360
- sample_rate=self.sample_rate)
361
- else:
362
- rng = torch.Generator()
363
- if self.shuffle:
364
- # We use index, plus extra randomness
365
- rng.manual_seed(index + self.num_samples * random.randint(0, 2**24))
366
- else:
367
- # We only use index
368
- rng.manual_seed(index)
369
-
370
- for retry in range(self.max_read_retry):
371
- file_meta = self.sample_file(rng)
372
- # We add some variance in the file position even if audio file is smaller than segment
373
- # without ending up with empty segments
374
- max_seek = max(0, file_meta.duration - self.segment_duration * self.min_segment_ratio)
375
- seek_time = torch.rand(1, generator=rng).item() * max_seek
376
- try:
377
- out, sr = audio_read(file_meta.path, seek_time, self.segment_duration, pad=False)
378
- out = convert_audio(out, sr, self.sample_rate, self.channels)
379
- n_frames = out.shape[-1]
380
- target_frames = int(self.segment_duration * self.sample_rate)
381
- if self.pad:
382
- out = F.pad(out, (0, target_frames - n_frames))
383
- segment_info = SegmentInfo(file_meta, seek_time, n_frames=n_frames, total_frames=target_frames,
384
- sample_rate=self.sample_rate)
385
- except Exception as exc:
386
- logger.warning("Error opening file %s: %r", file_meta.path, exc)
387
- if retry == self.max_read_retry - 1:
388
- raise
389
- else:
390
- break
391
-
392
- if self.return_info:
393
- # Returns the wav and additional information on the wave segment
394
- return out, segment_info
395
- else:
396
- return out
397
-
398
- def collater(self, samples):
399
- """The collater function has to be provided to the dataloader
400
- if AudioDataset has return_info=True in order to properly collate
401
- the samples of a batch.
402
- """
403
- if self.segment_duration is None and len(samples) > 1:
404
- assert self.pad, "Must allow padding when batching examples of different durations."
405
-
406
- # In this case the audio reaching the collater is of variable length as segment_duration=None.
407
- to_pad = self.segment_duration is None and self.pad
408
- if to_pad:
409
- max_len = max([wav.shape[-1] for wav, _ in samples])
410
-
411
- def _pad_wav(wav):
412
- return F.pad(wav, (0, max_len - wav.shape[-1]))
413
-
414
- if self.return_info:
415
- if len(samples) > 0:
416
- assert len(samples[0]) == 2
417
- assert isinstance(samples[0][0], torch.Tensor)
418
- assert isinstance(samples[0][1], SegmentInfo)
419
-
420
- wavs = [wav for wav, _ in samples]
421
- segment_infos = [copy.deepcopy(info) for _, info in samples]
422
-
423
- if to_pad:
424
- # Each wav could be of a different duration as they are not segmented.
425
- for i in range(len(samples)):
426
- # Determines the total length of the signal with padding, so we update here as we pad.
427
- segment_infos[i].total_frames = max_len
428
- wavs[i] = _pad_wav(wavs[i])
429
-
430
- wav = torch.stack(wavs)
431
- return wav, segment_infos
432
- else:
433
- assert isinstance(samples[0], torch.Tensor)
434
- if to_pad:
435
- samples = [_pad_wav(s) for s in samples]
436
- return torch.stack(samples)
437
-
438
- def _filter_duration(self, meta: tp.List[AudioMeta]) -> tp.List[AudioMeta]:
439
- """Filters out audio files with short durations.
440
- Removes from meta files that have durations that will not allow to samples examples from them.
441
- """
442
- orig_len = len(meta)
443
-
444
- # Filter data that is too short.
445
- if self.min_audio_duration is not None:
446
- meta = [m for m in meta if m.duration >= self.min_audio_duration]
447
-
448
- # Filter data that is too long.
449
- if self.max_audio_duration is not None:
450
- meta = [m for m in meta if m.duration <= self.max_audio_duration]
451
-
452
- filtered_len = len(meta)
453
- removed_percentage = 100*(1-float(filtered_len)/orig_len)
454
- msg = 'Removed %.2f percent of the data because it was too short or too long.' % removed_percentage
455
- if removed_percentage < 10:
456
- logging.debug(msg)
457
- else:
458
- logging.warning(msg)
459
- return meta
460
-
461
- @classmethod
462
- def from_meta(cls, root: tp.Union[str, Path], **kwargs):
463
- """Instantiate AudioDataset from a path to a directory containing a manifest as a jsonl file.
464
-
465
- Args:
466
- root (str or Path): Path to root folder containing audio files.
467
- kwargs: Additional keyword arguments for the AudioDataset.
468
- """
469
- root = Path(root)
470
- if root.is_dir():
471
- if (root / 'data.jsonl').exists():
472
- root = root / 'data.jsonl'
473
- elif (root / 'data.jsonl.gz').exists():
474
- root = root / 'data.jsonl.gz'
475
- else:
476
- raise ValueError("Don't know where to read metadata from in the dir. "
477
- "Expecting either a data.jsonl or data.jsonl.gz file but none found.")
478
- meta = load_audio_meta(root)
479
- return cls(meta, **kwargs)
480
-
481
- @classmethod
482
- def from_path(cls, root: tp.Union[str, Path], minimal_meta: bool = True,
483
- exts: tp.List[str] = DEFAULT_EXTS, **kwargs):
484
- """Instantiate AudioDataset from a path containing (possibly nested) audio files.
485
-
486
- Args:
487
- root (str or Path): Path to root folder containing audio files.
488
- minimal_meta (bool): Whether to only load minimal metadata or not.
489
- exts (list of str): Extensions for audio files.
490
- kwargs: Additional keyword arguments for the AudioDataset.
491
- """
492
- root = Path(root)
493
- if root.is_file():
494
- meta = load_audio_meta(root, resolve=True)
495
- else:
496
- meta = find_audio_files(root, exts, minimal=minimal_meta, resolve=True)
497
- return cls(meta, **kwargs)
498
-
499
-
500
- def main():
501
- logging.basicConfig(stream=sys.stderr, level=logging.INFO)
502
- parser = argparse.ArgumentParser(
503
- prog='audio_dataset',
504
- description='Generate .jsonl files by scanning a folder.')
505
- parser.add_argument('root', help='Root folder with all the audio files')
506
- parser.add_argument('output_meta_file',
507
- help='Output file to store the metadata.')
508
- parser.add_argument('--complete',
509
- action='store_false', dest='minimal', default=True,
510
- help='Retrieve all metadata, even those that are expensive '
511
- 'to compute (e.g. normalization).')
512
- parser.add_argument('--resolve',
513
- action='store_true', default=False,
514
- help='Resolve the paths to be absolute and with no symlinks.')
515
- parser.add_argument('--workers',
516
- default=10, type=int,
517
- help='Number of workers.')
518
- args = parser.parse_args()
519
- meta = find_audio_files(args.root, DEFAULT_EXTS, progress=True,
520
- resolve=args.resolve, minimal=args.minimal, workers=args.workers)
521
- save_audio_meta(args.output_meta_file, meta)
522
-
523
-
524
- if __name__ == '__main__':
525
- main()
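For orientation, here is a minimal usage sketch of the AudioDataset class whose tail is deleted above (an editor's illustration, not part of the diff). The import path and the ./audio folder are assumptions; the keyword arguments mirror the attributes the methods above read.

from torch.utils.data import DataLoader
from audiocraft.data.audio_dataset import AudioDataset  # assumed package layout

# Scan a folder of audio files and sample fixed 5-second training segments.
dataset = AudioDataset.from_path(
    "./audio",             # hypothetical folder containing audio files
    segment_duration=5.0,  # enables random seeking, padding, and read retries
    sample_rate=16000,
    channels=1,
    return_info=True,      # __getitem__ also returns a SegmentInfo
)

# return_info=True requires the custom collater defined above.
loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collater)
wav, infos = next(iter(loader))
print(wav.shape)           # e.g. torch.Size([4, 1, 80000]) for 5 s at 16 kHz
print(infos[0].seek_time)  # where in the source file each segment starts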
 
spaces/Awiny/Image2Paragraph/models/segment_models/configs/ade20k_id2label.py DELETED
@@ -1,153 +0,0 @@
1
- CONFIG = {
2
- "id2label": {
3
- "0": "wall",
4
- "1": "building",
5
- "2": "sky",
6
- "3": "floor",
7
- "4": "tree",
8
- "5": "ceiling",
9
- "6": "road",
10
- "7": "bed ",
11
- "8": "windowpane",
12
- "9": "grass",
13
- "10": "cabinet",
14
- "11": "sidewalk",
15
- "12": "person",
16
- "13": "earth",
17
- "14": "door",
18
- "15": "table",
19
- "16": "mountain",
20
- "17": "plant",
21
- "18": "curtain",
22
- "19": "chair",
23
- "20": "car",
24
- "21": "water",
25
- "22": "painting",
26
- "23": "sofa",
27
- "24": "shelf",
28
- "25": "house",
29
- "26": "sea",
30
- "27": "mirror",
31
- "28": "rug",
32
- "29": "field",
33
- "30": "armchair",
34
- "31": "seat",
35
- "32": "fence",
36
- "33": "desk",
37
- "34": "rock",
38
- "35": "wardrobe",
39
- "36": "lamp",
40
- "37": "bathtub",
41
- "38": "railing",
42
- "39": "cushion",
43
- "40": "base",
44
- "41": "box",
45
- "42": "column",
46
- "43": "signboard",
47
- "44": "chest of drawers",
48
- "45": "counter",
49
- "46": "sand",
50
- "47": "sink",
51
- "48": "skyscraper",
52
- "49": "fireplace",
53
- "50": "refrigerator",
54
- "51": "grandstand",
55
- "52": "path",
56
- "53": "stairs",
57
- "54": "runway",
58
- "55": "case",
59
- "56": "pool table",
60
- "57": "pillow",
61
- "58": "screen door",
62
- "59": "stairway",
63
- "60": "river",
64
- "61": "bridge",
65
- "62": "bookcase",
66
- "63": "blind",
67
- "64": "coffee table",
68
- "65": "toilet",
69
- "66": "flower",
70
- "67": "book",
71
- "68": "hill",
72
- "69": "bench",
73
- "70": "countertop",
74
- "71": "stove",
75
- "72": "palm",
76
- "73": "kitchen island",
77
- "74": "computer",
78
- "75": "swivel chair",
79
- "76": "boat",
80
- "77": "bar",
81
- "78": "arcade machine",
82
- "79": "hovel",
83
- "80": "bus",
84
- "81": "towel",
85
- "82": "light",
86
- "83": "truck",
87
- "84": "tower",
88
- "85": "chandelier",
89
- "86": "awning",
90
- "87": "streetlight",
91
- "88": "booth",
92
- "89": "television receiver",
93
- "90": "airplane",
94
- "91": "dirt track",
95
- "92": "apparel",
96
- "93": "pole",
97
- "94": "land",
98
- "95": "bannister",
99
- "96": "escalator",
100
- "97": "ottoman",
101
- "98": "bottle",
102
- "99": "buffet",
103
- "100": "poster",
104
- "101": "stage",
105
- "102": "van",
106
- "103": "ship",
107
- "104": "fountain",
108
- "105": "conveyer belt",
109
- "106": "canopy",
110
- "107": "washer",
111
- "108": "plaything",
112
- "109": "swimming pool",
113
- "110": "stool",
114
- "111": "barrel",
115
- "112": "basket",
116
- "113": "waterfall",
117
- "114": "tent",
118
- "115": "bag",
119
- "116": "minibike",
120
- "117": "cradle",
121
- "118": "oven",
122
- "119": "ball",
123
- "120": "food",
124
- "121": "step",
125
- "122": "tank",
126
- "123": "trade name",
127
- "124": "microwave",
128
- "125": "pot",
129
- "126": "animal",
130
- "127": "bicycle",
131
- "128": "lake",
132
- "129": "dishwasher",
133
- "130": "screen",
134
- "131": "blanket",
135
- "132": "sculpture",
136
- "133": "hood",
137
- "134": "sconce",
138
- "135": "vase",
139
- "136": "traffic light",
140
- "137": "tray",
141
- "138": "ashcan",
142
- "139": "fan",
143
- "140": "pier",
144
- "141": "crt screen",
145
- "142": "plate",
146
- "143": "monitor",
147
- "144": "bulletin board",
148
- "145": "shower",
149
- "146": "radiator",
150
- "147": "glass",
151
- "148": "clock",
152
- "149": "flag"}
153
- }
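A short sketch of how an id2label map like the one above is typically consumed, e.g. to turn model class indices into names (editor's illustration; the import path is an assumption, only CONFIG comes from the file):

from models.segment_models.configs.ade20k_id2label import CONFIG  # assumed import path

# Keys are stored as strings; cast them once for integer lookups.
id2label = {int(k): v for k, v in CONFIG["id2label"].items()}
label2id = {name: idx for idx, name in id2label.items()}

print(id2label[12])     # 'person'
print(label2id["sky"])  # 2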
 
spaces/Bart92/RVC_HF/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
- import torch.nn.functional as F
3
- from torch import nn
4
-
5
- from . import layers_33966KB as layers
6
-
7
-
8
- class BaseASPPNet(nn.Module):
9
- def __init__(self, nin, ch, dilations=(4, 8, 16, 32)):
10
- super(BaseASPPNet, self).__init__()
11
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
12
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
13
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
14
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
15
-
16
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
17
-
18
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
19
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
20
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
21
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
22
-
23
- def __call__(self, x):
24
- h, e1 = self.enc1(x)
25
- h, e2 = self.enc2(h)
26
- h, e3 = self.enc3(h)
27
- h, e4 = self.enc4(h)
28
-
29
- h = self.aspp(h)
30
-
31
- h = self.dec4(h, e4)
32
- h = self.dec3(h, e3)
33
- h = self.dec2(h, e2)
34
- h = self.dec1(h, e1)
35
-
36
- return h
37
-
38
-
39
- class CascadedASPPNet(nn.Module):
40
- def __init__(self, n_fft):
41
- super(CascadedASPPNet, self).__init__()
42
- self.stg1_low_band_net = BaseASPPNet(2, 16)
43
- self.stg1_high_band_net = BaseASPPNet(2, 16)
44
-
45
- self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0)
46
- self.stg2_full_band_net = BaseASPPNet(8, 16)
47
-
48
- self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
49
- self.stg3_full_band_net = BaseASPPNet(16, 32)
50
-
51
- self.out = nn.Conv2d(32, 2, 1, bias=False)
52
- self.aux1_out = nn.Conv2d(16, 2, 1, bias=False)
53
- self.aux2_out = nn.Conv2d(16, 2, 1, bias=False)
54
-
55
- self.max_bin = n_fft // 2
56
- self.output_bin = n_fft // 2 + 1
57
-
58
- self.offset = 128
59
-
60
- def forward(self, x, aggressiveness=None):
61
- mix = x.detach()
62
- x = x.clone()
63
-
64
- x = x[:, :, : self.max_bin]
65
-
66
- bandw = x.size()[2] // 2
67
- aux1 = torch.cat(
68
- [
69
- self.stg1_low_band_net(x[:, :, :bandw]),
70
- self.stg1_high_band_net(x[:, :, bandw:]),
71
- ],
72
- dim=2,
73
- )
74
-
75
- h = torch.cat([x, aux1], dim=1)
76
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
77
-
78
- h = torch.cat([x, aux1, aux2], dim=1)
79
- h = self.stg3_full_band_net(self.stg3_bridge(h))
80
-
81
- mask = torch.sigmoid(self.out(h))
82
- mask = F.pad(
83
- input=mask,
84
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
85
- mode="replicate",
86
- )
87
-
88
- if self.training:
89
- aux1 = torch.sigmoid(self.aux1_out(aux1))
90
- aux1 = F.pad(
91
- input=aux1,
92
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
93
- mode="replicate",
94
- )
95
- aux2 = torch.sigmoid(self.aux2_out(aux2))
96
- aux2 = F.pad(
97
- input=aux2,
98
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
99
- mode="replicate",
100
- )
101
- return mask * mix, aux1 * mix, aux2 * mix
102
- else:
103
- if aggressiveness:
104
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
105
- mask[:, :, : aggressiveness["split_bin"]],
106
- 1 + aggressiveness["value"] / 3,
107
- )
108
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
109
- mask[:, :, aggressiveness["split_bin"] :],
110
- 1 + aggressiveness["value"],
111
- )
112
-
113
- return mask * mix
114
-
115
- def predict(self, x_mag, aggressiveness=None):
116
- h = self.forward(x_mag, aggressiveness)
117
-
118
- if self.offset > 0:
119
- h = h[:, :, :, self.offset : -self.offset]
120
- assert h.size()[3] > 0
121
-
122
- return h
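A hedged smoke test for the network deleted above, assuming the module is importable from its package path so the relative layers_33966KB import resolves. The input follows the (batch, channels, frequency bins, frames) magnitude-spectrogram convention forward() expects; eval() matters because forward() returns a 3-tuple in training mode, which would break predict():

import torch

from infer.lib.uvr5_pack.lib_v5.nets_33966KB import CascadedASPPNet  # path from this repo

n_fft = 2048
model = CascadedASPPNet(n_fft)
model.eval()  # training mode returns (mask, aux1, aux2) instead of a single tensor

# Dummy magnitude spectrogram: (batch, 2 channels, n_fft // 2 + 1 bins, frames).
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 512)

with torch.no_grad():
    out = model.predict(x_mag)

print(out.shape)  # torch.Size([1, 2, 1025, 256]): offset (128) frames cropped per side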
 
spaces/Benson/text-generation/Examples/Bitcoin Bit Generador De Bitcoin Apk.md DELETED
@@ -1,63 +0,0 @@
-
- <h1>How to Download the Minecraft 1.13.1 APK for Free</h1>
- <p>Minecraft is one of the most popular and creative games in the world. It lets you build your own virtual world out of blocks, explore different biomes, fight monsters, and interact with other players. Whether you want to build a castle, a spaceship, or a pixel-art masterpiece, Minecraft lets you unleash your imagination and have fun.</p>
- <p>If you want to enjoy the latest Minecraft features and updates, you need to download the most recent version of the game. In this article, we show you how to download the Minecraft 1.13.1 APK for free on your Android device. This version includes exciting additions such as foxes, brown mooshrooms, structure blocks, and wither roses.</p>
- <h2>bitcoin bit bitcoin generator apk</h2><p><b>DOWNLOAD: <a href="https://bltlly.com/2v6L8R">https://bltlly.com/2v6L8R</a></b></p>
- <h2>What's New in Minecraft 1.13.1?</h2>
- <p>Minecraft 1.13.1 is the latest update to the Bedrock edition of the game, which runs on Android devices. It was released on October 2, 2021, and brings performance and stability improvements as well as new features and items. Here are some of the highlights:</p>
- <h3>Foxes</h3>
- <p>Foxes are cute, furry animals found in taiga, giant-tree taiga, and snowy biomes. They are nocturnal hunters that prey on chickens, rabbits, fish, and berries. They are also very fast and can jump over fences and walls.</p>
- <p>If you want to tame a fox, find two adult foxes carrying sweet berries in their mouths. You can use a lead to bring them together and breed them with more sweet berries. The baby fox that is born will trust you and follow you. You can also give it a name tag or a collar to make it more loyal.</p>
- <h3>Brown mooshrooms</h3>
- <p>Brown mooshrooms are a new variant of mushroom cow that can be obtained through a rare event. They look like normal cows but with brown mushrooms on their backs. They provide mushroom stew or suspicious stew when sheared or milked with a bowl.</p>
- <h3>Structure blocks</h3>
- <p>Structure blocks are special blocks that can be used to create and copy structures in the game. They are useful for map makers and builders who want to save time and resources. They can only be obtained in creative mode with the command /give @s structure_block.</p>
- <p>To use a structure block, place it on the ground and open its interface by right-clicking on it. There are four structure block modes: Save, Load, Corner, and Data. Use Save mode to save a structure to a file, Load mode to load a structure from a file, Corner mode to define the bounds of a structure, and Data mode to attach custom data to a structure.</p>
- <h3>Wither rose</h3>
- <p>The wither rose is a new type of flower that inflicts the wither effect on any living entity that touches it. It is black with a skull pattern on its petals. It can be used to craft black dye or suspicious stew.</p>
- <p>To obtain a wither rose, lure a wither boss into a flower biome and have it kill a mob. There is a 100% chance the mob will drop a wither rose on death. You can also use a dispenser with shears to collect the flower without hurting yourself.</p>
- <h2>How to Download the Minecraft 1.13.1 APK for Free?</h2>
- <p>Now that you know what's new in Minecraft 1.13.1, you may be wondering how to download it for free on your Android device. It's not as hard as you might think. Follow these simple steps and you'll be playing in no time:</p>
- <h3>Step 1: Check your device's compatibility and storage space</h3>
- <p>Before downloading anything, make sure your device meets the minimum requirements to run the game smoothly. According to the official website, you need at least:</p>
- <ul>
- <li>An Android device running version 4.2 or higher</li>
- <li>A processor with an ARMv7 or x86 architecture</li>
- <li>At least 1 GB of RAM</li>
- <li>At least 300 MB of free storage space</li>
- </ul>
- <h3>Step 2: Choose a reliable source to download the APK file</h3>
- <p>An APK file is an Android application package that contains all the files and data needed to install an app on your device. You can download APK files from various sources on the internet, but not all of them are safe and reliable. Some may contain viruses, malware, or unwanted ads that can harm your device or compromise your privacy.</p>
- <p>To avoid these risks, choose a trusted source that offers verified, safe APK files. One of the best sources we can recommend is APKPure, a popular and reputable website that provides free, up-to-date APK files for many apps and games. You can access the website from any browser on your device, or download its app for easier access.</p>
- <h3>Step 3: Enable unknown sources in your device settings</h3>
- <p>By default, your device only lets you install apps from the Google Play Store, the official app store for Android devices. However, if you want to install an APK file from another source, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Play Store.</p>
- <p>To enable unknown sources, go to your device settings and look for the security or privacy section. Then find the option labeled unknown sources, or allow installation of apps from unknown sources, and toggle it on. You may see a warning message about the potential risks of installing apps from unknown sources. Tap OK or Continue.</p>
- <h3>Step 4: Install the APK file and launch the game</h3>
- <p>Now that unknown sources are enabled, you can install the APK file on your device. To do this, go to the website or app where you downloaded the APK file and tap it. A pop-up may ask whether you want to install the app. Tap Install and wait for the installation to finish.</p>
- <p>Congratulations! You have successfully downloaded and installed the Minecraft 1.13.1 APK for free on your Android device. You can now enjoy this amazing game with all its new features and updates.</p>
- <h2>How to Play Minecraft 1.13.1?</h2>
- <p>If you are new to Minecraft or need a refresher on how to play, here are some basic tips and instructions for Minecraft 1.13.1:</p>
- <h3>Choose a game mode: Survival, Creative, or Adventure</h3>
- <p>Minecraft has three main game modes that offer different experiences and challenges. Pick the one that suits your preference and play style.</p>
- <ul>
- <li>Survival mode: You must survive in a randomly generated world with limited resources and health. You have to gather materials, tools, and weapons, build shelters, and defend yourself from enemies. You also have to deal with hunger, thirst, and environmental hazards. You can adjust the difficulty from peaceful to hard, or play hardcore mode, where you only have one life.</li>
- <li>Creative mode: You have unlimited resources and health, and you can fly around the world. You can create whatever you want without restrictions or dangers. You can also use commands and cheats to modify the world and spawn items and entities. This mode is ideal for building, experimenting, and exploring.</li>
- <li>Adventure mode: You can play custom maps and scenarios created by other players or by yourself. You have to follow the rules and objectives set by the map creator, such as solving puzzles, completing quests, or fighting bosses. You can also use command blocks and data packs to add custom features and mechanics to the game.</li>
- </ul>
- <h3>Create or join a world: Single player or multiplayer</h3>
- <p>To create a world, choose a name for it, select a game mode, and customize options such as the seed, the world type, cheats, and the bonus chest. You can also use add-ons or resource packs to change the look and behavior of the game.</p>
- <p>To join a world, find a server that hosts the world you want to play in. You can join a public server that anyone can access, or a private server that requires an invitation or a password. You can also join a Realm, a subscription-based service that lets you create and join worlds that are always online.</p>
- <h3>Explore, build, and craft: Use your imagination and skills</h3>
- <p>Once you are in a world, you can start playing by exploring, building, and crafting. You can move around the world by walking, running, jumping, swimming, flying, or riding vehicles or animals. You can interact with the world by breaking and placing blocks, using items and tools, flipping switches and levers, trading with villagers, and fighting enemies.</p>
- <p>You can build whatever you want using blocks of different materials, shapes, colors, and properties. You can also use redstone circuits to create complex mechanisms such as doors, traps, elevators, and machines. Commands and functions can likewise be used to create custom structures and effects.</p>
- <p>You can craft various items and tools using a crafting table or your inventory grid. You need to arrange materials in specific patterns to create different products such as weapons, armor, food from wherever you downloaded it and install it over the existing one. You may need to uninstall the previous version first if you run into problems.</li>
- <li><b>Q: What are some other games like Minecraft that I can play on my Android device?</b></li>
- <li>A: Some other games like Minecraft that you can play on your Android device are Terraria, Roblox, Stardew Valley, and Survivalcraft.</li>
- </ol></p>
 
spaces/Benson/text-generation/Examples/Bowmasters Mod Apk Gamedva.md DELETED
@@ -1,116 +0,0 @@
-
- <h1>The Amazing Spider-Man 2 APK: A Review</h1>
- <p>If you are a fan of Spider-Man and his amazing adventures, you may want to check out The Amazing Spider-Man 2 APK, a game that lets you become the web-slinger yourself and save New York from a citywide crime spree. In this article, we review the game and tell you why you should download it, what features it offers, how to install it on your Android device, and some tips and tricks to enjoy it more. Let's get started!</p>
- <h2>bowmasters mod apk gamedva</h2><p><b>DOWNLOAD: <a href="https://bltlly.com/2v6J17">https://bltlly.com/2v6J17</a></b></p>
- <h2>Introduction</h2>
- <h3>What is The Amazing Spider-Man 2 APK?</h3>
- <p>The Amazing Spider-Man 2 APK is an Android game based on the Marvel movie of the same name. It is developed by Gameloft, a leading company in the mobile gaming industry. The game is an action-packed, open-world adventure that lets you web-swing, shoot webs, climb walls, and fight as Spider-Man in a realistic 3D environment. You can explore six detailed districts of Manhattan, from Times Square to Central Park, and face famous villains such as Venom, the Green Goblin, Electro, and Kraven the Hunter. You can also unlock different Spider-Man suits, such as Symbiote Spider-Man, Iron Spider, and Ultimate Comics Spider-Man, each with its own abilities and bonuses.</p>
- <h3>Why should you download it?</h3>
- <p>There are many reasons why you might want to download The Amazing Spider-Man 2 APK on your Android device. Here are some of them:</p>
- <ul>
- <li>It is a fun and exciting game that will keep you entertained for hours.</li>
- <li>It has high-quality graphics and animations that make you feel like you are in the movie.</li>
- <li>It has a gripping story that expands on the movie's plot and introduces new characters and scenarios.</li>
- <li>It has challenging gameplay that requires skill, strategy, and reflexes.</li>
- <li>It has a social side that lets you compete with other players in Mysterio's Arena and share your achievements online.</li>
- </ul>
- <h3>An original story based on the movie</h3>
- <p>The Amazing Spider-Man 2 APK follows the events of the movie but adds new twists and turns to make it more interesting. You will meet new characters such as Black Cat and Screwball, who will help or hinder you in your quest to stop the crime wave. You will also discover more about the villains' origins and motives. The game has high-quality voice acting and cinematic cutscenes that immerse you in the story.</p>
- <h3>A stunning open-world 3D adventure</h3>
- <p>The Amazing Spider-Man 2 APK gives you the freedom to explore New York however you like. You can swing from building to building, climb walls, crawl across rooftops, and leap over obstacles. You can also interact with the environment, for example by breaking windows, smashing cars, or saving civilians. Realistic physics and dynamic weather effects bring the city to life. You can also enjoy beautiful views of the skyline, the bridges, and the landmarks.</p>
- <h3>A variety of Spider-Man suits and villains</h3>
- <p>The Amazing Spider-Man 2 APK lets you customize your Spider-Man with different suits that have different powers and bonuses. You can unlock them by completing missions, collecting items, or buying them with real money. Some of the suits are:</p>
- <table>
- <tr><th>Suit</th><th>Power</th><th>Effect</th><th>Bonus</th></tr>
- <tr><td>Symbiote Spider-Man</td><td>Venom Blast</td><td>Stuns enemies and deals extra damage</td><td>Increases health regeneration</td></tr>
- <tr><td>Iron Spider</td><td>Iron Arms</td><td>Summons four mechanical arms that attack enemies</td><td>Increases attack power and defense</td></tr>
- <tr><td>Ultimate Comics Spider-Man</td><td>Cloaking</td><td>Becomes invisible and undetectable by enemies</td><td>Increases stealth and agility</td></tr>
- <tr><td>Spider-Man 2099</td><td>Accelerated Vision</td><td>Slows down time and sharpens perception</td><td>Increases speed and reflexes</td></tr>
- <tr><td>Scarlet Spider</td><td>Nanobots</td><td>Heals wounds and restores health</td><td>Increases healing and stamina</td></tr>
- <tr><td>Spider-Armor MK II</td><td>Bulletproof</td><td>Absorbs bullets and reflects them back at enemies</td><td>Increases armor and protection</td></tr>
- <tr><td>The Amazing Spider-Man (2014)</td><td>No special power</td><td>-</td><td>No special bonus</td></tr>
- <tr><th colspan="4">Note: Some suits require in-app purchases to unlock.</th></tr>
- </table>
- <p>The game also features a wide range of villains that you will face in different missions and boss battles. Some of the villains are:</p>
- <ul>
- <li>Venom: A monstrous creature created when an alien symbiote bonds with a human host. He has super strength, agility, and durability, and can fire webs and tendrils from his body. He is one of Spider-Man's most dangerous enemies.</li>
- <li>The Green Goblin: The alter ego of Norman Osborn, a ruthless businessman who experimented on himself with a serum that gave him enhanced abilities but also drove him insane. He uses a glider, pumpkin bombs, and razor bats to attack Spider-Man.</li>
- <li>Electro: A former electrical engineer who was transformed into a living battery after an accident. He can manipulate electricity, shoot lightning bolts, and fly using magnetic fields. He is obsessed with becoming more powerful and destroying Spider-Man.</li>
- <li>Kraven the Hunter: A skilled hunter and tracker who considers Spider-Man his ultimate prey. He uses various weapons, such as knives, spears, nets, and traps, to hunt his targets. He is also enhanced by a mystical potion that gives him super senses, speed, and stamina.</li>
- <li>The Kingpin: New York's crime lord, who controls most of the illegal activity in the city. He is a mastermind who uses his wealth, influence, and brute force to achieve his goals. He is also a formidable fighter who can match Spider-Man in strength and durability.</li>
- <li>The Black Cat: A cat burglar who has a complicated relationship with Spider-Man. She is an expert thief who uses her agility, acrobatics, gadgets, and luck to steal valuable items. She can also flirt with Spider-Man and distract him from his missions.</li>
- <li>Screwball: A prankster who livestreams her crimes for her fans. She uses various devices, such as drones, holograms, bombs, and traps, to create chaos and challenge Spider-Man. She is also very agile and can evade Spider-Man's attacks.</li>
- </ul>
- <h3>A thrilling combat system and aerial action</h3>
- <p>The Amazing Spider-Man 2 APK has a combat system that is fast-paced, fluid, and responsive. You can use your webs to swing at, zip to, pull, or wrap up your enemies, and use your fists, kicks, or web-shooters to fight them. You can perform combos, counters, dodges, finishers, and special moves to defeat your foes. You can also use the environment to your advantage, for example by throwing objects, breaking walls, or triggering explosions.</p>
- <p>The game also has an aerial action system that lets you soar through the sky as Spider-Man. You can use your webs to swing from building to building, or glide using your web wings. You can perform acrobatic maneuvers such as flips, spins, dives, and rolls, and you can engage in aerial battles with enemies that fly or shoot at you.</p>
- <h2>How to download and install The Amazing Spider-Man 2 APK</h2>
- <h3>Requirements and compatibility</h3>
- <p>The Amazing Spider-Man 2 APK is a large game that needs plenty of space and resources on your Android device. Here are the minimum requirements and compatibility for the game:</p>
- <ul>
- <li>Android version: 4.0.3 or higher</li>
- <li>RAM: 1 GB or more</li>
- <li>Storage: 1.5 GB or more</li>
- <li>Processor: 1 GHz or faster</li>
- <li>Screen resolution: 800 x 480 or higher</li>
- <li>Internet connection: required for some features and updates</li>
- </ul>
- <h3>Steps to download and install</h3>
- <p>To download and install The Amazing Spider-Man 2 APK on your Android device, follow these steps:</p>
- <ol>
- <li>Download the APK file and the OBB data file from a trusted source, such as APKPure or APKMirror. Make sure to download the files that match your device's specifications and region.</li>
- <li>Enable installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown sources and toggle it on.</li>
- <li>Locate the downloaded APK file and tap it to start the installation process. Follow the on-screen instructions to complete the installation.</li>
- <li>Extract the OBB data file using a file manager app, such as ES File Explorer or ZArchiver. You should get a folder named com.gameloft.android.ANMP.GloftASHM.</li>
- <li>Move the folder to the Android/OBB directory in your device's internal storage. If you do not have an OBB folder, create one.</li>
- <li>Launch the game from the app drawer and enjoy!</li>
- </ol>
- <h3>Tips and tricks to enjoy the game</h3>
- <p>The Amazing Spider-Man 2 APK is a fun and challenging game that will test your skills and reflexes as Spider-Man. Here are some tips and tricks to help you enjoy the game more:</p>
- <ul>
- <li>Upgrade your Spider-Man suits and web-shooters regularly to improve your abilities and performance.</li>
- <li>Collect spider tokens, vials, comics, and other items to unlock new suits, gadgets, skills, and bonuses.</li>
- <li>Complete side missions, such as saving civilians, stopping crimes, or gathering evidence, to earn extra rewards and reputation.</li>
- <li>Use your spider-sense to detect enemies, dangers, and opportunities around you.</li>
- <li>Use your webs to swing faster, avoid obstacles, and reach high places.</li>
- <li>Use your environment to your advantage, for example by throwing objects, breaking walls, or triggering explosions.</li>
- <li>Watch out for quick-time events that require you to tap or swipe the screen at the right moment.</li>
- <li>Complete achievements and challenges to earn more spider tokens and vials.</li>
- <li>Compete with other players in Mysterio's Arena and climb the leaderboards.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>In conclusion, The Amazing Spider-Man 2 APK is an Android game that lets you become Spider-Man and save New York from a citywide crime spree. It is based on the Marvel movie of the same name but also has an original story that introduces new characters and scenarios. It has high-quality graphics and animations that make you feel like you are in the movie, and a variety of features that make it fun and exciting, such as different Spider-Man suits and villains, an open-world 3D adventure, a thrilling combat system and aerial action, a social side that lets you compete with other players online, and more. It is easy to download and install on your Android device as long as you meet the requirements and compatibility, and there are tips and tricks to help you enjoy the game more.</p>
- <h3>Call to action and final thoughts</h3>
- <p>Here are some frequently asked questions about The Amazing Spider-Man 2 APK:</p>
- <ol>
- <li>Is The Amazing Spider-Man 2 APK safe to download and install?</li>
- <p>Yes, The Amazing Spider-Man 2 APK is safe to download and install as long as you get it from a trusted source, such as APKPure or APKMirror. These sources scan files for viruses and malware before hosting them. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain harmful or unwanted content.</p>
- <li>Is The Amazing Spider-Man 2 APK free to play?</li>
- <p>The Amazing Spider-Man 2 APK is free to download and play, but it also has in-app purchases that let you buy additional items, such as spider tokens, vials, suits, or gadgets. These purchases are optional and are not required to enjoy the game; you can also earn these items by playing the game and completing missions.</p>
- <li>How do I update The Amazing Spider-Man 2 APK?</li>
- <p>The Amazing Spider-Man 2 APK is updated regularly by the developers to fix bugs, improve performance, and add new features. You can update the game by downloading and installing the latest version from the same source where you got the original. You can also check for updates in-game under Settings > About > Check for updates.</p>
- <li>How do I uninstall The Amazing Spider-Man 2 APK?</li>
- <p>If you want to uninstall The Amazing Spider-Man 2 APK from your Android device, you can do so by following these steps:</p>
- <ul>
- <li>Go to Settings > Apps > The Amazing Spider-Man 2.</li>
- <li>Tap Uninstall and confirm your choice.</li>
- <li>Delete the com.gameloft.android.ANMP.GloftASHM folder from your Android/OBB directory.</li>
- </ul>
- <li>How do I contact the developers of The Amazing Spider-Man 2 APK?</li>
- <p>If you have any questions, feedback, or issues regarding The Amazing Spider-Man 2 APK, you can contact the developers using one of these methods:</p>
- <ul>
- <li>Website: https://www.gameloft.com/en/game/the-amazing-spider-man-2</li>
- <li>Facebook: https://www.facebook.com/TheAmazingSpiderManGame</li>
- <li>Twitter: https://twitter.com/gameloft</li>
- </ul></p>
 
spaces/Benson/text-generation/Examples/Cmo Descargar Yu Gi Oh Duel Links En Laptop.md DELETED
@@ -1,78 +0,0 @@
-
- <h1>How to Download Yu-Gi-Oh Duel Links on a Laptop</h1>
- <p>Do you love playing Yu-Gi-Oh Duel Links on your mobile device but wish you could enjoy it on a bigger screen and with better controls? If so, you're in luck, because in this article we will show you how to download Yu-Gi-Oh Duel Links on a laptop and play it like a pro. Whether you have Windows 11, Windows 10, or an older version of Windows, we have a solution for you. Let's get started!</p>
- <h2>how to download yu gi oh duel links on laptop</h2><p><b>Download File: <a href="https://bltlly.com/2v6LC2">https://bltlly.com/2v6LC2</a></b></p>
- <h2>What is Yu-Gi-Oh Duel Links?</h2>
- <p>Yu-Gi-Oh Duel Links is a popular online multiplayer card game based on the Yu-Gi-Oh anime and manga series. It lets you build your own deck from hundreds of characters and monsters and duel other players from around the world. You can also challenge legendary duelists from the anime, such as Yugi, Kaiba, Joey, Mai, and more. The game features stunning 3D animations, voice acting, and intuitive controls that make it easy to learn and fun to play.</p>
- <h2>Why play Yu-Gi-Oh Duel Links on a laptop?</h2>
- <p>While Yu-Gi-Oh Duel Links is designed for mobile devices, there are many reasons why you might want to play it on your laptop instead. Here are some of them:</p>
- <ul>
- <li>You can enjoy the game's graphics and animations on a bigger, higher-resolution screen.</li>
- <li>You can use the keyboard and mouse to control the game with more precision and comfort.</li>
- <li>You can save battery life and data usage by playing the game offline or over Wi-Fi.</li>
- <li>You can avoid interruptions from phone calls, messages, notifications, or low-battery alerts.</li>
- <li>You can access more features and options that may not be available in the mobile version.</li>
- </ul>
- <p>So, how do you download Yu-Gi-Oh Duel Links on a laptop? Well, there are different methods depending on which version of Windows you have. Let's take a look at each one.</p>
- <h2>How to download Yu-Gi-Oh Duel Links on a laptop with Windows 11</h2>
- <ol>
- <li>Make sure your Windows 11 PC has hardware virtualization enabled. You can check this under Task Manager > Performance. If not, you may need to enable it in your BIOS settings.</li>
- <li>Make sure your Windows 11 PC is updated to the latest version. You can check this under Settings > Update & Security > Windows Update.</li>
- <li>Download and install the Amazon Appstore app on your Windows 11 PC.</li>
- <li>Launch the Amazon Appstore app and sign in with your Amazon account. If you don't have one, you can create one for free.</li>
- <li>Search for "Yu-Gi-Oh Duel Links" or click <a href="">this link</a> to go to the game's page.</li>
- <li>Click the "Get" button and wait for the game to download and install.</li>
- <li>Launch the game from the Amazon Appstore app or from the Start menu.</li>
- <li>Enjoy playing Yu-Gi-Oh Duel Links on your laptop!</li>
- </ol>
- <p>That's it! You can now play Yu-Gi-Oh Duel Links on your Windows 11 laptop using the Windows Subsystem for Android and the Amazon Appstore. This method is fast, easy, and safe, and it does not require any third-party software or configuration. However, if you have Windows 10 or an older version of Windows, you will need to use a different method.</p>
- <h2>How to download Yu-Gi-Oh Duel Links on a laptop with Windows 10 or older</h2>
- <p>If you have Windows 10 or an older version of Windows, you can still download Yu-Gi-Oh Duel Links on your laptop using an Android emulator. An Android emulator is software that simulates an Android device on your PC, allowing you to run Android apps and games. There are many Android emulators available, but one of the most popular and reliable is BlueStacks. Here is how to use BlueStacks to download Yu-Gi-Oh Duel Links on your laptop:</p>
- <ol>
- <li>Go to <a href="">the official BlueStacks website</a> and download the latest version of BlueStacks for your PC.</li>
- <li>Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.</li>
- <li>Open the Google Play Store app and search for "Yu-Gi-Oh Duel Links", or click <a href="">this link</a> to go to the game's page.</li>
- <li>Click the "Install" button and wait for the game to download and install.</li>
- <li>Launch the game from the BlueStacks home screen or from the app drawer.</li>
- <li>Enjoy playing Yu-Gi-Oh Duel Links on your laptop!</li>
- </ol>
- <p>That's how you can download Yu-Gi-Oh Duel Links on your laptop with Windows 10 or older using BlueStacks. This method is simple and convenient, but it may require some system resources and storage space. You may also need to adjust some settings to optimize your gaming experience and performance. If you want to try another method, you can also sideload the Yu-Gi-Oh Duel Links APK onto your laptop.</p>
- <h2>How to sideload the Yu-Gi-Oh Duel Links APK onto a laptop</h2>
- <p>Sideloading is the process of transferring and installing an app from a source other than the official app store. In this case, you can sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device. This method can be useful if you want to play the game offline or if you have trouble accessing the Google Play Store or the Amazon Appstore. However, it can also involve some risks, such as malware infection or compatibility issues. We therefore recommend downloading APKs only from trusted sources and scanning them with antivirus software before installing them. Here is how to sideload the Yu-Gi-Oh Duel Links APK onto your laptop:</p>
- <ol>
- <li>On your Android device, go to Settings > Apps & notifications > Yu-Gi-Oh Duel Links > Storage & cache > Clear cache. This ensures you have the latest version of the game without any corrupted data.</li>
- <li>On your Android device, open a file explorer app and locate the Yu-Gi-Oh Duel Links APK file. The file name should be something like "com.konami.duellinks.apk". You can find it in internal storage or on the SD card under Android > data > com.konami.duellinks > files > download.</li>
- <li>Transfer the APK file to your laptop over Wi-Fi, email, or any other method you prefer.</li>
- <li>On your laptop, go to the folder where you saved the APK file and double-click it to install it. You may need to allow the installation of apps from unknown sources on your laptop. You can do this under Settings > Apps > Apps & features > Choose where to get apps and select Anywhere.</li>
- <li>Launch the game from the Start menu or from the desktop shortcut.</li>
- <li>Enjoy playing Yu-Gi-Oh Duel Links on your laptop!</li>
- </ol>
- <p>That's how you can sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device. This method is flexible and self-contained, but it can also be risky and complicated. You may have to update the APK file manually whenever a new version of the game is released, and you may run into bugs or errors that affect your gaming experience and performance. We therefore suggest using this method only as a last resort.</p>
- <h2>Tips and tricks for playing Yu-Gi-Oh Duel Links on a laptop</h2>
- <p>Now that you know how to download Yu-Gi-Oh Duel Links on your laptop, you may want to learn some tips and tricks to get the most out of your gaming experience and performance. Here are some of them:</p>
- <ul>
- <li>Adjust the game settings to match your laptop's specifications and your preferences. You can access the settings by tapping the gear icon in the top-right corner of the screen, where you can change graphics quality, sound volume, voice language, notifications, and more.</li>
- <li>Use mouse gestures to perform actions faster and more easily. You can view and customize mouse gestures by clicking the mouse icon in the bottom-right corner of the screen, and you can assign gestures for actions such as swiping, tapping, dragging, and so on.</li>
- <li>Sync your game progress across devices with your Konami ID or Google Play Games account. You can do this by tapping the data transfer icon in the top-left corner of the screen. You can also back up and restore your game data using this feature.</li>
- <li>Join a duel room or create your own to play with your friends or other players online. You can do this by tapping the duel room icon in the bottom-left corner of the screen. You can also chat with other players, send friend requests, and trade cards in the duel room.</li>
- </ul>
- <p>These are some of the tips and tricks that can help you play Yu-Gi-Oh Duel Links on your laptop more smoothly and enjoyably. Of course, there is much more to discover and learn as you play, so don't be afraid to experiment and explore!</p>
- <h2>Conclusion</h2>
- <p>In conclusion, Yu-Gi-Oh Duel Links is a fantastic card game that you can play on your laptop using different methods depending on your version of Windows. You can use the Windows Subsystem for Android and the Amazon Appstore if you have Windows 11, or an Android emulator such as BlueStacks if you have Windows 10 or older. You can also sideload the Yu-Gi-Oh Duel Links APK onto your laptop from your Android device if you want to try another option. Whichever method you choose, be sure to follow our tips and tricks to optimize your gaming experience and performance.</p>
- <p>So, what are you waiting for? Download Yu-Gi-Oh Duel Links on your laptop today and unleash your dueling skills! And don't forget to share this article with friends who might be interested in playing Yu-Gi-Oh Duel Links on their laptops too!</p>
- <h3>Frequently asked questions</h3>
- <ol>
- <li><b>Is Yu-Gi-Oh Duel Links free to play?</b><br>Yes, Yu-Gi-Oh Duel Links is free to play with in-app purchases. You can download and play the game without spending money, but you can also buy gems, cards, packs, and other items with real money if you want to enhance your gaming experience.</li>
- <li><b>Is Yu-Gi-Oh Duel Links safe to download?</b><br>Yes, Yu-Gi-Oh Duel Links is safe to download as long as you get it from a trusted source, such as the Google Play Store, the Amazon Appstore, or the official BlueStacks website. However, if you sideload the Yu-Gi-Oh Duel Links APK from an unknown source, you should scan it with antivirus software before installing it and watch out for malware or compatibility issues.</li>
- <li><b>Can I play Yu-Gi-Oh Duel Links offline?</b><br>No, Yu-Gi-Oh Duel Links requires an internet connection to play. You need to be online to access the game's features, such as duels, events, updates, and so on. However, you can play over Wi-Fi or Ethernet instead of using mobile data if you want to save on data usage and battery life.</li>
- <li><b>Can I play Yu-Gi-Oh Duel Links with a controller?</b><br>Yes, you can play Yu-Gi-Oh Duel Links with a controller if you use an Android emulator such as BlueStacks. You can connect your controller to your laptop via USB or Bluetooth and map the buttons to in-game actions. You can also use a gamepad app on your Android device to control the game on your laptop.</li>
- <li><b>Can I transfer my Yu-Gi-Oh Duel Links account from my mobile device to my laptop?</b><br>Yes, you can transfer your Yu-Gi-Oh Duel Links account from your mobile device to your laptop using your Konami ID or Google Play Games account. Tap the data transfer icon in the top-left corner of the screen and follow the instructions. You can also back up and restore your game data using this feature.</li>
- </ol>
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/search_scope.py DELETED
@@ -1,132 +0,0 @@
- import itertools
- import logging
- import os
- import posixpath
- import urllib.parse
- from typing import List
-
- from pip._vendor.packaging.utils import canonicalize_name
-
- from pip._internal.models.index import PyPI
- from pip._internal.utils.compat import has_tls
- from pip._internal.utils.misc import normalize_path, redact_auth_from_url
-
- logger = logging.getLogger(__name__)
-
-
- class SearchScope:
-
-     """
-     Encapsulates the locations that pip is configured to search.
-     """
-
-     __slots__ = ["find_links", "index_urls", "no_index"]
-
-     @classmethod
-     def create(
-         cls,
-         find_links: List[str],
-         index_urls: List[str],
-         no_index: bool,
-     ) -> "SearchScope":
-         """
-         Create a SearchScope object after normalizing the `find_links`.
-         """
-         # Build find_links. If an argument starts with ~, it may be
-         # a local file relative to a home directory. So try normalizing
-         # it and if it exists, use the normalized version.
-         # This is deliberately conservative - it might be fine just to
-         # blindly normalize anything starting with a ~...
-         built_find_links: List[str] = []
-         for link in find_links:
-             if link.startswith("~"):
-                 new_link = normalize_path(link)
-                 if os.path.exists(new_link):
-                     link = new_link
-             built_find_links.append(link)
-
-         # If we don't have TLS enabled, then WARN if anyplace we're looking
-         # relies on TLS.
-         if not has_tls():
-             for link in itertools.chain(index_urls, built_find_links):
-                 parsed = urllib.parse.urlparse(link)
-                 if parsed.scheme == "https":
-                     logger.warning(
-                         "pip is configured with locations that require "
-                         "TLS/SSL, however the ssl module in Python is not "
-                         "available."
-                     )
-                     break
-
-         return cls(
-             find_links=built_find_links,
-             index_urls=index_urls,
-             no_index=no_index,
-         )
-
-     def __init__(
-         self,
-         find_links: List[str],
-         index_urls: List[str],
-         no_index: bool,
-     ) -> None:
-         self.find_links = find_links
-         self.index_urls = index_urls
-         self.no_index = no_index
-
-     def get_formatted_locations(self) -> str:
-         lines = []
-         redacted_index_urls = []
-         if self.index_urls and self.index_urls != [PyPI.simple_url]:
-             for url in self.index_urls:
-                 redacted_index_url = redact_auth_from_url(url)
-
-                 # Parse the URL
-                 purl = urllib.parse.urlsplit(redacted_index_url)
-
-                 # URL is generally invalid if scheme and netloc is missing
-                 # there are issues with Python and URL parsing, so this test
-                 # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
-                 # always parse invalid URLs correctly - it should raise
-                 # exceptions for malformed URLs
-                 if not purl.scheme and not purl.netloc:
-                     logger.warning(
-                         'The index url "%s" seems invalid, please provide a scheme.',
-                         redacted_index_url,
-                     )
-
-                 redacted_index_urls.append(redacted_index_url)
-
-             lines.append(
-                 "Looking in indexes: {}".format(", ".join(redacted_index_urls))
-             )
-
-         if self.find_links:
-             lines.append(
-                 "Looking in links: {}".format(
-                     ", ".join(redact_auth_from_url(url) for url in self.find_links)
-                 )
-             )
-         return "\n".join(lines)
-
-     def get_index_urls_locations(self, project_name: str) -> List[str]:
-         """Returns the locations found via self.index_urls
-
-         Checks the url_name on the main (first in the list) index and
-         use this url_name to produce all locations
-         """
-
-         def mkurl_pypi_url(url: str) -> str:
-             loc = posixpath.join(
-                 url, urllib.parse.quote(canonicalize_name(project_name))
-             )
-             # For maximum compatibility with easy_install, ensure the path
-             # ends in a trailing slash. Although this isn't in the spec
-             # (and PyPI can handle it without the slash) some other index
-             # implementations might break if they relied on easy_install's
-             # behavior.
-             if not loc.endswith("/"):
-                 loc = loc + "/"
-             return loc
-
-         return [mkurl_pypi_url(url) for url in self.index_urls]
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/wheel.py DELETED
@@ -1,92 +0,0 @@
- """Represents a wheel file and provides access to the various parts of the
- name that have meaning.
- """
- import re
- from typing import Dict, Iterable, List
-
- from pip._vendor.packaging.tags import Tag
-
- from pip._internal.exceptions import InvalidWheelFilename
-
-
- class Wheel:
-     """A wheel file"""
-
-     wheel_file_re = re.compile(
-         r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
-         ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
-         \.whl|\.dist-info)$""",
-         re.VERBOSE,
-     )
-
-     def __init__(self, filename: str) -> None:
-         """
-         :raises InvalidWheelFilename: when the filename is invalid for a wheel
-         """
-         wheel_info = self.wheel_file_re.match(filename)
-         if not wheel_info:
-             raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
-         self.filename = filename
-         self.name = wheel_info.group("name").replace("_", "-")
-         # we'll assume "_" means "-" due to wheel naming scheme
-         # (https://github.com/pypa/pip/issues/1150)
-         self.version = wheel_info.group("ver").replace("_", "-")
-         self.build_tag = wheel_info.group("build")
-         self.pyversions = wheel_info.group("pyver").split(".")
-         self.abis = wheel_info.group("abi").split(".")
-         self.plats = wheel_info.group("plat").split(".")
-
-         # All the tag combinations from this file
-         self.file_tags = {
-             Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
-         }
-
-     def get_formatted_file_tags(self) -> List[str]:
-         """Return the wheel's tags as a sorted list of strings."""
-         return sorted(str(tag) for tag in self.file_tags)
-
-     def support_index_min(self, tags: List[Tag]) -> int:
-         """Return the lowest index that one of the wheel's file_tag combinations
-         achieves in the given list of supported tags.
-
-         For example, if there are 8 supported tags and one of the file tags
-         is first in the list, then return 0.
-
-         :param tags: the PEP 425 tags to check the wheel against, in order
-             with most preferred first.
-
-         :raises ValueError: If none of the wheel's file tags match one of
-             the supported tags.
-         """
-         try:
-             return next(i for i, t in enumerate(tags) if t in self.file_tags)
-         except StopIteration:
-             raise ValueError()
-
-     def find_most_preferred_tag(
-         self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
-     ) -> int:
-         """Return the priority of the most preferred tag that one of the wheel's file
-         tag combinations achieves in the given list of supported tags using the given
-         tag_to_priority mapping, where lower priorities are more-preferred.
-
-         This is used in place of support_index_min in some cases in order to avoid
-         an expensive linear scan of a large list of tags.
-
-         :param tags: the PEP 425 tags to check the wheel against.
-         :param tag_to_priority: a mapping from tag to priority of that tag, where
-             lower is more preferred.
-
-         :raises ValueError: If none of the wheel's file tags match one of
-             the supported tags.
-         """
-         return min(
-             tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
-         )
-
-     def supported(self, tags: Iterable[Tag]) -> bool:
-         """Return whether the wheel is compatible with one of the given tags.
-
-         :param tags: the PEP 425 tags to check the wheel against.
-         """
-         return not self.file_tags.isdisjoint(tags)
 
spaces/BlitzEsports/TextToImage/html2canvas.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/lvis_v0_5_categories.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/CVPR/GFPGAN-example/tests/test_utils.py DELETED
@@ -1,43 +0,0 @@
- import cv2
- from facexlib.utils.face_restoration_helper import FaceRestoreHelper
-
- from gfpgan.archs.gfpganv1_arch import GFPGANv1
- from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean
- from gfpgan.utils import GFPGANer
-
-
- def test_gfpganer():
-     # initialize with the clean model
-     restorer = GFPGANer(
-         model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',
-         upscale=2,
-         arch='clean',
-         channel_multiplier=2,
-         bg_upsampler=None)
-     # test attribute
-     assert isinstance(restorer.gfpgan, GFPGANv1Clean)
-     assert isinstance(restorer.face_helper, FaceRestoreHelper)
-
-     # initialize with the original model
-     restorer = GFPGANer(
-         model_path='experiments/pretrained_models/GFPGANv1.pth',
-         upscale=2,
-         arch='original',
-         channel_multiplier=1,
-         bg_upsampler=None)
-     # test attribute
-     assert isinstance(restorer.gfpgan, GFPGANv1)
-     assert isinstance(restorer.face_helper, FaceRestoreHelper)
-
-     # ------------------ test enhance ---------------- #
-     img = cv2.imread('tests/data/gt/00000000.png', cv2.IMREAD_COLOR)
-     result = restorer.enhance(img, has_aligned=False, paste_back=True)
-     assert result[0][0].shape == (512, 512, 3)
-     assert result[1][0].shape == (512, 512, 3)
-     assert result[2].shape == (1024, 1024, 3)
-
-     # with has_aligned=True
-     result = restorer.enhance(img, has_aligned=True, paste_back=False)
-     assert result[0][0].shape == (512, 512, 3)
-     assert result[1][0].shape == (512, 512, 3)
-     assert result[2] is None
 
spaces/CVPR/LIVE/pybind11/tests/test_cmake_build/test.py DELETED
@@ -1,6 +0,0 @@
- # -*- coding: utf-8 -*-
- import sys
- import test_cmake_build
-
- assert test_cmake_build.add(1, 2) == 3
- print("{} imports, runs, and adds: 1 + 2 = 3".format(sys.argv[1]))
 
spaces/CVPR/LIVE/pybind11/tools/FindEigen3.cmake DELETED
@@ -1,83 +0,0 @@
- # - Try to find Eigen3 lib
- #
- # This module supports requiring a minimum version, e.g. you can do
- #   find_package(Eigen3 3.1.2)
- # to require version 3.1.2 or newer of Eigen3.
- #
- # Once done this will define
- #
- #  EIGEN3_FOUND - system has eigen lib with correct version
- #  EIGEN3_INCLUDE_DIR - the eigen include directory
- #  EIGEN3_VERSION - eigen version
-
- # Copyright (c) 2006, 2007 Montel Laurent, <[email protected]>
- # Copyright (c) 2008, 2009 Gael Guennebaud, <[email protected]>
- # Copyright (c) 2009 Benoit Jacob <[email protected]>
- # Redistribution and use is allowed according to the terms of the 2-clause BSD license.
-
- if(NOT Eigen3_FIND_VERSION)
-   if(NOT Eigen3_FIND_VERSION_MAJOR)
-     set(Eigen3_FIND_VERSION_MAJOR 2)
-   endif(NOT Eigen3_FIND_VERSION_MAJOR)
-   if(NOT Eigen3_FIND_VERSION_MINOR)
-     set(Eigen3_FIND_VERSION_MINOR 91)
-   endif(NOT Eigen3_FIND_VERSION_MINOR)
-   if(NOT Eigen3_FIND_VERSION_PATCH)
-     set(Eigen3_FIND_VERSION_PATCH 0)
-   endif(NOT Eigen3_FIND_VERSION_PATCH)
-
-   set(Eigen3_FIND_VERSION
-       "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}")
- endif(NOT Eigen3_FIND_VERSION)
-
- macro(_eigen3_check_version)
-   file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header)
-
-   string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match
-          "${_eigen3_version_header}")
-   set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}")
-   string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match
-          "${_eigen3_version_header}")
-   set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}")
-   string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match
-          "${_eigen3_version_header}")
-   set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}")
-
-   set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION})
-   if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
-     set(EIGEN3_VERSION_OK FALSE)
-   else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
-     set(EIGEN3_VERSION_OK TRUE)
-   endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION})
-
-   if(NOT EIGEN3_VERSION_OK)
-
-     message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, "
-                    "but at least version ${Eigen3_FIND_VERSION} is required")
-   endif(NOT EIGEN3_VERSION_OK)
- endmacro(_eigen3_check_version)
-
- if(EIGEN3_INCLUDE_DIR)
-
-   # in cache already
-   _eigen3_check_version()
-   set(EIGEN3_FOUND ${EIGEN3_VERSION_OK})
-
- else(EIGEN3_INCLUDE_DIR)
-
-   find_path(
-     EIGEN3_INCLUDE_DIR
-     NAMES signature_of_eigen3_matrix_library
-     PATHS ${CMAKE_INSTALL_PREFIX}/include ${KDE4_INCLUDE_DIR}
-     PATH_SUFFIXES eigen3 eigen)
-
-   if(EIGEN3_INCLUDE_DIR)
-     _eigen3_check_version()
-   endif(EIGEN3_INCLUDE_DIR)
-
-   include(FindPackageHandleStandardArgs)
-   find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK)
-
-   mark_as_advanced(EIGEN3_INCLUDE_DIR)
-
- endif(EIGEN3_INCLUDE_DIR)
 
spaces/CVPR/MonoScene/helpers.py DELETED
@@ -1,336 +0,0 @@
- import numpy as np
- import torch
- import fusion
- import pandas as pd
- import plotly.express as px
- import plotly.graph_objects as go
-
- def read_calib(calib_path):
-     """
-     Modified from https://github.com/utiasSTARS/pykitti/blob/d3e1bb81676e831886726cc5ed79ce1f049aef2c/pykitti/utils.py#L68
-     :param calib_path: Path to a calibration text file.
-     :return: dict with calibration matrices.
-     """
-     calib_all = {}
-     with open(calib_path, "r") as f:
-         for line in f.readlines():
-             if line == "\n":
-                 break
-             key, value = line.split(":", 1)
-             calib_all[key] = np.array([float(x) for x in value.split()])
-
-     # reshape matrices
-     calib_out = {}
-     # 3x4 projection matrix for left camera
-     calib_out["P2"] = calib_all["P2"].reshape(3, 4)
-     calib_out["Tr"] = np.identity(4)  # 4x4 matrix
-     calib_out["Tr"][:3, :4] = calib_all["Tr"].reshape(3, 4)
-     return calib_out
-
-
- def vox2pix(cam_E, cam_k,
-             vox_origin, voxel_size,
-             img_W, img_H,
-             scene_size):
-     """
-     Compute the 2D projection of voxel centroids
-
-     Parameters
-     ----------
-     cam_E: 4x4
-         =camera pose in case of NYUv2 dataset
-         =Transformation from camera to lidar coordinate in case of SemKITTI
-     cam_k: 3x3
-         camera intrinsics
-     vox_origin: (3,)
-         world(NYU)/lidar(SemKITTI) coordinates of the voxel at index (0, 0, 0)
-     img_W: int
-         image width
-     img_H: int
-         image height
-     scene_size: (3,)
-         scene size in meter: (51.2, 51.2, 6.4) for SemKITTI and (4.8, 4.8, 2.88) for NYUv2
-
-     Returns
-     -------
-     projected_pix: (N, 2)
-         Projected 2D positions of voxels
-     fov_mask: (N,)
-         Voxel mask indicating voxels inside the image's FOV
-     pix_z: (N,)
-         Voxels' distance to the sensor in meter
-     """
-     # Compute the x, y, z bounding of the scene in meter
-     vol_bnds = np.zeros((3, 2))
-     vol_bnds[:, 0] = vox_origin
-     vol_bnds[:, 1] = vox_origin + np.array(scene_size)
-
-     # Compute the voxels centroids in lidar coordinates
-     vol_dim = np.ceil((vol_bnds[:, 1] - vol_bnds[:, 0]) / voxel_size).copy(order='C').astype(int)
-     xv, yv, zv = np.meshgrid(
-         range(vol_dim[0]),
-         range(vol_dim[1]),
-         range(vol_dim[2]),
-         indexing='ij'
-     )
-     vox_coords = np.concatenate([
-         xv.reshape(1, -1),
-         yv.reshape(1, -1),
-         zv.reshape(1, -1)
-     ], axis=0).astype(int).T
-
-     # Project voxels' centroids from lidar coordinates to camera coordinates
-     cam_pts = fusion.TSDFVolume.vox2world(vox_origin, vox_coords, voxel_size)
-     cam_pts = fusion.rigid_transform(cam_pts, cam_E)
-
-     # Project camera coordinates to pixel positions
-     projected_pix = fusion.TSDFVolume.cam2pix(cam_pts, cam_k)
-     pix_x, pix_y = projected_pix[:, 0], projected_pix[:, 1]
-
-     # Eliminate pixels outside view frustum
-     pix_z = cam_pts[:, 2]
-     fov_mask = np.logical_and(pix_x >= 0,
-                 np.logical_and(pix_x < img_W,
-                 np.logical_and(pix_y >= 0,
-                 np.logical_and(pix_y < img_H,
-                 pix_z > 0))))
-
-
-     return torch.from_numpy(projected_pix), torch.from_numpy(fov_mask), torch.from_numpy(pix_z)
-
-
-
- def get_grid_coords(dims, resolution):
-     """
-     :param dims: the dimensions of the grid [x, y, z] (i.e. [256, 256, 32])
-     :return coords_grid: is the center coords of voxels in the grid
-     """
-
-     g_xx = np.arange(0, dims[0] + 1)
-     g_yy = np.arange(0, dims[1] + 1)
-     sensor_pose = 10
-     g_zz = np.arange(0, dims[2] + 1)
-
-     # Obtaining the grid with coords...
-     xx, yy, zz = np.meshgrid(g_xx[:-1], g_yy[:-1], g_zz[:-1])
-     coords_grid = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T
-     coords_grid = coords_grid.astype(np.float)
-
-     coords_grid = (coords_grid * resolution) + resolution / 2
-
-     temp = np.copy(coords_grid)
-     temp[:, 0] = coords_grid[:, 1]
-     temp[:, 1] = coords_grid[:, 0]
-     coords_grid = np.copy(temp)
-
-     return coords_grid
-
- def get_projections(img_W, img_H):
-     scale_3ds = [1, 2]
-     data = {}
-     for scale_3d in scale_3ds:
-         scene_size = (51.2, 51.2, 6.4)
-         vox_origin = np.array([0, -25.6, -2])
-         voxel_size = 0.2
-
-         calib = read_calib("calib.txt")
-         cam_k = calib["P2"][:3, :3]
-         T_velo_2_cam = calib["Tr"]
-
-         # compute the 3D-2D mapping
-         projected_pix, fov_mask, pix_z = vox2pix(
-             T_velo_2_cam,
-             cam_k,
-             vox_origin,
-             voxel_size * scale_3d,
-             img_W,
-             img_H,
-             scene_size,
-         )
-
-         data["projected_pix_{}".format(scale_3d)] = projected_pix
-         data["pix_z_{}".format(scale_3d)] = pix_z
-         data["fov_mask_{}".format(scale_3d)] = fov_mask
-     return data
-
-
- def majority_pooling(grid, k_size=2):
-     result = np.zeros(
-         (grid.shape[0] // k_size, grid.shape[1] // k_size, grid.shape[2] // k_size)
-     )
-     for xx in range(0, int(np.floor(grid.shape[0] / k_size))):
-         for yy in range(0, int(np.floor(grid.shape[1] / k_size))):
-             for zz in range(0, int(np.floor(grid.shape[2] / k_size))):
-
-                 sub_m = grid[
-                     (xx * k_size) : (xx * k_size) + k_size,
-                     (yy * k_size) : (yy * k_size) + k_size,
-                     (zz * k_size) : (zz * k_size) + k_size,
-                 ]
-                 unique, counts = np.unique(sub_m, return_counts=True)
-                 if True in ((unique != 0) & (unique != 255)):
-                     # Remove counts with 0 and 255
-                     counts = counts[((unique != 0) & (unique != 255))]
-                     unique = unique[((unique != 0) & (unique != 255))]
-                 else:
-                     if True in (unique == 0):
-                         counts = counts[(unique != 255)]
-                         unique = unique[(unique != 255)]
-                 value = unique[np.argmax(counts)]
-                 result[xx, yy, zz] = value
-     return result
-
-
- def draw(
-     voxels,
-     # T_velo_2_cam,
-     # vox_origin,
-     fov_mask,
-     # img_size,
-     # f,
-     voxel_size=0.4,
-     # d=7,  # 7m - determine the size of the mesh representing the camera
- ):
-
-     fov_mask = fov_mask.reshape(-1)
-     # Compute the voxels coordinates
-     grid_coords = get_grid_coords(
-         [voxels.shape[0], voxels.shape[1], voxels.shape[2]], voxel_size
-     )
-
-
-     # Attach the predicted class to every voxel
-     grid_coords = np.vstack([grid_coords.T, voxels.reshape(-1)]).T
-
-     # Get the voxels inside FOV
-     fov_grid_coords = grid_coords[fov_mask, :]
-
-     # Get the voxels outside FOV
-     outfov_grid_coords = grid_coords[~fov_mask, :]
-
-     # Remove empty and unknown voxels
-     fov_voxels = fov_grid_coords[
-         (fov_grid_coords[:, 3] > 0) & (fov_grid_coords[:, 3] < 255), :
-     ]
-     # print(np.unique(fov_voxels[:, 3], return_counts=True))
-     outfov_voxels = outfov_grid_coords[
-         (outfov_grid_coords[:, 3] > 0) & (outfov_grid_coords[:, 3] < 255), :
-     ]
-
-     # figure = mlab.figure(size=(1400, 1400), bgcolor=(1, 1, 1))
-     colors = np.array(
-         [
-             [0, 0, 0],
-             [100, 150, 245],
-             [100, 230, 245],
-             [30, 60, 150],
-             [80, 30, 180],
-             [100, 80, 250],
-             [255, 30, 30],
-             [255, 40, 200],
-             [150, 30, 90],
-             [255, 0, 255],
-             [255, 150, 255],
-             [75, 0, 75],
-             [175, 0, 75],
-             [255, 200, 0],
-             [255, 120, 50],
-             [0, 175, 0],
-             [135, 60, 0],
-             [150, 240, 80],
-             [255, 240, 150],
-             [255, 0, 0],
-         ]
-     ).astype(np.uint8)
-
-     pts_colors = [f'rgb({colors[int(i)][0]}, {colors[int(i)][1]}, {colors[int(i)][2]})' for i in fov_voxels[:, 3]]
-     out_fov_colors = [f'rgb({colors[int(i)][0]//3*2}, {colors[int(i)][1]//3*2}, {colors[int(i)][2]//3*2})' for i in outfov_voxels[:, 3]]
-     pts_colors = pts_colors + out_fov_colors
-
-     fov_voxels = np.concatenate([fov_voxels, outfov_voxels], axis=0)
-     x = fov_voxels[:, 0].flatten()
-     y = fov_voxels[:, 1].flatten()
-     z = fov_voxels[:, 2].flatten()
-     # label = fov_voxels[:, 3].flatten()
-     fig = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z, mode='markers',
-         marker=dict(
-             size=2,
-             color=pts_colors,  # set color to an array/list of desired values
-             # colorscale='Viridis',  # choose a colorscale
-             opacity=1.0,
-             symbol='square'
-         ))])
-     fig.update_layout(
-         scene = dict(
-             aspectmode='data',
-             xaxis = dict(
-                 backgroundcolor="rgb(255, 255, 255)",
-                 gridcolor="black",
-                 showbackground=True,
-                 zerolinecolor="black",
-                 nticks=4,
-                 visible=False,
-                 range=[-1, 55],),
-             yaxis = dict(
-                 backgroundcolor="rgb(255, 255, 255)",
-                 gridcolor="black",
-                 showbackground=True,
-                 zerolinecolor="black",
-                 visible=False,
-                 nticks=4, range=[-1, 55],),
-             zaxis = dict(
-                 backgroundcolor="rgb(255, 255, 255)",
-                 gridcolor="black",
-                 showbackground=True,
-                 zerolinecolor="black",
-                 visible=False,
-                 nticks=4, range=[-1, 7],),
-             bgcolor="black",
-         ),
-
-     )
-
-     # fig = px.scatter_3d(
-     #     fov_voxels,
-     #     x=fov_voxels[:, 0], y="y", z="z", color="label")
-     # Draw occupied inside FOV voxels
-     # plt_plot_fov = mlab.points3d(
-     #     fov_voxels[:, 0],
-     #     fov_voxels[:, 1],
-     #     fov_voxels[:, 2],
-     #     fov_voxels[:, 3],
-     #     colormap="viridis",
-     #     scale_factor=voxel_size - 0.05 * voxel_size,
-     #     mode="cube",
-     #     opacity=1.0,
-     #     vmin=1,
-     #     vmax=19,
-     # )
-
-     # # Draw occupied outside FOV voxels
-     # plt_plot_outfov = mlab.points3d(
-     #     outfov_voxels[:, 0],
-     #     outfov_voxels[:, 1],
-     #     outfov_voxels[:, 2],
-     #     outfov_voxels[:, 3],
-     #     colormap="viridis",
-     #     scale_factor=voxel_size - 0.05 * voxel_size,
-     #     mode="cube",
-     #     opacity=1.0,
-     #     vmin=1,
-     #     vmax=19,
-     # )
-
-
-
-     # plt_plot_fov.glyph.scale_mode = "scale_by_vector"
-     # plt_plot_outfov.glyph.scale_mode = "scale_by_vector"
-
-     # plt_plot_fov.module_manager.scalar_lut_manager.lut.table = colors
-
-     # outfov_colors = colors
-     # outfov_colors[:, :3] = outfov_colors[:, :3] // 3 * 2
-     # plt_plot_outfov.module_manager.scalar_lut_manager.lut.table = outfov_colors
-
-     # mlab.show()
-     return fig
 
spaces/CVPR/Text2Human/Text2Human/train_parsing_gen.py DELETED
@@ -1,136 +0,0 @@
- import argparse
- import logging
- import os
- import os.path as osp
- import random
- import time
-
- import torch
-
- from data.parsing_generation_segm_attr_dataset import \
-     ParsingGenerationDeepFashionAttrSegmDataset
- from models import create_model
- from utils.logger import MessageLogger, get_root_logger, init_tb_logger
- from utils.options import dict2str, dict_to_nonedict, parse
- from utils.util import make_exp_dirs
-
-
- def main():
-     # options
-     parser = argparse.ArgumentParser()
-     parser.add_argument('-opt', type=str, help='Path to option YAML file.')
-     args = parser.parse_args()
-     opt = parse(args.opt, is_train=True)
-
-     # mkdir and loggers
-     make_exp_dirs(opt)
-     log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log")
-     logger = get_root_logger(
-         logger_name='base', log_level=logging.INFO, log_file=log_file)
-     logger.info(dict2str(opt))
-     # initialize tensorboard logger
-     tb_logger = None
-     if opt['use_tb_logger'] and 'debug' not in opt['name']:
-         tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name'])
-
-     # convert to NoneDict, which returns None for missing keys
-     opt = dict_to_nonedict(opt)
-
-     # set up data loader
-     train_dataset = ParsingGenerationDeepFashionAttrSegmDataset(
-         segm_dir=opt['segm_dir'],
-         pose_dir=opt['pose_dir'],
-         ann_file=opt['train_ann_file'])
-     train_loader = torch.utils.data.DataLoader(
-         dataset=train_dataset,
-         batch_size=opt['batch_size'],
-         shuffle=True,
-         num_workers=opt['num_workers'],
-         drop_last=True)
-     logger.info(f'Number of train set: {len(train_dataset)}.')
-     opt['max_iters'] = opt['num_epochs'] * len(
-         train_dataset) // opt['batch_size']
-
-     val_dataset = ParsingGenerationDeepFashionAttrSegmDataset(
-         segm_dir=opt['segm_dir'],
-         pose_dir=opt['pose_dir'],
-         ann_file=opt['val_ann_file'])
-     val_loader = torch.utils.data.DataLoader(
-         dataset=val_dataset,
-         batch_size=1,
-         shuffle=False,
-         num_workers=opt['num_workers'])
-     logger.info(f'Number of val set: {len(val_dataset)}.')
-
-     test_dataset = ParsingGenerationDeepFashionAttrSegmDataset(
-         segm_dir=opt['segm_dir'],
-         pose_dir=opt['pose_dir'],
-         ann_file=opt['test_ann_file'])
-     test_loader = torch.utils.data.DataLoader(
-         dataset=test_dataset,
-         batch_size=1,
-         shuffle=False,
-         num_workers=opt['num_workers'])
-     logger.info(f'Number of test set: {len(test_dataset)}.')
-
-     current_iter = 0
-     best_epoch = None
-     best_acc = 0
-
-     model = create_model(opt)
-
-     data_time, iter_time = 0, 0
-     current_iter = 0
-
-     # create message logger (formatted outputs)
-     msg_logger = MessageLogger(opt, current_iter, tb_logger)
-
-     for epoch in range(opt['num_epochs']):
-         lr = model.update_learning_rate(epoch)
-
-         for _, batch_data in enumerate(train_loader):
-             data_time = time.time() - data_time
-
-             current_iter += 1
-
-             model.feed_data(batch_data)
-             model.optimize_parameters()
-
-             iter_time = time.time() - iter_time
-             if current_iter % opt['print_freq'] == 0:
-                 log_vars = {'epoch': epoch, 'iter': current_iter}
-                 log_vars.update({'lrs': [lr]})
-                 log_vars.update({'time': iter_time, 'data_time': data_time})
-                 log_vars.update(model.get_current_log())
-                 msg_logger(log_vars)
-
-             data_time = time.time()
-             iter_time = time.time()
-
-         if epoch % opt['val_freq'] == 0:
-             save_dir = f'{opt["path"]["visualization"]}/valset/epoch_{epoch:03d}'
-             os.makedirs(save_dir, exist_ok=opt['debug'])
-             val_acc = model.inference(val_loader, save_dir)
-
-             save_dir = f'{opt["path"]["visualization"]}/testset/epoch_{epoch:03d}'
-             os.makedirs(save_dir, exist_ok=opt['debug'])
-             test_acc = model.inference(test_loader, save_dir)
-
-             logger.info(f'Epoch: {epoch}, '
-                         f'val_acc: {val_acc: .4f}, '
-                         f'test_acc: {test_acc: .4f}.')
-
-             if test_acc > best_acc:
-                 best_epoch = epoch
-                 best_acc = test_acc
-
-             logger.info(f'Best epoch: {best_epoch}, '
-                         f'Best test acc: {best_acc: .4f}.')
-
-             # save model
-             model.save_network(
-                 f'{opt["path"]["models"]}/parsing_generation_epoch{epoch}.pth')
-
-
- if __name__ == '__main__':
-     main()
 
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h DELETED
@@ -1,115 +0,0 @@
- // Copyright (c) Facebook, Inc. and its affiliates.
- #pragma once
- #include <torch/types.h>
-
- namespace detectron2 {
-
- at::Tensor ROIAlignRotated_forward_cpu(
-     const at::Tensor& input,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int sampling_ratio);
-
- at::Tensor ROIAlignRotated_backward_cpu(
-     const at::Tensor& grad,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int batch_size,
-     const int channels,
-     const int height,
-     const int width,
-     const int sampling_ratio);
-
- #if defined(WITH_CUDA) || defined(WITH_HIP)
- at::Tensor ROIAlignRotated_forward_cuda(
-     const at::Tensor& input,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int sampling_ratio);
-
- at::Tensor ROIAlignRotated_backward_cuda(
-     const at::Tensor& grad,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int batch_size,
-     const int channels,
-     const int height,
-     const int width,
-     const int sampling_ratio);
- #endif
-
- // Interface for Python
- inline at::Tensor ROIAlignRotated_forward(
-     const at::Tensor& input,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int sampling_ratio) {
-   if (input.is_cuda()) {
- #if defined(WITH_CUDA) || defined(WITH_HIP)
-     return ROIAlignRotated_forward_cuda(
-         input,
-         rois,
-         spatial_scale,
-         pooled_height,
-         pooled_width,
-         sampling_ratio);
- #else
-     AT_ERROR("Not compiled with GPU support");
- #endif
-   }
-   return ROIAlignRotated_forward_cpu(
-       input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
- }
-
- inline at::Tensor ROIAlignRotated_backward(
-     const at::Tensor& grad,
-     const at::Tensor& rois,
-     const float spatial_scale,
-     const int pooled_height,
-     const int pooled_width,
-     const int batch_size,
-     const int channels,
-     const int height,
-     const int width,
-     const int sampling_ratio) {
-   if (grad.is_cuda()) {
- #if defined(WITH_CUDA) || defined(WITH_HIP)
-     return ROIAlignRotated_backward_cuda(
-         grad,
-         rois,
-         spatial_scale,
-         pooled_height,
-         pooled_width,
-         batch_size,
-         channels,
-         height,
-         width,
-         sampling_ratio);
- #else
-     AT_ERROR("Not compiled with GPU support");
- #endif
-   }
-   return ROIAlignRotated_backward_cpu(
-       grad,
-       rois,
-       spatial_scale,
-       pooled_height,
-       pooled_width,
-       batch_size,
-       channels,
-       height,
-       width,
-       sampling_ratio);
- }
-
- } // namespace detectron2
 
spaces/CazimirRoman/summarize-your-webpage-api-with-gradio/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: URL Summarizer API (Gradio)
- emoji: 🐨
- colorFrom: green
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Chemsseddine/summarisation/app.py DELETED
@@ -1,81 +0,0 @@
- from transformers import RobertaTokenizerFast, EncoderDecoderModel
- import torch
- # initialize the tokenizer
- device = "cuda" if torch.cuda.is_available() else "cpu"
- tokenizer = RobertaTokenizerFast.from_pretrained("Chemsseddine/bert2gpt2SUMM-finetuned-mlsum")
- # Chemsseddine/bert2gpt2SUMM-finetuned-mlsum
- # set the special tokens
- tokenizer.bos_token = tokenizer.cls_token
- tokenizer.eos_token = tokenizer.sep_token
- # initialize the model
- model = EncoderDecoderModel.from_pretrained("Chemsseddine/bert2gpt2SUMM-finetuned-mlsum").to(device)
- # tf.random.set_seed(0)
- # generate summary
- def generateSumm(input_texte, max, min):
-     # encode the input text
-     if input_texte and input_texte.strip():
-         if min < len(input_texte):
-
-             if max > min:
-
-                 input_ids = tokenizer.encode(input_texte, return_tensors='pt')
-                 # generate the summary from the encoded text
-                 summary_ids = model.generate(input_ids,  # the encoded text
-                     max_length=max,  # maximum length of the output sequence
-                     min_length=min,  # minimum length of the output sequence
-
-                     num_beams=5,
-                     repetition_penalty=2.5,
-                     length_penalty=1.0,
-                     early_stopping=True,  # end generation once all beam hypotheses have reached the EOS token
-                     no_repeat_ngram_size=2,  # no 2-gram may appear twice, to avoid repeating the same text
-                     use_cache=True,
-                     do_sample=True,
-                     # num_return_sequences=5,
-                     temperature=0.8,
-                     top_k=50,
-                     top_p=0.95)
-                 # decode the sequence generated by the model
-                 summary_text = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-                 return summary_text
-             else:
-
-                 summary_text = "The minimum length is greater than the maximum"
-                 return summary_text
-         else:
-             summary_text = "The text you entered is shorter than the minimum length you chose"
-             return summary_text
-
-     else:
-         summary_text = "Please enter your text"
-         return summary_text
-
-
- from difflib import Differ
- import gradio as gr
- demo = gr.Blocks()
-
- def diff_texts(text1, text2):
-     d = Differ()
-     return [
-         (token[2:], token[0] if token[0] != " " else None)
-         for token in d.compare(text1.split(), text2.split())]
-
-
- inp = gr.inputs.Textbox(label="Original Text", placeholder="Enter text here...")
- out = gr.outputs.Textbox(label="Summary")
- mx_length = gr.Slider(40, 512)
- mn_length = gr.Slider(10, 120)
-
- with demo:
-     gr.Markdown("***<center>Summarize your text using AI.</center>***\n\n You can summarize your text by entering the original text, and you can compare the result with your original text by clicking on Compare result ")
-
-     with gr.Tabs():
-
-         with gr.TabItem("Summary"):
-             gr.Interface(fn=generateSumm, inputs=[inp, mx_length, mn_length], outputs=out, cache_examples=True, allow_flagging=False
-             )
-         with gr.TabItem("Compare result"):
-             gr.Interface(diff_texts, [inp, out], gr.HighlightedText(label="Difference"), allow_flagging=False)
-
- demo.launch(debug=True)
 
spaces/CikeyQI/meme-api/meme_generator/memes/listen_music/__init__.py DELETED
@@ -1,26 +0,0 @@
- from pathlib import Path
- from typing import List
-
- from PIL.Image import Image as IMG
- from pil_utils import BuildImage
-
- from meme_generator import add_meme
- from meme_generator.utils import save_gif
-
- img_dir = Path(__file__).parent / "images"
-
-
- def listen_music(images: List[BuildImage], texts, args):
-     img = images[0].convert("RGBA")
-     frame = BuildImage.open(img_dir / "0.png")
-     frames: List[IMG] = []
-     for i in range(0, 360, 10):
-         frames.append(
-             frame.copy()
-             .paste(img.rotate(-i).resize((215, 215)), (100, 100), below=True)
-             .image
-         )
-     return save_gif(frames, 0.05)
-
-
- add_meme("listen_music", listen_music, min_images=1, max_images=1, keywords=["听音乐"])
 
spaces/CofAI/chat.b4/client/js/chat.js DELETED
@@ -1,508 +0,0 @@
- const query = (obj) =>
-     Object.keys(obj)
-         .map((k) => encodeURIComponent(k) + "=" + encodeURIComponent(obj[k]))
-         .join("&");
- const url_prefix = document.querySelector("body").getAttribute("data-urlprefix");
- const markdown = window.markdownit();
- const message_box = document.getElementById(`messages`);
- const message_input = document.getElementById(`message-input`);
- const box_conversations = document.querySelector(`.top`);
- const spinner = box_conversations.querySelector(".spinner");
- const stop_generating = document.querySelector(`.stop-generating`);
- const send_button = document.querySelector(`#send-button`);
- const user_image = `<img src="${url_prefix}/assets/img/user.png" alt="User Avatar">`;
- const gpt_image = `<img src="${url_prefix}/assets/img/gpt.png" alt="GPT Avatar">`;
- let prompt_lock = false;
-
- hljs.addPlugin(new CopyButtonPlugin());
-
- message_input.addEventListener("blur", () => {
-     window.scrollTo(0, 0);
- });
-
- message_input.addEventListener("focus", () => {
-     document.documentElement.scrollTop = document.documentElement.scrollHeight;
- });
-
- const delete_conversations = async () => {
-     localStorage.clear();
-     await new_conversation();
- };
-
- const handle_ask = async () => {
-     message_input.style.height = `80px`;
-     window.scrollTo(0, 0);
-     let message = message_input.value;
-
-     if (message.length > 0) {
-         message_input.value = ``;
-         message_input.dispatchEvent(new Event("input"));
-         await ask_gpt(message);
-     }
- };
-
- const remove_cancel_button = async () => {
-     stop_generating.classList.add(`stop-generating-hiding`);
-
-     setTimeout(() => {
-         stop_generating.classList.remove(`stop-generating-hiding`);
-         stop_generating.classList.add(`stop-generating-hidden`);
-     }, 300);
- };
-
- const ask_gpt = async (message) => {
-     try {
-         message_input.value = ``;
-         message_input.innerHTML = ``;
-         message_input.innerText = ``;
-
-         add_conversation(window.conversation_id, message.substr(0, 16));
-         window.scrollTo(0, 0);
-         window.controller = new AbortController();
-
-         jailbreak = document.getElementById("jailbreak");
-         model = document.getElementById("model");
-         prompt_lock = true;
-         window.text = ``;
-         window.token = message_id();
-
-         stop_generating.classList.remove(`stop-generating-hidden`);
-
-         add_user_message_box(message);
-
-         message_box.scrollTop = message_box.scrollHeight;
-         window.scrollTo(0, 0);
-         await new Promise((r) => setTimeout(r, 500));
-         window.scrollTo(0, 0);
-
-         message_box.innerHTML += `
-             <div class="message">
-                 <div class="avatar-container">
-                     ${gpt_image}
-                 </div>
-                 <div class="content" id="gpt_${window.token}">
-                     <div id="cursor"></div>
-                 </div>
-             </div>
-         `;
-
-         message_box.scrollTop = message_box.scrollHeight;
-         window.scrollTo(0, 0);
-         await new Promise((r) => setTimeout(r, 1000));
-         window.scrollTo(0, 0);
-
-         const response = await fetch(`${url_prefix}/backend-api/v2/conversation`, {
-             method: `POST`,
-             signal: window.controller.signal,
-             headers: {
-                 "content-type": `application/json`,
-                 accept: `text/event-stream`,
-             },
-             body: JSON.stringify({
-                 conversation_id: window.conversation_id,
-                 action: `_ask`,
-                 model: model.options[model.selectedIndex].value,
-                 jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
-                 meta: {
-                     id: window.token,
-                     content: {
-                         conversation: await get_conversation(window.conversation_id),
-                         internet_access: document.getElementById("switch").checked,
-                         content_type: "text",
-                         parts: [
-                             {
-                                 content: message,
-                                 role: "user",
-                             },
-                         ],
-                     },
-                 },
-             }),
-         });
-
-         const reader = response.body.getReader();
-
-         while (true) {
-             const { value, done } = await reader.read();
-             if (done) break;
-
-             chunk = decodeUnicode(new TextDecoder().decode(value));
-
-             if (
-                 chunk.includes(`<form id="challenge-form" action="${url_prefix}/backend-api/v2/conversation?`)
-             ) {
-                 chunk = `cloudflare token expired, please refresh the page.`;
-             }
-
-             text += chunk;
-
-             document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
-             document.querySelectorAll(`code`).forEach((el) => {
-                 hljs.highlightElement(el);
-             });
-
-             window.scrollTo(0, 0);
-             message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
-         }
-
-         // if text contains :
-         if (text.includes(`instead. Maintaining this website and API costs a lot of money`)) {
-             document.getElementById(`gpt_${window.token}`).innerHTML =
-                 "An error occurred, please reload / refresh cache and try again.";
-         }
-
-         add_message(window.conversation_id, "user", message);
-         add_message(window.conversation_id, "assistant", text);
-
-         message_box.scrollTop = message_box.scrollHeight;
-         await remove_cancel_button();
-         prompt_lock = false;
-
-         await load_conversations(20, 0);
-         window.scrollTo(0, 0);
-     } catch (e) {
-         add_message(window.conversation_id, "user", message);
-
-         message_box.scrollTop = message_box.scrollHeight;
-         await remove_cancel_button();
-         prompt_lock = false;
-
-         await load_conversations(20, 0);
-
-         console.log(e);
-
-         let cursorDiv = document.getElementById(`cursor`);
-         if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
-
-         if (e.name != `AbortError`) {
-             let error_message = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
-
-             document.getElementById(`gpt_${window.token}`).innerHTML = error_message;
-             add_message(window.conversation_id, "assistant", error_message);
-         } else {
-             document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
-             add_message(window.conversation_id, "assistant", text + ` [aborted]`);
-         }
-
-         window.scrollTo(0, 0);
-     }
- };
-
- const add_user_message_box = (message) => {
-     const messageDiv = createElement("div", { classNames: ["message"] });
-     const avatarContainer = createElement("div", { classNames: ["avatar-container"], innerHTML: user_image });
-     const contentDiv = createElement("div", {
-         classNames: ["content"],
-         id: `user_${token}`,
-         textContent: message,
-     });
-
-     messageDiv.append(avatarContainer, contentDiv);
-     message_box.appendChild(messageDiv);
- };
-
- const decodeUnicode = (str) => {
-     return str.replace(/\\u([a-fA-F0-9]{4})/g, function (match, grp) {
-         return String.fromCharCode(parseInt(grp, 16));
-     });
- };
-
- const clear_conversations = async () => {
-     const elements = box_conversations.childNodes;
-     let index = elements.length;
-
-     if (index > 0) {
-         while (index--) {
-             const element = elements[index];
-             if (element.nodeType === Node.ELEMENT_NODE && element.tagName.toLowerCase() !== `button`) {
-                 box_conversations.removeChild(element);
-             }
-         }
-     }
- };
-
- const clear_conversation = async () => {
-     let messages = message_box.getElementsByTagName(`div`);
-
-     while (messages.length > 0) {
-         message_box.removeChild(messages[0]);
-     }
- };
-
- const delete_conversation = async (conversation_id) => {
-     localStorage.removeItem(`conversation:${conversation_id}`);
-
-     if (window.conversation_id == conversation_id) {
-         await new_conversation();
-     }
-
-     await load_conversations(20, 0, true);
- };
-
- const set_conversation = async (conversation_id) => {
-     history.pushState({}, null, `${url_prefix}/chat/${conversation_id}`);
-     window.conversation_id = conversation_id;
-
-     await clear_conversation();
-     await load_conversation(conversation_id);
-     await load_conversations(20, 0, true);
- };
-
- const new_conversation = async () => {
-     history.pushState({}, null, `${url_prefix}/chat/`);
-     window.conversation_id = uuid();
-
-     await clear_conversation();
-     await load_conversations(20, 0, true);
- };
-
- const load_conversation = async (conversation_id) => {
-     let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
-     console.log(conversation, conversation_id);
-
-     for (item of conversation.items) {
-         if (is_assistant(item.role)) {
-             message_box.innerHTML += load_gpt_message_box(item.content);
-         } else {
-             message_box.innerHTML += load_user_message_box(item.content);
-         }
-     }
-
-     document.querySelectorAll(`code`).forEach((el) => {
-         hljs.highlightElement(el);
-     });
-
-     message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
-
-     setTimeout(() => {
-         message_box.scrollTop = message_box.scrollHeight;
-     }, 500);
- };
-
- const load_user_message_box = (content) => {
-     const messageDiv = createElement("div", { classNames: ["message"] });
-     const avatarContainer = createElement("div", { classNames: ["avatar-container"], innerHTML: user_image });
-     const contentDiv = createElement("div", { classNames: ["content"] });
-     const preElement = document.createElement("pre");
-     preElement.textContent = content;
-     contentDiv.appendChild(preElement);
-
-     messageDiv.append(avatarContainer, contentDiv);
-
-     return messageDiv.outerHTML;
- };
-
- const load_gpt_message_box = (content) => {
-     return `
-         <div class="message">
-             <div class="avatar-container">
-                 ${gpt_image}
-             </div>
-             <div class="content">
-                 ${markdown.render(content)}
-             </div>
-         </div>
-     `;
- };
-
- const is_assistant = (role) => {
-     return role == "assistant";
- };
-
- const get_conversation = async (conversation_id) => {
-     let conversation = await JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
-     return conversation.items;
- };
-
- const add_conversation = async (conversation_id, title) => {
-     if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
-         localStorage.setItem(
-             `conversation:${conversation_id}`,
-             JSON.stringify({
-                 id: conversation_id,
-                 title: title,
-                 items: [],
-             })
-         );
-     }
- };
-
- const add_message = async (conversation_id, role, content) => {
-     before_adding = JSON.parse(localStorage.getItem(`conversation:${conversation_id}`));
-
-     before_adding.items.push({
-         role: role,
-         content: content,
-     });
-
-     localStorage.setItem(`conversation:${conversation_id}`, JSON.stringify(before_adding)); // update conversation
- };
-
- const load_conversations = async (limit, offset, loader) => {
-     //console.log(loader);
-     //if (loader === undefined) box_conversations.appendChild(spinner);
-
-     let conversations = [];
-     for (let i = 0; i < localStorage.length; i++) {
-         if (localStorage.key(i).startsWith("conversation:")) {
-             let conversation = localStorage.getItem(localStorage.key(i));
-             conversations.push(JSON.parse(conversation));
-         }
-     }
-
-     //if (loader === undefined) spinner.parentNode.removeChild(spinner)
-     await clear_conversations();
-
-     for (conversation of conversations) {
-         box_conversations.innerHTML += `
-             <div class="conversation-sidebar">
-                 <div class="left" onclick="set_conversation('${conversation.id}')">
-                     <i class="fa-regular fa-comments"></i>
-                     <span class="conversation-title">${conversation.title}</span>
-                 </div>
-                 <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-trash"></i>
-             </div>
-         `;
-     }
-
-     document.querySelectorAll(`code`).forEach((el) => {
-         hljs.highlightElement(el);
-     });
- };
-
- document.getElementById(`cancelButton`).addEventListener(`click`, async () => {
-     window.controller.abort();
-     console.log(`aborted ${window.conversation_id}`);
- });
-
- function h2a(str1) {
-     var hex = str1.toString();
-     var str = "";
-
-     for (var n = 0; n < hex.length; n += 2) {
-         str += String.fromCharCode(parseInt(hex.substr(n, 2), 16));
-     }
-
-     return str;
- }
-
- const uuid = () => {
-     return `xxxxxxxx-xxxx-4xxx-yxxx-${Date.now().toString(16)}`.replace(/[xy]/g, function (c) {
-         var r = (Math.random() * 16) | 0,
-             v = c == "x" ? r : (r & 0x3) | 0x8;
-         return v.toString(16);
-     });
- };
-
- const message_id = () => {
-     random_bytes = (Math.floor(Math.random() * 1338377565) + 2956589730).toString(2);
-     unix = Math.floor(Date.now() / 1000).toString(2);
-
-     return BigInt(`0b${unix}${random_bytes}`).toString();
- };
-
- window.onload = async () => {
-     load_settings_localstorage();
-
-     conversations = 0;
-     for (let i = 0; i < localStorage.length; i++) {
-         if (localStorage.key(i).startsWith("conversation:")) {
-             conversations += 1;
-         }
-     }
-
-     if (conversations == 0) localStorage.clear();
-
-     await setTimeout(() => {
-         load_conversations(20, 0);
-     }, 1);
-
-     if (!window.location.href.endsWith(`#`)) {
-         if (/\/chat\/.+/.test(window.location.href.slice(url_prefix.length))) {
-             await load_conversation(window.conversation_id);
-         }
-     }
-
-     message_input.addEventListener("keydown", async (evt) => {
-         if (prompt_lock) return;
-
-         if (evt.key === "Enter" && !evt.shiftKey) {
-             evt.preventDefault();
-             await handle_ask();
-         }
-     });
-
-     send_button.addEventListener("click", async (event) => {
-         event.preventDefault();
-         if (prompt_lock) return;
-         message_input.blur();
-         await handle_ask();
-     });
-
-     register_settings_localstorage();
- };
-
- const register_settings_localstorage = async () => {
-     settings_ids = ["switch", "model", "jailbreak"];
-     settings_elements = settings_ids.map((id) => document.getElementById(id));
-     settings_elements.map((element) =>
-         element.addEventListener(`change`, async (event) => {
-             switch (event.target.type) {
-                 case "checkbox":
-                     localStorage.setItem(event.target.id, event.target.checked);
-                     break;
-                 case "select-one":
-                     localStorage.setItem(event.target.id, event.target.selectedIndex);
-                     break;
-                 default:
-                     console.warn("Unresolved element type");
-             }
-         })
-     );
- };
-
- const load_settings_localstorage = async () => {
-     settings_ids = ["switch", "model", "jailbreak"];
-     settings_elements = settings_ids.map((id) => document.getElementById(id));
-     settings_elements.map((element) => {
-         if (localStorage.getItem(element.id)) {
-             switch (element.type) {
-                 case "checkbox":
-                     element.checked = localStorage.getItem(element.id) === "true";
-                     break;
-                 case "select-one":
-                     element.selectedIndex = parseInt(localStorage.getItem(element.id));
-                     break;
-                 default:
-                     console.warn("Unresolved element type");
-             }
-         }
-     });
- };
-
- function clearTextarea(textarea) {
-     textarea.style.removeProperty("height");
-     textarea.style.height = `${textarea.scrollHeight + 4}px`;
-     if (textarea.value.trim() === "" && textarea.value.includes("\n")) {
-         textarea.value = "";
-     }
- }
-
- function createElement(tag, { classNames, id, innerHTML, textContent } = {}) {
-     const el = document.createElement(tag);
-     if (classNames) {
-         el.classList.add(...classNames);
-     }
-     if (id) {
-         el.id = id;
-     }
-     if (innerHTML) {
-         el.innerHTML = innerHTML;
-     }
-     if (textContent) {
-         const preElement = document.createElement("pre");
-         preElement.textContent = textContent;
-         el.appendChild(preElement);
-     }
-     return el;
- }