parquet-converter committed on
Commit
9cc681c
·
1 Parent(s): ca0cdd8

Update parquet files (step 17 of 249)

This view is limited to 50 files because it contains too many changes. See the raw diff for the rest.
Files changed (50)
  1. spaces/1368565466ki/ZSTRD/text/cleaners.py +0 -475
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Forza Horizon 4 Crashing PC Causes and Solutions You Need to Know.md +0 -48
  3. spaces/1gistliPinn/ChatGPT4/Examples/Arcsoft Showbiz 5 With Crack !FULL! Torrent.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Blender Cloud ? The Animation Fundamentals 2021.md +0 -44
  5. spaces/1gistliPinn/ChatGPT4/Examples/CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack HOT!.md +0 -6
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Best Stickman Game on the App Store.md +0 -173
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans 14.555.7 APK - Whats New in the Latest Update.md +0 -115
  8. spaces/1phancelerku/anime-remove-background/Descoper muzica veche anii 80-90 i download free piesele preferate.md +0 -137
  9. spaces/1phancelerku/anime-remove-background/Download Wallpaper Kamen Rider Gates The Ultimate Collection of HD Images.md +0 -171
  10. spaces/1phancelerku/anime-remove-background/Euro Truck Driver 2018 Download MOD APK with Unlimited Money.md +0 -124
  11. spaces/1vash/demo-flask-docker-template/README.md +0 -12
  12. spaces/A00001/bingothoo/src/components/ui/alert-dialog.tsx +0 -150
  13. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/tts_utils.py +0 -398
  14. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/utils.py +0 -26
  15. spaces/AIatUIUC/CodeLATS/executors/executor_utils.py +0 -46
  16. spaces/AchyuthGamer/ImMagician/app.py +0 -190
  17. spaces/Adapter/CoAdapter/style.css +0 -3
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ObjectFactory.js +0 -20
  19. spaces/Alex132/togethercomputer-LLaMA-2-7B-32K/README.md +0 -12
  20. spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/comm.py +0 -131
  21. spaces/Aloento/9Nine-PITS/text/frontend/generate_lexicon.py +0 -158
  22. spaces/Amrrs/DragGan-Inversion/stylegan_human/edit.py +0 -207
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +0 -717
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py +0 -511
  25. spaces/Andy1621/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py +0 -3
  26. spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py +0 -13
  27. spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py +0 -3
  28. spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/loading.py +0 -458
  29. spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py +0 -6
  30. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-chat.py +0 -92
  31. spaces/AntNikYab/NaturalLanguageProcessing/README.md +0 -12
  32. spaces/Arijit-hazra/my-image-captioner/load_model.py +0 -363
  33. spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +0 -90
  34. spaces/ArpitM/chat-llm-streaming/app.py +0 -321
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/uninstall.py +0 -113
  36. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py +0 -88
  37. spaces/BAAI/AltDiffusion-m9/css_and_js.py +0 -92
  38. spaces/Banbri/zcvzcv/src/components/ui/select.tsx +0 -121
  39. spaces/Bart92/RVC_HF/lib/infer_pack/transforms.py +0 -209
  40. spaces/Benson/text-generation/Examples/Boleto Para El Grupo 2 2022.md +0 -234
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/unicode_utils.py +0 -42
  42. spaces/Boadiwaa/Recipes/openai/api_resources/abstract/__init__.py +0 -10
  43. spaces/Boilin/URetinex-Net/network/Math_Module.py +0 -38
  44. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/catalog.py +0 -132
  45. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_modeling.py +0 -492
  46. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/__init__.py +0 -11
  47. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docker/README.md +0 -24
  48. spaces/CVPR/LIVE/filter.h +0 -106
  49. spaces/CVPR/LIVE/thrust/thrust/random/detail/random_core_access.h +0 -57
  50. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/fill.h +0 -94
spaces/1368565466ki/ZSTRD/text/cleaners.py DELETED
@@ -1,475 +0,0 @@
- """ from https://github.com/keithito/tacotron """
-
- '''
- Cleaners are transformations that run over the input text at both training and eval time.
-
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
-   1. "english_cleaners" for English text
-   2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
-      the Unidecode library (https://pypi.python.org/pypi/Unidecode)
-   3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
-      the symbols in symbols.py to match your data).
- '''
-
- import re
- from unidecode import unidecode
- import pyopenjtalk
- from jamo import h2j, j2hcj
- from pypinyin import lazy_pinyin, BOPOMOFO
- import jieba, cn2an
-
-
- # This is a list of Korean classifiers preceded by pure Korean numerals.
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
- # Regular expression matching whitespace:
- _whitespace_re = re.compile(r'\s+')
-
- # Regular expression matching Japanese without punctuation marks:
- _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # Regular expression matching non-Japanese characters or punctuation marks:
- _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
- # List of (regular expression, replacement) pairs for abbreviations:
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-   ('mrs', 'misess'),
-   ('mr', 'mister'),
-   ('dr', 'doctor'),
-   ('st', 'saint'),
-   ('co', 'company'),
-   ('jr', 'junior'),
-   ('maj', 'major'),
-   ('gen', 'general'),
-   ('drs', 'doctors'),
-   ('rev', 'reverend'),
-   ('lt', 'lieutenant'),
-   ('hon', 'honorable'),
-   ('sgt', 'sergeant'),
-   ('capt', 'captain'),
-   ('esq', 'esquire'),
-   ('ltd', 'limited'),
-   ('col', 'colonel'),
-   ('ft', 'fort'),
- ]]
-
- # List of (hangul, hangul divided) pairs:
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
-   ('ㄳ', 'ㄱㅅ'),
-   ('ㄵ', 'ㄴㅈ'),
-   ('ㄶ', 'ㄴㅎ'),
-   ('ㄺ', 'ㄹㄱ'),
-   ('ㄻ', 'ㄹㅁ'),
-   ('ㄼ', 'ㄹㅂ'),
-   ('ㄽ', 'ㄹㅅ'),
-   ('ㄾ', 'ㄹㅌ'),
-   ('ㄿ', 'ㄹㅍ'),
-   ('ㅀ', 'ㄹㅎ'),
-   ('ㅄ', 'ㅂㅅ'),
-   ('ㅘ', 'ㅗㅏ'),
-   ('ㅙ', 'ㅗㅐ'),
-   ('ㅚ', 'ㅗㅣ'),
-   ('ㅝ', 'ㅜㅓ'),
-   ('ㅞ', 'ㅜㅔ'),
-   ('ㅟ', 'ㅜㅣ'),
-   ('ㅢ', 'ㅡㅣ'),
-   ('ㅑ', 'ㅣㅏ'),
-   ('ㅒ', 'ㅣㅐ'),
-   ('ㅕ', 'ㅣㅓ'),
-   ('ㅖ', 'ㅣㅔ'),
-   ('ㅛ', 'ㅣㅗ'),
-   ('ㅠ', 'ㅣㅜ')
- ]]
-
- # List of (Latin alphabet, hangul) pairs:
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-   ('a', '에이'),
-   ('b', '비'),
-   ('c', '시'),
-   ('d', '디'),
-   ('e', '이'),
-   ('f', '에프'),
-   ('g', '지'),
-   ('h', '에이치'),
-   ('i', '아이'),
-   ('j', '제이'),
-   ('k', '케이'),
-   ('l', '엘'),
-   ('m', '엠'),
-   ('n', '엔'),
-   ('o', '오'),
-   ('p', '피'),
-   ('q', '큐'),
-   ('r', '아르'),
-   ('s', '에스'),
-   ('t', '티'),
-   ('u', '유'),
-   ('v', '브이'),
-   ('w', '더블유'),
-   ('x', '엑스'),
-   ('y', '와이'),
-   ('z', '제트')
- ]]
-
- # List of (Latin alphabet, bopomofo) pairs:
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-   ('a', 'ㄟˉ'),
-   ('b', 'ㄅㄧˋ'),
-   ('c', 'ㄙㄧˉ'),
-   ('d', 'ㄉㄧˋ'),
-   ('e', 'ㄧˋ'),
-   ('f', 'ㄝˊㄈㄨˋ'),
-   ('g', 'ㄐㄧˋ'),
-   ('h', 'ㄝˇㄑㄩˋ'),
-   ('i', 'ㄞˋ'),
-   ('j', 'ㄐㄟˋ'),
-   ('k', 'ㄎㄟˋ'),
-   ('l', 'ㄝˊㄛˋ'),
-   ('m', 'ㄝˊㄇㄨˋ'),
-   ('n', 'ㄣˉ'),
-   ('o', 'ㄡˉ'),
-   ('p', 'ㄆㄧˉ'),
-   ('q', 'ㄎㄧㄡˉ'),
-   ('r', 'ㄚˋ'),
-   ('s', 'ㄝˊㄙˋ'),
-   ('t', 'ㄊㄧˋ'),
-   ('u', 'ㄧㄡˉ'),
-   ('v', 'ㄨㄧˉ'),
-   ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
-   ('x', 'ㄝˉㄎㄨˋㄙˋ'),
-   ('y', 'ㄨㄞˋ'),
-   ('z', 'ㄗㄟˋ')
- ]]
-
-
- # List of (bopomofo, romaji) pairs:
- _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-   ('ㄅㄛ', 'p⁼wo'),
-   ('ㄆㄛ', 'pʰwo'),
-   ('ㄇㄛ', 'mwo'),
-   ('ㄈㄛ', 'fwo'),
-   ('ㄅ', 'p⁼'),
-   ('ㄆ', 'pʰ'),
-   ('ㄇ', 'm'),
-   ('ㄈ', 'f'),
-   ('ㄉ', 't⁼'),
-   ('ㄊ', 'tʰ'),
-   ('ㄋ', 'n'),
-   ('ㄌ', 'l'),
-   ('ㄍ', 'k⁼'),
-   ('ㄎ', 'kʰ'),
-   ('ㄏ', 'h'),
-   ('ㄐ', 'ʧ⁼'),
-   ('ㄑ', 'ʧʰ'),
-   ('ㄒ', 'ʃ'),
-   ('ㄓ', 'ʦ`⁼'),
-   ('ㄔ', 'ʦ`ʰ'),
-   ('ㄕ', 's`'),
-   ('ㄖ', 'ɹ`'),
-   ('ㄗ', 'ʦ⁼'),
-   ('ㄘ', 'ʦʰ'),
-   ('ㄙ', 's'),
-   ('ㄚ', 'a'),
-   ('ㄛ', 'o'),
-   ('ㄜ', 'ə'),
-   ('ㄝ', 'e'),
-   ('ㄞ', 'ai'),
-   ('ㄟ', 'ei'),
-   ('ㄠ', 'au'),
-   ('ㄡ', 'ou'),
-   ('ㄧㄢ', 'yeNN'),
-   ('ㄢ', 'aNN'),
-   ('ㄧㄣ', 'iNN'),
-   ('ㄣ', 'əNN'),
-   ('ㄤ', 'aNg'),
-   ('ㄧㄥ', 'iNg'),
-   ('ㄨㄥ', 'uNg'),
-   ('ㄩㄥ', 'yuNg'),
-   ('ㄥ', 'əNg'),
-   ('ㄦ', 'əɻ'),
-   ('ㄧ', 'i'),
-   ('ㄨ', 'u'),
-   ('ㄩ', 'ɥ'),
-   ('ˉ', '→'),
-   ('ˊ', '↑'),
-   ('ˇ', '↓↑'),
-   ('ˋ', '↓'),
-   ('˙', ''),
-   (',', ','),
-   ('。', '.'),
-   ('!', '!'),
-   ('?', '?'),
-   ('—', '-')
- ]]
-
-
- def expand_abbreviations(text):
-   for regex, replacement in _abbreviations:
-     text = re.sub(regex, replacement, text)
-   return text
-
-
- def lowercase(text):
-   return text.lower()
-
-
- def collapse_whitespace(text):
-   return re.sub(_whitespace_re, ' ', text)
-
-
- def convert_to_ascii(text):
-   return unidecode(text)
-
-
- def japanese_to_romaji_with_accent(text):
-   '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
-   sentences = re.split(_japanese_marks, text)
-   marks = re.findall(_japanese_marks, text)
-   text = ''
-   for i, sentence in enumerate(sentences):
-     if re.match(_japanese_characters, sentence):
-       if text!='':
-         text+=' '
-       labels = pyopenjtalk.extract_fullcontext(sentence)
-       for n, label in enumerate(labels):
-         phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
-         if phoneme not in ['sil','pau']:
-           text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
-         else:
-           continue
-         n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
-         a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
-         a2 = int(re.search(r"\+(\d+)\+", label).group(1))
-         a3 = int(re.search(r"\+(\d+)/", label).group(1))
-         if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
-           a2_next=-1
-         else:
-           a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
-         # Accent phrase boundary
-         if a3 == 1 and a2_next == 1:
-           text += ' '
-         # Falling
-         elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
-           text += '↓'
-         # Rising
-         elif a2 == 1 and a2_next == 2:
-           text += '↑'
-     if i<len(marks):
-       text += unidecode(marks[i]).replace(' ','')
-   return text
-
-
- def latin_to_hangul(text):
-   for regex, replacement in _latin_to_hangul:
-     text = re.sub(regex, replacement, text)
-   return text
-
-
- def divide_hangul(text):
-   for regex, replacement in _hangul_divided:
-     text = re.sub(regex, replacement, text)
-   return text
-
-
- def hangul_number(num, sino=True):
-   '''Reference https://github.com/Kyubyong/g2pK'''
-   num = re.sub(',', '', num)
-
-   if num == '0':
-     return '영'
-   if not sino and num == '20':
-     return '스무'
-
-   digits = '123456789'
-   names = '일이삼사오육칠팔구'
-   digit2name = {d: n for d, n in zip(digits, names)}
-
-   modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
-   decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
-   digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
-   digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
-   spelledout = []
-   for i, digit in enumerate(num):
-     i = len(num) - i - 1
-     if sino:
-       if i == 0:
-         name = digit2name.get(digit, '')
-       elif i == 1:
-         name = digit2name.get(digit, '') + '십'
-         name = name.replace('일십', '십')
-     else:
-       if i == 0:
-         name = digit2mod.get(digit, '')
-       elif i == 1:
-         name = digit2dec.get(digit, '')
-     if digit == '0':
-       if i % 4 == 0:
-         last_three = spelledout[-min(3, len(spelledout)):]
-         if ''.join(last_three) == '':
-           spelledout.append('')
-           continue
-       else:
-         spelledout.append('')
-         continue
-     if i == 2:
-       name = digit2name.get(digit, '') + '백'
-       name = name.replace('일백', '백')
-     elif i == 3:
-       name = digit2name.get(digit, '') + '천'
-       name = name.replace('일천', '천')
-     elif i == 4:
-       name = digit2name.get(digit, '') + '만'
-       name = name.replace('일만', '만')
-     elif i == 5:
-       name = digit2name.get(digit, '') + '십'
-       name = name.replace('일십', '십')
-     elif i == 6:
-       name = digit2name.get(digit, '') + '백'
-       name = name.replace('일백', '백')
-     elif i == 7:
-       name = digit2name.get(digit, '') + '천'
-       name = name.replace('일천', '천')
-     elif i == 8:
-       name = digit2name.get(digit, '') + '억'
-     elif i == 9:
-       name = digit2name.get(digit, '') + '십'
-     elif i == 10:
-       name = digit2name.get(digit, '') + '백'
-     elif i == 11:
-       name = digit2name.get(digit, '') + '천'
-     elif i == 12:
-       name = digit2name.get(digit, '') + '조'
-     elif i == 13:
-       name = digit2name.get(digit, '') + '십'
-     elif i == 14:
-       name = digit2name.get(digit, '') + '백'
-     elif i == 15:
-       name = digit2name.get(digit, '') + '천'
-     spelledout.append(name)
-   return ''.join(elem for elem in spelledout)
-
-
- def number_to_hangul(text):
-   '''Reference https://github.com/Kyubyong/g2pK'''
-   tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
-   for token in tokens:
-     num, classifier = token
-     if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
-       spelledout = hangul_number(num, sino=False)
-     else:
-       spelledout = hangul_number(num, sino=True)
-     text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
-   # digit by digit for remaining digits
-   digits = '0123456789'
-   names = '영일이삼사오육칠팔구'
-   for d, n in zip(digits, names):
-     text = text.replace(d, n)
-   return text
-
-
- def number_to_chinese(text):
-   numbers = re.findall(r'\d+(?:\.?\d+)?', text)
-   for number in numbers:
-     text = text.replace(number, cn2an.an2cn(number),1)
-   return text
-
-
- def chinese_to_bopomofo(text):
-   text=text.replace('、',',').replace(';',',').replace(':',',')
-   words=jieba.lcut(text,cut_all=False)
-   text=''
-   for word in words:
-     bopomofos=lazy_pinyin(word,BOPOMOFO)
-     if not re.search('[\u4e00-\u9fff]',word):
-       text+=word
-       continue
-     for i in range(len(bopomofos)):
-       if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
-         bopomofos[i]+='ˉ'
-     if text!='':
-       text+=' '
-     text+=''.join(bopomofos)
-   return text
-
-
- def latin_to_bopomofo(text):
-   for regex, replacement in _latin_to_bopomofo:
-     text = re.sub(regex, replacement, text)
-   return text
-
-
- def bopomofo_to_romaji(text):
-   for regex, replacement in _bopomofo_to_romaji:
-     text = re.sub(regex, replacement, text)
-   return text
-
-
- def basic_cleaners(text):
-   '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
-   text = lowercase(text)
-   text = collapse_whitespace(text)
-   return text
-
-
- def transliteration_cleaners(text):
-   '''Pipeline for non-English text that transliterates to ASCII.'''
-   text = convert_to_ascii(text)
-   text = lowercase(text)
-   text = collapse_whitespace(text)
-   return text
-
-
- def japanese_cleaners(text):
-   text=japanese_to_romaji_with_accent(text)
-   if re.match('[A-Za-z]',text[-1]):
-     text += '.'
-   return text
-
-
- def japanese_cleaners2(text):
-   return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
-
-
- def korean_cleaners(text):
-   '''Pipeline for Korean text'''
-   text = latin_to_hangul(text)
-   text = number_to_hangul(text)
-   text = j2hcj(h2j(text))
-   text = divide_hangul(text)
-   if re.match('[\u3131-\u3163]',text[-1]):
-     text += '.'
-   return text
-
-
- def chinese_cleaners(text):
-   '''Pipeline for Chinese text'''
-   text=number_to_chinese(text)
-   text=chinese_to_bopomofo(text)
-   text=latin_to_bopomofo(text)
-   if re.match('[ˉˊˇˋ˙]',text[-1]):
-     text += '。'
-   return text
-
-
- def zh_ja_mixture_cleaners(text):
-   chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
-   japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
-   for chinese_text in chinese_texts:
-     cleaned_text=number_to_chinese(chinese_text[4:-4])
-     cleaned_text=chinese_to_bopomofo(cleaned_text)
-     cleaned_text=latin_to_bopomofo(cleaned_text)
-     cleaned_text=bopomofo_to_romaji(cleaned_text)
-     cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
-     cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
-     cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
-     cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
-     text = text.replace(chinese_text,cleaned_text+' ',1)
-   for japanese_text in japanese_texts:
-     cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
-     text = text.replace(japanese_text,cleaned_text+' ',1)
-   text=text[:-1]
-   if re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
-     text += '.'
-   return text
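Editor's note: the deleted module's public entry points are the *_cleaners functions, which a TTS front end looks up by name and applies in sequence, per the docstring above. A minimal, self-contained sketch of that dispatch pattern (standard library only; `clean_text` is a hypothetical helper mirroring the Keithito Tacotron convention, and only `basic_cleaners` is reimplemented here):

```python
import re

_whitespace_re = re.compile(r'\s+')


def basic_cleaners(text):
  '''Lowercase and collapse whitespace, with no transliteration.'''
  return re.sub(_whitespace_re, ' ', text.lower())


def clean_text(text, cleaners='basic_cleaners'):
  # Resolve each name in the comma-delimited "cleaners" hyperparameter to a
  # function in this namespace and apply them in order.
  for name in cleaners.split(','):
    cleaner = globals().get(name.strip())
    if cleaner is None:
      raise ValueError('Unknown cleaner: %s' % name)
    text = cleaner(text)
  return text


print(clean_text('  Hello,   WORLD!  '))  # -> ' hello, world! '
```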
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Forza Horizon 4 Crashing PC Causes and Solutions You Need to Know.md DELETED
@@ -1,48 +0,0 @@
-
- <h1>How to Fix Forza Horizon 4 Crashing PC</h1>
- <p>Forza Horizon 4 is a popular racing game that offers stunning graphics and realistic driving physics. However, some players have reported that the game keeps crashing on their PC, preventing them from enjoying the game. If you are one of them, don't worry. In this article, we will show you some possible solutions to fix Forza Horizon 4 crashing PC.</p>
- <h2>What Causes Forza Horizon 4 Crashing PC?</h2>
- <p>There are many possible reasons why Forza Horizon 4 crashes on your PC, such as:</p>
- <h2>forza 4 keeps crashing pc</h2><br /><p><b><b>DOWNLOAD</b> &#127383; <a href="https://byltly.com/2uKyMm">https://byltly.com/2uKyMm</a></b></p><br /><br />
- <ul>
- <li>Outdated or corrupted graphics driver</li>
- <li>Incompatible Windows update</li>
- <li>Insufficient system requirements</li>
- <li>Conflicting third-party software or overlay features</li>
- <li>Corrupted game files or installation data</li>
- <li>Overclocking or overheating issues</li>
- <li>Low disk space or memory errors</li>
- <li>Antivirus interference or firewall blocking</li>
- </ul>
- <h2>How to Fix Forza Horizon 4 Crashing PC?</h2>
- <p>To fix Forza Horizon 4 crashing PC, you can try the following methods:</p>
- <h3>Method 1: Update Your Windows</h3>
- <p>One of the common causes of Forza Horizon 4 crashing PC is an incompatible Windows update. To ensure that your system is running the latest version of Windows, you can follow these steps:</p>
- <ol>
- <li>Click the Start button and type "updates" in the search box.</li>
- <li>Select "Check for updates" from the results.</li>
- <li>Windows will automatically check for and install any available updates.</li>
- <li>Restart your PC and launch Forza Horizon 4 to see if it works.</li>
- </ol>
- <h3>Method 2: Update Your Graphics Driver</h3>
- <p>An outdated or corrupted graphics driver can also cause Forza Horizon 4 crashing PC. To update your graphics driver, you can use a reliable driver updater tool like Driver Easy, or you can manually download and install the latest driver from your graphics card manufacturer's website. Here are the steps to use Driver Easy:</p>
- <ol>
- <li>Download and install Driver Easy on your PC.</li>
- <li>Run Driver Easy and click "Scan Now". Driver Easy will scan your system and detect any problematic drivers.</li>
- <li>Click "Update All" to automatically download and install the correct version of all the drivers that are missing or out of date on your system.</li>
- <li>Restart your PC and launch Forza Horizon 4 to see if it works.</li>
- </ol>
- <h3>Method 3: Add the Game to the Exception List of Your Antivirus</h3>
- <p>Sometimes, your antivirus software may interfere with Forza Horizon 4 and cause it to crash. To prevent this, you can add the game to the exception list of your antivirus software. The steps may vary depending on the antivirus software you are using, but here is an example for Windows Defender:</p>
- <ol>
- <li>Click the Start button and type "virus" in the search box.</li>
- <li>Select "Virus & threat protection" from the results.</li>
- <li>Click "Manage settings" under "Virus & threat protection settings".</li>
- <li>Scroll down and click "Add or remove exclusions" under "Exclusions".</li>
- <li>Click "Add an exclusion" and select "Folder".</li>
- <li>Browse to the folder where Forza Horizon 4 is installed and select it.</li>
- <li>Click "Yes" to confirm.</li>
- <li>Launch Forza Horizon 4 to see if it works.</li>
- </ol></p> ddb901b051<br />
- <br />
- <br />
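Editor's note: Method 3 in the deleted article above can also be scripted instead of clicked through. A minimal sketch (not from the article) that adds a Windows Defender folder exclusion via the real Add-MpPreference PowerShell cmdlet, called from Python; the install path is hypothetical and the script must run from an elevated shell:

```python
import subprocess

# Hypothetical install folder; adjust to wherever Forza Horizon 4 actually lives.
GAME_DIR = r"C:\Program Files (x86)\Steam\steamapps\common\ForzaHorizon4"


def add_defender_exclusion(folder: str) -> None:
    """Add a folder exclusion to Windows Defender (requires admin rights)."""
    subprocess.run(
        ["powershell", "-Command", f'Add-MpPreference -ExclusionPath "{folder}"'],
        check=True,
    )


if __name__ == "__main__":
    add_defender_exclusion(GAME_DIR)
```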
spaces/1gistliPinn/ChatGPT4/Examples/Arcsoft Showbiz 5 With Crack !FULL! Torrent.md DELETED
@@ -1,6 +0,0 @@
- <h2>arcsoft showbiz 5 with crack torrent</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot;&middot;&middot; <a href="https://imgfil.com/2uy0nw">https://imgfil.com/2uy0nw</a></b></p><br /><br />
- <br />
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Blender Cloud ? The Animation Fundamentals 2021.md DELETED
@@ -1,44 +0,0 @@
- <h2>Blender Cloud – The Animation Fundamentals</h2><br /><p><b><b>DOWNLOAD</b> &#10031; <a href="https://imgfil.com/2uxXzK">https://imgfil.com/2uxXzK</a></b></p><br /><br />
- <br />
- Data Requests
-
- - The character models are available for download in .
-
- - The animations are available for download in .
-
- Animating a walkthrough #sec:animation-walkthrough
-
- ======================
-
- In this tutorial, we will be animating a walkthrough of a fully animated shot, where we will be adding additional visual effects (i.e., the 3D flames). The goal of the tutorial is to understand how the tutorial animation was created. Each shot is a fully-animated clip with four keyframes.
-
- The tutorial is organized into three major sections:
-
- 1. Sketching and Layout.
-
- 2. Geometry, Symmetry, Surface Smoothing, Painterly Feeling, & Organic Expressive Style.
-
- 3. Reflections, Depth of Field, Color Theory, & Depth of Field.
-
- In the second section, we cover the following topics:
-
- 1. Lighting to importance
-
- 2. Reflections
-
- 3. Area light
-
- 4. Area light’s position
-
- 5. Refraction
-
- 6. Soft light
-
- 7. Soft light’s position
-
- 8. Reflection map
-
- 9. 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack HOT!.md DELETED
@@ -1,6 +0,0 @@
- <h2>CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack</h2><br /><p><b><b>Download File</b> &harr; <a href="https://imgfil.com/2uy1st">https://imgfil.com/2uy1st</a></b></p><br /><br />
-
- Coreldraw graphics suite 2018 v20 1.0 708 keygen. Tenleid cosplay ... Autocad 2015 serial number and product key crack 64 bit. Sfm porno vk. ... Ml 1865 firmware. ... Pinnacle studio x86 x64 18.0.2 ultimate incl keygen full. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Best Stickman Game on the App Store.md DELETED
@@ -1,173 +0,0 @@
- <br />
- <h1>Anger of Stick 5: Zombie - A Review of the Action-Packed Stickman Game</h1>
- <p>If you are looking for a fun and addictive stickman game that will keep you entertained for hours, then you should check out <strong>Anger of Stick 5: Zombie</strong>. This game is a sequel to the popular Anger of Stick series, which has over 100 million downloads on Google Play. In this game, you will join the angry stickman and his friends as they fight against a strange group of enemies that have turned innocent people into zombies. You will use various weapons, skills, and vehicles to save the city and destroy the enemies. In this article, we will review the game and give you some tips and tricks to help you enjoy it more.</p>
- <h2>anger of stick 5 zombie</h2><br /><p><b><b>Download</b> &middot;&middot;&middot;&middot;&middot; <a href="https://urlin.us/2uSZL4">https://urlin.us/2uSZL4</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is Anger of Stick 5: Zombie?</h3>
- <p><strong>Anger of Stick 5: Zombie</strong> is an action game developed by J-PARK, a Korean company that specializes in stickman games. The game was released in 2016 and has been updated regularly since then. The game is available for both Android and iOS devices, and it is free to download and play, with some optional in-app purchases. The game has received positive reviews from users and critics alike, with an average rating of 4.5 stars out of 5 on both Google Play and App Store. The game has also been featured on several websites and blogs as one of the best stickman games on the market.</p>
- <h3>Why should you play Anger of Stick 5: Zombie?</h3>
- <p>There are many reasons why you should play <strong>Anger of Stick 5: Zombie</strong>, but here are some of the main ones:</p>
- <ul>
- <li>The game is <strong>fun</strong> and <strong>addictive</strong>. You will never get bored with the fast-paced and thrilling action that the game offers. You will have to use your reflexes, strategy, and creativity to overcome the challenges and enemies that you will face.</li>
- <li>The game is <strong>easy</strong> to <strong>play</strong>. You don't need any complicated instructions or tutorials to start playing the game. The controls are simple and intuitive, and you can customize them according to your preference. The game also has a user-friendly interface and a clear menu that will help you navigate through the game.</li>
- <li>The game is <strong>varied</strong> and <strong>diverse</strong>. You will not get tired of playing the same thing over and over again. The game has four different modes to choose from: single mode, zombie mode, friend mode, and online mode. Each mode has its own objectives, rules, and rewards. The game also has hundreds of levels to complete, each with different scenarios, enemies, and difficulties.</li>
- <li>The game is <strong>rich</strong> in <strong>features</strong>. You will be amazed by the amount of content that the game has to offer. You can choose from dozens of characters, each with their own abilities and skills. You can also equip them with various weapons, ranging from guns and swords to helicopters and robots. You can also upgrade your characters and weapons to enhance their power and performance.</li>
- <li>The game is <strong>free</strong> to <strong>play</strong>. You don't have to spend any money to enjoy the game. The game does have some in-app purchases that can help you progress faster or unlock more items, but they are not necessary to play the game. You can also earn coins and gems by playing the game and completing the missions. The game is generous in giving you rewards and bonuses, so you don't have to worry about running out of resources.</li>
- </ul>
- <h2>Gameplay</h2>
- <h3>How to play Anger of Stick 5: Zombie?</h3>
- <p>The gameplay of <strong>Anger of Stick 5: Zombie</strong> is simple and straightforward. You have to control your character and fight against the enemies that appear on the screen. You can move your character by using the joystick on the left side of the screen, and you can attack by using the buttons on the right side of the screen. You can also use special skills and items by tapping on their icons on the top of the screen. You have to clear each level by defeating all the enemies or reaching the goal point. You will also have to avoid obstacles and traps that can harm you or slow you down. You will lose a life if your health bar reaches zero, and you will have to restart the level if you lose all your lives. You can also pause the game by tapping on the menu button on the top right corner of the screen.</p>
- <h4>Controls</h4>
- <p>The controls of <strong>Anger of Stick 5: Zombie</strong> are easy to learn and use. Here are the basic controls that you need to know:</p>
- <ul>
- <li><strong>Joystick</strong>: You can use the joystick on the left side of the screen to move your character in any direction. You can also double-tap on the joystick to make your character dash or roll.</li>
- <li><strong>Attack button</strong>: You can use the attack button on the right side of the screen to make your character perform a basic attack. You can also tap on the button repeatedly to make your character perform a combo attack.</li>
- <li><strong>Jump button</strong>: You can use the jump button on the right side of the screen to make your character jump. You can also tap on the button twice to make your character perform a double jump.</li>
- <li><strong>Skill button</strong>: You can use the skill button on the right side of the screen to make your character use a special skill. You can also swipe on the button to switch between different skills. Each skill has a cooldown time, so you have to wait for it to recharge before using it again.</li>
- <li><strong>Item button</strong>: You can use the item button on the right side of the screen to make your character use an item. You can also swipe on the button to switch between different items. Each item has a limited number, so you have to use them wisely.</li>
- <li><strong>Weapon button</strong>: You can use the weapon button on the right side of the screen to make your character use a weapon. You can also swipe on the button to switch between different weapons. Each weapon has a limited ammo, so you have to reload it when it runs out.</li>
- </ul>
- <h4>Modes</h4>
- <p><strong>Anger of Stick 5: Zombie</strong> has four different modes to choose from, each with its own objectives, rules, and rewards. Here are the modes that you can play:</p>
- <ul>
- <li><strong>Single mode</strong>: This is the main mode of the game, where you have to complete various missions and levels. You can choose from three difficulty levels: easy, normal, and hard. You can also select your character and your weapons before starting each level. You will earn coins and gems by clearing each level, which you can use to buy or upgrade items, skills, and weapons.</li>
- <li><strong>Zombie mode</strong>: This is a survival mode, where you have to fight against endless waves of zombies. You can choose from four sub-modes: defense, melee, gun, and team. You can also select your character and your weapons before starting each sub-mode. You will earn coins and gems by killing zombies, which you can use to buy or upgrade items, skills, and weapons.</li>
- <li><strong>Friend mode</strong>: This is a co-op mode, where you can play with your friends online or offline. You can choose from two sub-modes: team battle or team survival. You can also select your character and your weapons before starting each sub-mode. You will earn coins and gems by cooperating with your friends, which you can use to buy or upgrade items, skills, and weapons.</li>
- <li><strong>Online mode</strong>: This is a multiplayer mode, where you can play with other players from around the world. You can choose from two sub-modes: PvP or PvE. You can also select your character and your weapons before starting each sub-mode. You will earn coins and gems by competing or collaborating with other players, which you can use to buy or upgrade items, skills, and weapons.</li>
- </ul>
- <h4>Levels</h4>
- <p><strong>Anger of Stick 5: Zombie</strong> has hundreds of levels to complete, each with different scenarios, enemies, and difficulties. Here are some of the levels that you will encounter:</p>
- <p>How to play anger of stick 5 zombie on PC<br />
- Anger of stick 5 zombie mod apk unlimited money and gems<br />
- Anger of stick 5 zombie cheats and hacks<br />
- Best weapons and skills in anger of stick 5 zombie<br />
- Anger of stick 5 zombie online multiplayer mode<br />
- Anger of stick 5 zombie tips and tricks for beginners<br />
- Anger of stick 5 zombie review and rating<br />
- Download anger of stick 5 zombie for free on Android and iOS<br />
- Anger of stick 5 zombie gameplay and walkthrough<br />
- Anger of stick 5 zombie vs anger of stick 4<br />
- Anger of stick 5 zombie latest version and update<br />
- Anger of stick 5 zombie best characters and heroes<br />
- Anger of stick 5 zombie guide and tutorial<br />
- Anger of stick 5 zombie levels and stages<br />
- Anger of stick 5 zombie boss battles and challenges<br />
- Anger of stick 5 zombie weapons shop and upgrades<br />
- Anger of stick 5 zombie achievements and rewards<br />
- Anger of stick 5 zombie fan art and memes<br />
- Anger of stick 5 zombie story and plot<br />
- Anger of stick 5 zombie theme song and soundtrack<br />
- Anger of stick 5 zombie alternatives and similar games<br />
- Anger of stick 5 zombie codes and coupons<br />
- Anger of stick 5 zombie FAQs and answers<br />
- Anger of stick 5 zombie bugs and glitches<br />
- Anger of stick 5 zombie secrets and easter eggs<br />
- How to install anger of stick 5 zombie on Windows or Mac<br />
- Anger of stick 5 zombie offline mode and features<br />
- Anger of stick 5 zombie custom mode and editor<br />
- How to get more coins and gems in anger of stick 5 zombie<br />
- How to unlock all characters and weapons in anger of stick 5 zombie<br />
- How to beat anger of stick 5 zombie without spending money<br />
- How to record and share anger of stick 5 zombie gameplay videos<br />
- How to contact anger of stick 5 zombie developers and support team<br />
- How to join anger of stick 5 zombie community and forums<br />
- How to create your own character in anger of stick 5 zombie<br />
- How to change the language and settings in anger of stick 5 zombie<br />
- How to backup and restore your anger of stick 5 zombie data<br />
- How to play anger of stick 5 zombie with friends and family<br />
- How to improve your skills and strategy in anger of stick 5 zombie<br />
- How to solve anger of stick 5 zombie puzzles and riddles<br />
- How to survive longer in anger of stick 5 zombie survival mode<br />
- How to earn more stars and medals in anger of stick 5 zombie missions<br />
- How to customize your weapons and outfits in anger of stick 5 zombie <br />
- How to access anger of stick 5 zombie hidden features and modes <br />
- How to win anger of stick 5 zombie tournaments and competitions <br />
- How to get free coins and gems in anger of stick 5 zombie legally <br />
- How to report anger of stick 5 zombie players who cheat or abuse <br />
- How to delete or uninstall anger of stick 5 zombie from your device</p>
- <table>
- <tr>
- <th>Level</th>
- <th>Scenario</th>
- <th>Enemies</th>
- <th>Difficulty</th>
- </tr>
- <tr>
- <td>1</td>
- <td>A street in the city</td>
- <td>Zombies, thugs, dogs</td>
- <td>Easy</td>
- </tr>
- <tr>
- <td>2</td>
- <td>A rooftop in the city</td>
- <td>Zombies, snipers, helicopters</td>
- <td>Easy</td>
- </tr>
- <tr>
- <td>3</td>
- <td>A subway station in the city</td>
- <td>Zombies, soldiers, trains</td>
- <td>Normal</td>
- </tr>
- <tr>
- <td>4</td>
- <td>A factory in the city</td>
- <td>Zombies, robots, lasers</td>
- <td>Normal</td>
- </tr>
- <tr>
- <td>5</td>
- <td>A park in the city</td>
- <td>Zombies, ninjas, trees</td>
- <td>Hard</td>
- </tr>
- <tr>
- <td>6</td>
- <td>A bridge in the city</td>
- <td>Zombies, bikers, cars</td>
- <td>Hard</td>
- </tr> </table>
- <h2>What are the features of Anger of Stick 5: Zombie?</h2>
- <p><strong>Anger of Stick 5: Zombie</strong> has many features that make it stand out from other stickman games. Here are some of the features that you will enjoy:</p>
- <h4>Graphics and sound</h4>
- <p>The game has simple but colorful graphics that suit the stickman style. The game also has smooth animations and realistic physics that make the action more dynamic and exciting. The game also has a catchy and upbeat soundtrack that matches the mood of the game. The game also has sound effects that enhance the atmosphere and the feedback of the game.</p>
- <h4>Characters and weapons</h4>
- <p>The game has over 40 characters to choose from, each with their own appearance, personality, and skills. You can also customize your character by changing their clothes, hair, and accessories. The game also has over 100 weapons to equip, each with their own power, range, and speed. You can also upgrade your weapons by using coins and gems.</p>
- <h4>Helicopters and robots</h4>
- <p>The game also has helicopters and robots that you can use to fight against the enemies. You can summon a helicopter or a robot by using an item or a skill. The helicopter or the robot will follow you and help you by shooting or smashing the enemies. You can also control the helicopter or the robot by using the joystick and the attack button.</p>
- <h2>Tips and tricks</h2>
- <p>If you want to play <strong>Anger of Stick 5: Zombie</strong> like a pro, you need to know some tips and tricks that will help you improve your skills and performance. Here are some of the tips and tricks that you should know:</p>
- <h3>How to level up and earn coins?</h3>
- <p>If you want to level up your character and earn more coins, you need to complete the missions and levels in the single mode. You will get more experience points and coins by clearing higher levels and harder difficulties. You will also get bonus coins by achieving a high score, killing a lot of enemies, or collecting items. You can also earn coins by playing the other modes, but they will give you less than the single mode.</p>
- <h3>How to use combos and skills?</h3>
- <p>If you want to deal more damage and defeat the enemies faster, you need to use combos and skills effectively. You can perform a combo by tapping on the attack button repeatedly. You can also perform a skill by tapping on the skill button. Each character has a different combo and skill, so you need to learn how to use them properly. You can also combine combos and skills to create more powerful attacks.</p>
- <h3>How to avoid ads and bugs?</h3>
- <p>If you want to avoid ads and bugs that can ruin your gaming experience, you need to follow some simple steps. First, you need to turn off your internet connection before playing the game. This will prevent any ads from popping up on your screen. Second, you need to update your game regularly to fix any bugs or glitches that may occur. Third, you need to clear your cache and data from time to time to free up some space and improve your game performance.</p>
- <h2>Conclusion</h2>
- <h3>Summary of the main points</h3>
- <p>In conclusion, <strong>Anger of Stick 5: Zombie</strong> is a fun and addictive stickman game that will keep you entertained for hours. The game has four different modes, hundreds of levels, dozens of characters, over 100 weapons, helicopters and robots, simple but colorful graphics, catchy and upbeat soundtrack, easy but varied gameplay, rich but free features, and many tips and tricks to help you enjoy it more.</p>
- <h3>Recommendation and rating</h3>
- <p>We highly recommend <strong>Anger of Stick 5: Zombie</strong> to anyone who loves action games, stickman games, or zombie games. The game is suitable for all ages and skill levels, as it has a user-friendly interface, customizable controls, adjustable difficulty levels, and helpful tutorials. The game is also free to play, so you don't have to worry about spending any money to enjoy it. We give <strong>Anger of Stick 5: Zombie</strong> a rating of 9 out of 10 stars for its fun factor, quality, variety, and value.</p>
- <h2>Frequently Asked Questions (FAQs)</h2>
- <p>Here are some of the most frequently asked questions (FAQs) about <strong>Anger of Stick 5: Zombie</strong>:</p>
- <ol>
- <li><strong>How do I download Anger of Stick 5: Zombie?</strong></li>
- <p>You can download <strong>Anger of Stick 5: Zombie</strong> from the Google Play or the App Store, depending on your device. You can also scan the QR codes below to access the download links directly.</p>
- <p><img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for Google Play" width="100" height="100"> <img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for App Store" width="100" height="100"></p>
- <li><strong>How do I play Anger of Stick 5: Zombie offline?</strong></li>
- <p>You can play <strong>Anger of Stick 5: Zombie</strong> offline by turning off your internet connection before launching the game. This will prevent any ads from showing up and any online features from working. However, you will still be able to play the single mode and the zombie mode without any problems.</p>
- <li><strong>How do I get more coins and gems in Anger of Stick 5: Zombie?</strong></li>
- <p>You can get more coins and gems in <strong>Anger of Stick 5: Zombie</strong> by playing the game and completing the missions and levels. You will also get bonus coins and gems by achieving a high score, killing a lot of enemies, or collecting items. You can also watch ads or complete offers to get free coins and gems. Alternatively, you can buy coins and gems with real money by using the in-app purchases.</p>
- <li><strong>How do I unlock more characters and weapons in Anger of Stick 5: Zombie?</strong></li>
- <p>You can unlock more characters and weapons in <strong>Anger of Stick 5: Zombie</strong> by using coins and gems. You can buy characters and weapons from the shop, or you can get them from the lucky box. You can also upgrade your characters and weapons by using coins and gems.</p>
- <li><strong>How do I contact the developer of Anger of Stick 5: Zombie?</strong></li>
- <p>You can contact the developer of <strong>Anger of Stick 5: Zombie</strong> by using the following methods:</p>
- <ul>
- <li>Email: [email protected]</li>
- <li>Facebook: https://www.facebook.com/jparksoft</li>
- <li>Twitter: https://twitter.com/jparksoft</li>
- <li>YouTube: https://www.youtube.com/channel/UCwY4a0Zt9jxq8cXySxqZ9Gw</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans 14.555.7 APK - Whats New in the Latest Update.md DELETED
@@ -1,115 +0,0 @@
-
- <h1>Clash of Clans APK 14.555 7: Everything You Need to Know</h1>
- <p>If you are a fan of strategy games, you have probably heard of Clash of Clans, one of the most popular mobile games in the world. But do you know what is Clash of Clans APK 14.555 7 and why you should play it? In this article, we will tell you everything you need to know about this latest version of the game, including its features, benefits, drawbacks, and how to download and install it on your device.</p>
- <h2>What is Clash of Clans?</h2>
- <h3>A popular strategy game for mobile devices</h3>
- <p>Clash of Clans is a free-to-play strategy game developed by Supercell, a Finnish game company. It was released in 2012 for iOS devices and in 2013 for Android devices. Since then, it has become one of the most downloaded and played games in the world, with over 500 million downloads and millions of active players every day.</p>
- <h2>clash of clans apk 14.555 7</h2><br /><p><b><b>Download</b> &#10026; <a href="https://urlin.us/2uT1bN">https://urlin.us/2uT1bN</a></b></p><br /><br />
- <h3>The main features and gameplay of Clash of Clans</h3>
- <p>The game is set in a fantasy world where you are the chief of a village. Your goal is to build and upgrade your village, train and upgrade your troops, join or create a clan with other players, and attack or defend against other players or computer-generated enemies. You can also participate in various events, challenges, wars, and leagues to earn resources, trophies, and rewards.</p>
- <p>The game has two main modes: single-player and multiplayer. In single-player mode, you can attack goblin villages to earn resources and practice your skills. In multiplayer mode, you can attack or defend against other players to earn trophies and loot. You can also join or create a clan with up to 50 players and chat, donate troops, request reinforcements, and cooperate in clan wars and clan games.</p>
- <h2>What is Clash of Clans APK 14.555 7?</h2>
- <h3>The latest version of the game with new content and improvements</h3>
- <p>Clash of Clans APK 14.555 7 is the latest version of the game that was released on September 7, 2021. It introduces a new feature called Clan Capital, which is a huge mountain fortress above the clouds that you can build together with your clanmates. It also adds new content such as new troops, spells, buildings, traps, decorations, achievements, and more. It also fixes some bugs and improves the performance and stability of the game.</p>
- <h3>How to download and install Clash of Clans APK 14.555 7</h3>
- <p>If you want to play Clash of Clans APK 14.555 7 on your device, you have two options: either update the game from the official app store (Google Play Store for Android or App Store for iOS) or download the APK file from a third-party source (such as [Clash of Clans 14.555.7 APK Download - Softpedia](^1^)) and install it manually.</p>
- <p>To update the game from the official app store, you just need to open the app store on your device, search for Clash of Clans, and tap on the update button if available. The app store will automatically download and install the latest version of the game for you.</p>
- <p>To download and install the APK file from a third-party source, you need to follow these steps:</p>
- <ol>
- <li>Go to the website where you can download the APK file, such as [Clash of Clans 14.555.7 APK Download - Softpedia], and tap on the download button.</li>
- <li>Wait for the download to finish and then locate the APK file on your device. You may need to use a file manager app to do this.</li>
- <li>Before you install the APK file, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, security, and toggle on the option that allows installing apps from unknown sources.</li>
- <li>Tap on the APK file and follow the instructions on the screen to install it. You may need to grant some permissions to the app during the installation process.</li>
- <li>Once the installation is complete, you can open the game and enjoy the new features.</li>
- </ol>
- <p>Note: Downloading and installing APK files from third-party sources can be risky, as they may contain viruses or malware that can harm your device or compromise your privacy. Therefore, we recommend that you only download APK files from trusted and reputable sources, and scan them with an antivirus app before installing them. We are not responsible for any damage or loss caused by using APK files from third-party sources.</p>
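Editor's note: the sideloading steps in the deleted article above can also be driven from a computer over USB. A minimal sketch (not from the article) using Python's standard subprocess module and Android's real adb tool; it assumes adb is on PATH, USB debugging is enabled on the device, and the APK filename is hypothetical:

```python
import subprocess

APK_PATH = "clash-of-clans-14.555.7.apk"  # hypothetical local filename


def sideload(apk_path: str) -> None:
    """Install an APK onto a USB-connected Android device via adb."""
    # "adb install -r" replaces an existing install while keeping app data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)


if __name__ == "__main__":
    sideload(APK_PATH)
```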
26
- <p>clash of clans apk 14.555 7 download<br />
27
- clash of clans apk 14.555 7 mod<br />
28
- clash of clans apk 14.555 7 unlimited gems<br />
29
- clash of clans apk 14.555 7 latest version<br />
30
- clash of clans apk 14.555 7 hack<br />
31
- clash of clans apk 14.555 7 update<br />
32
- clash of clans apk 14.555 7 free<br />
33
- clash of clans apk 14.555 7 android<br />
34
- clash of clans apk 14.555 7 offline<br />
35
- clash of clans apk 14.555 7 private server<br />
36
- clash of clans apk 14.555 7 original<br />
37
- clash of clans apk 14.555 7 mirror<br />
38
- clash of clans apk 14.555 7 install<br />
39
- clash of clans apk 14.555 7 old version<br />
40
- clash of clans apk 14.555 7 for pc<br />
41
- clash of clans apk 14.555 7 mediafire<br />
42
- clash of clans apk 14.555 7 mega<br />
43
- clash of clans apk 14.555 7 revdl<br />
44
- clash of clans apk 14.555 7 rexdl<br />
45
- clash of clans apk 14.555 7 apkpure<br />
46
- clash of clans apk 14.555 7 uptodown<br />
47
- clash of clans apk 14.555 7 apkmirror<br />
48
- clash of clans apk 14.555 7 apkmody<br />
49
- clash of clans apk 14.555 7 happymod<br />
50
- clash of clans apk 14.555 7 an1<br />
51
- clash of clans apk 14.555 7 ihackedit<br />
52
- clash of clans apk 14.555 7 platinmods<br />
53
- clash of clans apk 14.555 7 blackmod<br />
54
- clash of clans apk 14.555 7 modapkdown<br />
55
- clash of clans apk 14.555 7 andropalace<br />
56
- clash of clans apk 14.555 7 android1<br />
57
- clash of clans apk 14.555 7 mob.org<br />
58
- clash of clans apk 14.555 7 malavida<br />
59
- clash of clans apk 14.555 7 softonic<br />
60
- clash of clans apk 14.555 7 mobpark<br />
61
- clash of clans apk 14.555 7 acmarket<br />
62
- clash of clans apk 14.555 7 aptoide<br />
63
- clash of clans apk 14.555 7 panda helper<br />
64
- clash of clans apk 14.555</p>
65
- <h2>What are the benefits of playing Clash of Clans APK 14.555 7?</h2>
66
- <h3>Enjoy the new Clan Capital feature with your clanmates</h3>
67
- <p>One of the main benefits of playing Clash of Clans APK 14.555 7 is that you can access the new Clan Capital feature, which is a huge mountain fortress above the clouds that you can build together with your clanmates. The Clan Capital is divided into four zones: Main Hall, Barracks, Workshop, and Treasury. Each zone has different buildings and functions that can help you and your clan in various ways.</p>
68
- <p>The Main Hall is where you can see your clan's progress and achievements, as well as customize your clan's banner and motto. The Barracks is where you can train and upgrade your Capital Troops, which are special troops that can only be used in Raids. The Workshop is where you can research and upgrade your Capital Spells, which are powerful spells that can only be used in Raids. The Treasury is where you can store and manage your Capital Resources, which are gold, elixir, dark elixir, and gems that can only be used in the Clan Capital.</p>
69
- <p>To build and upgrade your Clan Capital, you need to collect Clan Points by participating in Raids, Clan Wars, Clan Games, and other clan activities. You can also donate resources or gems to your clan's Treasury to help speed up the construction process. The higher the level of your Clan Capital, the more benefits you and your clan will enjoy.</p>
70
- <h3>Battle against enemy Capitals during Raid Weekends</h3>
71
- <p>Another benefit of playing Clash of Clans APK 14.555 7 is that you can participate in Raid Weekends, which are special events that occur every two weeks. During Raid Weekends, you can attack or defend against enemy Capitals using your Capital Troops and Spells. You can also cooperate with your clanmates to coordinate your attacks or defenses.</p>
72
- <p>Raid Weekends are divided into two phases: Preparation Phase and Battle Phase. During the Preparation Phase, which lasts for 24 hours, you can scout the enemy Capitals and plan your strategy. You can also train and upgrade your Capital Troops and Spells, as well as request reinforcements from your clanmates. During the Battle Phase, which lasts for 48 hours, you can launch up to three attacks against enemy Capitals and earn Raid Stars based on how much damage you inflict. You can also defend your own Capital from enemy attacks and earn Defense Stars based on how well you protect it.</p>
73
- <p>The more Raid Stars you earn, the higher your Raid Rank will be. Your Raid Rank determines how much loot you will receive at the end of the Raid Weekend. You can also earn bonus loot by completing Raid Achievements or by being one of the top performers in your clan or league. The loot you earn from Raid Weekends can be used to build and upgrade your Clan Capital or to boost your regular village.</p>
74
- <h3>Earn great rewards by completing Raids as a Clan</h3>
75
- <p>A third benefit of playing Clash of Clans APK 14.555 7 is that you can earn great rewards by completing Raids as a Clan. Raids are clan-based challenges that require you to attack or defend against a specific number of enemy Capitals within a certain time limit. For example, a Raid might ask you to attack 10 enemy Capitals in 24 hours. If you and your clanmates manage to complete the Raid, you will receive a reward based on the difficulty and duration of the Raid. The reward can be resources, gems, magic items, or even exclusive skins for your Capital Troops or Spells.</p>
76
- <p>Raids are a great way to test your skills and teamwork, as well as to earn some extra loot for your Clan Capital or regular village. You can find the available Raids in the Clan Capital menu, and you can join or create a Raid with your clanmates at any time. However, you can only participate in one Raid at a time, and you can only use your Capital Troops and Spells for Raids.</p>
- <h2>What are the drawbacks of playing Clash of Clans APK 14.555.7?</h2>
- <h3>The game requires a stable internet connection and a lot of storage space</h3>
- <p>While playing Clash of Clans APK 14.555.7 can be fun and rewarding, it also has some drawbacks that you should be aware of. One of them is that the game requires a stable internet connection to run properly. This means that you cannot play the game offline or in areas with poor network coverage. If you lose your internet connection while playing, you may experience lag, glitches, or disconnection issues that can affect your gameplay and progress.</p>
- <p>Another drawback is that the game requires a lot of storage space on your device. The APK file size of Clash of Clans APK 14.555.7 is about 200 MB, which is quite large compared to other mobile games. Moreover, the game also downloads additional data and updates regularly, which can take up more space on your device. If you have limited storage space on your device, you may need to delete some other apps or files to make room for Clash of Clans APK 14.555.7.</p>
- <h3>The game can be addictive and time-consuming</h3>
- <p>A second drawback of playing Clash of Clans APK 14.555.7 is that the game can be addictive and time-consuming. The game is designed to keep you hooked and engaged by offering you various goals, challenges, rewards, and social interactions. You may find yourself spending hours or even days playing the game without noticing the time passing by. You may also feel compelled to check the game frequently to collect resources, train troops, join raids, or chat with your clanmates.</p>
- <p>While playing the game can be enjoyable and relaxing, it can also interfere with your other responsibilities and activities in real life. You may neglect your work, studies, family, friends, health, or hobbies because of your addiction to the game. You may also spend too much money on buying gems or other in-game items to speed up your progress or gain an advantage over other players. Therefore, you should play the game in moderation and balance it with other aspects of your life.</p>
- <h3>The game can be frustrating and competitive for some players</h3>
- <p>A third drawback of playing Clash of Clans APK 14.555.7 is that the game can be frustrating and competitive for some players. The game is based on attacking and defending against other players, which means that you can lose resources, trophies, or stars if you fail to protect your village or capital from enemy attacks. You may also face stronger or more experienced players who have better troops, spells, buildings, or strategies than you.</p>
- <p>While losing can be a part of the game and a learning opportunity, it can also be demoralizing and discouraging for some players. You may feel angry, sad, or stressed because of your losses or defeats. You may also feel pressured or anxious to improve your performance or rank in the game. You may even resort to cheating or hacking to gain an unfair advantage over other players.</p>
- <p>Therefore, you should play the game with a positive and sportsmanlike attitude. You should accept your losses gracefully and learn from your mistakes. You should also respect your opponents and avoid any toxic or abusive behavior in the game. You should play the game for fun and entertainment, not for ego or pride.</p>
- <h2>Conclusion</h2>
- <p>Clash of Clans APK 14.555.7 is the latest version of the popular strategy game for mobile devices. It introduces a new feature called Clan Capital, which is a huge mountain fortress that you can build together with your clanmates. It also adds new content such as new troops, spells, buildings, traps, decorations, achievements, and more. It also fixes some bugs and improves the performance and stability of the game.</p>
- <p>Playing Clash of Clans APK 14.555.7 can be beneficial for you and your clan, as you can enjoy the new Clan Capital feature, participate in Raid Weekends, and earn great rewards by completing Raids as a Clan. However, playing the game also has some drawbacks, such as requiring a stable internet connection and a lot of storage space, being addictive and time-consuming, and being frustrating and competitive for some players.</p>
- <p>Therefore, you should play the game wisely and responsibly, and balance it with other aspects of your life. You should also have fun and respect your opponents in the game. If you want to download and install Clash of Clans APK 14.555.7 on your device, you can either update the game from the official app store or download the APK file from a third-party source. However, be careful of the risks involved in using APK files from unknown sources.</p>
- <p>We hope this article has helped you learn more about Clash of Clans APK 14.555.7 and why you should play it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy clashing!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Clash of Clans APK 14.555.7:</p>
- <ol>
- <li>What is the difference between Clash of Clans APK and Clash of Clans MOD APK?</li>
- <p>Clash of Clans APK is the original version of the game that is developed by Supercell and updated regularly with new content and improvements. Clash of Clans MOD APK is a modified version of the game that is created by third-party developers and may have extra features or cheats that are not available in the original version. However, Clash of Clans MOD APK is not authorized or supported by Supercell and may contain viruses or malware that can harm your device or compromise your privacy. Therefore, we do not recommend using Clash of Clans MOD APK.</p>
- <li>How can I transfer my Clash of Clans account from one device to another?</li>
- <p>If you want to transfer your Clash of Clans account from one device to another, you need to link your account to a Supercell ID, a Google Play Games account (for Android devices), or a Game Center account (for iOS devices). To do this, you need to open the game on your old device, go to settings, tap on "Connect Device", and follow the instructions on the screen. Then, you need to open the game on your new device, go to settings, tap on "Connect Device", and enter the code or sign in with your Supercell ID, Google Play Games account, or Game Center account. This will transfer your account to your new device.</p>
- <li>How can I get free gems in Clash of Clans?</li>
- <p>Gems are the premium currency in Clash of Clans that can be used to speed up your progress or buy special items in the game. You can get free gems in Clash of Clans by completing achievements, removing obstacles, participating in events, challenges, wars, leagues, clan games, raids, or raid weekends, or opening gem boxes or gem mines. You can also get free gems by watching ads or completing surveys or offers from third-party sources. However, be careful of scams or hacks that promise you free gems but may steal your personal information or infect your device with malware.</p>
- <li>How can I join a clan in Clash of Clans?</li>
- <p>If you want to join a clan in Clash of Clans, you need to be at least Town Hall level 4 and have at least 1000 trophies. Then, you can either search for a clan that suits your preferences (such as language, location, level, activity, etc.) or accept an invitation from a clan that has invited you. You can also create your own clan if you have enough gems, or join a clan that is open for anyone to join.</p>
- <li>How can I contact Supercell support in Clash of Clans?</li>
- <p>If you have any issues or problems with the game, such as lost accounts, missing purchases, bugs, glitches, or feedback, you can contact Supercell support in Clash of Clans by following these steps:</p>
- <ol>
- <li>Open the game and go to settings.</li>
- <li>Tap on the "Help and Support" button.</li>
- <li>Tap on the "Contact Us" button at the top right corner of the screen.</li>
- <li>Type in your message and attach any screenshots or videos if necessary.</li>
- <li>Tap on the "Send" button and wait for a reply from Supercell support.</li>
- </ol>
- <p>You can also visit the official Clash of Clans website, forum, or social media pages to find more information or solutions for your issues or problems.</p>
 
spaces/1phancelerku/anime-remove-background/Descoper muzica veche anii 80-90 i download free piesele preferate.md DELETED
@@ -1,137 +0,0 @@
-
- <h1>How to Download Free Muzica Veche Anii 80-90</h1>
- <p>Muzica veche anii 80-90, or old music from the '80s and '90s, is a genre of Romanian music that encompasses various styles, such as rock, pop, disco, dance, and slow. It is popular among many people who grew up listening to it or who appreciate its nostalgic and sentimental value. If you are one of them, you might be wondering how you can download free muzica veche anii 80-90 legally and safely. In this article, we will show you three websites that offer free music downloads of muzica veche anii 80-90, as well as how to use them.</p>
- <h2>Nostalgic FM - A Radio Station That Plays Muzica Veche Anii 80-90</h2>
- <p>One of the best ways to enjoy muzica veche anii 80-90 is to listen to Nostalgic FM, a radio station that plays only old Romanian music from the '70s, '80s, and '90s. You can listen to Nostalgic FM online by visiting their website (https://NostalgicFM.ro) or by downloading their mobile app for iOS or Android. You can also follow them on Facebook, Twitter, Instagram, and YouTube.</p>
- <h2>download free muzica veche anii 80-90</h2><br /><p><b><b>Download</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNK6N">https://jinyurl.com/2uNK6N</a></b></p><br /><br />
- <h3>How to Access and Listen to Nostalgic FM Online</h3>
- <p>To access and listen to Nostalgic FM online, you need a device with an internet connection and a web browser. You can either go directly to their website (https://NostalgicFM.ro) or search for "Nostalgic FM" on Google or another search engine. Once you are on their website, you will see a player with a play button. Click on it to start listening to Nostalgic FM live. You can also see the name of the song and the artist that is playing, as well as the previous and next songs. You can also adjust the volume, mute the sound, or share the link with your friends.</p>
- <h3>How to Download Songs from Nostalgic FM for Free</h3>
- <p>If you like a song that is playing on Nostalgic FM and you want to download it for free, you can do so by following these steps (a scripted alternative appears after the list):</p>
- <ol>
- <li>Right-click on the name of the song that is playing on the player.</li>
- <li>Select "Copy link address" or "Copy link location" from the menu.</li>
- <li>Paste the link into a new tab or window on your browser.</li>
- <li>You will be redirected to a YouTube video of the song.</li>
- <li>Copy the URL of the YouTube video from the address bar.</li>
- <li>Go to a free YouTube to MP3 converter website, such as BestMP3Converter, YouTubeMP3Free, or one of the converters recommended by TechRadar.</li>
- <li>Paste the URL of the YouTube video into the search box and click on the "Convert" or "Download" button.</li>
- <li>Choose the quality and format of the MP3 file and click on the "Download" or "Save" button.</li>
- <li>Wait for the download to finish and enjoy your free muzica veche anii 80-90 song.</li>
- </ol>
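- <p>For readers comfortable with a little code, the same convert-and-download step can be scripted. The sketch below is a minimal example using the open-source yt-dlp library together with FFmpeg; neither tool is named in the steps above, so treat this as an optional alternative, and note that the video URL is a placeholder rather than a real Nostalgic FM track.</p>
- <pre><code>
- # Minimal sketch: save the audio of a YouTube video as MP3 with yt-dlp.
- # Requires: pip install yt-dlp, plus FFmpeg available on the PATH.
- from yt_dlp import YoutubeDL
-
- options = {
-     "format": "bestaudio/best",      # pick the best audio-only stream
-     "outtmpl": "%(title)s.%(ext)s",  # name the file after the video title
-     "postprocessors": [{
-         "key": "FFmpegExtractAudio", # re-encode the download to MP3
-         "preferredcodec": "mp3",
-         "preferredquality": "192",
-     }],
- }
-
- with YoutubeDL(options) as downloader:
-     downloader.download(["https://www.youtube.com/watch?v=PLACEHOLDER"])
- </code></pre>
- <p>As with the manual steps, only download songs you are allowed to copy; the legal notes in the conclusion below apply here as well.</p>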
- <h2>Jamendo Music - A Website That Offers Free Music Downloads from Independent Artists</h2>
- <p>Another great source of free muzica veche anii 80-90 is Jamendo Music, a website that offers free music downloads from independent artists who want to share their music with the world. You can find thousands of songs from various genres, including muzica veche anii 80-90, on Jamendo Music. You can also discover new artists, create playlists, and support the music community.</p>
- <h3>How to Browse and Search for Muzica Veche Anii 80-90 on Jamendo Music</h3>
- <p>To browse and search for muzica veche anii 80-90 on Jamendo Music, you need a device with an internet connection and a web browser. You can either go directly to their website (https://www.jamendo.com/) or search for "Jamendo Music" on Google or another search engine. Once you are on their website, you will see a menu with different options, such as Explore, Radio, Licensing, and Log in. To find muzica veche anii 80-90, you can either:</p>
- <ul>
- <li>Click on Explore and then select Genres. You will see a list of music genres, such as Pop, Rock, Electronic, Jazz, and more. Scroll down until you find World Music and click on it. You will see a sub-list of world music subgenres, such as Latin, Reggae, African, and more. Scroll down until you find Balkan and click on it. You will see a collection of songs from Balkan countries, including Romania. You can also filter the songs by popularity, date, duration, or artist name.</li>
- <li>Click on the search icon at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords, such as "muzica romaneasca", "muzica populara", or "muzica de petrecere". You will see a list of songs that match your search query. You can also filter the songs by relevance, popularity, date, duration, or artist name.</li>
- </ul>
- <h3>How to Download Songs from Jamendo Music for Free</h3>
- <p>If you like a song that you find on Jamendo Music and you want to download it for free, you can do so by following these steps (a scripted alternative using Jamendo's public API appears after the list):</p>
- <ol>
- <li>Click on the song title or the play button to open the song page.</li>
- <li>Click on the download icon at the bottom right corner of the song player.</li>
- <li>You will see a pop-up window that asks you to choose between personal use or commercial use. If you want to download the song for personal use only, click on "Free Download". If you want to use the song for commercial purposes, such as in a video or a podcast, click on "Licensing".</li>
- <li>If you choose "Free Download", you will be asked to log in or sign up for a free account. You can also log in with your Facebook or Google account. Once you are logged in, you will be able to download the song as an MP3 file.</li>
- <li>If you choose "Licensing", you will be redirected to the Jamendo Licensing website (https://licensing.jamendo.com/), where you can buy a license for using the song in your project. The price depends on the type and scope of your project. You can also contact the Jamendo Licensing team for more information.</li>
- </ol>
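- <p>Jamendo also exposes its catalog through a public developer API, which suits repeated or bulk downloads better than clicking through the site. The sketch below assumes the v3.0 tracks endpoint and its "audiodownload"/"audio" result fields as we understand them; the client_id is a placeholder you would obtain for free from Jamendo's developer portal, and exact field names may differ between API versions.</p>
- <pre><code>
- # Sketch: search Jamendo via its public API and save matching MP3s.
- # Requires: pip install requests, and a free Jamendo API client_id.
- import requests
-
- CLIENT_ID = "YOUR_CLIENT_ID"  # placeholder, not a working credential
-
- response = requests.get(
-     "https://api.jamendo.com/v3.0/tracks/",
-     params={
-         "client_id": CLIENT_ID,
-         "format": "json",
-         "search": "muzica veche",  # free-text search, as in the article
-         "limit": 5,
-     },
-     timeout=30,
- )
- response.raise_for_status()
-
- for track in response.json().get("results", []):
-     audio_url = track.get("audiodownload") or track.get("audio")
-     if not audio_url:
-         continue  # some tracks do not allow direct downloads
-     mp3 = requests.get(audio_url, timeout=60)
-     with open(f"{track['name']}.mp3", "wb") as f:
-         f.write(mp3.content)
- </code></pre>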
- <h2>Internet Archive - A Digital Library That Archives Audio, Video, and Other Media</h2>
- <p>The third and final website that we recommend for downloading free muzica veche anii 80-90 is Internet Archive, a digital library that archives audio, video, and other media from various sources and periods. You can find millions of files on Internet Archive, including muzica veche anii 80-90, that are free to access and download. You can also upload your own files, donate to support the project, or join the community.</p>
- <h3>How to Find and Explore Muzica Veche Anii 80-90 on Internet Archive</h3>
- <p>To find and explore muzica veche anii 80-90 on Internet Archive, you need a device with an internet connection and a web browser. You can either go directly to their website (https://archive.org/) or search for "Internet Archive" on Google or another search engine. Once you are on their website, you will see a menu with different options, such as Web, Texts, Video, Audio, Software, Images, and More. To find muzica veche anii 80-90, you can either:</p>
- <ul>
- <li>Click on Audio and then select Community Audio. You will see a list of audio files uploaded by users and organizations. You can sort the files by views, title, date archived, creator, or date published. You can also use the search box at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords.</li>
- <li>Click on the search icon at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords. You will see a list of results from different categories, such as Web, Texts, Video, Audio, Software, Images, and More. You can filter the results by media type, year, language, collection, or topic.</li>
- </ul>
- <h3>How to Download Songs from Internet Archive for Free</h3>
- <p>If you like a song that you find on Internet Archive and you want to download it for free, you can do so by following these steps (a scripted alternative appears after the numbered list below):</p>
- <p>download free muzica veche anii 80-90 colaj<br />
- download free muzica veche anii 80-90 manele<br />
- download free muzica veche anii 80-90 disco<br />
- download free muzica veche anii 80-90 romaneasca<br />
- download free muzica veche anii 80-90 petrecere<br />
- download free muzica veche anii 80-90 mp3<br />
- download free muzica veche anii 80-90 online<br />
- download free muzica veche anii 80-90 youtube<br />
- download free muzica veche anii 80-90 zippy<br />
- download free muzica veche anii 80-90 mix<br />
- download free muzica veche anii 80-90 albume<br />
- download free muzica veche anii 80-90 straina<br />
- download free muzica veche anii 80-90 rock<br />
- download free muzica veche anii 80-90 pop<br />
- download free muzica veche anii 80-90 dance<br />
- download free muzica veche anii 80-90 hituri<br />
- download free muzica veche anii 80-90 melodii<br />
- download free muzica veche anii 80-90 playlist<br />
- download free muzica veche anii 80-90 radio<br />
- download free muzica veche anii 80-90 torrent<br />
- download free muzica veche anii 80-90 best of<br />
- download free muzica veche anii 80-90 retro<br />
- download free muzica veche anii 80-90 clasice<br />
- download free muzica veche anii 80-90 superbe<br />
- download free muzica veche anii 80-90 nemuritoare<br />
- download free muzica veche anii 80-90 de dragoste<br />
- download free muzica veche anii 80-90 de colectie<br />
- download free muzica veche anii 80-90 de aur<br />
- download free muzica veche anii 80-90 de calitate<br />
- download free muzica veche anii 80-90 de suflet</p>
- <ol>
- <li>Click on the song title or the thumbnail to open the song page.</li>
- <li>Scroll down until you see a section called "Download Options". You will see a list of formats and sizes that you can choose from, such as MP3, OGG VORBIS, TORRENT, or VBR ZIP.</li>
- <li>Click on the format and size that you prefer and the download will start automatically. You can also right-click on the format and size and select "Save link as" or "Save target as" from the menu.</li>
- <li>Wait for the download to finish and enjoy your free muzica veche anii 80-90 song.</li>
- </ol>
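- <p>Internet Archive items can also be fetched programmatically with the official internetarchive Python package, which is handy when an item contains many songs. The sketch below uses the package's download helper; the item identifier is a placeholder, and real identifiers appear in archive.org item URLs.</p>
- <pre><code>
- # Sketch: download only the MP3 files of one archive.org item.
- # Requires: pip install internetarchive
- from internetarchive import download
-
- download(
-     "some-item-identifier",  # placeholder; taken from the item's URL
-     glob_pattern="*.mp3",    # skip the other formats in the item
-     verbose=True,            # print progress per file
- )
- </code></pre>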
- <h2>Conclusion</h2>
- <p>In conclusion, downloading free muzica veche anii 80-90 is possible and easy if you know where to look. We have shown you three websites that offer free music downloads of muzica veche anii 80-90 legally and safely: Nostalgic FM (https://NostalgicFM.ro), Jamendo Music (https://www.jamendo.com/), and Internet Archive (https://archive.org/). You can use these websites to listen to and download your favorite songs from the '80s and '90s without spending a dime.</p>
- <p>However, before you start downloading free muzica veche anii 80-90, here are some tips and recommendations that you should keep in mind:</p>
- <ul>
- <li>Always check the quality and format of the songs before downloading them. Some songs might have low quality or incompatible formats that might affect your listening experience.</li>
- <li>Always respect the rights and wishes of the artists and creators of muzica veche anii 80-90. Do not use their songs for commercial purposes without their permission or license. Do not distribute their songs without their consent or credit. Do not claim their songs as your own or modify them without their approval.</li>
- <li>Always support the artists and creators of muzica veche anii 80-90 if you can. You can do so by buying their albums or merchandise, attending their concerts or events, following them on social media or streaming platforms, or donating to their causes or projects.</li>
- </ul>
- <p>We hope that this article has helped you learn how to download free muzica veche anii 80-90 and enjoy it to the fullest. If you have any questions or comments, feel free to leave a comment below. Happy listening!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about downloading free muzica veche anii 80-90:</p>
- <h3>What are some of the best artists and songs of muzica veche anii 80-90?</h3>
- <p>There are many artists and songs of muzica veche anii 80-90 that are worth listening to, but here are some of the most popular and influential ones:</p>
- <ul>
- <li>Holograf - A rock band that formed in 1978 and is still active today. Some of their hits include "Sa nu-mi iei niciodata dragostea", "Ti-am dat un inel", and "Cat de departe".</li>
- <li>Loredana Groza - A pop singer and actress who debuted in 1986 and is still active today. Some of her hits include "Zaraza", "Lele", and "Buna seara, iubito".</li>
- <li>3 Sud Est - A dance-pop boy band that formed in 1997 and is still active today. Some of their hits include "Amintirile", "Alaturi de ingeri", and "Emotii".</li>
- <li>Cargo - A heavy metal band that formed in 1985 and is still active today. Some of their hits include "Ziua vrajitoarelor", "Daca ploaia s-ar opri", and "Nu ma lasa sa-mi fie dor".</li>
- <li>Andra - A pop singer who debuted in 1999 and is still active today. Some of her hits include "Ramai cu mine", "Inevitabil va fi bine", and "Marfa curata".</li>
- </ul>
- <h3>What are some of the advantages and disadvantages of downloading free music online?</h3>
- <p>Downloading free music online has some advantages and disadvantages, such as:</p>
- <table>
- <tr><th>Advantages</th><th>Disadvantages</th></tr>
- <tr><td>You can access a large variety of music from different genres, artists, and periods.</td><td>You might encounter low quality, corrupted, or incomplete files.</td></tr>
- <tr><td>You can save money and storage space by not buying physical CDs or DVDs.</td><td>You might violate the intellectual property rights of the artists and creators.</td></tr>
- <tr><td>You can listen to your music offline without relying on an internet connection.</td><td>You might expose your device to viruses, malware, or spyware.</td></tr>
- <tr><td>You can create your own playlists and share them with your friends.</td><td>You might miss out on the latest releases, updates, or features of the music platforms.</td></tr>
- </table>
- <h3>How can I support the artists and creators of muzica veche anii 80-90?</h3>
- <p>If you download free muzica veche anii 80-90 online, you should also support the artists and creators who made it possible. You can do so by:</p>
- <ul>
- <li>Buying their albums or merchandise from their official websites or stores.</li>
- <li>Attending their concerts or events if they are available in your area.</li>
- <li>Following them on social media or streaming platforms and engaging with their posts or content.</li>
- <li>Donating to their causes or projects if they have any.</li>
- <li>Giving them feedback, reviews, or ratings on their music or performance.</li>
- <li>Recommending their music to your friends, family, or acquaintances.</li>
- </ul>
- <h3>What are some of the legal and ethical issues of downloading free music online?</h3>
- <p>Downloading free music online might involve some legal and ethical issues, such as:</p>
- <ul>
- <li>Infringing the intellectual property rights of the artists and creators. This means that you are using their work without their permission or license, which might result in legal actions or penalties.</li>
- <li>Depriving the artists and creators of their income and recognition. This means that you are not paying them for their work or giving them credit, which might affect their livelihood and reputation.</li>
- <li>Harming the music industry and culture. This means that you are reducing the demand and supply of music products and services, which might affect the quality and diversity of music available.</li>
- </ul>
- <h3>How can I convert and play muzica veche anii 80-90 on different devices?</h3>
- <p>If you download free muzica veche anii 80-90 online, you might need to convert and play it on different devices, such as your computer, smartphone, tablet, or MP3 player. You can do so in one of the following ways (a small code sketch follows the list):</p>
- <ul>
- <li>Using a free online file converter website, such as Online-Convert, Zamzar, or Convertio. You can upload your music file and choose the output format and quality that you want. Then, you can download the converted file and transfer it to your device.</li>
- <li>Using a free software or app that can convert and play music files, such as VLC Media Player, Audacity, or Freemake Audio Converter. You can install the software or app on your device and use it to open, convert, and play your music file.</li>
- </ul>
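- <p>As hinted in the list above, the same conversion can also be done locally in a few lines of code. The sketch below uses the pydub library, which wraps FFmpeg, so FFmpeg must be installed; the file names are placeholders.</p>
- <pre><code>
- # Sketch: convert a downloaded song to MP3 locally with pydub.
- # Requires: pip install pydub, plus FFmpeg available on the PATH.
- from pydub import AudioSegment
-
- song = AudioSegment.from_file("song.ogg")  # source file (placeholder name)
- song.export("song.mp3", format="mp3", bitrate="192k")  # write an MP3 copy
- </code></pre>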
 
spaces/1phancelerku/anime-remove-background/Download Wallpaper Kamen Rider Gates The Ultimate Collection of HD Images.md DELETED
@@ -1,171 +0,0 @@
- <h1>How to Download Wallpaper Kamen Rider Geats</h1>
- <p>If you are a fan of the Japanese tokusatsu drama series <strong>Kamen Rider Geats</strong>, you might want to decorate your device with some cool wallpapers featuring the characters and scenes from the show. Wallpapers are images that can be used as backgrounds for your desktop, laptop, smartphone, tablet, or any other device. They can make your device look more attractive, personalized, and fun. They can also express your personality, mood, and preferences.</p>
- <h2>download wallpaper kamen rider gates</h2><br /><p><b><b>DOWNLOAD</b> &#9889; <a href="https://jinyurl.com/2uNQsa">https://jinyurl.com/2uNQsa</a></b></p><br /><br />
- <p>In this article, we will show you how to download wallpaper Kamen Rider Geats from various sources online. We will also give you some tips on how to set them as your device's background. By following these steps, you will be able to enjoy the amazing visuals of Kamen Rider Geats anytime and anywhere.</p>
- <h2>How to Find Wallpaper Kamen Rider Geats Online</h2>
- <p>There are many ways to find wallpaper Kamen Rider Geats online. You can use search engines, websites, or apps that offer wallpapers for free or for a fee. Here are some of the most common methods:</p>
- <h3>Search engines</h3>
- <p>Search engines are tools that help you find information on the internet. You can use them to find wallpaper Kamen Rider Geats by typing keywords related to the show or the characters. For example, you can type "wallpaper kamen rider geats", "kamen rider geats hd wallpapers", "kamen rider geats 4k wallpapers", etc. You can also add modifiers such as "download", "free", "best", etc. to narrow down your search results.</p>
- <p>download wallpaper kamen rider gates hd<br />
- download wallpaper kamen rider gates 4k<br />
- download wallpaper kamen rider gates for pc<br />
- download wallpaper kamen rider gates for phone<br />
- download wallpaper kamen rider gates for laptop<br />
- download wallpaper kamen rider gates anime<br />
- download wallpaper kamen rider gates abyss<br />
- download wallpaper kamen rider gates cave<br />
- download wallpaper kamen rider gates free<br />
- download wallpaper kamen rider gates online<br />
- download wallpaper kamen rider gates and zio<br />
- download wallpaper kamen rider gates and woz<br />
- download wallpaper kamen rider gates and tsukuyomi<br />
- download wallpaper kamen rider gates and sougo<br />
- download wallpaper kamen rider gates and another riders<br />
- download wallpaper kamen rider gates revive<br />
- download wallpaper kamen rider gates revive shippu<br />
- download wallpaper kamen rider gates revive goretsu<br />
- download wallpaper kamen rider gates revive hiryu<br />
- download wallpaper kamen rider gates revive majesty<br />
- download wallpaper kamen rider gates trinity<br />
- download wallpaper kamen rider gates grand zio<br />
- download wallpaper kamen rider gates ohma zio<br />
- download wallpaper kamen rider gates ohma form<br />
- download wallpaper kamen rider gates zi-o ii<br />
- download wallpaper kamen rider gates zi-o trinity<br />
- download wallpaper kamen rider gates zi-o decade armor<br />
- download wallpaper kamen rider gates zi-o geiz majesty armor<br />
- download wallpaper kamen rider gates zi-o geiz revive armor<br />
- download wallpaper kamen rider gates zi-o woz armor<br />
- download wallpaper kamen rider gates zi-o tsukuyomi armor<br />
- download wallpaper kamen rider gates zi-o sougo armor<br />
- download wallpaper kamen rider gates zi-o another riders armor<br />
- download wallpaper kamen rider gates zi-o final form ride armor<br />
- download wallpaper kamen rider gates zi-o final form time armor<br />
- download wallpaper kamen rider gates zi-o final form time mazine armor<br />
- download wallpaper kamen rider gates zi-o final form time king armor<br />
- download wallpaper kamen rider gates zi-o final form time ohma armor<br />
- download wallpaper kamen rider gates woz ginga finaly armor<br />
- download wallpaper kamen rider gates woz ginga taiyo armor<br />
- download wallpaper kamen rider gates woz ginga wakusei armor<br />
- download wallpaper kamen rider gates woz future ring shinobi armor<br />
- download wallpaper kamen rider gates woz future ring quiz armor<br />
- download wallpaper kamen rider gates woz future ring kita armor</p>
- <p>Some of the most popular search engines are Google, Bing, Yahoo, DuckDuckGo, etc. They will display a list of websites that match your query. You can click on the links to visit the websites and see if they have the wallpapers you want. You can also use the images tab or filter to see only the images related to your query.</p>
- <p>For example, here are some of the results from Google when we searched for "wallpaper kamen rider geats":</p>
- <table>
- <tr>
- <th>Title</th>
- <th>Snippet</th>
- <th>URL</th>
- </tr>
- <tr>
- <td>20+ Kamen Rider Geats HD Wallpapers and Backgrounds</td>
- <td>Each of these 20+ Kamen Rider Geats Wallpapers has been community curated to work great as a wallpaper. Explore: Wallpapers Phone Wallpapers pfp. 4K Kamen Rider Geats Wallpapers. Infinite. All Resolutions.</td>
- <td></td>
- </tr>
- <tr>
- <td>Kamen Rider Geats (TV Series 2022–2023) - IMDb</td>
- <td>Kamen Rider Geats: With Hideyoshi Kan, Kazuto Mokudai, Kok…</td>
- <td></td>
- </tr>
- </table>
- <h3>Websites</h3>
- <p>Websites are online platforms that provide various kinds of content and services. You can use them to find wallpaper Kamen Rider Geats by browsing through their collections or categories. Some websites may require you to register or pay a fee to access their wallpapers, while others may offer them for free. Some websites may also allow you to upload your own wallpapers or request custom ones.</p>
- <p>Some of the most popular websites that offer wallpaper Kamen Rider Geats are WallpaperAccess, WallpaperCave, WallpaperFlare, etc. They have a large and diverse selection of wallpapers for different devices and resolutions. You can also filter them by color, theme, style, etc. You can preview the wallpapers before downloading them and see how they look on your device.</p>
- <p>For example, here are some of the wallpapers from WallpaperAccess when we searched for "Kamen Rider Geats":</p>
- <table>
- <tr>
- <th>Image</th>
- <th>Title</th>
- <th>Resolution</th>
- <th>URL</th>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats Wallpaper"></td>
- <td>Kamen Rider Geats Wallpaper</td>
- <td>1920x1080</td>
- <td></td>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats Zio Wallpaper"></td>
- <td>Kamen Rider Geats Zio Wallpaper</td>
- <td>1920x1080</td>
- <td></td>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats Revive Wallpaper"></td>
- <td>Kamen Rider Geats Revive Wallpaper</td>
- <td>1920x1080</td>
- <td></td>
- </tr>
- </table>
- <h3>Apps</h3>
- <p>Apps are software applications that run on your device. You can use them to find wallpaper Kamen Rider Geats by downloading them from the app store or the website of the developer. Some apps may require you to grant permissions or watch ads to use their wallpapers, while others may offer them without any restrictions. Some apps may also have features such as cropping, editing, sharing, etc.</p>
- <p>Some of the most popular apps that provide wallpaper Kamen Rider Geats are Zedge, Walli, Backdrops, etc. They have a user-friendly interface and a regular update of wallpapers for different genres and fandoms. You can also rate, review, and save your favorite wallpapers for later use.</p>
- <p>For example, here are some of the wallpapers from Zedge when we searched for "Kamen Rider Geats":</p>
- <table>
- <tr>
- <th>Image</th>
- <th>Title</th>
- <th>Rating</th>
- <th>URL</th>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats"></td>
- <td>Kamen Rider Geats</td>
- <td>4.5/5 stars</td>
- <td></td>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats Trinity"></td>
- <td>Kamen Rider Geats Trinity</td>
- <td>4/5 stars</td>
- <td></td>
- </tr>
- <tr>
- <td><img src="" alt="Kamen Rider Geats Woz"></td>
- <td>Kamen Rider Geats Woz</td>
- <td>4/5 stars</td>
- <td></td>
- </tr>
- </table>
- <h2>How to Download Wallpaper Kamen Rider Geats on Different Devices</h2>
- <p>Once you have found the wallpaper Kamen Rider Geats that you like, you can download it on your device by following these steps:</p>
- <h3>PC</h3>
- <p>To download wallpaper Kamen Rider Geats on your PC, you can do one of the following:</p>
- <ul>
- <li>Right-click on the image and select "Save image as" or "Save picture as". Choose a location and a name for the file and click "Save".</li>
- <li>Click on the download button or link if available. Choose a location and a name for the file and click "Save".</li>
- <li>Drag and drop the image to your desktop or a folder.</li>
- </ul>
- <p>Some tips and tricks for PC users are:</p>
- <ul>
- <li>You can use keyboard shortcuts such as Ctrl+S or Ctrl+C and Ctrl+V to save or copy and paste the image.</li>
- <li>You can use a download manager such as IDM or Free Download Manager to speed up the download process and resume interrupted downloads.</li>
- <li>You can use an image converter such as Online-Convert or Convertio to change the format of the image if needed.</li>
- <li>You can use an image editor such as Photoshop or GIMP to resize, crop, or enhance the image if desired.</li>
- </ul>
- <h3>Mobile</h3>
- <p>To download wallpaper Kamen Rider Geats on your mobile device, you can do one of the following:</p>
- <ul>
- <li>Tap and hold on the image and select "Save image" or "Download image". Choose a location and a name for the file and tap "Save".</li>
- <li>Tap on the download button or link if available. Choose a location and a name for the file and tap "Save".</li>
- <li>Screenshot the image by pressing the power and volume buttons simultaneously. The image will be saved in your gallery or photos app.</li>
- </ul>
- <p>Some tips and tricks for mobile users are:</p>
- <ul>
- <li>You can use a browser such as Chrome or Firefox to access more websites and features than the default browser.</li>
- <li>You can use a downloader app such as Advanced Download Manager or Turbo Download Manager to speed up the download process and resume interrupted downloads.</li>
- <li>You can use an image converter app such as Image Converter or Image Size to change the format of the image if needed.</li>
- <li>You can use an image editor app such as PicsArt or Snapseed to resize, crop, or enhance the image if desired.</li>
- </ul>
- <h2>How to Set Wallpaper Kamen Rider Geats on Different Devices</h2>
- <p>Once you have downloaded the wallpaper Kamen Rider Geats that you like, you can set it as your device's background by following these steps:</p>
- <h3>PC</h3>
- <p>To set wallpaper Kamen Rider Geats on your PC, you can do one of the following:</p>
- <ul>
- <li>Right-click on the image file and select "Set as desktop background" or "Set as wallpaper".</li>
- <li>Right-click on your desktop and select "Personalize" or "Display settings". Click on "Background" or "Wallpaper" and browse for the image file. Click on "Apply" or "OK".</li>
- <li>Open the image file with an image viewer such as Windows Photo Viewer or IrfanView. Click on "Set as desktop background" or "Set as wallpaper".</li>
- </ul>
- <p>Some options and settings for PC users are:</p>
- <ul>
- <li>You can choose how to fit the image to your screen by selecting "Fill", "Fit", "Stretch", "Tile", or "Center".</li>
- <li>You can change the color of your desktop background by selecting a solid color or a gradient.</li>
- <li>You can create a slideshow of wallpapers by selecting multiple images and setting a time interval.</li>
- </ul>
- <h3>Mobile</h3>
- <p>To set wallpaper Kamen Rider Geats on your mobile device, you can do one of the following:</p>
- <ul>
- <li>Tap and hold on the image file and select "Set as wallpaper" or "Use as wallpaper". Choose whether to set it as your home screen, lock screen, or both. Adjust the position and size of the image and tap "Set" or "Done".</li>
- <li>Open your gallery or photos app and find the image file. Tap on the menu button and select "Set as wallpaper" or "Use as wallpaper". Choose whether to set it as your home screen, lock screen, or both. Adjust the position and size of the image and tap "Set" or "Done".</li>
- <li>Open your settings app and tap on "Wallpaper" or "Display". Tap on "Choose a new wallpaper" or "Select wallpaper". Browse for the image file and tap on it. Choose whether to set it as your home screen, lock screen, or both. Adjust the position and size of the image and tap "Set" or "Done".</li>
- </ul>
- <p>Some options and settings for mobile users are:</p>
- <ul>
- <li>You can choose how to crop the image to fit your screen by selecting "Original", "Square", "Portrait", or "Landscape".</li>
- <li>You can apply filters, stickers, text, or drawings to the image by using an app such as Walli, Backdrops, etc.</li>
- <li>You can create a live wallpaper by using an app such as Zedge, Live Wallpapers HD, etc.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to download wallpaper Kamen Rider Geats from various sources online. We have also given you some tips on how to set them as your device's background. By following these steps, you will be able to enjoy the amazing visuals of Kamen Rider Geats anytime and anywhere.</p>
- <p>Kamen Rider Geats is a popular Japanese tokusatsu drama series that follows a young man competing in the Desire Grand Prix, a survival game between Riders whose winner can reshape the world. It is part of the Kamen Rider franchise that has been running since 1971. It has a loyal fan base and a rich lore. If you are one of the fans, you might want to show your love and support by downloading wallpaper Kamen Rider Geats for your device.</p>
- <p>So, what are you waiting for? Go ahead and download wallpaper Kamen Rider Geats now and make your device look awesome. You can also share your wallpapers with your friends and family and spread the word about this amazing show. You can also check out other wallpapers related to the Kamen Rider franchise and discover more characters and stories.</p>
- <p>Thank you for reading this article. We hope you found it helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about wallpaper Kamen Rider Geats:</p>
- <ol>
- <li>What is the best resolution for wallpaper Kamen Rider Geats?</li>
- <p>The best resolution for wallpaper Kamen Rider Geats depends on the size and quality of your device's screen. Generally, the higher the resolution, the better the image quality. However, higher resolution also means larger file size and more storage space. You can check your device's screen resolution by going to your settings or using an online tool such as WhatIsMyScreenResolution.com. You can then choose a wallpaper that matches or exceeds your screen resolution.</p>
- <li>Where can I watch Kamen Rider Geats online?</li>
- <p>You can watch Kamen Rider Geats online on various streaming platforms, depending on your region. You can also watch it on YouTube or Dailymotion, but be aware of the quality and legality of the videos. You can also buy or rent the DVDs or Blu-rays of the show from online or offline stores.</p>
- <li>Who are the main characters of Kamen Rider Geats?</li>
- <p>The main characters of Kamen Rider Geats are:</p>
- <ul>
- <li>Hideyoshi Kan as Ace Ukiyo / Kamen Rider Geats, the seemingly unbeatable ace of the Desire Grand Prix with a mysterious past.</li>
- <li>Kazuto Mokudai as Keiwa Sakurai / Kamen Rider Tycoon, an earnest young man who fights to protect everyday peace.</li>
- <li>Yuna Hoshino as Neon Kurama / Kamen Rider Na-Go, a sheltered celebrity heiress searching for a life of her own.</li>
- <li>Michinaga Azuma / Kamen Rider Buffa, a hardened rival who resents Geats and fights to survive the game.</li>
- </ul>
- <li>What are the themes and genres of Kamen Rider Geats?</li>
- <p>Kamen Rider Geats is a sci-fi action drama that explores the themes of desire, survival, friendship, rivalry, and justice. It is part of the Reiwa era of the Kamen Rider franchise, and its story is framed as a game-like survival competition in which each Rider fights to make a wish come true.</p>
- <li>How many episodes are there in Kamen Rider Geats?</li>
- <p>Kamen Rider Geats has 49 episodes in total, each lasting about 24 minutes. It aired from September 2022 to August 2023 on TV Asahi. It was followed by Kamen Rider Gotchard.</p>
- </ol>
 
spaces/1phancelerku/anime-remove-background/Euro Truck Driver 2018 Download MOD APK with Unlimited Money.md DELETED
@@ -1,124 +0,0 @@
-
- <h1>Euro Truck Driver 2018 Hack Mod APK Download: How to Get Unlimited Money and Features</h1>
- <p>If you are a fan of truck driving simulation games, you might have heard of <strong>Euro Truck Driver 2018</strong>, one of the most popular and realistic truck simulator games on the market. In this game, you can experience the thrill of driving across Europe, transporting goods from one city to another, exploring the amazing open world map, and enjoying the next-gen graphics, awesome features, and realistic trucking scenarios. However, you might also find that the game is quite challenging and requires a lot of time and money to unlock all the trucks, trailers, and features that you want. That's why some players resort to using a <strong>hack mod apk</strong> for Euro Truck Driver 2018, which is a modified version of the game that gives you unlimited money and access to everything in the game. But is it worth it? What are the benefits and risks of using a hack mod apk for Euro Truck Driver 2018? How can you download and install it safely? And are there any alternatives to using a hack mod apk for Euro Truck Driver 2018? In this article, we will answer all these questions and more. Read on to find out more.</p>
- <h2>Introduction</h2>
- <h3>What is Euro Truck Driver 2018?</h3>
- <p>Euro Truck Driver 2018 is a truck driving simulation game developed by Ovidiu Pop, a well-known developer of simulation games. The game was released in 2020 and has since gained millions of downloads and positive reviews from players all over the world. The game features many <strong>Euro truck brands</strong>, such as Mercedes-Benz, Volvo, Scania, MAN, Renault, DAF, Iveco, and more, with realistic engine sounds and detailed interiors. You can drive across <strong>Europe</strong>, from Germany to France, from Spain to Italy, from UK to Poland, and more, transporting goods from one city to another, such as cars, food, furniture, chemicals, livestock, etc. You can explore the amazing <strong>open world map</strong>, which includes desert, snow, mountain, and cities, with a dynamic weather system (snow, rain, sun...). You can also enjoy the <strong>realistic controls</strong>, such as tilt steering, buttons or virtual steering wheel, manual transmission with h-shifter and clutch, accurate engine sounds, etc. You can play in <strong>multiplayer mode</strong> and join your friends or other drivers online in real-time. You can also play in <strong>career mode</strong> and become a professional truck driver by completing missions and earning money. You can use your money to buy new trucks or upgrade your existing ones. You can also customize your trucks with different paint jobs, accessories, lights, horns, etc. You can also experience the <strong>visual and mechanical damage</strong> on your vehicles if you crash or drive recklessly.</p>
- <h2>euro truck driver 2018 hack mod apk download</h2><br /><p><b><b>Download</b> &#9989; <a href="https://jinyurl.com/2uNRrg">https://jinyurl.com/2uNRrg</a></b></p><br /><br />
- <h3>Why do you need a hack mod apk for Euro Truck Driver 2018?</h3>
- <p>As you can see, Euro Truck Driver 2018 is a very fun and immersive game that offers a lot of content and features for you to enjoy. However, you might also encounter some <strong>challenges and limitations</strong> that might affect your gaming experience. For example, you might find that the game is <strong>too hard or too slow</strong> for your liking, especially if you are a beginner or a casual player. You might have to spend a lot of time and effort to complete the missions, earn money, and unlock new trucks and features. You might also have to deal with the <strong>in-game ads</strong> that might pop up and interrupt your gameplay. You might also need to have a <strong>rooted device</strong> to access some of the advanced features of the game. That's why some players look for a <strong>hack mod apk</strong> for Euro Truck Driver 2018, which is a modified version of the game that gives you <strong>unlimited money and access to everything in the game</strong>. With a hack mod apk, you can enjoy the game without any restrictions or hassles. You can buy any truck or trailer you want, upgrade them to the max, customize them as you wish, and drive them across Europe with ease. You can also play the game without any ads or root requirement. Sounds tempting, right? But before you download and install a hack mod apk for Euro Truck Driver 2018, you should also be aware of the <strong>benefits and risks</strong> of using one.</p>
- <h2>Benefits of Euro Truck Driver 2018 Hack Mod APK</h2>
- <h3>Unlimited money</h3>
- <p>The most obvious benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can get <strong>unlimited money</strong> in the game. Money is the main currency in the game that you can use to buy new trucks, trailers, upgrades, and customizations. Normally, you have to earn money by completing missions, delivering goods, driving safely, etc. However, this can be very time-consuming and tedious, especially if you want to buy the most expensive and powerful trucks in the game. With a hack mod apk, you can get unlimited money instantly and spend it as much as you want. You can buy any truck or trailer you like, from the cheapest to the most luxurious ones. You can also upgrade your trucks with better engines, transmissions, tires, brakes, etc. You can also customize your trucks with different paint jobs, accessories, lights, horns, etc. You can make your truck look unique and awesome with unlimited money.</p>
- <h3>Unlocked trucks and trailers</h3>
- <p>Another benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can get <strong>unlocked trucks and trailers</strong> in the game. Trucks and trailers are the main vehicles in the game that you can use to transport goods across Europe. There are many different types of trucks and trailers in the game, such as flatbeds, refrigerated trailers, car carriers, tankers, etc. Each truck and trailer has its own specifications, such as speed, power, fuel consumption, cargo capacity, etc. Normally, you have to unlock new trucks and trailers by earning money and reaching certain levels in the game. However, this can be very challenging and frustrating, especially if you want to try out different trucks and trailers in the game. With a hack mod apk, you can get unlocked trucks and trailers instantly and use them as you wish. You can switch between different trucks and trailers depending on your preference and mission requirements. You can also enjoy driving different brands of trucks with realistic engine sounds and detailed interiors.</p>
- <h3>No ads and no root required</h3>
- <p>A third benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can play the game without any <strong>ads and root requirement</strong>. Ads are annoying pop-ups that might appear on your screen while playing the game. They might distract you from your gameplay or make you wait for a few seconds before resuming the game. They might also consume your data or battery life unnecessarily. Normally, you have to watch ads or pay real money to remove them from the game. However, with a hack mod apk, you can play the game without any ads at all. You can enjoy the game without any interruptions or distractions. Root is a process that allows you to access some of the advanced features of your device or app that are normally restricted by the manufacturer or developer. However, rooting your device might also void your warranty or expose your device to security risks or malware infections. Normally, you have to root your device to access some of the advanced features of Euro Truck Driver 2018 such as multiplayer mode or visual damage effects. However, with a hack mod apk, you can play the game without any root requirement at all. You can access all the features of the game without risking your device or warranty.</p>
- <h2>Risks of Euro Truck Driver 2018 Hack Mod APK</h2>
- <h3>Possible malware or virus infection</h3>
- <p>However, using a hack mod apk for Euro Truck Driver 2018 is not without risks. One of the main risks is that you might get a <strong>malware or virus infection</strong> on your device. Malware or a virus is malicious software that can harm your device or steal your personal information. Hack mod apk files are usually downloaded from unofficial or unknown sources that might not be trustworthy or secure. They might contain hidden malware or viruses that can infect your device once you install them. They might also ask for unnecessary permissions or access to your device's data, such as contacts, photos, messages, etc. They might also display unwanted ads or pop-ups that might redirect you to harmful websites or download more malware or viruses on your device. Therefore, you should be very careful and cautious when downloading and installing a hack mod apk for Euro Truck Driver 2018. You should always scan the file with reliable antivirus software before installing it. You should also back up your device's data and use a VPN to protect your privacy and security.</p>
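- <p>One concrete way to act on this advice is to verify a downloaded APK against a checksum published by the download source, when one is provided, before installing it. The sketch below is a small SHA-256 helper; the file name and expected hash are placeholders, and a matching hash only shows the file was not corrupted or swapped in transit, not that it is safe.</p>
- <pre><code>
- # Sketch: compare a downloaded APK's SHA-256 hash with a published value.
- import hashlib
-
- def sha256_of(path: str) -> str:
-     digest = hashlib.sha256()
-     with open(path, "rb") as f:
-         for chunk in iter(lambda: f.read(8192), b""):  # stream in 8 KB chunks
-             digest.update(chunk)
-     return digest.hexdigest()
-
- expected = "PASTE_PUBLISHED_SHA256_HERE"  # placeholder
- actual = sha256_of("euro-truck-driver-2018-mod.apk")  # placeholder file name
- print("match" if actual == expected.lower() else "MISMATCH - do not install")
- </code></pre>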
- <p>euro truck driver 2018 hack mod apk download for android<br />
- euro truck driver 2018 hack mod apk download unlimited money<br />
- euro truck driver 2018 hack mod apk download latest version<br />
- euro truck driver 2018 hack mod apk download revdl<br />
- euro truck driver 2018 hack mod apk download rexdl<br />
- euro truck driver 2018 hack mod apk download android 1<br />
- euro truck driver 2018 hack mod apk download uptodown<br />
- euro truck driver 2018 hack mod apk download apkpure<br />
- euro truck driver 2018 hack mod apk download free<br />
- euro truck driver 2018 hack mod apk download offline<br />
- euro truck driver 2018 hack mod apk download no root<br />
- euro truck driver 2018 hack mod apk download obb<br />
- euro truck driver 2018 hack mod apk download ios<br />
- euro truck driver 2018 hack mod apk download pc<br />
- euro truck driver 2018 hack mod apk download windows 10<br />
- euro truck driver 2018 hack mod apk download windows 7<br />
- euro truck driver 2018 hack mod apk download laptop<br />
- euro truck driver 2018 hack mod apk download mac<br />
- euro truck driver 2018 hack mod apk download bluestacks<br />
- euro truck driver 2018 hack mod apk download without verification<br />
- euro truck driver 2018 hack mod apk download without survey<br />
- euro truck driver 2018 hack mod apk download without human verification<br />
- euro truck driver 2018 hack mod apk download without password<br />
- euro truck driver 2018 hack mod apk download online<br />
- euro truck driver 2018 hack mod apk download link<br />
- euro truck driver 2018 hack mod apk download site<br />
- euro truck driver 2018 hack mod apk download website<br />
- euro truck driver 2018 hack mod apk download google drive<br />
- euro truck driver 2018 hack mod apk download mediafire<br />
- euro truck driver 2018 hack mod apk download mega<br />
- euro truck driver 2018 hack mod apk download zippyshare<br />
- euro truck driver 2018 hack mod apk download filehippo<br />
- euro truck driver 2018 hack mod apk download softonic<br />
- euro truck driver 2018 hack mod apk download cnet<br />
- euro truck driver 2018 hack mod apk download malavida<br />
- euro truck driver 2018 hack mod apk download happymod<br />
- euro truck driver 2018 hack mod apk download mob.org<br />
- euro truck driver 2018 hack mod apk download an1.com<br />
- euro truck driver 2018 hack mod apk download dlandroid.com<br />
- euro truck driver 2018 hack mod apk download andropalace.org<br />
- euro truck driver 2018 hack mod apk download androeed.ru<br />
- euro truck driver 2018 hack mod apk download andropark.info<br />
- euro truck driver 2018 hack mod apk download androking.org<br />
- euro truck driver 2018 hack mod apk download androgamer.org<br />
- euro truck driver 2018 hack mod apk download androplace.net<br />
- euro truck driver 2018 hack mod apk download androeed.net<br />
- euro truck driver 2018 hack mod apk download androapk.org</p>
67
- <h3>Ban from online multiplayer mode</h3>
68
- <p>Another risk of using a hack mod apk for Euro Truck Driver 2018 is that you might get a <strong>ban from online multiplayer mode</strong>. Online multiplayer mode is one of the most fun and exciting features of Euro Truck Driver 2018, where you can join your friends or other drivers online in real-time and compete with them in various missions and challenges. However, online multiplayer mode also requires a fair and balanced gameplay for all players. Therefore, the game developers have implemented a <strong>cheat detection system</strong> that can detect and ban players who use hack mod apk or other cheating methods in online multiplayer mode. If you use a hack mod apk for Euro Truck Driver 2018, you might get detected and banned from online multiplayer mode permanently. You might also lose your game progress and data, as well as your reputation and ranking among other players. Therefore, you should avoid using a hack mod apk for Euro Truck Driver 2018 if you want to play online multiplayer mode without any risk of getting banned.</p>
69
- <h3>Loss of game progress and data</h3>
70
- <p>A third risk of using a hack mod apk for Euro Truck Driver 2018 is losing your <strong>game progress and data</strong>. Game progress and data are the information the game saves on your device or online account, such as your level, money, trucks, trailers, missions, and achievements; they let you continue the game from where you left off and track your performance. A hack mod apk, however, might have <strong>compatibility issues</strong> with the original version of the game or with official updates from the developers: it might not work properly or might crash frequently, and it might overwrite or corrupt your saved data, making it unusable or inaccessible. You might also lose your progress if you uninstall the hack mod apk or switch back to the original version of the game. Therefore, you should always back up your game progress and data before using a hack mod apk for Euro Truck Driver 2018.</p>
71
- <h2>How to Download and Install Euro Truck Driver 2018 Hack Mod APK</h2>
72
- <h3>Step 1: Find a reliable source for the hack mod apk file</h3>
73
- <p>If you still want to try a hack mod apk for Euro Truck Driver 2018 despite the risks, you first need to find a <strong>reliable source</strong> for the hack mod apk file. A reliable source is a website or platform that provides genuine and safe hack mod apk files for various games and apps. It should have positive reviews and feedback from other users who have downloaded and used its files, clear and detailed download and installation instructions, and updated, working files that are compatible with the latest version of the game and your device. You can search online for some of the better-known sources of hack mod apk files for Euro Truck Driver 2018, such as APKPure, APKMody, ModDroid, etc.</p>
74
- <h3>Step 2: Enable unknown sources on your device settings</h3>
75
- <p>After finding a reliable source for the hack mod apk file for Euro Truck Driver 2018, you next need to enable <strong>unknown sources</strong> in your device settings. Unknown sources are sources that are not verified or authorized by your device's official app store or developer. Hack mod apk files are usually downloaded from such sources, as they are not available on the official app store or developer website, so your device must be allowed to install apps from them. To enable unknown sources, follow these steps:</p>
76
- <ol>
77
- <li>Go to your device's <strong>Settings</strong> app and tap on <strong>Security</strong> or <strong>Privacy</strong>.</li>
78
- <li>Find and tap on the option that says <strong>Unknown Sources</strong> or <strong>Install Unknown Apps</strong>.</li>
79
- <li>Toggle on the switch or check the box that allows you to install apps from unknown sources.</li>
80
- <li>A warning message might pop up, telling you about the risks of installing apps from unknown sources. Tap on <strong>OK</strong> or <strong>Allow</strong> to confirm your choice.</li>
81
- </ol>
82
- <p>You have now enabled unknown sources on your device settings and you can proceed to download and install the hack mod apk file for Euro Truck Driver 2018.</p>
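- <p>If you prefer to check this setting from a computer, the short Python sketch below queries it over ADB. This is only a hedged illustration, not an official procedure: it assumes ADB is installed and USB debugging is enabled, and the global <code>install_non_market_apps</code> flag only exists on Android 7 and earlier (Android 8+ grants the permission per app instead).</p>
- <pre><code># Hedged sketch: read the legacy global "unknown sources" flag over ADB.
- # Assumes adb is on PATH and USB debugging is enabled (Android 7 and earlier).
- import subprocess
-
- out = subprocess.run(
-     ["adb", "shell", "settings", "get", "secure", "install_non_market_apps"],
-     capture_output=True, text=True, check=True,
- ).stdout.strip()
- print("unknown sources enabled" if out == "1" else "disabled or not applicable")
- </code></pre>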
83
- <h3>Step 3: Download and install the hack mod apk file</h3>
84
- <p>The final step is to download and install the hack mod apk file for Euro Truck Driver 2018 from the reliable source that you have chosen. To download and install the hack mod apk file for Euro Truck Driver 2018, you need to follow these steps:</p>
85
- <ol>
86
- <li>Go to the website or platform that provides the hack mod apk file for Euro Truck Driver 2018 and find the download link or button.</li>
87
- <li>Tap on the download link or button and wait for the hack mod apk file to be downloaded on your device. The download time might vary depending on your internet speed and file size.</li>
88
- <li>Once the hack mod apk file is downloaded, go to your device's <strong>File Manager</strong> app and locate the hack mod apk file in your <strong>Downloads</strong> folder or any other folder where you have saved it.</li>
89
- <li>Tap on the hack mod apk file and a pop-up window might appear, asking you if you want to install the app. Tap on <strong>Install</strong> or <strong>Yes</strong> to start the installation process.</li>
90
- <li>The installation process might take a few seconds or minutes, depending on your device's performance and file size. You might also see some progress bars or indicators showing you the installation status.</li>
91
- <li>Once the installation process is completed, a message might appear, telling you that the app is installed successfully. Tap on <strong>Open</strong> or <strong>Done</strong> to launch or exit the app.</li>
92
- </ol>
93
- <p>You have now downloaded and installed the hack mod apk file for Euro Truck Driver 2018 and you can start playing the game with unlimited money and features.</p>
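- <p>Before tapping Install, it is worth confirming that the file you downloaded is byte-for-byte the file the site published. The Python sketch below compares the APK's SHA-256 hash against a checksum from the download page; the file name and the expected value are placeholders for illustration, not real values.</p>
- <pre><code># Hedged sketch: verify a downloaded APK's SHA-256 before sideloading it.
- import hashlib
-
- def sha256_of(path):
-     h = hashlib.sha256()
-     with open(path, "rb") as f:
-         for chunk in iter(lambda: f.read(1024 * 1024), b""):
-             h.update(chunk)
-     return h.hexdigest()
-
- expected = "PASTE_SHA256_FROM_DOWNLOAD_PAGE"  # placeholder, not a real value
- actual = sha256_of("euro-truck-driver-2018-mod.apk")  # hypothetical file name
- if actual != expected:
-     raise SystemExit("Checksum mismatch - do not install this APK")
- print("Checksum OK:", actual)
- </code></pre>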
94
- <h2>Alternatives to Euro Truck Driver 2018 Hack Mod APK</h2>
95
- <h3>Use legitimate cheats and tips</h3>
96
- <p>If you are not comfortable with using a hack mod apk for Euro Truck Driver 2018, you can also use some <strong>legitimate cheats and tips</strong> that can help you improve your gameplay and performance in the game. Legitimate cheats and tips are the ones that are provided by the game developers or other reputable sources that do not involve any hacking or modification of the game files. They are usually based on some tricks, strategies, or secrets that can help you complete missions, earn money, unlock trucks, etc. more easily and quickly. Some examples of legitimate cheats and tips for Euro Truck Driver 2018 are:</p>
97
- <ul>
98
- <li><strong>Earn more money by driving longer distances</strong>: The longer you drive, the more money you earn in Euro Truck Driver 2018. Therefore, you should choose missions that require you to drive across different countries or regions, as they will pay you more than missions that require you to drive within a city or a country.</li>
99
- <li><strong>Earn more money by driving safely</strong>: The safer you drive, the more money you earn in Euro Truck Driver 2018. Avoid crashing, speeding, and running red lights, as they will reduce your earnings and damage your truck. Follow the traffic rules and signs, such as speed limits and lane markings, as doing so will increase your earnings and reputation.</li>
100
- <li><strong>Earn more money by delivering special cargo</strong>: The more special cargo you deliver, the more money you earn in Euro Truck Driver 2018. Special cargo consists of goods that are fragile, dangerous, or oversized, such as cars, chemicals, and livestock. These loads are more challenging and rewarding to deliver, as they require more skill and care. You can find special cargo missions by looking for the yellow icons on the map or in the job market.</li>
101
- <li><strong>Unlock new trucks by reaching higher levels</strong>: The higher level you reach, the more trucks you unlock in Euro Truck Driver 2018. Therefore, you should try to level up as fast as possible by completing missions, delivering goods, driving safely, etc. You can also use some boosters or bonuses that can increase your experience points (XP) earnings, such as driving at night, using a GPS, etc. You can check your level and XP progress on the top left corner of the screen.</li>
102
- <li><strong>Unlock new trailers by buying them from the garage</strong>: The more trailers you buy, the more trailers you unlock in Euro Truck Driver 2018. Therefore, you should save up some money and buy new trailers from the garage, which is located on the map or the menu. You can choose from different types of trailers, such as flatbeds, refrigerated trailers, car carriers, tankers, etc. Each trailer has its own price and specifications, such as cargo capacity, weight, length, etc. You can also sell your old trailers if you don't need them anymore.</li>
103
- </ul>
104
- <p>These are some of the legitimate cheats and tips that you can use for Euro Truck Driver 2018. You can search online for more cheats and tips from other sources, such as YouTube videos, blogs, forums, etc.</p>
105
- <h3>Play other truck simulator games</h3>
106
- <p>If you are bored or dissatisfied with Euro Truck Driver 2018, you can also play some <strong>other truck simulator games</strong> that might offer you a different or better gaming experience. There are many other truck simulator games available on the market, such as American Truck Simulator, Truck Simulator USA, World Truck Driving Simulator, Heavy Truck Simulator, etc. Each game has its own features and advantages, such as different locations, trucks, graphics, gameplay modes, etc. You can compare and contrast different truck simulator games and choose the one that suits your preference and taste. You can also play multiple truck simulator games at the same time and switch between them whenever you want.</p>
107
- <h2>Conclusion</h2>
108
- <p>In conclusion, Euro Truck Driver 2018 is a great truck driving simulation game that offers a lot of fun and realism for truck enthusiasts and gamers alike. Some players might still want to use a hack mod apk to get unlimited money and features. While a hack mod apk has some benefits, such as unlimited money, unlocked trucks and trailers, no ads, and no root required, it also carries real risks: possible malware or virus infection, a ban from online multiplayer mode, and loss of game progress and data. Be careful if you use one, and consider the alternatives instead, such as legitimate cheats and tips or other truck simulator games.</p>
109
- <h2>FAQs</h2>
110
- <p>Here are some of the frequently asked questions (FAQs) about Euro Truck Driver 2018 hack mod apk:</p>
111
- <ol>
112
- <li><strong>Q: Is Euro Truck Driver 2018 hack mod apk safe to use?</strong></li>
113
- <li>A: Euro Truck Driver 2018 hack mod apk is not safe to use unless you download it from a reliable source that provides genuine and safe hack mod apk files. However, even if you download it from a reliable source, you might still face risks such as malware or virus infection, a ban from online multiplayer mode, or loss of game progress and data. Therefore, you should always scan the file with a reliable antivirus program before installing it, back up your device's data, and use a VPN to protect your privacy and security.</li>
114
- <li><strong>Q: How can I get unlimited money in Euro Truck Driver 2018 without using a hack mod apk?</strong></li>
115
- <li>A: You can get unlimited money in Euro Truck Driver 2018 without using a hack mod apk by using some legitimate cheats and tips that can help you earn more money in the game. Some of these cheats and tips are: driving longer distances, driving safely, delivering special cargo, etc. You can also use some boosters or bonuses that can increase your money earnings, such as driving at night, using a GPS, etc. You can search online for more cheats and tips from other sources, such as YouTube videos, blogs, forums, etc.</li>
116
- <li><strong>Q: What are the best trucks and trailers in Euro Truck Driver 2018?</strong></li>
117
- <li>A: The best trucks and trailers in Euro Truck Driver 2018 depend on your personal preference and taste, as well as your mission requirements and budget. However, some of the most popular and powerful trucks and trailers in the game are: Mercedes-Benz Actros (truck), Scania R730 (truck), Volvo FH16 (truck), DAF XF105 (truck), Iveco Stralis (truck), Car Carrier (trailer), Refrigerated Trailer (trailer), Tanker (trailer), Flatbed (trailer), etc. You can compare and contrast different trucks and trailers based on their specifications, such as speed, power, fuel consumption, cargo capacity, etc.</li>
118
- <li><strong>Q: How can I play online multiplayer mode in Euro Truck Driver 2018?</strong></li>
119
- <li>A: You can play online multiplayer mode in Euro Truck Driver 2018 by tapping on the <strong>Multiplayer</strong> button on the main menu of the game. You will then be able to join or create a room with other players online in real-time. You can also invite your friends or other drivers to join your room by sharing your room code or link. You can then choose your truck, trailer, location, weather, time, etc. and start driving with other players online. You can also chat with other players online by using the voice chat or text chat features.</li>
120
- <li><strong>Q: How can I update Euro Truck Driver 2018 to the latest version?</strong></li>
121
- <li>A: You can update Euro Truck Driver 2018 to the latest version by going to the official app store or developer website of your device and checking for any available updates for the game. You can also enable the <strong>Auto-update</strong> feature on your device settings to automatically update the game whenever there is a new version available. However, if you are using a hack mod apk for Euro Truck Driver 2018, you might not be able to update the game to the latest version, as the hack mod apk might not be compatible with the official updates from the game developers. You might also lose your game progress and data if you update the game to the latest version while using a hack mod apk.</li>
122
- </ol>
 
spaces/1vash/demo-flask-docker-template/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Demo Flask Docker Template
3
- emoji: 🐠
4
- colorFrom: indigo
5
- colorTo: blue
6
- sdk: docker
7
- app_port: 5000
8
- models: ['1vash/mnist_demo_model']
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/A00001/bingothoo/src/components/ui/alert-dialog.tsx DELETED
@@ -1,150 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
- import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog'
5
-
6
- import { cn } from '@/lib/utils'
7
- import { buttonVariants } from '@/components/ui/button'
8
-
9
- const AlertDialog = AlertDialogPrimitive.Root
10
-
11
- const AlertDialogTrigger = AlertDialogPrimitive.Trigger
12
-
13
- const AlertDialogPortal = ({
14
- className,
15
- children,
16
- ...props
17
- }: AlertDialogPrimitive.AlertDialogPortalProps) => (
18
- <AlertDialogPrimitive.Portal className={cn(className)} {...props}>
19
- <div className="fixed inset-0 z-50 flex items-end justify-center sm:items-center">
20
- {children}
21
- </div>
22
- </AlertDialogPrimitive.Portal>
23
- )
24
- AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName
25
-
26
- const AlertDialogOverlay = React.forwardRef<
27
- React.ElementRef<typeof AlertDialogPrimitive.Overlay>,
28
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Overlay>
29
- >(({ className, children, ...props }, ref) => (
30
- <AlertDialogPrimitive.Overlay
31
- className={cn(
32
- 'fixed inset-0 z-50 bg-background/80 backdrop-blur-sm transition-opacity animate-in fade-in',
33
- className
34
- )}
35
- {...props}
36
- ref={ref}
37
- />
38
- ))
39
- AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName
40
-
41
- const AlertDialogContent = React.forwardRef<
42
- React.ElementRef<typeof AlertDialogPrimitive.Content>,
43
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Content>
44
- >(({ className, ...props }, ref) => (
45
- <AlertDialogPortal>
46
- <AlertDialogOverlay />
47
- <AlertDialogPrimitive.Content
48
- ref={ref}
49
- className={cn(
50
- 'fixed z-50 grid w-full max-w-lg scale-100 gap-4 border bg-background p-6 opacity-100 shadow-lg animate-in fade-in-90 slide-in-from-bottom-10 sm:rounded-lg sm:zoom-in-90 sm:slide-in-from-bottom-0 md:w-full',
51
- className
52
- )}
53
- {...props}
54
- />
55
- </AlertDialogPortal>
56
- ))
57
- AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName
58
-
59
- const AlertDialogHeader = ({
60
- className,
61
- ...props
62
- }: React.HTMLAttributes<HTMLDivElement>) => (
63
- <div
64
- className={cn(
65
- 'flex flex-col space-y-2 text-center sm:text-left',
66
- className
67
- )}
68
- {...props}
69
- />
70
- )
71
- AlertDialogHeader.displayName = 'AlertDialogHeader'
72
-
73
- const AlertDialogFooter = ({
74
- className,
75
- ...props
76
- }: React.HTMLAttributes<HTMLDivElement>) => (
77
- <div
78
- className={cn(
79
- 'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
80
- className
81
- )}
82
- {...props}
83
- />
84
- )
85
- AlertDialogFooter.displayName = 'AlertDialogFooter'
86
-
87
- const AlertDialogTitle = React.forwardRef<
88
- React.ElementRef<typeof AlertDialogPrimitive.Title>,
89
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Title>
90
- >(({ className, ...props }, ref) => (
91
- <AlertDialogPrimitive.Title
92
- ref={ref}
93
- className={cn('text-lg font-semibold', className)}
94
- {...props}
95
- />
96
- ))
97
- AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName
98
-
99
- const AlertDialogDescription = React.forwardRef<
100
- React.ElementRef<typeof AlertDialogPrimitive.Description>,
101
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Description>
102
- >(({ className, ...props }, ref) => (
103
- <AlertDialogPrimitive.Description
104
- ref={ref}
105
- className={cn('text-sm text-muted-foreground', className)}
106
- {...props}
107
- />
108
- ))
109
- AlertDialogDescription.displayName =
110
- AlertDialogPrimitive.Description.displayName
111
-
112
- const AlertDialogAction = React.forwardRef<
113
- React.ElementRef<typeof AlertDialogPrimitive.Action>,
114
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Action>
115
- >(({ className, ...props }, ref) => (
116
- <AlertDialogPrimitive.Action
117
- ref={ref}
118
- className={cn(buttonVariants(), className)}
119
- {...props}
120
- />
121
- ))
122
- AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName
123
-
124
- const AlertDialogCancel = React.forwardRef<
125
- React.ElementRef<typeof AlertDialogPrimitive.Cancel>,
126
- React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Cancel>
127
- >(({ className, ...props }, ref) => (
128
- <AlertDialogPrimitive.Cancel
129
- ref={ref}
130
- className={cn(
131
- buttonVariants({ variant: 'outline' }),
132
- 'mt-2 sm:mt-0',
133
- className
134
- )}
135
- {...props}
136
- />
137
- ))
138
- AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName
139
-
140
- export {
141
- AlertDialog,
142
- AlertDialogTrigger,
143
- AlertDialogContent,
144
- AlertDialogHeader,
145
- AlertDialogFooter,
146
- AlertDialogTitle,
147
- AlertDialogDescription,
148
- AlertDialogAction,
149
- AlertDialogCancel
150
- }
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/tts_utils.py DELETED
@@ -1,398 +0,0 @@
1
- from collections import defaultdict
2
- import torch
3
- import torch.nn.functional as F
4
-
5
-
6
- def make_positions(tensor, padding_idx):
7
- """Replace non-padding symbols with their position numbers.
8
-
9
- Position numbers begin at padding_idx+1. Padding symbols are ignored.
10
- """
11
- # The series of casts and type-conversions here are carefully
12
- # balanced to both work with ONNX export and XLA. In particular XLA
13
- # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
14
- # how to handle the dtype kwarg in cumsum.
15
- mask = tensor.ne(padding_idx).int()
16
- return (
17
- torch.cumsum(mask, dim=1).type_as(mask) * mask
18
- ).long() + padding_idx
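- # Editor's sketch (not part of the original file), assuming padding_idx=0, so
- # positions start at padding_idx + 1 = 1 and padded slots keep padding_idx:
- # make_positions(torch.LongTensor([[5, 6, 0]]), padding_idx=0) -> tensor([[1, 2, 0]])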
19
-
20
-
21
- def softmax(x, dim):
22
- return F.softmax(x, dim=dim, dtype=torch.float32)
23
-
24
-
25
- def sequence_mask(lengths, maxlen, dtype=torch.bool):
26
- if maxlen is None:
27
- maxlen = lengths.max()
28
- mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
29
- mask = mask.type(dtype)  # .type() is not in-place; rebind the converted tensor
30
- return mask
31
-
32
-
33
- INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
34
-
35
-
36
- def _get_full_incremental_state_key(module_instance, key):
37
- module_name = module_instance.__class__.__name__
38
-
39
- # assign a unique ID to each module instance, so that incremental state is
40
- # not shared across module instances
41
- if not hasattr(module_instance, '_instance_id'):
42
- INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
43
- module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
44
-
45
- return '{}.{}.{}'.format(module_name, module_instance._instance_id, key)
46
-
47
-
48
- def get_incremental_state(module, incremental_state, key):
49
- """Helper for getting incremental state for an nn.Module."""
50
- full_key = _get_full_incremental_state_key(module, key)
51
- if incremental_state is None or full_key not in incremental_state:
52
- return None
53
- return incremental_state[full_key]
54
-
55
-
56
- def set_incremental_state(module, incremental_state, key, value):
57
- """Helper for setting incremental state for an nn.Module."""
58
- if incremental_state is not None:
59
- full_key = _get_full_incremental_state_key(module, key)
60
- incremental_state[full_key] = value
61
-
62
-
63
- def fill_with_neg_inf(t):
64
- """FP16-compatible function that fills a tensor with -inf."""
65
- return t.float().fill_(float('-inf')).type_as(t)
66
-
67
-
68
- def fill_with_neg_inf2(t):
69
- """FP16-compatible function that fills a tensor with -inf."""
70
- return t.float().fill_(-1e8).type_as(t)
71
-
72
-
73
- def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None):
74
- '''
75
- attn: bs x L_t x L_s
76
- '''
77
- if src_padding_mask is not None:
78
- attn = attn * (1 - src_padding_mask.float())[:, None, :]
79
-
80
- if tgt_padding_mask is not None:
81
- attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
82
-
83
- focus_rate = attn.max(-1).values.sum(-1)
84
- focus_rate = focus_rate / attn.sum(-1).sum(-1)
85
- return focus_rate
86
-
87
-
88
- def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None):
89
- '''
90
- attn: bs x L_t x L_s
91
- '''
92
- src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False)
93
- if src_padding_mask is not None:
94
- src_mask |= src_padding_mask
95
- if src_seg_mask is not None:
96
- src_mask |= src_seg_mask
97
-
98
- attn = attn * (1 - src_mask.float())[:, None, :]
99
- if tgt_padding_mask is not None:
100
- attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
101
-
102
- phone_coverage_rate = attn.max(1).values.sum(-1)
103
- # phone_coverage_rate = phone_coverage_rate / attn.sum(-1).sum(-1)
104
- phone_coverage_rate = phone_coverage_rate / (1 - src_mask.float()).sum(-1)
105
- return phone_coverage_rate
106
-
107
-
108
- def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None,
109
- band_mask_factor=5, band_width=50):
110
- '''
111
- attn: bx x L_t x L_s
112
- attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens
113
-
114
- diagonal: y=k*x (k=attn_ks, x:output, y:input)
115
- 1 0 0
116
- 0 1 0
117
- 0 0 1
118
- y>=k*(x-width) and y<=k*(x+width):1
119
- else:0
120
- '''
121
- # width = min(target_len/band_mask_factor, 50)
122
- width1 = target_len / band_mask_factor
123
- width2 = target_len.new(target_len.size()).fill_(band_width)
124
- width = torch.where(width1 < width2, width1, width2).float()
125
- base = torch.ones(attn.size()).to(attn.device)
126
- zero = torch.zeros(attn.size()).to(attn.device)
127
- x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base
128
- y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base
129
- cond = (y - attn_ks[:, None, None] * x)
130
- cond1 = cond + attn_ks[:, None, None] * width[:, None, None]
131
- cond2 = cond - attn_ks[:, None, None] * width[:, None, None]
132
- mask1 = torch.where(cond1 < 0, zero, base)
133
- mask2 = torch.where(cond2 > 0, zero, base)
134
- mask = mask1 * mask2
135
-
136
- if src_padding_mask is not None:
137
- attn = attn * (1 - src_padding_mask.float())[:, None, :]
138
- if tgt_padding_mask is not None:
139
- attn = attn * (1 - tgt_padding_mask.float())[:, :, None]
140
-
141
- diagonal_attn = attn * mask
142
- diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1)
143
- return diagonal_focus_rate, mask
144
-
145
-
146
- def select_attn(attn_logits, type='best'):
147
- """
148
-
149
- :param attn_logits: [n_layers, B, n_head, T_sp, T_txt]
150
- :return:
151
- """
152
- encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2)
153
- # [n_layers * n_head, B, T_sp, T_txt]
154
- encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1)
155
- if type == 'best':
156
- indices = encdec_attn.max(-1).values.sum(-1).argmax(0)
157
- encdec_attn = encdec_attn.gather(
158
- 0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0]
159
- return encdec_attn
160
- elif type == 'mean':
161
- return encdec_attn.mean(0)
162
-
163
-
164
- def make_pad_mask(lengths, xs=None, length_dim=-1):
165
- """Make mask tensor containing indices of padded part.
166
- Args:
167
- lengths (LongTensor or List): Batch of lengths (B,).
168
- xs (Tensor, optional): The reference tensor.
169
- If set, masks will be the same shape as this tensor.
170
- length_dim (int, optional): Dimension indicator of the above tensor.
171
- See the example.
172
- Returns:
173
- Tensor: Mask tensor containing indices of padded part.
174
- dtype=torch.uint8 in PyTorch 1.2-
175
- dtype=torch.bool in PyTorch 1.2+ (including 1.2)
176
- Examples:
177
- With only lengths.
178
- >>> lengths = [5, 3, 2]
179
- >>> make_pad_mask(lengths)
180
- masks = [[0, 0, 0, 0 ,0],
181
- [0, 0, 0, 1, 1],
182
- [0, 0, 1, 1, 1]]
183
- With the reference tensor.
184
- >>> xs = torch.zeros((3, 2, 4))
185
- >>> make_pad_mask(lengths, xs)
186
- tensor([[[0, 0, 0, 0],
187
- [0, 0, 0, 0]],
188
- [[0, 0, 0, 1],
189
- [0, 0, 0, 1]],
190
- [[0, 0, 1, 1],
191
- [0, 0, 1, 1]]], dtype=torch.uint8)
192
- >>> xs = torch.zeros((3, 2, 6))
193
- >>> make_pad_mask(lengths, xs)
194
- tensor([[[0, 0, 0, 0, 0, 1],
195
- [0, 0, 0, 0, 0, 1]],
196
- [[0, 0, 0, 1, 1, 1],
197
- [0, 0, 0, 1, 1, 1]],
198
- [[0, 0, 1, 1, 1, 1],
199
- [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
200
- With the reference tensor and dimension indicator.
201
- >>> xs = torch.zeros((3, 6, 6))
202
- >>> make_pad_mask(lengths, xs, 1)
203
- tensor([[[0, 0, 0, 0, 0, 0],
204
- [0, 0, 0, 0, 0, 0],
205
- [0, 0, 0, 0, 0, 0],
206
- [0, 0, 0, 0, 0, 0],
207
- [0, 0, 0, 0, 0, 0],
208
- [1, 1, 1, 1, 1, 1]],
209
- [[0, 0, 0, 0, 0, 0],
210
- [0, 0, 0, 0, 0, 0],
211
- [0, 0, 0, 0, 0, 0],
212
- [1, 1, 1, 1, 1, 1],
213
- [1, 1, 1, 1, 1, 1],
214
- [1, 1, 1, 1, 1, 1]],
215
- [[0, 0, 0, 0, 0, 0],
216
- [0, 0, 0, 0, 0, 0],
217
- [1, 1, 1, 1, 1, 1],
218
- [1, 1, 1, 1, 1, 1],
219
- [1, 1, 1, 1, 1, 1],
220
- [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
221
- >>> make_pad_mask(lengths, xs, 2)
222
- tensor([[[0, 0, 0, 0, 0, 1],
223
- [0, 0, 0, 0, 0, 1],
224
- [0, 0, 0, 0, 0, 1],
225
- [0, 0, 0, 0, 0, 1],
226
- [0, 0, 0, 0, 0, 1],
227
- [0, 0, 0, 0, 0, 1]],
228
- [[0, 0, 0, 1, 1, 1],
229
- [0, 0, 0, 1, 1, 1],
230
- [0, 0, 0, 1, 1, 1],
231
- [0, 0, 0, 1, 1, 1],
232
- [0, 0, 0, 1, 1, 1],
233
- [0, 0, 0, 1, 1, 1]],
234
- [[0, 0, 1, 1, 1, 1],
235
- [0, 0, 1, 1, 1, 1],
236
- [0, 0, 1, 1, 1, 1],
237
- [0, 0, 1, 1, 1, 1],
238
- [0, 0, 1, 1, 1, 1],
239
- [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
240
- """
241
- if length_dim == 0:
242
- raise ValueError("length_dim cannot be 0: {}".format(length_dim))
243
-
244
- if not isinstance(lengths, list):
245
- lengths = lengths.tolist()
246
- bs = int(len(lengths))
247
- if xs is None:
248
- maxlen = int(max(lengths))
249
- else:
250
- maxlen = xs.size(length_dim)
251
-
252
- seq_range = torch.arange(0, maxlen, dtype=torch.int64)
253
- seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
254
- seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
255
- mask = seq_range_expand >= seq_length_expand
256
-
257
- if xs is not None:
258
- assert xs.size(0) == bs, (xs.size(0), bs)
259
-
260
- if length_dim < 0:
261
- length_dim = xs.dim() + length_dim
262
- # ind = (:, None, ..., None, :, , None, ..., None)
263
- ind = tuple(
264
- slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
265
- )
266
- mask = mask[ind].expand_as(xs).to(xs.device)
267
- return mask
268
-
269
-
270
- def make_non_pad_mask(lengths, xs=None, length_dim=-1):
271
- """Make mask tensor containing indices of non-padded part.
272
- Args:
273
- lengths (LongTensor or List): Batch of lengths (B,).
274
- xs (Tensor, optional): The reference tensor.
275
- If set, masks will be the same shape as this tensor.
276
- length_dim (int, optional): Dimension indicator of the above tensor.
277
- See the example.
278
- Returns:
279
- ByteTensor: mask tensor containing indices of padded part.
280
- dtype=torch.uint8 in PyTorch 1.2-
281
- dtype=torch.bool in PyTorch 1.2+ (including 1.2)
282
- Examples:
283
- With only lengths.
284
- >>> lengths = [5, 3, 2]
285
- >>> make_non_pad_mask(lengths)
286
- masks = [[1, 1, 1, 1 ,1],
287
- [1, 1, 1, 0, 0],
288
- [1, 1, 0, 0, 0]]
289
- With the reference tensor.
290
- >>> xs = torch.zeros((3, 2, 4))
291
- >>> make_non_pad_mask(lengths, xs)
292
- tensor([[[1, 1, 1, 1],
293
- [1, 1, 1, 1]],
294
- [[1, 1, 1, 0],
295
- [1, 1, 1, 0]],
296
- [[1, 1, 0, 0],
297
- [1, 1, 0, 0]]], dtype=torch.uint8)
298
- >>> xs = torch.zeros((3, 2, 6))
299
- >>> make_non_pad_mask(lengths, xs)
300
- tensor([[[1, 1, 1, 1, 1, 0],
301
- [1, 1, 1, 1, 1, 0]],
302
- [[1, 1, 1, 0, 0, 0],
303
- [1, 1, 1, 0, 0, 0]],
304
- [[1, 1, 0, 0, 0, 0],
305
- [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
306
- With the reference tensor and dimension indicator.
307
- >>> xs = torch.zeros((3, 6, 6))
308
- >>> make_non_pad_mask(lengths, xs, 1)
309
- tensor([[[1, 1, 1, 1, 1, 1],
310
- [1, 1, 1, 1, 1, 1],
311
- [1, 1, 1, 1, 1, 1],
312
- [1, 1, 1, 1, 1, 1],
313
- [1, 1, 1, 1, 1, 1],
314
- [0, 0, 0, 0, 0, 0]],
315
- [[1, 1, 1, 1, 1, 1],
316
- [1, 1, 1, 1, 1, 1],
317
- [1, 1, 1, 1, 1, 1],
318
- [0, 0, 0, 0, 0, 0],
319
- [0, 0, 0, 0, 0, 0],
320
- [0, 0, 0, 0, 0, 0]],
321
- [[1, 1, 1, 1, 1, 1],
322
- [1, 1, 1, 1, 1, 1],
323
- [0, 0, 0, 0, 0, 0],
324
- [0, 0, 0, 0, 0, 0],
325
- [0, 0, 0, 0, 0, 0],
326
- [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
327
- >>> make_non_pad_mask(lengths, xs, 2)
328
- tensor([[[1, 1, 1, 1, 1, 0],
329
- [1, 1, 1, 1, 1, 0],
330
- [1, 1, 1, 1, 1, 0],
331
- [1, 1, 1, 1, 1, 0],
332
- [1, 1, 1, 1, 1, 0],
333
- [1, 1, 1, 1, 1, 0]],
334
- [[1, 1, 1, 0, 0, 0],
335
- [1, 1, 1, 0, 0, 0],
336
- [1, 1, 1, 0, 0, 0],
337
- [1, 1, 1, 0, 0, 0],
338
- [1, 1, 1, 0, 0, 0],
339
- [1, 1, 1, 0, 0, 0]],
340
- [[1, 1, 0, 0, 0, 0],
341
- [1, 1, 0, 0, 0, 0],
342
- [1, 1, 0, 0, 0, 0],
343
- [1, 1, 0, 0, 0, 0],
344
- [1, 1, 0, 0, 0, 0],
345
- [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
346
- """
347
- return ~make_pad_mask(lengths, xs, length_dim)
348
-
349
-
350
- def get_mask_from_lengths(lengths):
351
- max_len = torch.max(lengths).item()
352
- ids = torch.arange(0, max_len).to(lengths.device)
353
- mask = (ids < lengths.unsqueeze(1)).bool()
354
- return mask
355
-
356
-
357
- def group_hidden_by_segs(h, seg_ids, max_len):
358
- """
359
-
360
- :param h: [B, T, H]
361
- :param seg_ids: [B, T]
362
- :return: h_ph: [B, T_ph, H]
363
- """
364
- B, T, H = h.shape
365
- h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
366
- all_ones = h.new_ones(h.shape[:2])
367
- cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
368
- h_gby_segs = h_gby_segs[:, 1:]
369
- cnt_gby_segs = cnt_gby_segs[:, 1:]
370
- h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
371
- return h_gby_segs, cnt_gby_segs
372
-
373
- def mel2token_to_dur(mel2token, T_txt=None, max_dur=None):
374
- is_torch = isinstance(mel2token, torch.Tensor)
375
- has_batch_dim = True
376
- if not is_torch:
377
- mel2token = torch.LongTensor(mel2token)
378
- if T_txt is None:
379
- T_txt = mel2token.max()
380
- if len(mel2token.shape) == 1:
381
- mel2token = mel2token[None, ...]
382
- has_batch_dim = False
383
- B, _ = mel2token.shape
384
- dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token))
385
- dur = dur[:, 1:]
386
- if max_dur is not None:
387
- dur = dur.clamp(max=max_dur)
388
- if not is_torch:
389
- dur = dur.numpy()
390
- if not has_batch_dim:
391
- dur = dur[0]
392
- return dur
393
-
394
- def expand_word2ph(word_encoding, ph2word):
395
- word_encoding = F.pad(word_encoding,[0,0,1,0])
396
- ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]])
397
- out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H]
398
- return out
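- # Editor's sketch (not part of the original file): mel2token_to_dur counts frames
- # per token; e.g. a 5-frame alignment over 3 tokens (token ids are 1-indexed):
- # mel2token_to_dur(torch.LongTensor([1, 1, 2, 3, 3]), T_txt=3) -> tensor([2, 1, 2])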
 
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/utils.py DELETED
@@ -1,26 +0,0 @@
1
- import argparse
2
- import yaml
3
- import sys
4
-
5
- def read_config_as_args(config_path, args=None, is_config_str=False):
6
- return_dict = {}
7
-
8
- if config_path is not None:
9
- if is_config_str:
10
- yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
11
- else:
12
- with open(config_path, "r") as f:
13
- yml_config = yaml.load(f, Loader=yaml.FullLoader)
14
-
15
- if args is not None:
16
- for k, v in yml_config.items():
17
- if k in args.__dict__:
18
- args.__dict__[k] = v
19
- else:
20
- sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
21
- else:
22
- for k, v in yml_config.items():
23
- return_dict[k] = v
24
-
25
- args = args if args is not None else return_dict
26
- return argparse.Namespace(**args)
 
spaces/AIatUIUC/CodeLATS/executors/executor_utils.py DELETED
@@ -1,46 +0,0 @@
1
-
2
- def timeout_handler(_, __):
3
- raise TimeoutError()
4
-
5
- import os, json
6
- def to_jsonl(dict_data, file_path):
7
- with open(file_path, 'a') as file:
8
- json_line = json.dumps(dict_data)
9
- file.write(json_line + os.linesep)
10
-
11
- from threading import Thread
12
- class PropagatingThread(Thread):
13
- def run(self):
14
- self.exc = None
15
- try:
16
- if hasattr(self, '_Thread__target'):
17
- # Thread uses name mangling prior to Python 3.
18
- self.ret = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
19
- else:
20
- self.ret = self._target(*self._args, **self._kwargs)
21
- except BaseException as e:
22
- self.exc = e
23
-
24
- def join(self, timeout=None):
25
- super(PropagatingThread, self).join(timeout)
26
- if self.exc:
27
- raise self.exc
28
- return self.ret
29
-
30
-
31
- def function_with_timeout(func, args, timeout):
32
- result_container = []
33
-
34
- def wrapper():
35
- result_container.append(func(*args))
36
-
37
- thread = PropagatingThread(target=wrapper)
38
- thread.start()
39
- thread.join(timeout)
40
-
41
- if thread.is_alive():
42
- raise TimeoutError()
43
- else:
44
- return result_container[0]
45
-
46
-
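- # Editor's sketch (not part of the original file): function_with_timeout runs a
- # callable on a helper thread and raises TimeoutError if it outlives the deadline.
- # function_with_timeout(pow, (2, 10), timeout=1) -> 1024
- # function_with_timeout(time.sleep, (5,), timeout=1) -> raises TimeoutError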
 
spaces/AchyuthGamer/ImMagician/app.py DELETED
@@ -1,190 +0,0 @@
1
- import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
- import random
6
- import string
7
- import time
8
- from queue import Queue
9
- from threading import Thread
10
- import emoji
11
-
12
- text_gen=gr.Interface.load("spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion")
13
- def get_prompts(prompt_text):
14
- if prompt_text:
15
- return text_gen(prompt_text + ", realistic, 8k, cyberpunk, highly detailed, ultra super realism, realism, high graphics, key visual, intricate, highly detailed, breathtaking beauty, precise lineart, vibrant, comprehensive cinematic, trending on DIGITAL ART WEBSITE, best quality, ultra sharp focus, 8k, artgerm")
16
- else:
17
- return text_gen("")
18
- proc1=gr.Interface.load("models/AchyuthGamer/ImMagician-Fantasy")
19
-
20
- def restart_script_periodically():
21
- while True:
22
- random_time = random.randint(540, 600)
23
- time.sleep(random_time)
24
- os.execl(sys.executable, sys.executable, *sys.argv)
25
-
26
-
27
- restart_thread = Thread(target=restart_script_periodically, daemon=True)
28
- restart_thread.start()
29
-
30
-
31
- queue = Queue()
32
- queue_threshold = 50
33
-
34
- #Don't add noise to the first picture no matter what (the point of noise is to get varied outputs, the first one doesn't need to vary about anything)
35
- def noadd_random_noise(prompt, noise_level=0.00):
36
- if noise_level == 0:
37
- noise_level = 0.00
38
- percentage_noise = noise_level * 5
39
- num_noise_chars = int(len(prompt) * (percentage_noise/100))
40
- noise_indices = random.sample(range(len(prompt)), num_noise_chars)
41
- prompt_list = list(prompt)
42
- noise_chars = list(string.ascii_letters + string.punctuation + '' + string.digits)
43
- noise_chars.extend([''])
44
- for index in noise_indices:
45
- prompt_list[index] = random.choice(noise_chars)
46
- return "".join(prompt_list)
47
-
48
- #normal behavior
49
- def add_random_noise(prompt, noise_level=0.00):
50
- if noise_level == 0:
51
- noise_level = 0.00
52
- percentage_noise = noise_level * 5
53
- num_noise_chars = int(len(prompt) * (percentage_noise/100))
54
- noise_indices = random.sample(range(len(prompt)), num_noise_chars)
55
- prompt_list = list(prompt)
56
- noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
57
- noise_chars.extend(['😍', 'beautiful', '😂', '🤔', '😊', '🤗', '😭', '🙄', 'pretty', '🤯', '🤫', '🥴', 'sitting', '🤩', '🥳', '😔', '😩', '🤪', '😇', 'retro', '😈', '👹', 'masterpiece', '🤖', '👽', 'high quality', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', 'visible', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', 'cute', 'kawaii', 'little', 'photo', 'movie', 'still'])
58
- for index in noise_indices:
59
- prompt_list[index] = random.choice(noise_chars)
60
- return "".join(prompt_list)
61
-
62
- def send_it1(inputs, noise_level, proc1=proc1):
63
- prompt_with_noise = noadd_random_noise(inputs, noise_level)
64
- while queue.qsize() >= queue_threshold:
65
- time.sleep(2)
66
- queue.put(prompt_with_noise)
67
- output1 = proc1(prompt_with_noise)
68
- return output1
69
-
70
- def send_it2(inputs, noise_level, proc1=proc1):
71
- prompt_with_noise = add_random_noise(inputs, noise_level)
72
- while queue.qsize() >= queue_threshold:
73
- time.sleep(2)
74
- queue.put(prompt_with_noise)
75
- output2 = proc1(prompt_with_noise)
76
- return output2
77
-
78
- def send_it3(inputs, noise_level, proc1=proc1):
79
- prompt_with_noise = add_random_noise(inputs, noise_level)
80
- while queue.qsize() >= queue_threshold:
81
- time.sleep(2)
82
- queue.put(prompt_with_noise)
83
- output3 = proc1(prompt_with_noise)
84
- return output3
85
-
86
- def send_it4(inputs, noise_level, proc1=proc1):
87
- prompt_with_noise = add_random_noise(inputs, noise_level)
88
- while queue.qsize() >= queue_threshold:
89
- time.sleep(2)
90
- queue.put(prompt_with_noise)
91
- output4 = proc1(prompt_with_noise)
92
- return output4
93
-
94
- with gr.Blocks(css='style.css') as demo:
95
- gr.HTML(
96
- """
97
- <div style="text-align: center; max-width: 800px; margin: 0 auto;">
98
- <div>
99
- <style>
100
- h1 {
101
- font-size: 4em;
102
- color: #ffffff;
103
- margin-top: 20px;
104
- margin-bottom: 20px;
105
- text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
106
- }
107
- </style>
108
- <body>
109
- <div class="center"><h1>ImMagician</h1>
110
- </div>
111
- </body>
112
- </div>
113
- <p style="margin-bottom: 10px; color: #ffaa66; font-size: 98%">
114
- 🤗 Celebrating 10000 views at blogger! 🤗</p>
115
- <p style="margin-bottom: 10px; color: #ffaa66; font-size: 98%">
116
- ❤️ Made by Achyuth! ❤️</a>
117
- </p>
118
- </div>
119
- """
120
- )
121
- with gr.Column(elem_id="col-container"):
122
- with gr.Row(variant="compact"):
123
- input_text = gr.Textbox(
124
- label="Short Prompt",
125
- show_label=False,
126
- max_lines=20,
127
- placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!",
128
- ).style(
129
- container=False,min_width=1200
130
- )
131
- see_prompts = gr.Button("✨Magic✨ ✨Prompt✨").style(full_width=False)
132
-
133
-
134
- with gr.Row(variant="compact"):
135
- prompt = gr.Textbox(
136
- label="Enter your prompt",
137
- show_label=False,
138
- max_lines=20,
139
- placeholder="Full Prompt",
140
- ).style(
141
- container=False,
142
- )
143
- run = gr.Button("Generate Images").style(full_width=False)
144
- with gr.Row():
145
- with gr.Row():
146
- #Now that the first box generates a picture with noise=0 having the default at 0 makes no sense as it'd generate the same image 6 times.
147
- noise_level = gr.Slider(minimum=0.5, maximum=3, step=0.1, label="Noise Level (0.1 or less was generating the same pic 6 times! 🤣)")
148
- gr.HTML(
149
- """
150
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
151
- <div>
152
- <body>
153
- <div class="center"><p style="margin-bottom: 10px; color: #ffffff;">Please allow up to 1 minute for each image to generate, for a total of 6 minutes max.</p>
154
- </div>
155
- </body>
156
- </div>
157
- </div>
158
- """
159
- )
160
- with gr.Row():
161
- with gr.Row():
162
- output1=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True)
163
- output2=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True)
164
- with gr.Row():
165
- with gr.Row():
166
- output3=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True)
167
- output4=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True)
168
-
169
- see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
170
- run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
171
- run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
172
- run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3])
173
- run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4])
174
-
175
- with gr.Row():
176
- gr.HTML(
177
- """
178
- <div class="footer">
179
- <p> Demo for <a href="https://huggingface.co/AchyuthGamer/ImMagician">ImMagician🪄</a> Stable Diffusion model
180
- </p>
181
- </div>
182
- <div class="acknowledgments" style="font-size: 115%; color: #ffffff;">
183
- <p> Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!
184
- </p>
185
- </div>
186
- """
187
- )
188
-
189
- demo.queue(concurrency_count=50)  # queue must be configured on `demo` before launch; `block` was undefined
190
- demo.launch(inline=True)
 
spaces/Adapter/CoAdapter/style.css DELETED
@@ -1,3 +0,0 @@
1
- h1 {
2
- text-align: center;
3
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ObjectFactory.js DELETED
@@ -1,20 +0,0 @@
1
- class ObjectFactory {
2
- constructor(scene) {
3
- this.scene = scene;
4
- this.displayList = scene.sys.displayList;
5
- this.updateList = scene.sys.updateList;
6
-
7
- scene.events.once('destroy', this.destroy, this);
8
- }
9
-
10
- destroy() {
11
- this.scene = null;
12
- this.displayList = null;
13
- this.updateList = null;
14
- }
15
-
16
- static register(type, callback) {
17
- ObjectFactory.prototype[type] = callback;
18
- }
19
- }
20
- export default ObjectFactory;
 
spaces/Alex132/togethercomputer-LLaMA-2-7B-32K/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Togethercomputer LLaMA 2 7B 32K
3
- emoji: 🦀
4
- colorFrom: gray
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.40.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/comm.py DELETED
@@ -1,131 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # File : comm.py
3
- # Author : Jiayuan Mao
4
- # Email : [email protected]
5
- # Date : 27/01/2018
6
- #
7
- # This file is part of Synchronized-BatchNorm-PyTorch.
8
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9
- # Distributed under MIT License.
10
-
11
- import queue
12
- import collections
13
- import threading
14
-
15
- __all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
16
-
17
-
18
- class FutureResult(object):
19
- """A thread-safe future implementation. Used only as one-to-one pipe."""
20
-
21
- def __init__(self):
22
- self._result = None
23
- self._lock = threading.Lock()
24
- self._cond = threading.Condition(self._lock)
25
-
26
- def put(self, result):
27
- with self._lock:
28
- assert self._result is None, 'Previous result hasn\'t been fetched.'
29
- self._result = result
30
- self._cond.notify()
31
-
32
- def get(self):
33
- with self._lock:
34
- if self._result is None:
35
- self._cond.wait()
36
-
37
- res = self._result
38
- self._result = None
39
- return res
40
-
41
-
42
- _MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
43
- _SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
44
-
45
-
46
- class SlavePipe(_SlavePipeBase):
47
- """Pipe for master-slave communication."""
48
-
49
- def run_slave(self, msg):
50
- self.queue.put((self.identifier, msg))
51
- ret = self.result.get()
52
- self.queue.put(True)
53
- return ret
54
-
55
-
56
- class SyncMaster(object):
57
- """An abstract `SyncMaster` object.
58
-
59
- - During the replication, as the data parallel will trigger a callback on each module, all slave devices should
60
- call `register(id)` and obtain a `SlavePipe` to communicate with the master.
61
- - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
62
- and passed to a registered callback.
63
- - After receiving the messages, the master device should gather the information and determine the message to pass
64
- back to each slave device.
65
- """
66
-
67
- def __init__(self, master_callback):
68
- """
69
-
70
- Args:
71
- master_callback: a callback to be invoked after having collected messages from slave devices.
72
- """
73
- self._master_callback = master_callback
74
- self._queue = queue.Queue()
75
- self._registry = collections.OrderedDict()
76
- self._activated = False
77
-
78
- def register_slave(self, identifier):
79
- """
80
- Register a slave device.
81
-
82
- Args:
83
- identifier: an identifier, usually the device id.
84
-
85
- Returns: a `SlavePipe` object which can be used to communicate with the master device.
86
-
87
- """
88
- if self._activated:
89
- assert self._queue.empty(), 'Queue is not clean before next initialization.'
90
- self._activated = False
91
- self._registry.clear()
92
- future = FutureResult()
93
- self._registry[identifier] = _MasterRegistry(future)
94
- return SlavePipe(identifier, self._queue, future)
95
-
96
- def run_master(self, master_msg):
97
- """
98
- Main entry for the master device in each forward pass.
99
- The messages are first collected from each device (including the master device), and then
100
- a callback is invoked to compute the message to be sent back to each device
101
- (including the master device).
102
-
103
- Args:
104
- master_msg: the message that the master wants to send to itself. This will be placed as the first
105
- message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
106
-
107
- Returns: the message to be sent back to the master device.
108
-
109
- """
110
- self._activated = True
111
-
112
- intermediates = [(0, master_msg)]
113
- for i in range(self.nr_slaves):
114
- intermediates.append(self._queue.get())
115
-
116
- results = self._master_callback(intermediates)
117
- assert results[0][0] == 0, 'The first result should belong to the master.'
118
-
119
- for i, res in results:
120
- if i == 0:
121
- continue
122
- self._registry[i].result.put(res)
123
-
124
- for i in range(self.nr_slaves):
125
- assert self._queue.get() is True
126
-
127
- return results[0][1]
128
-
129
- @property
130
- def nr_slaves(self):
131
- return len(self._registry)
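- # Editor's sketch (not part of the original file): one master plus one slave thread,
- # assuming an identity callback that returns the collected (id, msg) pairs unchanged.
- # master = SyncMaster(lambda pairs: pairs)
- # pipe = master.register_slave(1)
- # threading.Thread(target=lambda: pipe.run_slave('from-slave')).start()
- # master.run_master('from-master')  # returns 'from-master' once the slave has synced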
 
spaces/Aloento/9Nine-PITS/text/frontend/generate_lexicon.py DELETED
@@ -1,158 +0,0 @@
1
- # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # Design principles: https://zhuanlan.zhihu.com/p/349600439
15
- """Generate lexicon and symbols for Mandarin Chinese phonology.
16
- The lexicon is used for the Montreal Forced Aligner.
17
- Note that syllables are used as words in this lexicon, since syllables rather
18
- than words are used in the transcriptions produced by `reorganize_baker.py`.
19
- We make this choice to better leverage Chinese text-to-pinyin
20
- tools like pypinyin. This is the convention for G2P in Chinese.
21
- """
22
- import re
23
- from collections import OrderedDict
24
-
25
- INITIALS = [
26
- 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'zh', 'ch', 'sh',
27
- 'r', 'z', 'c', 's', 'j', 'q', 'x'
28
- ]
29
-
30
- FINALS = [
31
- 'a', 'ai', 'ao', 'an', 'ang', 'e', 'er', 'ei', 'en', 'eng', 'o', 'ou',
32
- 'ong', 'ii', 'iii', 'i', 'ia', 'iao', 'ian', 'iang', 'ie', 'io', 'iou',
33
- 'iong', 'in', 'ing', 'u', 'ua', 'uai', 'uan', 'uang', 'uei', 'uo', 'uen',
34
- 'ueng', 'v', 've', 'van', 'vn'
35
- ]
36
-
37
- SPECIALS = ['sil', 'sp']
38
-
39
-
40
- def rule(C, V, R, T):
41
- """Generate a syllable given the initial, the final, erhua indicator, and tone.
42
- Orthographical rules for pinyin are applied. (special case for y, w, ui, un, iu)
43
-
44
- Note that in this system, 'ü' is always written as 'v' in phonemes, but converted to
45
- 'u' in syllables when certain conditions are satisfied.
46
-
47
- 'i' is disambiguated in phonemes and separated into 3 categories: 'i', 'ii' and 'iii'.
48
- Erhua may be applied to every final, except for finals that already end with 'r'.
49
- When a syllable is impossible or does not have any characters with this pronunciation, return None
50
- to filter it out.
51
- """
52
-
53
- # Unpronounceable syllables: ii can only combine with z, c, s
54
- if V in ["ii"] and (C not in ['z', 'c', 's']):
55
- return None
56
- # iii can only combine with zh, ch, sh, r
57
- if V in ['iii'] and (C not in ['zh', 'ch', 'sh', 'r']):
58
- return None
59
-
60
- # i-group (qichihu) and ü-group (cuokouhu) finals cannot combine with f, g, k, h, zh, ch, sh, r, z, c, s
61
- if (V not in ['ii', 'iii']) and V[0] in ['i', 'v'] and (
62
- C in ['f', 'g', 'k', 'h', 'zh', 'ch', 'sh', 'r', 'z', 'c', 's']):
63
- return None
64
-
65
- # ü-group (cuokouhu) finals can only combine with j, q, x, l, n
66
- if V.startswith("v"):
67
- # v and ve can only combine with j, q, x, n, l
68
- if V in ['v', 've']:
69
- if C not in ['j', 'q', 'x', 'n', 'l', '']:
70
- return None
71
- # the other ü-group finals can only combine with j, q, x
72
- else:
73
- if C not in ['j', 'q', 'x', '']:
74
- return None
75
-
76
- # j, q, x can only combine with i-group or ü-group finals
77
- if (C in ['j', 'q', 'x']) and not (
78
- (V not in ['ii', 'iii']) and V[0] in ['i', 'v']):
79
- return None
80
-
81
- # b, p, m, f cannot combine with u-group (hekouhu) finals other than u itself,
82
- # and cannot combine with ü-group (cuokouhu) finals
83
- if (C in ['b', 'p', 'm', 'f']) and ((V[0] in ['u', 'v'] and V != "u") or
84
- V == 'ong'):
85
- return None
86
-
87
- # ua, uai, uang cannot combine with d, t, n, l, r, z, c, s
88
- if V in ['ua', 'uai',
89
- 'uang'] and C in ['d', 't', 'n', 'l', 'r', 'z', 'c', 's']:
90
- return None
91
-
92
- # sh cannot combine with ong
93
- if V == 'ong' and C in ['sh']:
94
- return None
95
-
96
- # o cannot combine with d, t, n, g, k, h, zh, ch, sh, r, z, c, s
97
- if V == "o" and C in [
98
- 'd', 't', 'n', 'g', 'k', 'h', 'zh', 'ch', 'sh', 'r', 'z', 'c', 's'
99
- ]:
100
- return None
101
-
102
- # ueng only occurs as the ad-hoc syllable weng; in all other cases it is written ong
103
- if V == 'ueng' and C != '':
104
- return
105
-
106
- # non-erhua er can only stand alone (no initial)
107
- if V == 'er' and C != '':
108
- return None
109
-
110
- if C == '':
111
- if V in ["i", "in", "ing"]:
112
- C = 'y'
113
- elif V == 'u':
114
- C = 'w'
115
- elif V.startswith('i') and V not in ["ii", "iii"]:
116
- C = 'y'
117
- V = V[1:]
118
- elif V.startswith('u'):
119
- C = 'w'
120
- V = V[1:]
121
- elif V.startswith('v'):
122
- C = 'yu'
123
- V = V[1:]
124
- else:
125
- if C in ['j', 'q', 'x']:
126
- if V.startswith('v'):
127
- V = re.sub('v', 'u', V)
128
- if V == 'iou':
129
- V = 'iu'
130
- elif V == 'uei':
131
- V = 'ui'
132
- elif V == 'uen':
133
- V = 'un'
134
- result = C + V
135
-
136
- # Filter er 不能再儿化
137
- if result.endswith('r') and R == 'r':
138
- return None
139
-
140
- # ii and iii, change back to i
141
- result = re.sub(r'i+', 'i', result)
142
-
143
- result = result + R + T
144
- return result
145
-
146
-
147
- def generate_lexicon(with_tone=False, with_erhua=False):
148
- """Generate lexicon for Mandarin Chinese."""
149
- syllables = OrderedDict()
150
-
151
- for C in [''] + INITIALS:
152
- for V in FINALS:
153
- for R in [''] if not with_erhua else ['', 'r']:
154
- for T in [''] if not with_tone else ['1', '2', '3', '4', '5']:
155
- result = rule(C, V, R, T)
156
- if result:
157
- syllables[result] = f'{C} {V}{R}{T}'
158
- return syllables
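- # Editor's sketch (not part of the original file): the generated lexicon maps a
- # syllable to its phones, e.g. generate_lexicon(with_tone=True)['ma1'] == 'm a1'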
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/edit.py DELETED
@@ -1,207 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
-
- import os
- import sys
-
- sys.path.append(".")  # make the local modules below importable when run from the repo root
-
- import subprocess
- from typing import List, Optional
-
- import cv2
- import click
- import torch
- import numpy as np
-
- from edit.edit_helper import conv_warper, decoder, encoder_ifg, encoder_ss, encoder_sefa
- import legacy
- from torch_utils.models import Generator
-
-
- """
- Edit generated images with different SOTA methods.
- Notes:
- 1. We provide some latent directions in the folder; you can play around with them.
- 2. ''upper_length'' and ''bottom_length'' of ''attr_name'' are available for the demo.
- 3. Layers to control and editing strength are set in edit/edit_config.py.
-
- Examples:
-
- \b
- # Editing with InterfaceGAN, StyleSpace, and SeFa
- python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\
-     --seeds 61531,61570,61571,61610 --outdir outputs/edit_results
-
-
- # Editing using an inverted latent code
- python edit.py --network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\
-     --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png
-
- """
-
-
- @click.command()
- @click.pass_context
- @click.option('--network', 'ckpt_path', help='Network pickle filename', required=True)
- @click.option('--attr_name', help='choose one of the attrs: upper_length or bottom_length', type=str, required=True)
- @click.option('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True)
- @click.option('--gen_video', type=bool, default=True, help='Whether to generate a video')
- @click.option('--combine', type=bool, default=True, help='Whether to combine the different editing results in the same frame')
- @click.option('--seeds', type=legacy.num_range, help='List of random seeds')
- @click.option('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR')
- @click.option('--real', type=bool, help='True for editing a real image', default=False)
- @click.option('--real_w_path', help='Path of the latent code for the real image')
- @click.option('--real_img_path', help='Path of the real image; it is only concatenated with the inverted and edited results')
- def main(
-     ctx: click.Context,
-     ckpt_path: str,
-     attr_name: str,
-     truncation: float,
-     gen_video: bool,
-     combine: bool,
-     seeds: Optional[List[int]],
-     outdir: str,
-     real: bool,
-     real_w_path: str,
-     real_img_path: str
- ):
-     # convert pkl to pth
-     # if not os.path.exists(ckpt_path.replace('.pkl', '.pth')):
-     legacy.convert(ckpt_path, ckpt_path.replace('.pkl', '.pth'), G_only=real)
-     ckpt_path = ckpt_path.replace('.pkl', '.pth')
-     print("start...", flush=True)
-     config = {"latent": 512, "n_mlp": 8, "channel_multiplier": 2}
-     generator = Generator(
-         size=1024,
-         style_dim=config["latent"],
-         n_mlp=config["n_mlp"],
-         channel_multiplier=config["channel_multiplier"]
-     )
-
-     generator.load_state_dict(torch.load(ckpt_path)['g_ema'])
-     generator.eval().cuda()
-
-     with torch.no_grad():
-         mean_path = os.path.join('edit', 'mean_latent.pkl')
-         if not os.path.exists(mean_path):
-             mean_n = 3000
-             mean_latent = generator.mean_latent(mean_n).detach()
-             legacy.save_obj(mean_latent, mean_path)
-         else:
-             mean_latent = legacy.load_pkl(mean_path).cuda()
-         finals = []
-
-         ## -- selected sample seeds -- ##
-         # seeds = [60948,60965,61174,61210,61511,61598,61610]  # bottom -> long
-         # [60941,61064,61103,61313,61531,61570,61571]  # bottom -> short
-         # [60941,60965,61064,61103,61174,61210,61531,61570,61571,61610]  # upper -> long
-         # [60948,61313,61511,61598]  # upper -> short
-         if real:
-             seeds = [0]
-
-         for t in seeds:
-             if real:  # for now, assume we process a single real image only
-                 if real_img_path:
-                     real_image = cv2.imread(real_img_path)
-                     real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB)
-                     import torchvision.transforms as transforms
-                     transform = transforms.Compose(  # normalize to (-1, 1)
-                         [transforms.ToTensor(),
-                          transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5))]
-                     )
-                     real_image = transform(real_image).unsqueeze(0).cuda()
-
-                 test_input = torch.load(real_w_path)
-                 output, _ = generator(
-                     test_input, False, truncation=1, input_is_latent=True, real=True)
-
-             else:  # generate an image from a random seed
-                 test_input = torch.from_numpy(np.random.RandomState(
-                     t).randn(1, 512)).float().cuda()  # torch.Size([1, 512])
-                 output, _ = generator(
-                     [test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real)
-
-             # InterfaceGAN
-             style_space, latent, noise = encoder_ifg(
-                 generator, test_input, attr_name, truncation, mean_latent, real=real)
-             image1 = decoder(generator, style_space, latent, noise)
-             # StyleSpace
-             style_space, latent, noise = encoder_ss(
-                 generator, test_input, attr_name, truncation, mean_latent, real=real)
-             image2 = decoder(generator, style_space, latent, noise)
-             # SeFa
-             latent, noise = encoder_sefa(
-                 generator, test_input, attr_name, truncation, mean_latent, real=real)
-             image3, _ = generator([latent], noise=noise, input_is_latent=True)
-             if real_img_path:
-                 final = torch.cat(
-                     (real_image, output, image1, image2, image3), 3)
-             else:
-                 final = torch.cat((output, image1, image2, image3), 3)
-
-             # legacy.visual(output, f'{outdir}/{attr_name}_{t:05d}_raw.jpg')
-             # legacy.visual(image1, f'{outdir}/{attr_name}_{t:05d}_ifg.jpg')
-             # legacy.visual(image2, f'{outdir}/{attr_name}_{t:05d}_ss.jpg')
-             # legacy.visual(image3, f'{outdir}/{attr_name}_{t:05d}_sefa.jpg')
-
-             if gen_video:
-                 total_step = 90
-                 if real:
-                     video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[-2]}/"
-                     video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/"
-                     video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{real_w_path.split('/')[-2]}/"
-                 else:
-                     video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{t:05d}/"
-                     video_ss_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/"
-                     video_sefa_path = f"{outdir}/video/sefa_{attr_name}_{t:05d}/"
-                 video_comb_path = f"{outdir}/video/tmp"
-
-                 if combine:
-                     if not os.path.exists(video_comb_path):
-                         os.makedirs(video_comb_path)
-                 else:
-                     if not os.path.exists(video_ifg_path):
-                         os.makedirs(video_ifg_path)
-                     if not os.path.exists(video_ss_path):
-                         os.makedirs(video_ss_path)
-                     if not os.path.exists(video_sefa_path):
-                         os.makedirs(video_sefa_path)
-                 for i in range(total_step):
-                     style_space, latent, noise = encoder_ifg(
-                         generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
-                     image1 = decoder(generator, style_space, latent, noise)
-                     style_space, latent, noise = encoder_ss(
-                         generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
-                     image2 = decoder(generator, style_space, latent, noise)
-                     latent, noise = encoder_sefa(
-                         generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real)
-                     image3, _ = generator(
-                         [latent], noise=noise, input_is_latent=True)
-                     if combine:
-                         if real_img_path:
-                             comb_img = torch.cat(
-                                 (real_image, output, image1, image2, image3), 3)
-                         else:
-                             comb_img = torch.cat(
-                                 (output, image1, image2, image3), 3)
-                         legacy.visual(comb_img, os.path.join(
-                             video_comb_path, f'{i:05d}.jpg'))
-                     else:
-                         legacy.visual(image1, os.path.join(
-                             video_ifg_path, f'{i:05d}.jpg'))
-                         legacy.visual(image2, os.path.join(
-                             video_ss_path, f'{i:05d}.jpg'))
-                 if combine:
-                     cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path.replace('ifg_', '')[:-1] + '.mp4'}"
-                     subprocess.call(cmd, shell=True)
-                 else:
-                     cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path[:-1] + '.mp4'}"
-                     subprocess.call(cmd, shell=True)
-                     cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ss_path[:-1] + '.mp4'}"
-                     subprocess.call(cmd, shell=True)
-
-             # InterfaceGAN, StyleSpace, SeFa
-             finals.append(final)
-
-         final = torch.cat(finals, 2)
-         legacy.visual(final, os.path.join(outdir, 'final.jpg'))
-
-
- if __name__ == "__main__":
-     main()
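Two small patterns in the loop above are worth calling out: seeds map deterministically to latent codes, and per-frame JPEGs are stitched into a video with ffmpeg. A hedged, self-contained sketch of both (the paths are illustrative, not from the script):

    import subprocess
    import numpy as np
    import torch

    # The same seed always yields the same z vector, which is what makes
    # the seed lists in the comments above reproducible.
    seed = 61531
    z = torch.from_numpy(np.random.RandomState(seed).randn(1, 512)).float()

    # Stitch numbered frames (00000.jpg, 00001.jpg, ...) into an H.264 video,
    # mirroring the ffmpeg invocation used by the script.
    frames_dir = "outputs/video/tmp"    # illustrative
    out_mp4 = "outputs/video/edit.mp4"  # illustrative
    cmd = (f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {frames_dir}/%05d.jpg "
           f"-vcodec libx264 -pix_fmt yuv420p {out_mp4}")
    subprocess.call(cmd, shell=True)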
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py DELETED
@@ -1,717 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- import warnings
- from typing import Any, Callable, Dict, List, Optional, Union
-
- import torch
- from packaging import version
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
- from ...configuration_utils import FrozenDict
- from ...image_processor import VaeImageProcessor
- from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
- from ...models import AutoencoderKL, UNet2DConditionModel
- from ...schedulers import KarrasDiffusionSchedulers
- from ...utils import (
-     deprecate,
-     is_accelerate_available,
-     is_accelerate_version,
-     logging,
-     randn_tensor,
-     replace_example_docstring,
- )
- from ..pipeline_utils import DiffusionPipeline
- from . import StableDiffusionPipelineOutput
- from .safety_checker import StableDiffusionSafetyChecker
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> import torch
-         >>> from diffusers import StableDiffusionPipeline
-
-         >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
-         >>> pipe = pipe.to("cuda")
-
-         >>> prompt = "a photo of an astronaut riding a horse on mars"
-         >>> image = pipe(prompt).images[0]
-         ```
- """
-
-
- def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
-     """
-     Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
-     Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
-     """
-     std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
-     std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
-     # rescale the results from guidance (fixes overexposure)
-     noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
-     # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
-     noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
-     return noise_cfg
-
-
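To make the rescaling above concrete, here is a hedged toy computation of the same formula (random tensors, values purely illustrative):

    import torch

    noise_pred_text = torch.randn(2, 4, 64, 64) * 3.0  # toy text-branch prediction
    noise_cfg = torch.randn(2, 4, 64, 64) * 5.0        # toy combined CFG prediction

    # Match the combined prediction's per-sample std to the text branch's std,
    # then blend back by guidance_rescale to avoid "plain looking" images.
    guidance_rescale = 0.7
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    out = guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg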
- class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
-     r"""
-     Pipeline for text-to-image generation using Stable Diffusion.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
-     implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-     The pipeline also inherits the following loading methods:
-         - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
-         - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
-         - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
-         - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
-         text_encoder ([`~transformers.CLIPTextModel`]):
-             Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
-         tokenizer ([`~transformers.CLIPTokenizer`]):
-             A `CLIPTokenizer` to tokenize text.
-         unet ([`UNet2DConditionModel`]):
-             A `UNet2DConditionModel` to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
-             about a model's potential harms.
-         feature_extractor ([`~transformers.CLIPImageProcessor`]):
-             A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: KarrasDiffusionSchedulers,
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPImageProcessor,
-         requires_safety_checker: bool = True,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
-                 " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
-                 " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
-                 " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
-                 " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
-             )
-             deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["clip_sample"] = False
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None and requires_safety_checker:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         if safety_checker is not None and feature_extractor is None:
-             raise ValueError(
-                 f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-                 " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-             )
-
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-             version.parse(unet.config._diffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-         self.register_to_config(requires_safety_checker=requires_safety_checker)
-
-     def enable_vae_slicing(self):
-         r"""
-         Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
-         compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
-         """
-         self.vae.enable_slicing()
-
-     def disable_vae_slicing(self):
-         r"""
-         Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
-         computing decoding in one step.
-         """
-         self.vae.disable_slicing()
-
-     def enable_vae_tiling(self):
-         r"""
-         Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
-         compute decoding and encoding in several steps. This is useful for saving a large amount of memory and allows
-         processing larger images.
-         """
-         self.vae.enable_tiling()
-
-     def disable_vae_tiling(self):
-         r"""
-         Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
-         computing decoding in one step.
-         """
-         self.vae.disable_tiling()
-
-     def enable_model_cpu_offload(self, gpu_id=0):
-         r"""
-         Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
-         time to the GPU when its `forward` method is called, and the model remains on the GPU until the next model runs.
-         Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the
-         iterative execution of the `unet`.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-             from accelerate import cpu_offload_with_hook
-         else:
-             raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         hook = None
-         for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
-             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-         if self.safety_checker is not None:
-             _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
-         # We'll offload the last model manually.
-         self.final_offload_hook = hook
-
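The memory helpers above are plain toggles on a constructed pipeline instance. A hedged usage sketch (`pipe` is assumed to be a loaded pipeline; the offload path requires accelerate >= 0.17.0, per the version check above):

    pipe.enable_vae_slicing()        # decode the VAE in slices to save memory
    pipe.enable_model_cpu_offload()  # keep only one sub-model on the GPU at a time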
-     def _encode_prompt(
-         self,
-         prompt,
-         device,
-         num_images_per_prompt,
-         do_classifier_free_guidance,
-         negative_prompt=None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-         lora_scale: Optional[float] = None,
-     ):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 prompt to be encoded
-             device: (`torch.device`):
-                 torch device
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier-free guidance or not
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
-                 less than `1`).
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                 provided, text embeddings will be generated from the `prompt` input argument.
-             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                 weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
-                 argument.
-             lora_scale (`float`, *optional*):
-                 A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
-         """
-         # set lora scale so that the monkey-patched LoRA
-         # function of the text encoder can correctly access it
-         if lora_scale is not None and isinstance(self, LoraLoaderMixin):
-             self._lora_scale = lora_scale
-
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             batch_size = prompt_embeds.shape[0]
-
-         if prompt_embeds is None:
-             # textual inversion: process multi-vector tokens if necessary
-             if isinstance(self, TextualInversionLoaderMixin):
-                 prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
-             text_inputs = self.tokenizer(
-                 prompt,
-                 padding="max_length",
-                 max_length=self.tokenizer.model_max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-             text_input_ids = text_inputs.input_ids
-             untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-             if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                 text_input_ids, untruncated_ids
-             ):
-                 removed_text = self.tokenizer.batch_decode(
-                     untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
-                 )
-                 logger.warning(
-                     "The following part of your input was truncated because CLIP can only handle sequences up to"
-                     f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-                 )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = text_inputs.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             prompt_embeds = self.text_encoder(
-                 text_input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             prompt_embeds = prompt_embeds[0]
-
-         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-         bs_embed, seq_len, _ = prompt_embeds.shape
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-         prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # get unconditional embeddings for classifier-free guidance
-         if do_classifier_free_guidance and negative_prompt_embeds is None:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif prompt is not None and type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             # textual inversion: process multi-vector tokens if necessary
-             if isinstance(self, TextualInversionLoaderMixin):
-                 uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
-             max_length = prompt_embeds.shape[1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             negative_prompt_embeds = self.text_encoder(
-                 uncond_input.input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             negative_prompt_embeds = negative_prompt_embeds[0]
-
-         if do_classifier_free_guidance:
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = negative_prompt_embeds.shape[1]
-
-             negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-             negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-             negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier-free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes.
-             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
-         return prompt_embeds
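The batching trick at the end of `_encode_prompt` pairs with the `noise_pred.chunk(2)` split in the denoising loop further down. A hedged toy illustration of the round trip (shapes are illustrative only):

    import torch

    negative_prompt_embeds = torch.zeros(2, 77, 768)  # toy unconditional embeddings
    prompt_embeds = torch.ones(2, 77, 768)            # toy text embeddings

    # One batched forward pass instead of two: [uncond; text] along dim 0 ...
    batched = torch.cat([negative_prompt_embeds, prompt_embeds])
    # ... and the guidance step later splits it back in the same order.
    uncond_out, text_out = batched.chunk(2)
    assert torch.equal(text_out, prompt_embeds)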
-
-     def run_safety_checker(self, image, device, dtype):
-         if self.safety_checker is None:
-             has_nsfw_concept = None
-         else:
-             if torch.is_tensor(image):
-                 feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
-             else:
-                 feature_extractor_input = self.image_processor.numpy_to_pil(image)
-             safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
-             )
-         return image, has_nsfw_concept
-
-     def decode_latents(self, latents):
-         warnings.warn(
-             "The decode_latents method is deprecated and will be removed in a future version. Please"
-             " use VaeImageProcessor instead",
-             FutureWarning,
-         )
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.decode(latents, return_dict=False)[0]
-         image = (image / 2 + 0.5).clamp(0, 1)
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-         return image
-
-     def prepare_extra_step_kwargs(self, generator, eta):
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         # check if the scheduler accepts generator
-         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         if accepts_generator:
-             extra_step_kwargs["generator"] = generator
-         return extra_step_kwargs
-
-     def check_inputs(
-         self,
-         prompt,
-         height,
-         width,
-         callback_steps,
-         negative_prompt=None,
-         prompt_embeds=None,
-         negative_prompt_embeds=None,
-     ):
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-         if prompt is not None and prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-                 " only forward one of the two."
-             )
-         elif prompt is None and prompt_embeds is None:
-             raise ValueError(
-                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-             )
-         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if negative_prompt is not None and negative_prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-             )
-
-         if prompt_embeds is not None and negative_prompt_embeds is not None:
-             if prompt_embeds.shape != negative_prompt_embeds.shape:
-                 raise ValueError(
-                     "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                     f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                     f" {negative_prompt_embeds.shape}."
-                 )
-
-     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-         shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if latents is None:
-             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             latents = latents.to(device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-         return latents
-
-     @torch.no_grad()
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         prompt: Union[str, List[str]] = None,
-         height: Optional[int] = None,
-         width: Optional[int] = None,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-         guidance_rescale: float = 0.0,
-     ):
-         r"""
-         The call function to the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
-             height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 A higher guidance scale value encourages the model to generate images closely linked to the text
-                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts to guide what to not include in image generation. If not defined, you need to
-                 pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
-                 to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
-             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
-                 generation deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor is generated by sampling using the supplied random `generator`.
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
-                 provided, text embeddings are generated from the `prompt` input argument.
-             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
-                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that is called every `callback_steps` steps during inference. The function is called with the
-                 following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function is called. If not specified, the callback is called at
-                 every step.
-             cross_attention_kwargs (`dict`, *optional*):
-                 A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
-                 [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
-             guidance_rescale (`float`, *optional*, defaults to 0.0):
-                 Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
-                 Flawed](https://arxiv.org/pdf/2305.08891.pdf). The guidance rescale factor should fix overexposure when
-                 using zero terminal SNR.
-
-         Examples:
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-                 If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
-                 otherwise a `tuple` is returned where the first element is a list with the generated images and the
-                 second element is a list of `bool`s indicating whether the corresponding generated image contains
-                 "not-safe-for-work" (nsfw) content.
-         """
-         # 0. Default height and width to unet
-         height = height or self.unet.config.sample_size * self.vae_scale_factor
-         width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-         # 1. Check inputs. Raise error if not correct
-         self.check_inputs(
-             prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
-         )
-
-         # 2. Define call parameters
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             batch_size = prompt_embeds.shape[0]
-
-         device = self._execution_device
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier-free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-
-         # 3. Encode input prompt
-         text_encoder_lora_scale = (
-             cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
-         )
-         prompt_embeds = self._encode_prompt(
-             prompt,
-             device,
-             num_images_per_prompt,
-             do_classifier_free_guidance,
-             negative_prompt,
-             prompt_embeds=prompt_embeds,
-             negative_prompt_embeds=negative_prompt_embeds,
-             lora_scale=text_encoder_lora_scale,
-         )
-
-         # 4. Prepare timesteps
-         self.scheduler.set_timesteps(num_inference_steps, device=device)
-         timesteps = self.scheduler.timesteps
-
-         # 5. Prepare latent variables
-         num_channels_latents = self.unet.config.in_channels
-         latents = self.prepare_latents(
-             batch_size * num_images_per_prompt,
-             num_channels_latents,
-             height,
-             width,
-             prompt_embeds.dtype,
-             device,
-             generator,
-             latents,
-         )
-
-         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-         # 7. Denoising loop
-         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
-         with self.progress_bar(total=num_inference_steps) as progress_bar:
-             for i, t in enumerate(timesteps):
-                 # expand the latents if we are doing classifier-free guidance
-                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                 # predict the noise residual
-                 noise_pred = self.unet(
-                     latent_model_input,
-                     t,
-                     encoder_hidden_states=prompt_embeds,
-                     cross_attention_kwargs=cross_attention_kwargs,
-                     return_dict=False,
-                 )[0]
-
-                 # perform guidance
-                 if do_classifier_free_guidance:
-                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                 if do_classifier_free_guidance and guidance_rescale > 0.0:
-                     # Based on Section 3.4 of https://arxiv.org/pdf/2305.08891.pdf
-                     noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
-
-                 # compute the previous noisy sample x_t -> x_t-1
-                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
-
-                 # call the callback, if provided
-                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                     progress_bar.update()
-                     if callback is not None and i % callback_steps == 0:
-                         callback(i, t, latents)
-
-         if output_type != "latent":
-             image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
-             image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-         else:
-             image = latents
-             has_nsfw_concept = None
-
-         if has_nsfw_concept is None:
-             do_denormalize = [True] * image.shape[0]
-         else:
-             do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
-
-         image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
-         # Offload last model to CPU
-         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
-             self.final_offload_hook.offload()
-
-         if not return_dict:
-             return (image, has_nsfw_concept)
-
-         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
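Taken together, the public surface of this pipeline can be exercised in a few lines. This is a hedged sketch, not part of the deleted file: the model id and prompt follow the example docstring above, and the keyword arguments follow the `__call__` signature.

    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")

    # Classifier-free guidance runs whenever guidance_scale > 1.0;
    # guidance_rescale applies the std-matching fix from rescale_noise_cfg.
    image = pipe(
        "a photo of an astronaut riding a horse on mars",
        negative_prompt="blurry, low quality",
        num_inference_steps=50,
        guidance_scale=7.5,
        guidance_rescale=0.7,
    ).images[0]
    image.save("astronaut.png")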
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py DELETED
@@ -1,511 +0,0 @@
- # Copyright 2023 Zhejiang University Team and The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
-
- from dataclasses import dataclass
- from typing import Optional, Tuple, Union
-
- import flax
- import jax
- import jax.numpy as jnp
-
- from ..configuration_utils import ConfigMixin, register_to_config
- from .scheduling_utils_flax import (
-     CommonSchedulerState,
-     FlaxKarrasDiffusionSchedulers,
-     FlaxSchedulerMixin,
-     FlaxSchedulerOutput,
-     add_noise_common,
- )
-
-
- @flax.struct.dataclass
- class PNDMSchedulerState:
-     common: CommonSchedulerState
-     final_alpha_cumprod: jnp.ndarray
-
-     # settable values
-     init_noise_sigma: jnp.ndarray
-     timesteps: jnp.ndarray
-     num_inference_steps: Optional[int] = None
-     prk_timesteps: Optional[jnp.ndarray] = None
-     plms_timesteps: Optional[jnp.ndarray] = None
-
-     # running values
-     cur_model_output: Optional[jnp.ndarray] = None
-     counter: Optional[jnp.int32] = None
-     cur_sample: Optional[jnp.ndarray] = None
-     ets: Optional[jnp.ndarray] = None
-
-     @classmethod
-     def create(
-         cls,
-         common: CommonSchedulerState,
-         final_alpha_cumprod: jnp.ndarray,
-         init_noise_sigma: jnp.ndarray,
-         timesteps: jnp.ndarray,
-     ):
-         return cls(
-             common=common,
-             final_alpha_cumprod=final_alpha_cumprod,
-             init_noise_sigma=init_noise_sigma,
-             timesteps=timesteps,
-         )
-
-
- @dataclass
- class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput):
-     state: PNDMSchedulerState
-
-
- class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin):
-     """
-     Pseudo numerical methods for diffusion models (PNDM) propose using more advanced ODE integration techniques,
-     namely the Runge-Kutta method and a linear multi-step method.
-
-     [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
-     function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
-     [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
-     [`~SchedulerMixin.from_pretrained`] functions.
-
-     For more details, see the original paper: https://arxiv.org/abs/2202.09778
-
-     Args:
-         num_train_timesteps (`int`): number of diffusion steps used to train the model.
-         beta_start (`float`): the starting `beta` value of inference.
-         beta_end (`float`): the final `beta` value.
-         beta_schedule (`str`):
-             the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
-             `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
-         trained_betas (`jnp.ndarray`, optional):
-             option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
-         skip_prk_steps (`bool`):
-             allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
-             before the PLMS steps; defaults to `False`.
-         set_alpha_to_one (`bool`, default `False`):
-             each diffusion step uses the value of the alphas product at that step and at the previous one. For the
-             final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to
-             `1`, otherwise it uses the value of alpha at step 0.
-         steps_offset (`int`, default `0`):
-             an offset added to the inference steps. You can use a combination of `offset=1` and
-             `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product, as done in
-             stable diffusion.
-         prediction_type (`str`, default `epsilon`, optional):
-             prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
-             process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4 of
-             https://imagen.research.google/video/paper.pdf)
-         dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
-             the `dtype` used for params and computation.
-     """
-
-     _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-
-     dtype: jnp.dtype
-     pndm_order: int
-
-     @property
-     def has_state(self):
-         return True
-
-     @register_to_config
-     def __init__(
-         self,
-         num_train_timesteps: int = 1000,
-         beta_start: float = 0.0001,
-         beta_end: float = 0.02,
-         beta_schedule: str = "linear",
-         trained_betas: Optional[jnp.ndarray] = None,
-         skip_prk_steps: bool = False,
-         set_alpha_to_one: bool = False,
-         steps_offset: int = 0,
-         prediction_type: str = "epsilon",
-         dtype: jnp.dtype = jnp.float32,
-     ):
-         self.dtype = dtype
-
-         # For now we only support F-PNDM, i.e. the Runge-Kutta method.
-         # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf,
-         # mainly at formulas (9), (12), (13) and Algorithm 2.
-         self.pndm_order = 4
-
-     def create_state(self, common: Optional[CommonSchedulerState] = None) -> PNDMSchedulerState:
-         if common is None:
-             common = CommonSchedulerState.create(self)
-
-         # At every step in DDIM, we are looking into the previous alphas_cumprod.
-         # For the final step, there is no previous alphas_cumprod because we are already at 0.
-         # `set_alpha_to_one` decides whether we set this parameter simply to one or
-         # whether we use the final alpha of the "non-previous" one.
-         final_alpha_cumprod = (
-             jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0]
-         )
-
-         # standard deviation of the initial noise distribution
-         init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
-
-         timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
-
-         return PNDMSchedulerState.create(
-             common=common,
-             final_alpha_cumprod=final_alpha_cumprod,
-             init_noise_sigma=init_noise_sigma,
-             timesteps=timesteps,
-         )
-
-     def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState:
-         """
-         Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
-         Args:
-             state (`PNDMSchedulerState`):
-                 the `FlaxPNDMScheduler` state data class instance.
-             num_inference_steps (`int`):
-                 the number of diffusion steps used when generating samples with a pre-trained model.
-             shape (`Tuple`):
-                 the shape of the samples to be generated.
-         """
-
-         step_ratio = self.config.num_train_timesteps // num_inference_steps
-         # creates integer timesteps by multiplying by the ratio,
-         # rounding to avoid issues when num_inference_steps is a power of 3
-         _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset
-
-         if self.config.skip_prk_steps:
-             # for some models like stable diffusion the prk steps can/should be skipped to
-             # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation
-             # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51
-
-             prk_timesteps = jnp.array([], dtype=jnp.int32)
-             plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1]
-
-         else:
-             prk_timesteps = _timesteps[-self.pndm_order :].repeat(2) + jnp.tile(
-                 jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32),
-                 self.pndm_order,
-             )
-
-             prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1]
-             plms_timesteps = _timesteps[:-3][::-1]
-
-         timesteps = jnp.concatenate([prk_timesteps, plms_timesteps])
-
-         # initial running values
-
-         cur_model_output = jnp.zeros(shape, dtype=self.dtype)
-         counter = jnp.int32(0)
-         cur_sample = jnp.zeros(shape, dtype=self.dtype)
-         ets = jnp.zeros((4,) + shape, dtype=self.dtype)
-
-         return state.replace(
-             timesteps=timesteps,
-             num_inference_steps=num_inference_steps,
-             prk_timesteps=prk_timesteps,
-             plms_timesteps=plms_timesteps,
-             cur_model_output=cur_model_output,
-             counter=counter,
-             cur_sample=cur_sample,
-             ets=ets,
-         )
-
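Because this Flax scheduler keeps all mutable values in an immutable `PNDMSchedulerState`, each method returns a fresh state rather than mutating in place. A hedged sketch of the flow through the two methods above (shapes and step counts are illustrative; requires jax and flax to be installed):

    from diffusers import FlaxPNDMScheduler

    scheduler = FlaxPNDMScheduler(num_train_timesteps=1000, skip_prk_steps=True)
    state = scheduler.create_state()

    # set_timesteps returns a new state carrying the timestep schedule
    # and zero-initialized running buffers (cur_model_output, ets, ...).
    state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 4, 64, 64))
    print(state.timesteps[:5], state.counter)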
-     def scale_model_input(
-         self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
-     ) -> jnp.ndarray:
-         """
-         Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
-         current timestep.
-
-         Args:
-             state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
-             sample (`jnp.ndarray`): input sample
-             timestep (`int`, optional): current timestep
-
-         Returns:
-             `jnp.ndarray`: scaled input sample
-         """
-         return sample
-
-     def step(
-         self,
-         state: PNDMSchedulerState,
-         model_output: jnp.ndarray,
-         timestep: int,
-         sample: jnp.ndarray,
-         return_dict: bool = True,
-     ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
-         """
-         Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
-         process from the learned model outputs (most often the predicted noise).
-
-         This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`.
-
-         Args:
-             state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
-             model_output (`jnp.ndarray`): direct output from learned diffusion model.
-             timestep (`int`): current discrete timestep in the diffusion chain.
-             sample (`jnp.ndarray`):
-                 current instance of sample being created by diffusion process.
-             return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
-
-         Returns:
-             [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
-             `tuple`. When returning a tuple, the first element is the sample tensor.
-
-         """
-
-         if state.num_inference_steps is None:
-             raise ValueError(
-                 "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
-             )
-
-         if self.config.skip_prk_steps:
-             prev_sample, state = self.step_plms(state, model_output, timestep, sample)
-         else:
-             prk_prev_sample, prk_state = self.step_prk(state, model_output, timestep, sample)
-             plms_prev_sample, plms_state = self.step_plms(state, model_output, timestep, sample)
-
-             cond = state.counter < len(state.prk_timesteps)
-
-             prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample)
-
-             state = state.replace(
-                 cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output),
-                 ets=jax.lax.select(cond, prk_state.ets, plms_state.ets),
-                 cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample),
-                 counter=jax.lax.select(cond, prk_state.counter, plms_state.counter),
-             )
-
-         if not return_dict:
-             return (prev_sample, state)
-
-         return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state)
-
294
- def step_prk(
295
- self,
296
- state: PNDMSchedulerState,
297
- model_output: jnp.ndarray,
298
- timestep: int,
299
- sample: jnp.ndarray,
300
- ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
301
- """
302
- Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the
303
- solution to the differential equation.
304
-
305
- Args:
306
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
307
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
308
- timestep (`int`): current discrete timestep in the diffusion chain.
309
- sample (`jnp.ndarray`):
310
- current instance of sample being created by diffusion process.
311
- return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
312
-
313
- Returns:
314
- [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
315
- `tuple`. When returning a tuple, the first element is the sample tensor.
316
-
317
- """
318
-
319
- if state.num_inference_steps is None:
320
- raise ValueError(
321
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
322
- )
323
-
324
- diff_to_prev = jnp.where(
325
- state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2
326
- )
327
- prev_timestep = timestep - diff_to_prev
328
- timestep = state.prk_timesteps[state.counter // 4 * 4]
329
-
330
- model_output = jax.lax.select(
331
- (state.counter % 4) != 3,
332
- model_output, # remainder 0, 1, 2
333
- state.cur_model_output + 1 / 6 * model_output, # remainder 3
334
- )
335
-
336
- state = state.replace(
337
- cur_model_output=jax.lax.select_n(
338
- state.counter % 4,
339
- state.cur_model_output + 1 / 6 * model_output, # remainder 0
340
- state.cur_model_output + 1 / 3 * model_output, # remainder 1
341
- state.cur_model_output + 1 / 3 * model_output, # remainder 2
342
- jnp.zeros_like(state.cur_model_output), # remainder 3
343
- ),
344
- ets=jax.lax.select(
345
- (state.counter % 4) == 0,
346
- state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # remainder 0
347
- state.ets, # remainder 1, 2, 3
348
- ),
349
- cur_sample=jax.lax.select(
350
- (state.counter % 4) == 0,
351
- sample, # remainder 0
352
- state.cur_sample, # remainder 1, 2, 3
353
- ),
354
- )
355
-
356
- cur_sample = state.cur_sample
357
- prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output)
358
- state = state.replace(counter=state.counter + 1)
359
-
360
- return (prev_sample, state)
361
-
362
- def step_plms(
363
- self,
364
- state: PNDMSchedulerState,
365
- model_output: jnp.ndarray,
366
- timestep: int,
367
- sample: jnp.ndarray,
368
- ) -> Union[FlaxPNDMSchedulerOutput, Tuple]:
369
- """
370
- Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple
371
- times to approximate the solution.
372
-
373
- Args:
374
- state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance.
375
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
376
- timestep (`int`): current discrete timestep in the diffusion chain.
377
- sample (`jnp.ndarray`):
378
- current instance of sample being created by diffusion process.
379
- return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class
380
-
381
- Returns:
382
- [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a
383
- `tuple`. When returning a tuple, the first element is the sample tensor.
384
-
385
- """
386
-
387
- if state.num_inference_steps is None:
388
- raise ValueError(
389
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
390
- )
391
-
392
- # NOTE: There is no way to check in the jitted runtime if the prk mode was ran before
393
-
394
- prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
395
- prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0)
396
-
397
- # Reference:
398
- # if state.counter != 1:
399
- # state.ets.append(model_output)
400
- # else:
401
- # prev_timestep = timestep
402
- # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps
403
-
404
- prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep)
405
- timestep = jnp.where(
406
- state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep
407
- )
408
-
409
- # Reference:
410
- # if len(state.ets) == 1 and state.counter == 0:
411
- # model_output = model_output
412
- # state.cur_sample = sample
413
- # elif len(state.ets) == 1 and state.counter == 1:
414
- # model_output = (model_output + state.ets[-1]) / 2
415
- # sample = state.cur_sample
416
- # state.cur_sample = None
417
- # elif len(state.ets) == 2:
418
- # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2
419
- # elif len(state.ets) == 3:
420
- # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12
421
- # else:
422
- # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4])
423
-
424
- state = state.replace(
425
- ets=jax.lax.select(
426
- state.counter != 1,
427
- state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # counter != 1
428
- state.ets, # counter 1
429
- ),
430
- cur_sample=jax.lax.select(
431
- state.counter != 1,
432
- sample, # counter != 1
433
- state.cur_sample, # counter 1
434
- ),
435
- )
436
-
437
- state = state.replace(
438
- cur_model_output=jax.lax.select_n(
439
- jnp.clip(state.counter, 0, 4),
440
- model_output, # counter 0
441
- (model_output + state.ets[-1]) / 2, # counter 1
442
- (3 * state.ets[-1] - state.ets[-2]) / 2, # counter 2
443
- (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, # counter 3
444
- (1 / 24)
445
- * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]), # counter >= 4
446
- ),
447
- )
448
-
449
- sample = state.cur_sample
450
- model_output = state.cur_model_output
451
- prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output)
452
- state = state.replace(counter=state.counter + 1)
453
-
454
- return (prev_sample, state)
455
-
456
- def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output):
457
- # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf
458
- # this function computes x_(t−δ) using the formula of (9)
459
- # Note that x_t needs to be added to both sides of the equation
460
-
461
- # Notation (<variable name> -> <name in paper>
462
- # alpha_prod_t -> α_t
463
- # alpha_prod_t_prev -> α_(t−δ)
464
- # beta_prod_t -> (1 - α_t)
465
- # beta_prod_t_prev -> (1 - α_(t−δ))
466
- # sample -> x_t
467
- # model_output -> e_θ(x_t, t)
468
- # prev_sample -> x_(t−δ)
469
- alpha_prod_t = state.common.alphas_cumprod[timestep]
470
- alpha_prod_t_prev = jnp.where(
471
- prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod
472
- )
473
- beta_prod_t = 1 - alpha_prod_t
474
- beta_prod_t_prev = 1 - alpha_prod_t_prev
475
-
476
- if self.config.prediction_type == "v_prediction":
477
- model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
478
- elif self.config.prediction_type != "epsilon":
479
- raise ValueError(
480
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`"
481
- )
482
-
483
- # corresponds to (α_(t−δ) - α_t) divided by
484
- # denominator of x_t in formula (9) and plus 1
485
- # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) =
486
- # sqrt(α_(t−δ)) / sqrt(α_t))
487
- sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5)
488
-
489
- # corresponds to denominator of e_θ(x_t, t) in formula (9)
490
- model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + (
491
- alpha_prod_t * beta_prod_t * alpha_prod_t_prev
492
- ) ** (0.5)
493
-
494
- # full formula (9)
495
- prev_sample = (
496
- sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff
497
- )
498
-
499
- return prev_sample
500
-
501
- def add_noise(
502
- self,
503
- state: PNDMSchedulerState,
504
- original_samples: jnp.ndarray,
505
- noise: jnp.ndarray,
506
- timesteps: jnp.ndarray,
507
- ) -> jnp.ndarray:
508
- return add_noise_common(state.common, original_samples, noise, timesteps)
509
-
510
- def __len__(self):
511
- return self.config.num_train_timesteps
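
The PLMS branch above blends the four most recent model outputs with the fourth-order Adams-Bashforth weights (55, -59, 37, -9) / 24 that appear in `step_plms`. As a minimal standalone sketch (plain NumPy, not part of the deleted file), the same weights integrate the test ODE dy/dt = -y with a single derivative evaluation per step:

import numpy as np

def f(y):
    return -y  # derivative of the test ODE dy/dt = -y

h = 0.1   # step size
y = 1.0   # initial condition y(0) = 1
ets = []  # history of derivative evaluations, analogous to state.ets
for n in range(50):
    ets.append(f(y))
    if len(ets) < 4:
        d = ets[-1]  # warm-up: first-order fallback, as the counter < 4 branches do with lower-order blends
    else:
        d = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
    y += h * d

print(y, np.exp(-h * 50))  # the multistep estimate closely tracks the exact solution exp(-5)
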
spaces/Andy1621/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py DELETED
@@ -1,3 +0,0 @@
- _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
- # fp16 settings
- fp16 = dict(loss_scale=512.)
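
The `loss_scale=512.` above configures static loss scaling for mixed-precision training. A minimal sketch of the mechanism (illustrative PyTorch, not MMDetection's fp16 hook API; `model`, `optimizer`, and `batch` are placeholders):

import torch

SCALE = 512.0

def fp16_step(model, optimizer, batch):
    loss = model(batch)        # forward pass, possibly in half precision
    (loss * SCALE).backward()  # scale the loss so small fp16 gradients do not underflow
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(SCALE)  # unscale gradients before the optimizer update
    optimizer.step()
    optimizer.zero_grad()
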
spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,13 +0,0 @@
- _base_ = './ga_rpn_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://resnext101_32x4d',
-     backbone=dict(
-         type='ResNeXt',
-         depth=101,
-         groups=32,
-         base_width=4,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         style='pytorch'))
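
This config overrides only the backbone of its `_base_` file; everything else is inherited through a recursive dict merge. A hedged sketch of inspecting the merged result (assuming an mmcv 1.x environment where `Config.fromfile` resolves `_base_` chains):

from mmcv import Config

cfg = Config.fromfile('configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py')
print(cfg.model.backbone.type)  # 'ResNeXt', overridden by this file
print(cfg.model.rpn_head)       # inherited unchanged from the r50 base config
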
spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py DELETED
@@ -1,3 +0,0 @@
- _base_ = './paa_r101_fpn_1x_coco.py'
- lr_config = dict(step=[16, 22])
- runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/loading.py DELETED
@@ -1,458 +0,0 @@
- import os.path as osp
-
- import mmcv
- import numpy as np
- import pycocotools.mask as maskUtils
-
- from mmdet.core import BitmapMasks, PolygonMasks
- from ..builder import PIPELINES
-
-
- @PIPELINES.register_module()
- class LoadImageFromFile(object):
-     """Load an image from file.
-
-     Required keys are "img_prefix" and "img_info" (a dict that must contain the
-     key "filename"). Added or updated keys are "filename", "img", "img_shape",
-     "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
-     "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
-
-     Args:
-         to_float32 (bool): Whether to convert the loaded image to a float32
-             numpy array. If set to False, the loaded image is an uint8 array.
-             Defaults to False.
-         color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
-             Defaults to 'color'.
-         file_client_args (dict): Arguments to instantiate a FileClient.
-             See :class:`mmcv.fileio.FileClient` for details.
-             Defaults to ``dict(backend='disk')``.
-     """
-
-     def __init__(self,
-                  to_float32=False,
-                  color_type='color',
-                  file_client_args=dict(backend='disk')):
-         self.to_float32 = to_float32
-         self.color_type = color_type
-         self.file_client_args = file_client_args.copy()
-         self.file_client = None
-
-     def __call__(self, results):
-         """Call functions to load image and get image meta information.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded image and meta information.
-         """
-
-         if self.file_client is None:
-             self.file_client = mmcv.FileClient(**self.file_client_args)
-
-         if results['img_prefix'] is not None:
-             filename = osp.join(results['img_prefix'],
-                                 results['img_info']['filename'])
-         else:
-             filename = results['img_info']['filename']
-
-         img_bytes = self.file_client.get(filename)
-         img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
-         if self.to_float32:
-             img = img.astype(np.float32)
-
-         results['filename'] = filename
-         results['ori_filename'] = results['img_info']['filename']
-         results['img'] = img
-         results['img_shape'] = img.shape
-         results['ori_shape'] = img.shape
-         results['img_fields'] = ['img']
-         return results
-
-     def __repr__(self):
-         repr_str = (f'{self.__class__.__name__}('
-                     f'to_float32={self.to_float32}, '
-                     f"color_type='{self.color_type}', "
-                     f'file_client_args={self.file_client_args})')
-         return repr_str
-
-
- @PIPELINES.register_module()
- class LoadImageFromWebcam(LoadImageFromFile):
-     """Load an image from webcam.
-
-     Similar to :obj:`LoadImageFromFile`, but the image read from webcam is in
-     ``results['img']``.
-     """
-
-     def __call__(self, results):
-         """Call functions to add image meta information.
-
-         Args:
-             results (dict): Result dict with Webcam read image in
-                 ``results['img']``.
-
-         Returns:
-             dict: The dict contains loaded image and meta information.
-         """
-
-         img = results['img']
-         if self.to_float32:
-             img = img.astype(np.float32)
-
-         results['filename'] = None
-         results['ori_filename'] = None
-         results['img'] = img
-         results['img_shape'] = img.shape
-         results['ori_shape'] = img.shape
-         results['img_fields'] = ['img']
-         return results
-
-
- @PIPELINES.register_module()
- class LoadMultiChannelImageFromFiles(object):
-     """Load multi-channel images from a list of separate channel files.
-
-     Required keys are "img_prefix" and "img_info" (a dict that must contain the
-     key "filename", which is expected to be a list of filenames).
-     Added or updated keys are "filename", "img", "img_shape",
-     "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
-     "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
-
-     Args:
-         to_float32 (bool): Whether to convert the loaded image to a float32
-             numpy array. If set to False, the loaded image is an uint8 array.
-             Defaults to False.
-         color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
-             Defaults to 'color'.
-         file_client_args (dict): Arguments to instantiate a FileClient.
-             See :class:`mmcv.fileio.FileClient` for details.
-             Defaults to ``dict(backend='disk')``.
-     """
-
-     def __init__(self,
-                  to_float32=False,
-                  color_type='unchanged',
-                  file_client_args=dict(backend='disk')):
-         self.to_float32 = to_float32
-         self.color_type = color_type
-         self.file_client_args = file_client_args.copy()
-         self.file_client = None
-
-     def __call__(self, results):
-         """Call functions to load multiple images and get images meta
-         information.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded images and meta information.
-         """
-
-         if self.file_client is None:
-             self.file_client = mmcv.FileClient(**self.file_client_args)
-
-         if results['img_prefix'] is not None:
-             filename = [
-                 osp.join(results['img_prefix'], fname)
-                 for fname in results['img_info']['filename']
-             ]
-         else:
-             filename = results['img_info']['filename']
-
-         img = []
-         for name in filename:
-             img_bytes = self.file_client.get(name)
-             img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
-         img = np.stack(img, axis=-1)
-         if self.to_float32:
-             img = img.astype(np.float32)
-
-         results['filename'] = filename
-         results['ori_filename'] = results['img_info']['filename']
-         results['img'] = img
-         results['img_shape'] = img.shape
-         results['ori_shape'] = img.shape
-         # Set initial values for default meta_keys
-         results['pad_shape'] = img.shape
-         results['scale_factor'] = 1.0
-         num_channels = 1 if len(img.shape) < 3 else img.shape[2]
-         results['img_norm_cfg'] = dict(
-             mean=np.zeros(num_channels, dtype=np.float32),
-             std=np.ones(num_channels, dtype=np.float32),
-             to_rgb=False)
-         return results
-
-     def __repr__(self):
-         repr_str = (f'{self.__class__.__name__}('
-                     f'to_float32={self.to_float32}, '
-                     f"color_type='{self.color_type}', "
-                     f'file_client_args={self.file_client_args})')
-         return repr_str
-
-
- @PIPELINES.register_module()
- class LoadAnnotations(object):
-     """Load multiple types of annotations.
-
-     Args:
-         with_bbox (bool): Whether to parse and load the bbox annotation.
-             Default: True.
-         with_label (bool): Whether to parse and load the label annotation.
-             Default: True.
-         with_mask (bool): Whether to parse and load the mask annotation.
-             Default: False.
-         with_seg (bool): Whether to parse and load the semantic segmentation
-             annotation. Default: False.
-         poly2mask (bool): Whether to convert the instance masks from polygons
-             to bitmaps. Default: True.
-         file_client_args (dict): Arguments to instantiate a FileClient.
-             See :class:`mmcv.fileio.FileClient` for details.
-             Defaults to ``dict(backend='disk')``.
-     """
-
-     def __init__(self,
-                  with_bbox=True,
-                  with_label=True,
-                  with_mask=False,
-                  with_seg=False,
-                  poly2mask=True,
-                  file_client_args=dict(backend='disk')):
-         self.with_bbox = with_bbox
-         self.with_label = with_label
-         self.with_mask = with_mask
-         self.with_seg = with_seg
-         self.poly2mask = poly2mask
-         self.file_client_args = file_client_args.copy()
-         self.file_client = None
-
-     def _load_bboxes(self, results):
-         """Private function to load bounding box annotations.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded bounding box annotations.
-         """
-
-         ann_info = results['ann_info']
-         results['gt_bboxes'] = ann_info['bboxes'].copy()
-
-         gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
-         if gt_bboxes_ignore is not None:
-             results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
-             results['bbox_fields'].append('gt_bboxes_ignore')
-         results['bbox_fields'].append('gt_bboxes')
-         return results
-
-     def _load_labels(self, results):
-         """Private function to load label annotations.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded label annotations.
-         """
-
-         results['gt_labels'] = results['ann_info']['labels'].copy()
-         return results
-
-     def _poly2mask(self, mask_ann, img_h, img_w):
-         """Private function to convert masks represented with polygon to
-         bitmaps.
-
-         Args:
-             mask_ann (list | dict): Polygon mask annotation input.
-             img_h (int): The height of output mask.
-             img_w (int): The width of output mask.
-
-         Returns:
-             numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).
-         """
-
-         if isinstance(mask_ann, list):
-             # polygon -- a single object might consist of multiple parts
-             # we merge all parts into one mask rle code
-             rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
-             rle = maskUtils.merge(rles)
-         elif isinstance(mask_ann['counts'], list):
-             # uncompressed RLE
-             rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
-         else:
-             # rle
-             rle = mask_ann
-         mask = maskUtils.decode(rle)
-         return mask
-
-     def process_polygons(self, polygons):
-         """Convert polygons to list of ndarray and filter invalid polygons.
-
-         Args:
-             polygons (list[list]): Polygons of one instance.
-
-         Returns:
-             list[numpy.ndarray]: Processed polygons.
-         """
-
-         polygons = [np.array(p) for p in polygons]
-         valid_polygons = []
-         for polygon in polygons:
-             if len(polygon) % 2 == 0 and len(polygon) >= 6:
-                 valid_polygons.append(polygon)
-         return valid_polygons
-
-     def _load_masks(self, results):
-         """Private function to load mask annotations.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded mask annotations.
-                 If ``self.poly2mask`` is set ``True``, `gt_masks` will contain
-                 :obj:`BitmapMasks`. Otherwise, :obj:`PolygonMasks` is used.
-         """
-
-         h, w = results['img_info']['height'], results['img_info']['width']
-         gt_masks = results['ann_info']['masks']
-         if self.poly2mask:
-             gt_masks = BitmapMasks(
-                 [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
-         else:
-             gt_masks = PolygonMasks(
-                 [self.process_polygons(polygons) for polygons in gt_masks], h,
-                 w)
-         results['gt_masks'] = gt_masks
-         results['mask_fields'].append('gt_masks')
-         return results
-
-     def _load_semantic_seg(self, results):
-         """Private function to load semantic segmentation annotations.
-
-         Args:
-             results (dict): Result dict from :obj:`dataset`.
-
-         Returns:
-             dict: The dict contains loaded semantic segmentation annotations.
-         """
-
-         if self.file_client is None:
-             self.file_client = mmcv.FileClient(**self.file_client_args)
-
-         filename = osp.join(results['seg_prefix'],
-                             results['ann_info']['seg_map'])
-         img_bytes = self.file_client.get(filename)
-         results['gt_semantic_seg'] = mmcv.imfrombytes(
-             img_bytes, flag='unchanged').squeeze()
-         results['seg_fields'].append('gt_semantic_seg')
-         return results
-
-     def __call__(self, results):
-         """Call function to load multiple types of annotations.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded bounding box, label, mask and
-                 semantic segmentation annotations.
-         """
-
-         if self.with_bbox:
-             results = self._load_bboxes(results)
-             if results is None:
-                 return None
-         if self.with_label:
-             results = self._load_labels(results)
-         if self.with_mask:
-             results = self._load_masks(results)
-         if self.with_seg:
-             results = self._load_semantic_seg(results)
-         return results
-
-     def __repr__(self):
-         repr_str = self.__class__.__name__
-         repr_str += f'(with_bbox={self.with_bbox}, '
-         repr_str += f'with_label={self.with_label}, '
-         repr_str += f'with_mask={self.with_mask}, '
-         repr_str += f'with_seg={self.with_seg}, '
-         repr_str += f'poly2mask={self.poly2mask}, '
-         repr_str += f'file_client_args={self.file_client_args})'
-         return repr_str
-
-
- @PIPELINES.register_module()
- class LoadProposals(object):
-     """Load proposal pipeline.
-
-     Required key is "proposals". Updated keys are "proposals", "bbox_fields".
-
-     Args:
-         num_max_proposals (int, optional): Maximum number of proposals to load.
-             If not specified, all proposals will be loaded.
-     """
-
-     def __init__(self, num_max_proposals=None):
-         self.num_max_proposals = num_max_proposals
-
-     def __call__(self, results):
-         """Call function to load proposals from file.
-
-         Args:
-             results (dict): Result dict from :obj:`mmdet.CustomDataset`.
-
-         Returns:
-             dict: The dict contains loaded proposal annotations.
-         """
-
-         proposals = results['proposals']
-         if proposals.shape[1] not in (4, 5):
-             raise AssertionError(
-                 'proposals should have shapes (n, 4) or (n, 5), '
-                 f'but found {proposals.shape}')
-         proposals = proposals[:, :4]
-
-         if self.num_max_proposals is not None:
-             proposals = proposals[:self.num_max_proposals]
-
-         if len(proposals) == 0:
-             proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
-         results['proposals'] = proposals
-         results['bbox_fields'].append('proposals')
-         return results
-
-     def __repr__(self):
-         return self.__class__.__name__ + \
-             f'(num_max_proposals={self.num_max_proposals})'
-
-
- @PIPELINES.register_module()
- class FilterAnnotations(object):
-     """Filter invalid annotations.
-
-     Args:
-         min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
-             boxes.
-     """
-
-     def __init__(self, min_gt_bbox_wh):
-         # TODO: add more filter options
-         self.min_gt_bbox_wh = min_gt_bbox_wh
-
-     def __call__(self, results):
-         assert 'gt_bboxes' in results
-         gt_bboxes = results['gt_bboxes']
-         w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
-         h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
-         keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
-         if not keep.any():
-             return None
-         else:
-             keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
-             for key in keys:
-                 if key in results:
-                     results[key] = results[key][keep]
-             return results
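
The `_poly2mask` helper above is a thin wrapper over pycocotools: polygons are encoded as RLE, multi-part objects are merged into one RLE, and the result is rasterized. A small standalone sketch of that path (toy polygon, not from any dataset):

import pycocotools.mask as maskUtils

h, w = 4, 4
polygon = [[0.0, 0.0, 3.0, 0.0, 3.0, 3.0, 0.0, 3.0]]  # one square as flattened x, y pairs
rles = maskUtils.frPyObjects(polygon, h, w)  # polygon -> run-length encoding
rle = maskUtils.merge(rles)                  # fuse multi-part objects into one RLE
mask = maskUtils.decode(rle)                 # rasterize to a uint8 array of shape (4, 4)
print(mask)
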
spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py DELETED
@@ -1,6 +0,0 @@
- _base_ = [
-     '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
- ]
- model = dict(
-     decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-chat.py DELETED
@@ -1,92 +0,0 @@
- import html
- import json
-
- import requests
-
- # For local requests, the API is hosted without ssl - http://
- HOST = 'localhost:5000'
- URI = f'http://{HOST}/api/v1/chat'
-
- # For reverse-proxied requests, the remote will likely host with ssl - https://
- # URI = 'https://your-uri-here.trycloudflare.com/api/v1/chat'
-
-
- def run(user_input, history):
-     request = {
-         'user_input': user_input,
-         'max_new_tokens': 250,
-         'auto_max_new_tokens': False,
-         'max_tokens_second': 0,
-         'history': history,
-         'mode': 'instruct',  # Valid options: 'chat', 'chat-instruct', 'instruct'
-         'character': 'Example',
-         'instruction_template': 'Vicuna-v1.1',  # Will get autodetected if unset
-         'your_name': 'You',
-         # 'name1': 'name of user',  # Optional
-         # 'name2': 'name of character',  # Optional
-         # 'context': 'character context',  # Optional
-         # 'greeting': 'greeting',  # Optional
-         # 'name1_instruct': 'You',  # Optional
-         # 'name2_instruct': 'Assistant',  # Optional
-         # 'context_instruct': 'context_instruct',  # Optional
-         # 'turn_template': 'turn_template',  # Optional
-         'regenerate': False,
-         '_continue': False,
-         'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
-
-         # Generation params. If 'preset' is set to something other than 'None', the values
-         # in presets/preset-name.yaml are used instead of the individual numbers.
-         'preset': 'None',
-         'do_sample': True,
-         'temperature': 0.7,
-         'top_p': 0.1,
-         'typical_p': 1,
-         'epsilon_cutoff': 0,  # In units of 1e-4
-         'eta_cutoff': 0,  # In units of 1e-4
-         'tfs': 1,
-         'top_a': 0,
-         'repetition_penalty': 1.18,
-         'repetition_penalty_range': 0,
-         'top_k': 40,
-         'min_length': 0,
-         'no_repeat_ngram_size': 0,
-         'num_beams': 1,
-         'penalty_alpha': 0,
-         'length_penalty': 1,
-         'early_stopping': False,
-         'mirostat_mode': 0,
-         'mirostat_tau': 5,
-         'mirostat_eta': 0.1,
-         'grammar_string': '',
-         'guidance_scale': 1,
-         'negative_prompt': '',
-
-         'seed': -1,
-         'add_bos_token': True,
-         'truncation_length': 2048,
-         'ban_eos_token': False,
-         'custom_token_bans': '',
-         'skip_special_tokens': True,
-         'stopping_strings': []
-     }
-
-     response = requests.post(URI, json=request)
-
-     if response.status_code == 200:
-         result = response.json()['results'][0]['history']
-         print(json.dumps(result, indent=4))
-         print()
-         print(html.unescape(result['visible'][-1][1]))
-
-
- if __name__ == '__main__':
-     user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard."
-
-     # Basic example
-     history = {'internal': [], 'visible': []}
-
-     # "Continue" example. Make sure to set '_continue' to True above
-     # arr = [user_input, 'Surely, here is']
-     # history = {'internal': [arr], 'visible': [arr]}
-
-     run(user_input, history)
spaces/AntNikYab/NaturalLanguageProcessing/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: NaturalLanguageProcessing
- emoji: 🏆
- colorFrom: red
- colorTo: red
- sdk: streamlit
- sdk_version: 1.26.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Arijit-hazra/my-image-captioner/load_model.py DELETED
@@ -1,363 +0,0 @@
- ### IMPORTS
- import tensorflow as tf
- import numpy as np
-
- import einops
- import tqdm
-
- import collections
- import re
- import string
- import pickle
-
- print("import complete")
- #=========================================================================================================================
- ### UTILITY FUNCTIONS
- #=========================================================================================================================
-
- IMAGE_SHAPE = (224, 224, 3)
-
- @tf.keras.utils.register_keras_serializable()
- def custom_standardization(s):
-     s = tf.strings.lower(s)
-     s = tf.strings.regex_replace(s, f'[{re.escape(string.punctuation)}]', '')
-     s = tf.strings.join(['[START]', s, '[END]'], separator=' ')
-     return s
-
- def load_image(image_path):
-     img = tf.io.read_file(image_path)
-     img = tf.io.decode_jpeg(img, channels=3)
-     img = tf.image.resize(img, IMAGE_SHAPE[:-1])
-     return img
-
- def load_image_obj(img):
-     img = tf.image.resize(img, IMAGE_SHAPE[:-1])
-     return img
-
- def masked_loss(labels, preds):
-     loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels, preds)
-
-     mask = (labels != 0) & (loss < 1e8)
-     mask = tf.cast(mask, loss.dtype)
-
-     loss = loss * mask
-     loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)
-     return loss
-
- def masked_acc(labels, preds):
-     mask = tf.cast(labels != 0, tf.float32)
-     preds = tf.argmax(preds, axis=-1)
-     labels = tf.cast(labels, tf.int64)
-     match = tf.cast(preds == labels, mask.dtype)
-     acc = tf.reduce_sum(match * mask) / tf.reduce_sum(mask)
-     return acc
-
- print("utility complete")
- #=========================================================================================================================
- ### MODEL CLASS
- #=========================================================================================================================
-
- mobilenet = tf.keras.applications.MobileNetV3Small(
-     input_shape=IMAGE_SHAPE,
-     include_top=False,
-     include_preprocessing=True)
- mobilenet.trainable = False
-
- class SeqEmbedding(tf.keras.layers.Layer):
-     def __init__(self, vocab_size, max_length, depth):
-         super().__init__()
-         self.pos_embedding = tf.keras.layers.Embedding(input_dim=max_length, output_dim=depth)
-
-         self.token_embedding = tf.keras.layers.Embedding(
-             input_dim=vocab_size,
-             output_dim=depth,
-             mask_zero=True)
-
-         self.add = tf.keras.layers.Add()
-
-     def call(self, seq):
-         seq = self.token_embedding(seq)  # (batch, seq, depth)
-
-         x = tf.range(tf.shape(seq)[1])  # (seq)
-         x = x[tf.newaxis, :]  # (1, seq)
-         x = self.pos_embedding(x)  # (1, seq, depth)
-
-         return self.add([seq, x])
-
- class CausalSelfAttention(tf.keras.layers.Layer):
-     def __init__(self, **kwargs):
-         super().__init__()
-         self.mha = tf.keras.layers.MultiHeadAttention(**kwargs)
-         # Use Add instead of + so the keras mask propagates through.
-         self.add = tf.keras.layers.Add()
-         self.layernorm = tf.keras.layers.LayerNormalization()
-
-     def call(self, x):
-         attn = self.mha(query=x, value=x,
-                         use_causal_mask=True)
-         x = self.add([x, attn])
-         return self.layernorm(x)
-
- class CrossAttention(tf.keras.layers.Layer):
-     def __init__(self, **kwargs):
-         super().__init__()
-         self.mha = tf.keras.layers.MultiHeadAttention(**kwargs)
-         self.add = tf.keras.layers.Add()
-         self.layernorm = tf.keras.layers.LayerNormalization()
-
-     def call(self, x, y, **kwargs):
-         attn, attention_scores = self.mha(
-             query=x, value=y,
-             return_attention_scores=True)
-
-         self.last_attention_scores = attention_scores
-
-         x = self.add([x, attn])
-         return self.layernorm(x)
-
- class FeedForward(tf.keras.layers.Layer):
-     def __init__(self, units, dropout_rate=0.1):
-         super().__init__()
-         self.seq = tf.keras.Sequential([
-             tf.keras.layers.Dense(units=2 * units, activation='relu'),
-             tf.keras.layers.Dense(units=units),
-             tf.keras.layers.Dropout(rate=dropout_rate),
-         ])
-
-         self.layernorm = tf.keras.layers.LayerNormalization()
-
-     def call(self, x):
-         x = x + self.seq(x)
-         return self.layernorm(x)
-
- class DecoderLayer(tf.keras.layers.Layer):
-     def __init__(self, units, num_heads=1, dropout_rate=0.1):
-         super().__init__()
-
-         self.self_attention = CausalSelfAttention(num_heads=num_heads,
-                                                   key_dim=units,
-                                                   dropout=dropout_rate)
-         self.cross_attention = CrossAttention(num_heads=num_heads,
-                                               key_dim=units,
-                                               dropout=dropout_rate)
-         self.ff = FeedForward(units=units, dropout_rate=dropout_rate)
-
-     def call(self, inputs, training=False):
-         in_seq, out_seq = inputs
-
-         # Text input
-         out_seq = self.self_attention(out_seq)
-
-         out_seq = self.cross_attention(out_seq, in_seq)
-
-         self.last_attention_scores = self.cross_attention.last_attention_scores
-
-         out_seq = self.ff(out_seq)
-
-         return out_seq
-
- class TokenOutput(tf.keras.layers.Layer):
-     def __init__(self, tokenizer, banned_tokens=('', '[UNK]', '[START]'), bias=None, **kwargs):
-         super().__init__()
-
-         self.dense = tf.keras.layers.Dense(
-             units=tokenizer.vocabulary_size(), **kwargs)
-         self.tokenizer = tokenizer
-         self.banned_tokens = banned_tokens
-
-         self.bias = bias
-
-     def adapt(self, ds):
-         counts = collections.Counter()
-         vocab_dict = {name: id
-                       for id, name in enumerate(self.tokenizer.get_vocabulary())}
-
-         for tokens in tqdm.tqdm(ds):
-             counts.update(tokens.numpy().flatten())
-
-         counts_arr = np.zeros(shape=(self.tokenizer.vocabulary_size(),))
-         counts_arr[np.array(list(counts.keys()), dtype=np.int32)] = list(counts.values())
-
-         counts_arr = counts_arr[:]
-         for token in self.banned_tokens:
-             counts_arr[vocab_dict[token]] = 0
-
-         total = counts_arr.sum()
-         p = counts_arr / total
-         p[counts_arr == 0] = 1.0
-         log_p = np.log(p)  # log(1) == 0
-
-         entropy = -(log_p * p).sum()
-
-         print()
-         print(f"Uniform entropy: {np.log(self.tokenizer.vocabulary_size()):0.2f}")
-         print(f"Marginal entropy: {entropy:0.2f}")
-
-         self.bias = log_p
-         self.bias[counts_arr == 0] = -1e9
-
-     def call(self, x):
-         x = self.dense(x)
-         return x + self.bias
-
-     def get_config(self):
-         config = super(TokenOutput, self).get_config()
-         config.update({
-             "tokenizer": self.tokenizer,
-             "banned_tokens": self.banned_tokens,
-             "bias": self.bias,
-             "dense": self.dense
-         })
-
-         return config
-
- class Captioner(tf.keras.Model):
-     @classmethod
-     def add_method(cls, fun):
-         setattr(cls, fun.__name__, fun)
-         return fun
-
-     def __init__(self, tokenizer, feature_extractor, output_layer, num_layers=1,
-                  units=256, max_length=50, num_heads=1, dropout_rate=0.1):
-         super().__init__()
-         self.feature_extractor = feature_extractor
-         self.tokenizer = tokenizer
-         self.word_to_index = tf.keras.layers.StringLookup(
-             mask_token="",
-             vocabulary=tokenizer.get_vocabulary())
-         self.index_to_word = tf.keras.layers.StringLookup(
-             mask_token="",
-             vocabulary=tokenizer.get_vocabulary(),
-             invert=True)
-
-         self.seq_embedding = SeqEmbedding(
-             vocab_size=tokenizer.vocabulary_size(),
-             depth=units,
-             max_length=max_length)
-
-         self.decoder_layers = [
-             DecoderLayer(units, num_heads=num_heads, dropout_rate=dropout_rate)
-             for n in range(num_layers)]
-
-         self.output_layer = output_layer
-
-     def call(self, inputs):
-         image, txt = inputs
-
-         if image.shape[-1] == 3:
-             # Apply the feature-extractor, if you get an RGB image.
-             image = self.feature_extractor(image)
-
-         # Flatten the feature map
-         image = einops.rearrange(image, 'b h w c -> b (h w) c')
-
-         if txt.dtype == tf.string:
-             # Apply the tokenizer if you get string inputs.
-             txt = self.tokenizer(txt)
-
-         txt = self.seq_embedding(txt)
-
-         # Look at the image
-         for dec_layer in self.decoder_layers:
-             txt = dec_layer(inputs=(image, txt))
-
-         txt = self.output_layer(txt)
-
-         return txt
-
-     def simple_gen(self, image, temperature=1):
-         initial = self.word_to_index([['[START]']])  # (batch, sequence)
-         img_features = self.feature_extractor(image[tf.newaxis, ...])
-
-         tokens = initial  # (batch, sequence)
-         for n in range(50):
-             preds = self((img_features, tokens)).numpy()  # (batch, sequence, vocab)
-             preds = preds[:, -1, :]  # (batch, vocab)
-             if temperature == 0:
-                 next = tf.argmax(preds, axis=-1)[:, tf.newaxis]  # (batch, 1)
-             else:
-                 next = tf.random.categorical(preds / temperature, num_samples=1)  # (batch, 1)
-             tokens = tf.concat([tokens, next], axis=1)  # (batch, sequence)
-
-             if next[0] == self.word_to_index('[END]'):
-                 break
-
-         words = self.index_to_word(tokens[0, 1:-1])
-         result = tf.strings.reduce_join(words, axis=-1, separator=' ')
-         return result.numpy().decode()
-
-     # def get_config(self):
-     #     config = super().get_config()
-     #     config.update({"feature_extractor": self.feature_extractor,
-     #                    "tokenizer": self.tokenizer,
-     #                    "word_to_index": self.word_to_index,
-     #                    "index_to_word": self.index_to_word,
-     #                    "outputlayer": self.output_layer,
-     #                    "seq_embedding": self.seq_embedding,
-     #                    "decoder_layers": self.decoder_layers
-     #                    })
-     #     return config
-
-     # def build_from_config(self, config):
-     #     return super().build_from_config(config)
-
- # model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
- #               loss=masked_loss,
- #               metrics=[masked_acc])
-
- print("model complete")
- #=========================================================================================================================
- ### LOAD FUNCTION
- #=========================================================================================================================
-
- def build():
-     filename = "model/tokenizer.pkl"
-     token_meta = pickle.load(open(filename, 'rb'))
-     tokenizer = tf.keras.layers.TextVectorization.from_config(token_meta["config"])
-     tokenizer.set_weights(token_meta['weights'])
-     word_to_index = tf.keras.layers.StringLookup(
-         mask_token="",
-         vocabulary=tokenizer.get_vocabulary())
-
-     index_to_word = tf.keras.layers.StringLookup(
-         mask_token="",
-         vocabulary=tokenizer.get_vocabulary(),
-         invert=True)
-
-     output_layer = TokenOutput(tokenizer, banned_tokens=('', '[UNK]', '[START]'))
-     filename = "model/output_layer.pkl"
-     bias = pickle.load(open(filename, 'rb'))
-     output_layer.bias = bias
-
-     load_model = Captioner(tokenizer, feature_extractor=mobilenet, output_layer=output_layer,
-                            units=256, dropout_rate=0.5, num_layers=2, num_heads=2)
-     load_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
-                        loss=masked_loss,
-                        metrics=[masked_acc])
-
-     # image_url = 'https://tensorflow.org/images/surf.jpg'
-     # image_path = tf.keras.utils.get_file('surf.jpg', origin=image_url)
-     # image = load_image(image_path)
-     image = pickle.load(open("test_run_img", "rb"))
-     print(load_model.simple_gen(image))
-
-     path = "model/captioner_weights"
-     load_model.load_weights(path)
-     return load_model
-
- # loaded_model = build()
- print("loaded")
- #=========================================================================================================================
- ### TEST RUN
- #=========================================================================================================================
-
- image_url = 'https://tensorflow.org/images/surf.jpg'
- image_path = tf.keras.utils.get_file('surf.jpg', origin=image_url)
- image = load_image(image_path)
-
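
The `CausalSelfAttention` layer above depends entirely on `use_causal_mask=True`: position i may only attend to positions <= i. A short standalone check of that behavior (plain Keras, shapes chosen for illustration):

import tensorflow as tf

mha = tf.keras.layers.MultiHeadAttention(num_heads=1, key_dim=8)
x = tf.random.normal((1, 5, 8))  # (batch, seq, depth)
_, scores = mha(query=x, value=x, use_causal_mask=True,
                return_attention_scores=True)
# scores has shape (batch, heads, query_pos, key_pos); entries above the
# diagonal are zeroed by the causal mask, so each row sums to 1 over the past.
print(tf.round(scores[0, 0] * 100) / 100)
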
spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py DELETED
@@ -1,90 +0,0 @@
- from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
- import pyworld
- import numpy as np
-
-
- class DioF0Predictor(F0Predictor):
-     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
-         self.hop_length = hop_length
-         self.f0_min = f0_min
-         self.f0_max = f0_max
-         self.sampling_rate = sampling_rate
-
-     def interpolate_f0(self, f0):
-         """
-         Interpolate F0 across unvoiced frames.
-         """
-
-         data = np.reshape(f0, (f0.size, 1))
-
-         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
-         vuv_vector[data > 0.0] = 1.0
-         vuv_vector[data <= 0.0] = 0.0
-
-         ip_data = data
-
-         frame_number = data.size
-         last_value = 0.0
-         for i in range(frame_number):
-             if data[i] <= 0.0:
-                 j = i + 1
-                 for j in range(i + 1, frame_number):
-                     if data[j] > 0.0:
-                         break
-                 if j < frame_number - 1:
-                     if last_value > 0.0:
-                         step = (data[j] - data[i - 1]) / float(j - i)
-                         for k in range(i, j):
-                             ip_data[k] = data[i - 1] + step * (k - i + 1)
-                     else:
-                         for k in range(i, j):
-                             ip_data[k] = data[j]
-                 else:
-                     for k in range(i, frame_number):
-                         ip_data[k] = last_value
-             else:
-                 ip_data[i] = data[i]  # this may be an unnecessary copy
-                 last_value = data[i]
-
-         return ip_data[:, 0], vuv_vector[:, 0]
-
-     def resize_f0(self, x, target_len):
-         source = np.array(x)
-         source[source < 0.001] = np.nan
-         target = np.interp(
-             np.arange(0, len(source) * target_len, len(source)) / target_len,
-             np.arange(0, len(source)),
-             source,
-         )
-         res = np.nan_to_num(target)
-         return res
-
-     def compute_f0(self, wav, p_len=None):
-         if p_len is None:
-             p_len = wav.shape[0] // self.hop_length
-         f0, t = pyworld.dio(
-             wav.astype(np.double),
-             fs=self.sampling_rate,
-             f0_floor=self.f0_min,
-             f0_ceil=self.f0_max,
-             frame_period=1000 * self.hop_length / self.sampling_rate,
-         )
-         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-         for index, pitch in enumerate(f0):
-             f0[index] = round(pitch, 1)
-         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
-     def compute_f0_uv(self, wav, p_len=None):
-         if p_len is None:
-             p_len = wav.shape[0] // self.hop_length
-         f0, t = pyworld.dio(
-             wav.astype(np.double),
-             fs=self.sampling_rate,
-             f0_floor=self.f0_min,
-             f0_ceil=self.f0_max,
-             frame_period=1000 * self.hop_length / self.sampling_rate,
-         )
-         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-         for index, pitch in enumerate(f0):
-             f0[index] = round(pitch, 1)
-         return self.interpolate_f0(self.resize_f0(f0, p_len))
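
`resize_f0` above resamples an F0 track to a target frame count with `np.interp`, treating near-zero (unvoiced) frames as NaN gaps that `np.nan_to_num` later zeroes. A tiny standalone sketch of the same trick (toy values):

import numpy as np

f0 = np.array([100.0, 0.0, 110.0, 120.0])  # 4 frames, one unvoiced
target_len = 8
source = f0.copy()
source[source < 0.001] = np.nan  # mark unvoiced frames as gaps
resized = np.interp(
    np.arange(0, len(source) * target_len, len(source)) / target_len,
    np.arange(0, len(source)),
    source,
)
print(np.nan_to_num(resized))  # 8 frames; NaNs from the gap come back as zeros
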
spaces/ArpitM/chat-llm-streaming/app.py DELETED
@@ -1,321 +0,0 @@
1
- import os
2
-
3
- import gradio as gr
4
-
5
- from text_generation import Client, InferenceAPIClient
6
-
7
- openchat_preprompt = (
8
- "\n<human>: Hi!\n<bot>: My name is Bot, model version is 0.15, part of an open-source kit for "
9
- "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source "
10
- "community. I am not human, not evil and not alive, and thus have no thoughts and feelings, "
11
- "but I am programmed to be helpful, polite, honest, and friendly.\n"
12
- )
13
-
14
-
15
- def get_client(model: str):
16
- if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
17
- return Client(os.getenv("OPENCHAT_API_URL"))
18
- return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None))
19
-
20
-
21
- def get_usernames(model: str):
22
- """
23
- Returns:
24
- (str, str, str, str): pre-prompt, username, bot name, separator
25
- """
26
- if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
27
- return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>"
28
- if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
29
- return openchat_preprompt, "<human>: ", "<bot>: ", "\n"
30
- return "", "User: ", "Assistant: ", "\n"
31
-
32
-
33
- def predict(
34
- model: str,
35
- inputs: str,
36
- typical_p: float,
37
- top_p: float,
38
- temperature: float,
39
- top_k: int,
40
- repetition_penalty: float,
41
- watermark: bool,
42
- chatbot,
43
- history,
44
- ):
45
- client = get_client(model)
46
- preprompt, user_name, assistant_name, sep = get_usernames(model)
47
-
48
- history.append(inputs)
49
-
50
- past = []
51
- for data in chatbot:
52
- user_data, model_data = data
53
-
54
- if not user_data.startswith(user_name):
55
- user_data = user_name + user_data
56
- if not model_data.startswith(sep + assistant_name):
57
- model_data = sep + assistant_name + model_data
58
-
59
- past.append(user_data + model_data.rstrip() + sep)
60
-
61
- if not inputs.startswith(user_name):
62
- inputs = user_name + inputs
63
-
64
- total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()
65
-
66
- partial_words = ""
67
-
68
- if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
69
- iterator = client.generate_stream(
70
- total_inputs,
71
- typical_p=typical_p,
72
- truncate=1000,
73
- watermark=watermark,
74
- max_new_tokens=500,
75
- )
76
- else:
77
- iterator = client.generate_stream(
78
- total_inputs,
79
- top_p=top_p if top_p < 1.0 else None,
80
- top_k=top_k,
81
- truncate=1000,
82
- repetition_penalty=repetition_penalty,
83
- watermark=watermark,
84
- temperature=temperature,
85
- max_new_tokens=500,
86
- stop_sequences=[user_name.rstrip(), assistant_name.rstrip()],
87
- )
88
-
89
- for i, response in enumerate(iterator):
90
- if response.token.special:
91
- continue
92
-
93
- partial_words = partial_words + response.token.text
94
- if partial_words.endswith(user_name.rstrip()):
95
- partial_words = partial_words.rstrip(user_name.rstrip())
96
- if partial_words.endswith(assistant_name.rstrip()):
97
- partial_words = partial_words.rstrip(assistant_name.rstrip())
98
-
99
- if i == 0:
100
- history.append(" " + partial_words)
101
- elif response.token.text not in user_name:
102
- history[-1] = partial_words
103
-
104
- chat = [
105
- (history[i].strip(), history[i + 1].strip())
106
- for i in range(0, len(history) - 1, 2)
107
- ]
108
- yield chat, history
109
-
110
-
111
- def reset_textbox():
112
- return gr.update(value="")
113
-
114
-
115
- def radio_on_change(
116
- value: str,
117
- disclaimer,
118
- typical_p,
119
- top_p,
120
- top_k,
121
- temperature,
122
- repetition_penalty,
123
- watermark,
124
- ):
125
- if value in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
126
- typical_p = typical_p.update(value=0.2, visible=True)
127
- top_p = top_p.update(visible=False)
128
- top_k = top_k.update(visible=False)
129
- temperature = temperature.update(visible=False)
130
- disclaimer = disclaimer.update(visible=False)
131
- repetition_penalty = repetition_penalty.update(visible=False)
132
- watermark = watermark.update(False)
133
- elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B":
134
- typical_p = typical_p.update(visible=False)
135
- top_p = top_p.update(value=0.25, visible=True)
136
- top_k = top_k.update(value=50, visible=True)
137
- temperature = temperature.update(value=0.6, visible=True)
138
- repetition_penalty = repetition_penalty.update(value=1.01, visible=True)
139
- watermark = watermark.update(False)
140
- disclaimer = disclaimer.update(visible=True)
141
- else:
142
- typical_p = typical_p.update(visible=False)
143
- top_p = top_p.update(value=0.95, visible=True)
144
- top_k = top_k.update(value=4, visible=True)
145
- temperature = temperature.update(value=0.5, visible=True)
146
- repetition_penalty = repetition_penalty.update(value=1.03, visible=True)
147
- watermark = watermark.update(True)
148
- disclaimer = disclaimer.update(visible=False)
149
- return (
150
- disclaimer,
151
- typical_p,
152
- top_p,
153
- top_k,
154
- temperature,
155
- repetition_penalty,
156
- watermark,
157
- )
158
-
159
-
160
- title = """<h1 align="center">Large Language Model Chat API</h1>"""
161
- description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
162
-
163
- ```
164
- User: <utterance>
165
- Assistant: <utterance>
166
- User: <utterance>
- Assistant: <utterance>
- ...
- ```
-
- In this app, you can explore the outputs of multiple LLMs when prompted in this way.
- """
-
- text_generation_inference = """
- <div align="center">Powered by: <a href=https://github.com/huggingface/text-generation-inference>Text Generation Inference</a></div>
- """
-
- openchat_disclaimer = """
- <div align="center">Check out the official <a href=https://huggingface.co/spaces/togethercomputer/OpenChatKit>OpenChatKit feedback app</a> for the full experience.</div>
- """
-
- with gr.Blocks(
-     css="""#col_container {margin-left: auto; margin-right: auto;}
-            #chatbot {height: 520px; overflow: auto;}"""
- ) as demo:
-     gr.HTML(title)
-     gr.Markdown(text_generation_inference, visible=True)
-     with gr.Column(elem_id="col_container"):
-         model = gr.Radio(
-             value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
-             choices=[
-                 "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
-                 "OpenAssistant/oasst-sft-1-pythia-12b",
-                 # "togethercomputer/GPT-NeoXT-Chat-Base-20B",
-                 "google/flan-t5-xxl",
-                 "google/flan-ul2",
-                 "bigscience/bloom",
-                 "bigscience/bloomz",
-                 "EleutherAI/gpt-neox-20b",
-             ],
-             label="Model",
-             interactive=True,
-         )
-
-         chatbot = gr.Chatbot(elem_id="chatbot")
-         inputs = gr.Textbox(
-             placeholder="Hi there!", label="Type an input and press Enter"
-         )
-         disclaimer = gr.Markdown(openchat_disclaimer, visible=False)
-         state = gr.State([])
-         b1 = gr.Button()
-
-         with gr.Accordion("Parameters", open=False):
-             typical_p = gr.Slider(
-                 minimum=0.0,
-                 maximum=1.0,
-                 value=0.2,
-                 step=0.05,
-                 interactive=True,
-                 label="Typical P mass",
-             )
-             top_p = gr.Slider(
-                 minimum=0.0,
-                 maximum=1.0,
-                 value=0.25,
-                 step=0.05,
-                 interactive=True,
-                 label="Top-p (nucleus sampling)",
-                 visible=False,
-             )
-             temperature = gr.Slider(
-                 minimum=0.0,
-                 maximum=5.0,
-                 value=0.6,
-                 step=0.1,
-                 interactive=True,
-                 label="Temperature",
-                 visible=False,
-             )
-             top_k = gr.Slider(
-                 minimum=1,
-                 maximum=50,
-                 value=50,
-                 step=1,
-                 interactive=True,
-                 label="Top-k",
-                 visible=False,
-             )
-             repetition_penalty = gr.Slider(
-                 minimum=0.1,
-                 maximum=3.0,
-                 value=1.03,
-                 step=0.01,
-                 interactive=True,
-                 label="Repetition Penalty",
-                 visible=False,
-             )
-             watermark = gr.Checkbox(value=False, label="Text watermarking")
-
-     model.change(
-         lambda value: radio_on_change(
-             value,
-             disclaimer,
-             typical_p,
-             top_p,
-             top_k,
-             temperature,
-             repetition_penalty,
-             watermark,
-         ),
-         inputs=model,
-         outputs=[
-             disclaimer,
-             typical_p,
-             top_p,
-             top_k,
-             temperature,
-             repetition_penalty,
-             watermark,
-         ],
-     )
-
-     inputs.submit(
-         predict,
-         [
-             model,
-             inputs,
-             typical_p,
-             top_p,
-             temperature,
-             top_k,
-             repetition_penalty,
-             watermark,
-             chatbot,
-             state,
-         ],
-         [chatbot, state],
-         api_name="chat_text",
-     )
-     b1.click(
-         predict,
-         [
-             model,
-             inputs,
-             typical_p,
-             top_p,
-             temperature,
-             top_k,
-             repetition_penalty,
-             watermark,
-             chatbot,
-             state,
-         ],
-         [chatbot, state],
-         api_name="chat_button",
-     )
-     b1.click(reset_textbox, [], [inputs], api_name="button")
-     inputs.submit(reset_textbox, [], [inputs], api_name="text")
-
-     gr.Markdown(description)
- demo.queue(concurrency_count=16).launch(debug=True)
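Because the two `predict` bindings above are registered with `api_name`, the deleted app also exposed named API endpoints. A minimal sketch of driving them with `gradio_client` (the URL and parameter values below are placeholders, not values from the original Space):

```python
# Hedged sketch: calling the "chat_text" endpoint defined above.
# Argument order mirrors the inputs list passed to inputs.submit().
from gradio_client import Client

client = Client("http://localhost:7860")  # placeholder URL for a running copy of the app
history, state = client.predict(
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",  # model
    "Hi there!",                                       # inputs
    0.2,    # typical_p
    0.25,   # top_p
    0.6,    # temperature
    50,     # top_k
    1.03,   # repetition_penalty
    False,  # watermark
    [],     # chatbot (prior history)
    [],     # state
    api_name="/chat_text",
)
```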
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/uninstall.py DELETED
@@ -1,113 +0,0 @@
- import logging
- from optparse import Values
- from typing import List
-
- from pip._vendor.packaging.utils import canonicalize_name
-
- from pip._internal.cli import cmdoptions
- from pip._internal.cli.base_command import Command
- from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root
- from pip._internal.cli.status_codes import SUCCESS
- from pip._internal.exceptions import InstallationError
- from pip._internal.req import parse_requirements
- from pip._internal.req.constructors import (
-     install_req_from_line,
-     install_req_from_parsed_requirement,
- )
- from pip._internal.utils.misc import (
-     check_externally_managed,
-     protect_pip_from_modification_on_windows,
- )
-
- logger = logging.getLogger(__name__)
-
-
- class UninstallCommand(Command, SessionCommandMixin):
-     """
-     Uninstall packages.
-
-     pip is able to uninstall most installed packages. Known exceptions are:
-
-     - Pure distutils packages installed with ``python setup.py install``, which
-       leave behind no metadata to determine what files were installed.
-     - Script wrappers installed by ``python setup.py develop``.
-     """
-
-     usage = """
-       %prog [options] <package> ...
-       %prog [options] -r <requirements file> ..."""
-
-     def add_options(self) -> None:
-         self.cmd_opts.add_option(
-             "-r",
-             "--requirement",
-             dest="requirements",
-             action="append",
-             default=[],
-             metavar="file",
-             help=(
-                 "Uninstall all the packages listed in the given requirements "
-                 "file. This option can be used multiple times."
-             ),
-         )
-         self.cmd_opts.add_option(
-             "-y",
-             "--yes",
-             dest="yes",
-             action="store_true",
-             help="Don't ask for confirmation of uninstall deletions.",
-         )
-         self.cmd_opts.add_option(cmdoptions.root_user_action())
-         self.cmd_opts.add_option(cmdoptions.override_externally_managed())
-         self.parser.insert_option_group(0, self.cmd_opts)
-
-     def run(self, options: Values, args: List[str]) -> int:
-         session = self.get_default_session(options)
-
-         reqs_to_uninstall = {}
-         for name in args:
-             req = install_req_from_line(
-                 name,
-                 isolated=options.isolated_mode,
-             )
-             if req.name:
-                 reqs_to_uninstall[canonicalize_name(req.name)] = req
-             else:
-                 logger.warning(
-                     "Invalid requirement: %r ignored -"
-                     " the uninstall command expects named"
-                     " requirements.",
-                     name,
-                 )
-         for filename in options.requirements:
-             for parsed_req in parse_requirements(
-                 filename, options=options, session=session
-             ):
-                 req = install_req_from_parsed_requirement(
-                     parsed_req, isolated=options.isolated_mode
-                 )
-                 if req.name:
-                     reqs_to_uninstall[canonicalize_name(req.name)] = req
-         if not reqs_to_uninstall:
-             raise InstallationError(
-                 f"You must give at least one requirement to {self.name} (see "
-                 f'"pip help {self.name}")'
-             )
-
-         if not options.override_externally_managed:
-             check_externally_managed()
-
-         protect_pip_from_modification_on_windows(
-             modifying_pip="pip" in reqs_to_uninstall
-         )
-
-         for req in reqs_to_uninstall.values():
-             uninstall_pathset = req.uninstall(
-                 auto_confirm=options.yes,
-                 verbose=self.verbosity > 0,
-             )
-             if uninstall_pathset:
-                 uninstall_pathset.commit()
-         if options.root_user_action == "warn":
-             warn_if_run_as_root()
-         return SUCCESS
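For context, this vendored module is what backs `pip uninstall`. A hedged sketch of exercising it directly; in practice pip constructs the command through its CLI registry, and the constructor arguments below are an assumption based on pip's `Command` base class rather than anything shown in the file:

```python
# Hedged sketch: driving UninstallCommand outside pip's normal CLI dispatch.
# Roughly equivalent to running: pip uninstall -y requests
from pip._internal.commands.uninstall import UninstallCommand

cmd = UninstallCommand(name="uninstall", summary="Uninstall packages.")  # assumed wiring
exit_code = cmd.main(["-y", "requests"])  # run() returns SUCCESS (0) when done
```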
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py DELETED
@@ -1,88 +0,0 @@
- from detectron2.config import LazyCall as L
- from detectron2.layers import ShapeSpec
- from detectron2.modeling.meta_arch import GeneralizedRCNN
- from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
- from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.modeling.matcher import Matcher
- from detectron2.modeling.poolers import ROIPooler
- from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
- from detectron2.modeling.roi_heads import (
-     FastRCNNOutputLayers,
-     MaskRCNNConvUpsampleHead,
-     Res5ROIHeads,
- )
-
- model = L(GeneralizedRCNN)(
-     backbone=L(ResNet)(
-         stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
-         stages=L(ResNet.make_default_stages)(
-             depth=50,
-             stride_in_1x1=True,
-             norm="FrozenBN",
-         ),
-         out_features=["res4"],
-     ),
-     proposal_generator=L(RPN)(
-         in_features=["res4"],
-         head=L(StandardRPNHead)(in_channels=1024, num_anchors=15),
-         anchor_generator=L(DefaultAnchorGenerator)(
-             sizes=[[32, 64, 128, 256, 512]],
-             aspect_ratios=[0.5, 1.0, 2.0],
-             strides=[16],
-             offset=0.0,
-         ),
-         anchor_matcher=L(Matcher)(
-             thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
-         ),
-         box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
-         batch_size_per_image=256,
-         positive_fraction=0.5,
-         pre_nms_topk=(12000, 6000),
-         post_nms_topk=(2000, 1000),
-         nms_thresh=0.7,
-     ),
-     roi_heads=L(Res5ROIHeads)(
-         num_classes=80,
-         batch_size_per_image=512,
-         positive_fraction=0.25,
-         proposal_matcher=L(Matcher)(
-             thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
-         ),
-         in_features=["res4"],
-         pooler=L(ROIPooler)(
-             output_size=14,
-             scales=(1.0 / 16,),
-             sampling_ratio=0,
-             pooler_type="ROIAlignV2",
-         ),
-         res5=L(ResNet.make_stage)(
-             block_class=BottleneckBlock,
-             num_blocks=3,
-             stride_per_block=[2, 1, 1],
-             in_channels=1024,
-             bottleneck_channels=512,
-             out_channels=2048,
-             norm="FrozenBN",
-             stride_in_1x1=True,
-         ),
-         box_predictor=L(FastRCNNOutputLayers)(
-             input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1),
-             test_score_thresh=0.05,
-             box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
-             num_classes="${..num_classes}",
-         ),
-         mask_head=L(MaskRCNNConvUpsampleHead)(
-             input_shape=L(ShapeSpec)(
-                 channels="${...res5.out_channels}",
-                 width="${...pooler.output_size}",
-                 height="${...pooler.output_size}",
-             ),
-             num_classes="${..num_classes}",
-             conv_dims=[256],
-         ),
-     ),
-     pixel_mean=[103.530, 116.280, 123.675],
-     pixel_std=[1.0, 1.0, 1.0],
-     input_format="BGR",
- )
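The `L(...)` wrappers here are detectron2 `LazyCall` objects, so nothing above is actually constructed at import time. A minimal sketch of materializing the config, assuming detectron2 is installed (the load path is illustrative):

```python
# Hedged sketch: turning the lazy config above into a concrete model.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/common/models/mask_rcnn_c4.py")  # illustrative path
model = instantiate(cfg.model)  # builds GeneralizedRCNN with the C4 backbone and Res5 ROI heads
```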
 
spaces/BAAI/AltDiffusion-m9/css_and_js.py DELETED
@@ -1,92 +0,0 @@
- from os import path
- import json
-
-
- def readTextFile(*args):
-     dir = path.dirname(__file__)
-     entry = path.join(dir, *args)
-     with open(entry, "r", encoding="utf8") as f:
-         data = f.read()
-     return data
-
-
- def css(opt):
-     styling = readTextFile("css", "styles.css")
-     # TODO: @altryne restore this before merge
-     if not opt.no_progressbar_hiding:
-         styling += readTextFile("css", "no_progress_bar.css")
-     return styling
-
-
- def js(opt):
-     data = readTextFile("js", "index.js")
-     data = "(z) => {" + data + "; return z ?? [] }"
-     return data
-
-
- # TODO : @altryne fix this to the new JS format
- # (raw string so regex escapes like \s reach the browser intact)
- js_copy_txt2img_output = r"(x) => {navigator.clipboard.writeText(document.querySelector('gradio-app').shadowRoot.querySelector('#highlight .textfield').textContent.replace(/\s+/g,' ').replace(/: /g,':'))}"
-
-
- js_parse_prompt = r"""
- (txt2img_prompt, txt2img_width, txt2img_height, txt2img_steps, txt2img_seed, txt2img_batch_count, txt2img_cfg) => {
-
-   const prompt_input = document.querySelector('gradio-app').shadowRoot.querySelector('#prompt_input [data-testid="textbox"]');
-   const multiline = document.querySelector('gradio-app').shadowRoot.querySelector('#submit_on_enter label:nth-child(2)')
-   if (prompt_input.scrollWidth > prompt_input.clientWidth + 10 ) {
-     multiline.click();
-   }
-
-   let height_match = /(?:-h|-H|--height|height)[ :]?(?<height>\d+) /.exec(txt2img_prompt);
-   if (height_match) {
-     txt2img_height = Math.round(height_match.groups.height / 64) * 64;
-     txt2img_prompt = txt2img_prompt.replace(height_match[0], '');
-   }
-   let width_match = /(?:-w|-W|--width|width)[ :]?(?<width>\d+) /.exec(txt2img_prompt);
-   if (width_match) {
-     txt2img_width = Math.round(width_match.groups.width / 64) * 64;
-     txt2img_prompt = txt2img_prompt.replace(width_match[0], '');
-   }
-   let steps_match = /(?:-s|--steps|steps)[ :]?(?<steps>\d+) /.exec(txt2img_prompt);
-   if (steps_match) {
-     txt2img_steps = steps_match.groups.steps.trim();
-     txt2img_prompt = txt2img_prompt.replace(steps_match[0], '');
-   }
-   let seed_match = /(?:-S|--seed|seed)[ :]?(?<seed>\d+) /.exec(txt2img_prompt);
-   if (seed_match) {
-     txt2img_seed = seed_match.groups.seed;
-     txt2img_prompt = txt2img_prompt.replace(seed_match[0], '');
-   }
-   let batch_count_match = /(?:-n|-N|--number|number)[ :]?(?<batch_count>\d+) /.exec(txt2img_prompt);
-   if (batch_count_match) {
-     txt2img_batch_count = batch_count_match.groups.batch_count;
-     txt2img_prompt = txt2img_prompt.replace(batch_count_match[0], '');
-   }
-   let cfg_scale_match = /(?:-c|-C|--cfg-scale|cfg_scale|cfg)[ :]?(?<cfgscale>\d\.?\d+?) /.exec(txt2img_prompt);
-   if (cfg_scale_match) {
-     txt2img_cfg = parseFloat(cfg_scale_match.groups.cfgscale).toFixed(1);
-     txt2img_prompt = txt2img_prompt.replace(cfg_scale_match[0], '');
-   }
-   let sampler_match = /(?:-A|--sampler|sampler)[ :]?(?<sampler>\w+) /.exec(txt2img_prompt);
-   if (sampler_match) {
-     txt2img_prompt = txt2img_prompt.replace(sampler_match[0], '');
-   }
-
-   return [txt2img_prompt, parseInt(txt2img_width), parseInt(txt2img_height), parseInt(txt2img_steps), txt2img_seed, parseInt(txt2img_batch_count), parseFloat(txt2img_cfg)];
- }
- """
-
-
- # Wrap the typical SD method call into async closure for ease of use
- # Supplies the js function with a params object
- # That includes all the passed arguments and input from Gradio: x
- # ATTENTION: x is an array of values of all components passed to your
- # python event handler
- # Example call in Gradio component's event handler (pass the result to _js arg):
- #   _js=call_JS("myJsMethod", arg1="string", arg2=100, arg3=[])
- def call_JS(sd_method, **kwargs):
-     param_str = json.dumps(kwargs)
-     return f"async (...x) => {{ return await SD.{sd_method}({{ x, ...{param_str} }}) ?? []; }}"
 
spaces/Banbri/zcvzcv/src/components/ui/select.tsx DELETED
@@ -1,121 +0,0 @@
- "use client"
-
- import * as React from "react"
- import * as SelectPrimitive from "@radix-ui/react-select"
- import { Check, ChevronDown } from "lucide-react"
-
- import { cn } from "@/lib/utils"
-
- const Select = SelectPrimitive.Root
-
- const SelectGroup = SelectPrimitive.Group
-
- const SelectValue = SelectPrimitive.Value
-
- const SelectTrigger = React.forwardRef<
-   React.ElementRef<typeof SelectPrimitive.Trigger>,
-   React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
- >(({ className, children, ...props }, ref) => (
-   <SelectPrimitive.Trigger
-     ref={ref}
-     className={cn(
-       "flex h-10 w-full items-center justify-between rounded-md border border-stone-200 bg-transparent px-3 py-2 text-sm ring-offset-white placeholder:text-stone-500 focus:outline-none focus:ring-2 focus:ring-stone-400 focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 dark:border-stone-800 dark:ring-offset-stone-950 dark:placeholder:text-stone-400 dark:focus:ring-stone-800",
-       className
-     )}
-     {...props}
-   >
-     {children}
-     <SelectPrimitive.Icon asChild>
-       <ChevronDown className="h-4 w-4 opacity-50" />
-     </SelectPrimitive.Icon>
-   </SelectPrimitive.Trigger>
- ))
- SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
-
- const SelectContent = React.forwardRef<
-   React.ElementRef<typeof SelectPrimitive.Content>,
-   React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
- >(({ className, children, position = "popper", ...props }, ref) => (
-   <SelectPrimitive.Portal>
-     <SelectPrimitive.Content
-       ref={ref}
-       className={cn(
-         "relative z-50 min-w-[8rem] overflow-hidden rounded-md border border-stone-200 bg-white text-stone-950 shadow-md data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 dark:border-stone-800 dark:bg-stone-950 dark:text-stone-50",
-         position === "popper" &&
-           "data-[side=bottom]:translate-y-1 data-[side=left]:-translate-x-1 data-[side=right]:translate-x-1 data-[side=top]:-translate-y-1",
-         className
-       )}
-       position={position}
-       {...props}
-     >
-       <SelectPrimitive.Viewport
-         className={cn(
-           "p-1",
-           position === "popper" &&
-             "h-[var(--radix-select-trigger-height)] w-full min-w-[var(--radix-select-trigger-width)]"
-         )}
-       >
-         {children}
-       </SelectPrimitive.Viewport>
-     </SelectPrimitive.Content>
-   </SelectPrimitive.Portal>
- ))
- SelectContent.displayName = SelectPrimitive.Content.displayName
-
- const SelectLabel = React.forwardRef<
-   React.ElementRef<typeof SelectPrimitive.Label>,
-   React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
- >(({ className, ...props }, ref) => (
-   <SelectPrimitive.Label
-     ref={ref}
-     className={cn("py-1.5 pl-8 pr-2 text-sm font-semibold", className)}
-     {...props}
-   />
- ))
- SelectLabel.displayName = SelectPrimitive.Label.displayName
-
- const SelectItem = React.forwardRef<
-   React.ElementRef<typeof SelectPrimitive.Item>,
-   React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
- >(({ className, children, ...props }, ref) => (
-   <SelectPrimitive.Item
-     ref={ref}
-     className={cn(
-       "relative flex w-full cursor-default select-none items-center rounded-sm py-1.5 pl-8 pr-2 text-sm outline-none focus:bg-stone-100 focus:text-stone-900 data-[disabled]:pointer-events-none data-[disabled]:opacity-50 dark:focus:bg-stone-800 dark:focus:text-stone-50",
-       className
-     )}
-     {...props}
-   >
-     <span className="absolute left-2 flex h-3.5 w-3.5 items-center justify-center">
-       <SelectPrimitive.ItemIndicator>
-         <Check className="h-4 w-4" />
-       </SelectPrimitive.ItemIndicator>
-     </span>
-
-     <SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>
-   </SelectPrimitive.Item>
- ))
- SelectItem.displayName = SelectPrimitive.Item.displayName
-
- const SelectSeparator = React.forwardRef<
-   React.ElementRef<typeof SelectPrimitive.Separator>,
-   React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
- >(({ className, ...props }, ref) => (
-   <SelectPrimitive.Separator
-     ref={ref}
-     className={cn("-mx-1 my-1 h-px bg-stone-100 dark:bg-stone-800", className)}
-     {...props}
-   />
- ))
- SelectSeparator.displayName = SelectPrimitive.Separator.displayName
-
- export {
-   Select,
-   SelectGroup,
-   SelectValue,
-   SelectTrigger,
-   SelectContent,
-   SelectLabel,
-   SelectItem,
-   SelectSeparator,
- }
 
spaces/Bart92/RVC_HF/lib/infer_pack/transforms.py DELETED
@@ -1,209 +0,0 @@
- import torch
- from torch.nn import functional as F
-
- import numpy as np
-
-
- DEFAULT_MIN_BIN_WIDTH = 1e-3
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
- DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
- def piecewise_rational_quadratic_transform(
-     inputs,
-     unnormalized_widths,
-     unnormalized_heights,
-     unnormalized_derivatives,
-     inverse=False,
-     tails=None,
-     tail_bound=1.0,
-     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-     min_derivative=DEFAULT_MIN_DERIVATIVE,
- ):
-     if tails is None:
-         spline_fn = rational_quadratic_spline
-         spline_kwargs = {}
-     else:
-         spline_fn = unconstrained_rational_quadratic_spline
-         spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
-     outputs, logabsdet = spline_fn(
-         inputs=inputs,
-         unnormalized_widths=unnormalized_widths,
-         unnormalized_heights=unnormalized_heights,
-         unnormalized_derivatives=unnormalized_derivatives,
-         inverse=inverse,
-         min_bin_width=min_bin_width,
-         min_bin_height=min_bin_height,
-         min_derivative=min_derivative,
-         **spline_kwargs
-     )
-     return outputs, logabsdet
-
-
- def searchsorted(bin_locations, inputs, eps=1e-6):
-     bin_locations[..., -1] += eps
-     return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
- def unconstrained_rational_quadratic_spline(
-     inputs,
-     unnormalized_widths,
-     unnormalized_heights,
-     unnormalized_derivatives,
-     inverse=False,
-     tails="linear",
-     tail_bound=1.0,
-     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-     min_derivative=DEFAULT_MIN_DERIVATIVE,
- ):
-     inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
-     outside_interval_mask = ~inside_interval_mask
-
-     outputs = torch.zeros_like(inputs)
-     logabsdet = torch.zeros_like(inputs)
-
-     if tails == "linear":
-         unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
-         constant = np.log(np.exp(1 - min_derivative) - 1)
-         unnormalized_derivatives[..., 0] = constant
-         unnormalized_derivatives[..., -1] = constant
-
-         outputs[outside_interval_mask] = inputs[outside_interval_mask]
-         logabsdet[outside_interval_mask] = 0
-     else:
-         raise RuntimeError("{} tails are not implemented.".format(tails))
-
-     (
-         outputs[inside_interval_mask],
-         logabsdet[inside_interval_mask],
-     ) = rational_quadratic_spline(
-         inputs=inputs[inside_interval_mask],
-         unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
-         unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
-         unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
-         inverse=inverse,
-         left=-tail_bound,
-         right=tail_bound,
-         bottom=-tail_bound,
-         top=tail_bound,
-         min_bin_width=min_bin_width,
-         min_bin_height=min_bin_height,
-         min_derivative=min_derivative,
-     )
-
-     return outputs, logabsdet
-
-
- def rational_quadratic_spline(
-     inputs,
-     unnormalized_widths,
-     unnormalized_heights,
-     unnormalized_derivatives,
-     inverse=False,
-     left=0.0,
-     right=1.0,
-     bottom=0.0,
-     top=1.0,
-     min_bin_width=DEFAULT_MIN_BIN_WIDTH,
-     min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
-     min_derivative=DEFAULT_MIN_DERIVATIVE,
- ):
-     if torch.min(inputs) < left or torch.max(inputs) > right:
-         raise ValueError("Input to a transform is not within its domain")
-
-     num_bins = unnormalized_widths.shape[-1]
-
-     if min_bin_width * num_bins > 1.0:
-         raise ValueError("Minimal bin width too large for the number of bins")
-     if min_bin_height * num_bins > 1.0:
-         raise ValueError("Minimal bin height too large for the number of bins")
-
-     widths = F.softmax(unnormalized_widths, dim=-1)
-     widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
-     cumwidths = torch.cumsum(widths, dim=-1)
-     cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
-     cumwidths = (right - left) * cumwidths + left
-     cumwidths[..., 0] = left
-     cumwidths[..., -1] = right
-     widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
-     derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
-     heights = F.softmax(unnormalized_heights, dim=-1)
-     heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
-     cumheights = torch.cumsum(heights, dim=-1)
-     cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
-     cumheights = (top - bottom) * cumheights + bottom
-     cumheights[..., 0] = bottom
-     cumheights[..., -1] = top
-     heights = cumheights[..., 1:] - cumheights[..., :-1]
-
-     if inverse:
-         bin_idx = searchsorted(cumheights, inputs)[..., None]
-     else:
-         bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
-     input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
-     input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
-     input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
-     delta = heights / widths
-     input_delta = delta.gather(-1, bin_idx)[..., 0]
-
-     input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
-     input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
-     input_heights = heights.gather(-1, bin_idx)[..., 0]
-
-     if inverse:
-         a = (inputs - input_cumheights) * (
-             input_derivatives + input_derivatives_plus_one - 2 * input_delta
-         ) + input_heights * (input_delta - input_derivatives)
-         b = input_heights * input_derivatives - (inputs - input_cumheights) * (
-             input_derivatives + input_derivatives_plus_one - 2 * input_delta
-         )
-         c = -input_delta * (inputs - input_cumheights)
-
-         discriminant = b.pow(2) - 4 * a * c
-         assert (discriminant >= 0).all()
-
-         root = (2 * c) / (-b - torch.sqrt(discriminant))
-         outputs = root * input_bin_widths + input_cumwidths
-
-         theta_one_minus_theta = root * (1 - root)
-         denominator = input_delta + (
-             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-             * theta_one_minus_theta
-         )
-         derivative_numerator = input_delta.pow(2) * (
-             input_derivatives_plus_one * root.pow(2)
-             + 2 * input_delta * theta_one_minus_theta
-             + input_derivatives * (1 - root).pow(2)
-         )
-         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-         return outputs, -logabsdet
-     else:
-         theta = (inputs - input_cumwidths) / input_bin_widths
-         theta_one_minus_theta = theta * (1 - theta)
-
-         numerator = input_heights * (
-             input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
-         )
-         denominator = input_delta + (
-             (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
-             * theta_one_minus_theta
-         )
-         outputs = input_cumheights + numerator / denominator
-
-         derivative_numerator = input_delta.pow(2) * (
-             input_derivatives_plus_one * theta.pow(2)
-             + 2 * input_delta * theta_one_minus_theta
-             + input_derivatives * (1 - theta).pow(2)
-         )
-         logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
-         return outputs, logabsdet
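A hedged shape-level smoke test for the spline above; the tensor shapes and bin count are arbitrary choices for illustration, not values taken from any caller in this repo:

```python
# Hedged sketch: forward/inverse round trip through the unconstrained spline.
import torch

x = torch.rand(2, 4, 8) * 2 - 1  # inputs inside the [-1, 1] tail bound
uw = torch.randn(2, 4, 8, 10)    # 10 bins of unnormalized widths...
uh = torch.randn(2, 4, 8, 10)    # ...and heights
ud = torch.randn(2, 4, 8, 9)     # num_bins - 1 derivatives; tails="linear" pads to num_bins + 1

y, logdet = piecewise_rational_quadratic_transform(
    x, uw, uh, ud, tails="linear", tail_bound=1.0
)
x_rec, _ = piecewise_rational_quadratic_transform(
    y, uw, uh, ud, inverse=True, tails="linear", tail_bound=1.0
)
assert torch.allclose(x, x_rec, atol=1e-4)  # the flow is (numerically) invertible
```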
 
spaces/Benson/text-generation/Examples/Boleto Para El Grupo 2 2022.md DELETED
@@ -1,234 +0,0 @@
- <br />
- <h1>How to Download the Hall Ticket for the Group 2 Exam 2022</h1>
- <p>If you are aspiring to join the Tamil Nadu Public Service Commission (TNPSC) in various posts in the Group 2 services, then you must be aware of the TNPSC Group 2 exam 2022. This exam is conducted every year by the TNPSC to select candidates for different posts such as Revenue Assistant, Assistant Section Officer, Audit Officer, etc. The exam consists of two stages: Preliminary and Main, followed by an interview for some posts. The preliminary exam was held on 21 May 2022, and the main exam is scheduled for 25 February 2023.</p>
- <h2>group 2 2022 hall ticket</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://bltlly.com/2v6JLq">https://bltlly.com/2v6JLq</a></b></p><br /><br />
- <p>One of the most important things you need to do before appearing for the exam is to download your hall ticket or admit card. The hall ticket is a document that contains your personal and exam details, such as your name, roll number, exam centre, date and time, etc. It also serves as proof of your identity and eligibility for the exam. Without the hall ticket, you will not be allowed to enter the exam hall or take the exam.</p>
- <p>So, how can you download your hall ticket for the TNPSC Group 2 exam 2022? In this article, we will tell you everything you need to know about it. We will also give you useful information about the exam pattern, syllabus, hall ticket details, instructions and FAQs. Read on to find out more.</p>
- <h2>Introduction</h2>
- <h3>What is the TNPSC Group 2 exam and why is it important?</h3>
- <p>The TNPSC Group 2 exam is a competitive examination conducted by the Tamil Nadu Public Service Commission (TNPSC) to recruit candidates for various posts in the Group 2 services. These posts include interview posts and non-interview posts, such as Deputy Commercial Tax Officer, Assistant Section Officer, Audit Officer, etc. The vacancies for these posts are estimated at around 5529 this year.</p>
-
- <h3>What are the important dates and events for the exam?</h3>
- <p>The TNPSC has released the official notification for the Group 2 exam 2022 on its website www.tnpsc.gov.in. The notification contains all the details about the eligibility criteria, application process, selection process, reservation policy, etc. Interested and eligible candidates can apply online through One Time Registration (OTR) on or before 23 March 2022.</p>
- <p>The important dates and events for the TNPSC Group 2 exam 2022 are as follows:</p>
- <table>
- <tr><th>Event</th><th>Date</th></tr>
- <tr><td>Notification release date</td><td>23 February 2022</td></tr>
- <tr><td>Last date to apply online</td><td>23 March 2022</td></tr>
- <tr><td>Preliminary exam date</td><td>21 May 2022</td></tr>
- <tr><td>Preliminary exam result date</td><td>June 2022 (tentative)</td></tr>
- <tr><td>Main exam date</td><td>25 February 2023</td></tr>
- <tr><td>Main exam result date</td><td>April 2023 (tentative)</td></tr>
- <tr><td>Interview date</td><td>May 2023 (tentative)</td></tr>
- <tr><td>Final result date</td><td>June 2023 (tentative)</td></tr>
- </table>
- <h3>How to download the hall ticket for the exam?</h3>
- <p>The TNPSC will issue the hall ticket for the Group 2 exam 2022 on its website www.tnpsc.gov.in. The hall ticket will be available for download at least 10 days before the exam date. Candidates who have successfully applied for the exam can download their hall ticket by following these steps:</p>
- <ol>
- <li>Visit the official TNPSC website www.tnpsc.gov.in.</li>
- <li>Click on the "Hall Ticket Download" link on the home page.</li>
- <li>Select the "TNPSC Group 2 Exam 2022" option from the drop-down menu.</li>
- <li>Enter your application ID and date of birth and click "Submit".</li>
- <li>Your hall ticket will appear on the screen. Check all the details carefully and download it.</li>
- <li>Take a printout of the hall ticket and keep it safe for future reference.</li>
- </ol>
-
- <h3>What are the stages and subjects of the exam?</h3>
- <p>The TNPSC Group 2 exam 2022 consists of two stages: Preliminary and Main, followed by an interview for some posts. The preliminary exam is a screening test that filters candidates for the main exam. The main exam is a descriptive test that assesses the candidates' subject knowledge and writing skills. The interview is a personality test that evaluates the candidates' suitability for the posts.</p>
- <p>The subjects and mark distribution of each stage are as follows:</p>
- <table>
- <tr><th>Stage</th><th>Subject</th><th>Marks</th></tr>
- <tr><td rowspan="2">Preliminary</td><td>General Studies (Degree Standard)</td><td>150</td></tr>
- <tr><td>Aptitude and Mental Ability Test (SSLC Standard)</td><td>50</td></tr>
- <tr><td rowspan="3">Main</td><td>Tamil or English (SSLC Standard)</td><td>100</td></tr>
- <tr><td>General Studies (Degree Standard)</td><td>150</td></tr>
- <tr><td>Interview and Record</td><td>40</td></tr>
- </table>
- <h3>What are the marks and duration of each stage?</h3>
- <p>The marks and duration of each stage are as follows:</p>
- <table>
- <tr><th>Stage</th><th>Marks</th><th>Negative marking</th><th>Duration</th></tr>
- <tr><td>Preliminary</td><td>200</td><td>1/3</td><td>3 hours</td></tr>
- <tr><td>Main</td><td>300</td><td>No</td><td>3 hours</td></tr>
- <tr><td>Interview</td><td>40</td><td>No</td><td>-</td></tr>
- </table>
- <h3>What are the topics and subtopics covered in each subject?</h3>
- <p>The topics and subtopics covered in each subject are as follows:</p>
- <h4>General Studies (Degree Standard)</h4>
- <ul>
- <li>Unit I: General Science <ul>
-
- <li>Chemistry: Elements and compounds, Acids, bases and salts, Oxidation and reduction, Chemistry of ores and metals, Carbon, nitrogen and their compounds, Fertilizers, pesticides, insecticides, Biochemistry and biotechnology, Electrochemistry, Polymers and plastics.</li>
- <li>Biology: Botany: Main concepts of life sciences, The cell as the basic unit of life, Classification of living organisms, Nutrition and dietetics, Respiration. Zoology: Blood and blood circulation. Endocrine system. Reproductive system. Genetics, the science of heredity. Environment, ecology, health and hygiene. Biodiversity and its conservation. Human diseases. Prevention and remedies. Communicable and non-communicable diseases. Alcoholism and drug abuse. Animals, plants and human life.</li>
- </ul>
- </li>
- <li>Unit II: Current Events <ul>
- <li>History: Latest diary of events - National - National symbols - Profile of States - Eminent persons and places in the news - Sports and games - Books and authors - Awards and honours - Cultural panorama - Latest historical events - India and its neighbours - Latest terminology - Quotations - Who's who?</li>
- <li>Political Science: Problems in the conduct of public elections - Political parties and political system in India - Public awareness and General administration - Role of voluntary organisations and the government, welfare-oriented government schemes and their utility.</li>
- <li>Geography: Geographical landmarks - Policy on environment and ecology.</li>
- <li>Economics: Current socio-economic problems - New economic policy and the government sector.</li>
- <li>Science: Latest inventions in science and technology - Latest discoveries in health science - Mass media and communication.</li>
- </ul>
- </li>
- <li>Unit III: Geography <ul>
-
- </ul>
- </li>
- <li>Unit IV: History and Culture of India <ul>
- <li>Indus Valley Civilisation - Guptas, Delhi Sultans, Mughals and Marathas - Age of Vijayanagaram and the Bahmanis - History of South India - Culture and heritage of the Tamil people - Advent of the European invasion - Expansion and consolidation of British rule - Effect of British rule on socio-economic factors - Social reforms and religious movements - India since independence - Characteristics of Indian culture - Unity in diversity - race, colour, language, custom - India as a secular state - Organisations of fine arts, dance, drama, music - Growth of the rationalist, Dravidian movement in TN - Political parties and populist schemes - Prominent personalities in the various spheres - Arts, Science, Literature and Philosophy - Mother Teresa, Swami Vivekananda, Pandit Ravishankar, M.S.Subbulakshmi, Rukmani Arundel and J.Krishnamoorthy etc.</li>
- </ul>
- </li>
- <li>Unit V: Indian Polity <ul>
- <li>Constitution of India - Preamble to the Constitution - Salient features of the Constitution - Union, State and territory - Citizenship rights - Fundamental rights - Fundamental duties - Charter of human rights - Union legislature - Parliament - State executive - State legislature - Assembly - Status of Jammu and Kashmir - Local government - panchayat raj - Tamil Nadu - Judiciary in India - Rule of law / Due process of law - Indian federalism - centre-state relations - Emergency provisions - Elections - Election Commission, Union and State - Official language and Schedule VIII - Amendments to the Constitution - Schedules of the Constitution - Administrative reforms and tribunals - Corruption in public life - Anti-corruption measures - Central Vigilance Commission, lok-adalats, Ombudsman, Comptroller and Auditor General of India - Right to information - Central and State Commissions - Empowerment of women - Voluntary organisations and public grievance redressal - Forms of consumer protection.</li>
- </ul>
- </li>
- <li>Unit VI: Indian Economy <ul>
- <li>Nature of the Indian economy - Five-year plan models, an assessment - Land reforms and agriculture - Application of science in agriculture - Industrial growth - Rural welfare-oriented programmes - Social sector problems - population, education, health, employment, poverty - Economic trends in Tamil Nadu - Energy: different sources and development - Finance Commission - Planning Commission - National Development Council - Poverty alleviation programmes - HRD - Sustainable economic growth - Economic growth and social justice - Balanced growth - NITI Aayog - Land reform acts and laws.</li>
- </ul>
- </li>
- <li>Unit VII: Indian National Movement <ul>
- <li>National renaissance - Early uprisings against British rule - 1857 Revolt - Indian National Congress - Emergence of national leaders - Gandhi, Nehru, Tagore, Netaji - Growth of militant movements - Different modes of agitation - Era of different acts and pacts - World war and the final phase of the struggle - Communalism leading to partition - Role of Tamil Nadu in the freedom struggle - Rajaji, VOC, Periyar, Bharathiar and others - Birth of political parties / the political system in India since independence.</li>
- </ul>
- </li>
- <li>Unit VIII: Aptitude and Mental Ability Test (SSLC Standard) <ul>
- <li>Conversion of information to data - Collection, compilation and presentation of data - Tables, graphs, diagrams - Parametric representation of data - Analytical interpretation of data - Simplification - Percentage - Highest Common Factor (HCF) - Lowest Common Multiple (LCM) - Ratio and proportion - Simple interest - Compound interest - Area - Volume - Time and work - Basic terms, communications in information technology - Application of Information and Communication Technology (ICT) - Decision making and problem solving - Logical reasoning - Puzzles - Dice - Visual reasoning - Alpha-numeric reasoning - Number series - Logical number / alphabetical / diagrammatic sequences.</li>
- </ul>
- </li>
- </ul>
- <h4>Tamil or English (SSLC Standard)</h4>
- <ul>
- <li>Grammar <ul>
-
- <li>Choose the correct 'Synonyms' for the underlined word from the given options</li>
- <li>Choose the correct 'Antonyms' for the underlined word from the given options</li>
- <li>Select the correct word (Prefix, Suffix)</li>
- <li>Fill in the blanks with a suitable article</li>
- <li>Fill in the blanks with a suitable preposition</li>
- <li>Select the correct question tag</li>
- <li>Select the correct tense</li>
- <li>Select the correct voice</li>
- <li>Fill in the blanks (Infinitive, Gerund, Participle)</li>
- <li>Identify the sentence pattern of the following sentence (Subject, Verb, Object...)</li>
- <li>Fill in the blanks with 'Homophones'</li>
- <li>Find out the error (Articles, Prepositions, Noun, Verb, Adjective, Adverb)</li>
- <li>Comprehension</li>
- <li>Select the correct sentence</li>
- <li>Find out the odd words (Verb, Noun, Adjective, Adverb)</li>
- <li>Select the correct plural forms</li>
- <li>Identify the sentence (Simple, Compound, Complex sentence)</li>
- <li>Identify the correct degree.</li>
- <li>Form a new word by blending the words.</li>
- <li>Form compound words (E.g.: Noun+Verb, Gerund+Noun)</li>
- </ul>
- </li>
- <li>Literature <ul>
- <li>Figures of speech observed in the following poems: <ul>
- <li>Alliteration - Allusion - Simile - Metaphor - Personification - Oxymoron - Onomatopoeia - Anaphora - Ellipsis - Rhyme Scheme - Rhyming Words - Repetition - Apostrophe</li>
- <li>A Psalm of Life - Women's Rights - The Nation United - English Words - Snake - The Man He Killed - Off to Outer Space Tomorrow Morning - Sonnet No.116 - The Solitary Reaper - Be the Best - O Captain My Captain - Laugh and Be Merry - Earth - Don't Quit - The Apology - Be Glad Your Nose is on Your Face - A Sonnet for My Incomparable Mother - The Flying Wonder - To a Millionaire - The Piano - Man - Going for Water - The Cry of the Children - Migrant Bird - Shilpi.</li>
- </ul>
- </li>
-
- <li>A Psalm of Life - Women's Rights - The Nation United - English Words - Snake - The Man He Killed - Off to Outer Space Tomorrow Morning - Sonnet No.116 - The Solitary Reaper - Be the Best - O Captain My Captain - Laugh and Be Merry - Earth - Don't Quit - The Apology - Be Glad Your Nose is on Your Face - A Sonnet for My Incomparable Mother - The Flying Wonder - To a Millionaire - The Piano - Man - Going for Water - The Cry of the Children - Migrant Bird - Shilpi.</li>
- </ul>
- </li>
- <li>Important lines from poems. <ul>
- <li>Where the Mind is Without Fear - The Solitary Reaper - Going for Water - A Psalm of Life - Be the Best - Sonnet No.116</li>
- </ul>
- </li>
- <li>Questions on the biography of <ul>
- <li>Mahatma Gandhi - Jawaharlal Nehru - Subash Chandra Bose - Helen Keller - Kalpana Chawla - Dr.Salim Ali - Rani of Jhansi - Nelson Mandela - Abraham Lincoln</li>
- </ul>
- </li>
- <li>Questions on Shakespeare <ul>
- <li>Merchant of Venice (Act IV, Court Scene) - Julius Caesar (Act III, Scene 2) - Sonnet 116</li>
- </ul>
- </li>
- <li>Questions from Oscar Wilde <ul>
- <li>The Model Millionaire - The Selfish Giant</li>
- </ul>
- </li>
- <li>Dr.Karl Paulnack <ul>
- <li>Music - The Hope Raiser</li>
- </ul>
- </li>
- <li>Comprehension questions from the following motivational essays: <ul> <li>Read the instructions on the hall ticket and the question paper carefully.</li>
- <li>Fill in the required details on the answer sheet and question booklet correctly.</li>
- <li>Attempt all the questions, since there is no negative marking for the main exam.</li>
- <li>Do not carry electronic devices such as mobile phones, calculators, smart watches, etc. into the exam hall.</li>
- <li>Do not carry books, notes, documents or any other material into the exam hall.</li>
-
- <li>Do not leave the exam hall before the allotted time without the invigilator's permission.</li>
- </ul>
- <h2>TNPSC Group 2 Hall Ticket FAQs</h2>
- <h3>How to retrieve the application ID if it is forgotten?</h3>
- <p>If you have forgotten your application ID, you can retrieve it by following these steps:</p>
- <ol>
- <li>Visit the official TNPSC website www.tnpsc.gov.in.</li>
- <li>Click on the "Forgot Login ID" link on the home page.</li>
- <li>Enter your registered email ID and date of birth and click "Submit".</li>
- <li>Your application ID will be sent to your email ID.</li>
- </ol>
- <h3>How to rectify any error in the hall ticket?</h3>
- <p>If you find any error or discrepancy in your hall ticket, such as spelling mistakes, a wrong photograph, incorrect details, etc., you should immediately contact the TNPSC and have it rectified. You can contact the TNPSC by phone, by email or in person at its office. The contact details of the TNPSC are as follows:</p>
- <p>Tamil Nadu Public Service Commission<br/>
- Frazer Bridge Road<br/>
- V.O.C.Nagar, Park Town, Chennai-600003, Tamil Nadu, INDIA<br/>
- Phone: +91-44-25300300 (12 lines)<br/>
- Fax: +91-44-25300598<br/>
- Email: [email protected], [email protected]<br/>
- Website: www.tnpsc.gov.in</p>
- <h3>How to contact the TNPSC in case of any problem or query?</h3>
- <p>If you have any problem or query regarding the TNPSC Group 2 exam 2022 or the hall ticket, you can contact the TNPSC by phone, by email or in person at its office. The contact details of the TNPSC are as follows:</p>
- <p>Tamil Nadu Public Service Commission<br/>
- Frazer Bridge Road<br/>
- V.O.C.Nagar, Park Town, Chennai-600003, Tamil Nadu, INDIA<br/>
- Phone: +91-44-25300300 (12 lines)<br/>
- Fax: +91-44-25300598<br/>
- Email: [email protected], [email protected]<br/>
- Website: www.tnpsc.gov.in</p>
- <h2>Conclusion</h2>
-
- <p>The TNPSC Group 2 exam 2022 is a golden opportunity to join the prestigious government services in Tamil Nadu. To crack this exam, you need to work hard and smart. You need to revise your concepts, practise mock tests, improve your speed and accuracy, and manage your time well. You also need to stay calm and confident on exam day and avoid any stress or panic.</p>
- <p>We wish you all the best for your exam and hope you achieve your dream of becoming a TNPSC Group 2 officer. Remember that nothing is impossible if you have faith in yourself and your abilities. Keep your spirits high and don't give up. You can do it!</p>
- <h2>TNPSC Group 2 Hall Ticket FAQs</h2>
- <h4>Q1. When will the hall ticket for the TNPSC Group 2 main exam 2022 be available?</h4>
- <p>A1. The hall ticket for the TNPSC Group 2 main exam 2022 will be available at least 10 days before the exam date. Candidates can download it from the official TNPSC website www.tnpsc.gov.in by entering their application ID and date of birth.</p>
- <h4>Q2. What if I forget to bring my hall ticket or photo ID proof to the exam centre?</h4>
- <p>A2. If you forget to bring your hall ticket or photo ID proof to the exam centre, you will not be allowed to take the exam. The hall ticket and photo ID proof are mandatory documents that verify your identity and eligibility for the exam. Therefore, you must make sure to carry them with you on exam day.</p>
- <h4>Q3. Can I change my exam centre after downloading the hall ticket?</h4>
- <p>A3. No, you cannot change your exam centre after downloading the hall ticket. The exam centre once allotted by the TNPSC is final and cannot be changed under any circumstances. You should choose your preferred exam centre carefully while filling in the application form and download your hall ticket accordingly.</p>
- <h4>Q4. How can I prepare for the TNPSC Group 2 exam 2022?</h4>
-
- <ol>
- <li>Go through the official notification and understand the eligibility criteria, application process, selection process, reservation policy, etc.</li>
- <li>Check the exam pattern and syllabus and plan your study schedule accordingly.</li>
- <li>Refer to the best books and study materials for each subject and topic.</li>
- <li>Revise your concepts and formulas regularly and take notes of important points.</li>
- <li>Practise previous years' papers and mock tests to improve your speed and accuracy.</li>
- <li>Analyse your performance and identify your strengths and weaknesses.</li>
- <li>Work on your weak areas and clear your doubts with experts or mentors.</li>
- <li>Stay updated with current affairs and general knowledge by reading newspapers, magazines, etc.</li>
- <li>Improve your language skills by reading, writing, speaking and listening in English or Tamil.</li>
- <li>Take care of your health and well-being by eating well, sleeping well, exercising and relaxing well.</li>
- </ol>
- <h4>Q5. What are the career prospects of the TNPSC Group 2 services?</h4>
- <p>A5. The career prospects of the TNPSC Group 2 services are very bright and rewarding. Selected candidates will receive a good salary, job security, career growth and other benefits as per state government norms. They will also have the opportunity to serve the public and contribute to the development of Tamil Nadu. Some of the posts in the TNPSC Group 2 services are as follows:</p>
- <ul>
- <li>Deputy Commercial Tax Officer</li>
- <li>Assistant Section Officer</li>
- <li>Audit Officer</li>
- <li>Senior Inspector of Cooperative Societies</li>
- <li>Assistant Inspector of Labour</li>
- <li>Sub Registrar</li>
- <li>Municipal Commissioner</li>
- <li>Revenue Assistant</li>
- <li>Handloom Inspector</li>
- <li>Assistant Jailor</li>
- <li>Special Assistant</li>
- <li>Audit Assistant</li>
- <li>Supervisor of Industrial Cooperatives</li>
- <li>Head Clerk</li>
- <li>Junior Employment Officer</li>
-
- <li>Executive Officer, Grade II</li>
- <li>Revenue Officer, etc.</li>
- </ul>
- <p>I hope you found this article helpful and informative. If you have any comments or suggestions, feel free to share them with me. I would love to hear from you and improve my writing skills. Thank you for reading and have a great day!</p><br />
- <br />
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/unicode_utils.py DELETED
@@ -1,42 +0,0 @@
- import unicodedata
- import sys
-
-
- # HFS Plus uses decomposed UTF-8
- def decompose(path):
-     if isinstance(path, str):
-         return unicodedata.normalize('NFD', path)
-     try:
-         path = path.decode('utf-8')
-         path = unicodedata.normalize('NFD', path)
-         path = path.encode('utf-8')
-     except UnicodeError:
-         pass  # Not UTF-8
-     return path
-
-
- def filesys_decode(path):
-     """
-     Ensure that the given path is decoded,
-     NONE when no expected encoding works
-     """
-
-     if isinstance(path, str):
-         return path
-
-     fs_enc = sys.getfilesystemencoding() or 'utf-8'
-     candidates = fs_enc, 'utf-8'
-
-     for enc in candidates:
-         try:
-             return path.decode(enc)
-         except UnicodeDecodeError:
-             continue
-
-
- def try_encode(string, enc):
-     "turn unicode encoding into a functional routine"
-     try:
-         return string.encode(enc)
-     except UnicodeEncodeError:
-         return None
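A quick illustration of the NFC-to-NFD behaviour of `decompose`, assuming the function above is imported into scope (expected output noted in the comment):

```python
# Illustration: decompose() expands precomposed characters to NFD form,
# matching how HFS Plus stores file names.
s = "caf\u00e9"                   # 'café' with a single precomposed é (NFC)
print(len(s), len(decompose(s)))  # 4 5 -- NFD splits é into 'e' + combining acute accent
```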
 
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/__init__.py DELETED
@@ -1,10 +0,0 @@
- # flake8: noqa
-
- from openai.api_resources.abstract.api_resource import APIResource
- from openai.api_resources.abstract.createable_api_resource import CreateableAPIResource
- from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource
- from openai.api_resources.abstract.listable_api_resource import ListableAPIResource
- from openai.api_resources.abstract.nested_resource_class_methods import (
-     nested_resource_class_methods,
- )
- from openai.api_resources.abstract.updateable_api_resource import UpdateableAPIResource
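This `__init__.py` exists purely to flatten the import path for the abstract base classes; after it, consumers can pull the bases from one place:

```python
# Example of what the re-exports above enable:
from openai.api_resources.abstract import APIResource, ListableAPIResource
```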
 
spaces/Boilin/URetinex-Net/network/Math_Module.py DELETED
@@ -1,38 +0,0 @@
- import torch
- import torch.nn as nn
- from torchvision.transforms import Grayscale
-
-
- class P(nn.Module):
-     """
-     to solve min(P) = ||I-PQ||^2 + γ||P-R||^2
-     this is a least square problem
-     how to solve?
-     P* = (gamma*R + I*Q) / (Q*Q + gamma)
-     """
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, I, Q, R, gamma):
-         return (I * Q + gamma * R) / (gamma + Q * Q)
-
-
- class Q(nn.Module):
-     """
-     to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2
-     Q* = (lamda*L + I*P) / (P*P + lamda)
-     """
-     def __init__(self):
-         super().__init__()
-
-     def forward(self, I, P, L, lamda):
-         IR = I[:, 0:1, :, :]
-         IG = I[:, 1:2, :, :]
-         IB = I[:, 2:3, :, :]
-
-         PR = P[:, 0:1, :, :]
-         PG = P[:, 1:2, :, :]
-         PB = P[:, 2:3, :, :]
-
-         return (IR*PR + IG*PG + IB*PB + lamda*L) / ((PR*PR + PG*PG + PB*PB) + lamda)
-
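For reference, the closed forms quoted in the docstrings follow from setting the elementwise derivative of each objective to zero; the derivation for the `P` update (the `Q` case is symmetric):

```latex
% Minimize f(P) = (I - PQ)^2 + \gamma (P - R)^2 elementwise:
\frac{\partial f}{\partial P} = -2Q(I - PQ) + 2\gamma(P - R) = 0
\;\Longrightarrow\; P\,(Q^2 + \gamma) = IQ + \gamma R
\;\Longrightarrow\; P^{*} = \frac{IQ + \gamma R}{Q^2 + \gamma}
```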
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/catalog.py DELETED
@@ -1,132 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- from fvcore.common.file_io import PathHandler, PathManager
4
-
5
-
6
- class ModelCatalog(object):
7
- """
8
- Store mappings from names to third-party models.
9
- """
10
-
11
-     S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
-
-     # MSRA models have STRIDE_IN_1X1=True. False otherwise.
-     # NOTE: all BN models here have fused BN into an affine layer.
-     # As a result, you should only load them to a model with "FrozenBN".
-     # Loading them to a model with regular BN or SyncBN is wrong.
-     # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
-     # which should be negligible for training.
-     # NOTE: all models here use PIXEL_STD=[1,1,1]
-     C2_IMAGENET_MODELS = {
-         "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
-         "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
-         "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
-         "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
-         "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
-         "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
-         "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
-     }
-
-     C2_DETECTRON_PATH_FORMAT = (
-         "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl"
-     )  # noqa B950
-
-     C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
-     C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
-
-     # format: {model_name} -> part of the url
-     C2_DETECTRON_MODELS = {
-         "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW",  # noqa B950
-         "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I",  # noqa B950
-         "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7",  # noqa B950
-         "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ",  # noqa B950
-         "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB",  # noqa B950
-         "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC",  # noqa B950
-         "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT",  # noqa B950
-         "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI",  # noqa B950
-         "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q",  # noqa B950
-         "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao",  # noqa B950
-         "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L",  # noqa B950
-         "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179",  # noqa B950
-         "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2",  # noqa B950
-     }
-
-     @staticmethod
-     def get(name):
-         if name.startswith("Caffe2Detectron/COCO"):
-             return ModelCatalog._get_c2_detectron_baseline(name)
-         if name.startswith("ImageNetPretrained/"):
-             return ModelCatalog._get_c2_imagenet_pretrained(name)
-         raise RuntimeError("model not present in the catalog: {}".format(name))
-
-     @staticmethod
-     def _get_c2_imagenet_pretrained(name):
-         prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
-         name = name[len("ImageNetPretrained/") :]
-         name = ModelCatalog.C2_IMAGENET_MODELS[name]
-         url = "/".join([prefix, name])
-         return url
-
-     @staticmethod
-     def _get_c2_detectron_baseline(name):
-         name = name[len("Caffe2Detectron/COCO/") :]
-         url = ModelCatalog.C2_DETECTRON_MODELS[name]
-         if "keypoint_rcnn" in name:
-             dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
-         else:
-             dataset = ModelCatalog.C2_DATASET_COCO
-
-         if "35998355/rpn_R-50-C4_1x" in name:
-             # this one model is somehow different from the others
-             type = "rpn"
-         else:
-             type = "generalized_rcnn"
-
-         # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
-         url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
-             prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
-         )
-         return url
-
-
- class ModelCatalogHandler(PathHandler):
-     """
-     Resolve URLs like catalog://.
-     """
-
-     PREFIX = "catalog://"
-
-     def _get_supported_prefixes(self):
-         return [self.PREFIX]
-
-     def _get_local_path(self, path):
-         logger = logging.getLogger(__name__)
-         catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
-         logger.info("Catalog entry {} points to {}".format(path, catalog_path))
-         return PathManager.get_local_path(catalog_path)
-
-     def _open(self, path, mode="r", **kwargs):
-         return PathManager.open(self._get_local_path(path), mode, **kwargs)
-
-
- class Detectron2Handler(PathHandler):
-     """
-     Resolve anything that's in the Detectron2 model zoo.
-     """
-
-     PREFIX = "detectron2://"
-     S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
-
-     def _get_supported_prefixes(self):
-         return [self.PREFIX]
-
-     def _get_local_path(self, path):
-         name = path[len(self.PREFIX) :]
-         return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name)
-
-     def _open(self, path, mode="r", **kwargs):
-         return PathManager.open(self._get_local_path(path), mode, **kwargs)
-
-
- PathManager.register_handler(ModelCatalogHandler())
- PathManager.register_handler(Detectron2Handler())
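
Editor's note: the deleted catalog module above maps short `catalog://` names to download URLs by string substitution. Below is a minimal standalone sketch of that resolution logic (not the detectron2 API itself, and covering only one table entry) for readers who want to reproduce a URL by hand:

```python
# Standalone sketch of ModelCatalog's "catalog://ImageNetPretrained/..."
# resolution, using one entry from the tables above.
S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
C2_IMAGENET_MODELS = {"MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl"}

def resolve(name: str) -> str:
    # Mirrors ModelCatalog._get_c2_imagenet_pretrained: strip the prefix,
    # look up the relative path, and join it onto the S3 prefix.
    key = name[len("catalog://ImageNetPretrained/"):]
    return "/".join([S3_C2_DETECTRON_PREFIX, C2_IMAGENET_MODELS[key]])

print(resolve("catalog://ImageNetPretrained/MSRA/R-50"))
# https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl
```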
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_modeling.py DELETED
@@ -1,492 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
- import functools
- import io
- import struct
- import types
- import torch
-
- from detectron2.modeling import meta_arch
- from detectron2.modeling.box_regression import Box2BoxTransform
- from detectron2.modeling.meta_arch.panoptic_fpn import combine_semantic_and_instance_outputs
- from detectron2.modeling.postprocessing import detector_postprocess, sem_seg_postprocess
- from detectron2.modeling.roi_heads import keypoint_head
- from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
-
- from .c10 import Caffe2Compatible
- from .patcher import ROIHeadsPatcher, patch_generalized_rcnn
- from .shared import (
-     alias,
-     check_set_pb_arg,
-     get_pb_arg_floats,
-     get_pb_arg_valf,
-     get_pb_arg_vali,
-     get_pb_arg_vals,
-     mock_torch_nn_functional_interpolate,
- )
-
-
- def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
-     """
-     A function to assemble a caffe2 model's outputs (i.e. Dict[str, Tensor])
-     into detectron2's format (i.e. a list of Instances).
-     This only works when the model follows Caffe2 Detectron's naming convention.
-
-     Args:
-         image_sizes (List[List[int, int]]): [H, W] of every image.
-         tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
-
-         force_mask_on (Bool): if true, makes sure there will be pred_masks even
-             if the mask is not found in tensor_outputs (usually due to a model crash)
-     """
-
-     results = [Instances(image_size) for image_size in image_sizes]
-
-     batch_splits = tensor_outputs.get("batch_splits", None)
-     if batch_splits is not None:
-         raise NotImplementedError()
-     assert len(image_sizes) == 1
-     result = results[0]
-
-     bbox_nms = tensor_outputs["bbox_nms"]
-     score_nms = tensor_outputs["score_nms"]
-     class_nms = tensor_outputs["class_nms"]
-     # Detection will always succeed because Conv supports 0-batch
-     assert bbox_nms is not None
-     assert score_nms is not None
-     assert class_nms is not None
-     if bbox_nms.shape[1] == 5:
-         result.pred_boxes = RotatedBoxes(bbox_nms)
-     else:
-         result.pred_boxes = Boxes(bbox_nms)
-     result.scores = score_nms
-     result.pred_classes = class_nms.to(torch.int64)
-
-     mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
-     if mask_fcn_probs is not None:
-         # finish the mask pred
-         mask_probs_pred = mask_fcn_probs
-         num_masks = mask_probs_pred.shape[0]
-         class_pred = result.pred_classes
-         indices = torch.arange(num_masks, device=class_pred.device)
-         mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
-         result.pred_masks = mask_probs_pred
-     elif force_mask_on:
-         # NOTE: there's no way to know the height/width of the mask here; it won't be
-         # used anyway when the batch size is 0, so just set them to 0.
-         result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
-
-     keypoints_out = tensor_outputs.get("keypoints_out", None)
-     kps_score = tensor_outputs.get("kps_score", None)
-     if keypoints_out is not None:
-         # keypoints_out: [N, 4, #keypoints], where 4 is in order of (x, y, score, prob)
-         keypoints_tensor = keypoints_out
-         # NOTE: it's possible that prob is not calculated if "should_output_softmax"
-         # is set to False in HeatmapMaxKeypoint, so just use the raw score; it seems
-         # not to affect mAP. TODO: check more carefully.
-         keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
-         result.pred_keypoints = keypoint_xyp
-     elif kps_score is not None:
-         # keypoint heatmap to sparse data structure
-         pred_keypoint_logits = kps_score
-         keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
-
-     return results
-
-
- def _cast_to_f32(f64):
-     return struct.unpack("f", struct.pack("f", f64))[0]
-
-
- def set_caffe2_compatible_tensor_mode(model, enable=True):
-     def _fn(m):
-         if isinstance(m, Caffe2Compatible):
-             m.tensor_mode = enable
-
-     model.apply(_fn)
-
-
- def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
-     """
-     See get_caffe2_inputs() below.
-     """
-     assert all(isinstance(x, dict) for x in batched_inputs)
-     assert all(x["image"].dim() == 3 for x in batched_inputs)
-
-     images = [x["image"] for x in batched_inputs]
-     images = ImageList.from_tensors(images, size_divisibility)
-
-     im_info = []
-     for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
-         target_height = input_per_image.get("height", image_size[0])
-         target_width = input_per_image.get("width", image_size[1])  # noqa
-         # NOTE: The scale inside im_info is kept as a convention and for providing
-         # post-processing information if further processing is needed. For
-         # current Caffe2 model definitions that don't include post-processing inside
-         # the model, this number is not used.
-         # NOTE: There can be a slight difference between the width and height
-         # scales; using a single number can result in a numerical difference
-         # compared with D2's post-processing.
-         scale = target_height / image_size[0]
-         im_info.append([image_size[0], image_size[1], scale])
-     im_info = torch.Tensor(im_info)
-
-     return images.tensor.to(device), im_info.to(device)
-
-
- class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
-     """
-     Base class for a caffe2-compatible implementation of a meta architecture.
-     The forward is traceable and its traced graph can be converted to a caffe2
-     graph through ONNX.
-     """
-
-     def __init__(self, cfg, torch_model):
-         """
-         Args:
-             cfg (CfgNode):
-             torch_model (nn.Module): the detectron2 model (meta_arch) to be
-                 converted.
-         """
-         super().__init__()
-         self._wrapped_model = torch_model
-         self.eval()
-         set_caffe2_compatible_tensor_mode(self, True)
-
-     def get_caffe2_inputs(self, batched_inputs):
-         """
-         Convert pytorch-style structured inputs to caffe2-style inputs that
-         are tuples of tensors.
-
-         Args:
-             batched_inputs (list[dict]): inputs to a detectron2 model
-                 in its standard format. Each dict has "image" (CHW tensor), and optionally
-                 "height" and "width".
-
-         Returns:
-             tuple[Tensor]:
-                 tuple of tensors that will be the inputs to the
-                 :meth:`forward` method. For existing models, the first
-                 is an NCHW tensor (padded and batched); the second is
-                 an im_info Nx3 tensor, where the rows are
-                 (height, width, unused legacy parameter)
-         """
-         return convert_batched_inputs_to_c2_format(
-             batched_inputs,
-             self._wrapped_model.backbone.size_divisibility,
-             self._wrapped_model.device,
-         )
-
-     def encode_additional_info(self, predict_net, init_net):
-         """
-         Save extra metadata that will be used by inference in the output protobuf.
-         """
-         pass
-
-     def forward(self, inputs):
-         """
-         Run the forward in caffe2-style. It has to use caffe2-compatible ops
-         and the method will be used for tracing.
-
-         Args:
-             inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_inputs`.
-                 They will be the inputs of the converted caffe2 graph.
-
-         Returns:
-             tuple[Tensor]: output tensors. They will be the outputs of the
-                 converted caffe2 graph.
-         """
-         raise NotImplementedError
-
-     def _caffe2_preprocess_image(self, inputs):
-         """
-         Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
-         It normalizes the input images, and the final caffe2 graph assumes the
-         inputs have been batched already.
-         """
-         data, im_info = inputs
-         data = alias(data, "data")
-         im_info = alias(im_info, "im_info")
-         normalized_data = self._wrapped_model.normalizer(data)
-         normalized_data = alias(normalized_data, "normalized_data")
-
-         # Pack (data, im_info) into ImageList, which is recognized by self.inference.
-         images = ImageList(tensor=normalized_data, image_sizes=im_info)
-         return images
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         """
-         Creates a function that converts outputs of the caffe2 model to
-         detectron2's standard format.
-         The function uses information in `predict_net` and `init_net` that are
-         available at inference time. Therefore the function logic can be used in inference.
-
-         The returned function has the following signature:
-
-             def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
-
-         Where
-
-             * batched_inputs (list[dict]): the original input format of the meta arch
-             * c2_inputs (dict[str, Tensor]): the caffe2 inputs.
-             * c2_results (dict[str, Tensor]): the caffe2 output format,
-                 corresponding to the outputs of the :meth:`forward` function.
-             * detectron2_outputs: the original output format of the meta arch.
-
-         This function can be used to compare the outputs of the original meta arch and
-         the converted caffe2 graph.
-
-         Returns:
-             callable: a callable of the above signature.
-         """
-         raise NotImplementedError
-
-
- class Caffe2GeneralizedRCNN(Caffe2MetaArch):
-     def __init__(self, cfg, torch_model):
-         assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
-         torch_model = patch_generalized_rcnn(torch_model)
-         super().__init__(cfg, torch_model)
-
-         self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads)
-
-     def encode_additional_info(self, predict_net, init_net):
-         size_divisibility = self._wrapped_model.backbone.size_divisibility
-         check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
-         check_set_pb_arg(
-             predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
-         )
-         check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
-
-     @mock_torch_nn_functional_interpolate()
-     def forward(self, inputs):
-         if not self.tensor_mode:
-             return self._wrapped_model.inference(inputs)
-         images = self._caffe2_preprocess_image(inputs)
-         features = self._wrapped_model.backbone(images.tensor)
-         proposals, _ = self._wrapped_model.proposal_generator(images, features)
-         with self.roi_heads_patcher.mock_roi_heads():
-             detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
-         return tuple(detector_results[0].flatten())
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         def f(batched_inputs, c2_inputs, c2_results):
-             image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]]
-             results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
-             return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
-         return f
-
-
- class Caffe2PanopticFPN(Caffe2MetaArch):
-     def __init__(self, cfg, torch_model):
-         assert isinstance(torch_model, meta_arch.PanopticFPN)
-         torch_model = patch_generalized_rcnn(torch_model)
-         super().__init__(cfg, torch_model)
-
-         self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads)
-
-     @mock_torch_nn_functional_interpolate()
-     def forward(self, inputs):
-         assert self.tensor_mode
-         images = self._caffe2_preprocess_image(inputs)
-         features = self._wrapped_model.backbone(images.tensor)
-
-         sem_seg_results, _ = self._wrapped_model.sem_seg_head(features)
-         sem_seg_results = alias(sem_seg_results, "sem_seg")
-
-         proposals, _ = self._wrapped_model.proposal_generator(images, features)
-
-         with self.roi_heads_patcher.mock_roi_heads(self.tensor_mode):
-             detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
-
-         return tuple(detector_results[0].flatten()) + (sem_seg_results,)
-
-     def encode_additional_info(self, predict_net, init_net):
-         size_divisibility = self._wrapped_model.backbone.size_divisibility
-         check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
-         check_set_pb_arg(
-             predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
-         )
-         check_set_pb_arg(predict_net, "meta_architecture", "s", b"PanopticFPN")
-
-         # Inference parameters:
-         check_set_pb_arg(predict_net, "combine_on", "i", self._wrapped_model.combine_on)
-         check_set_pb_arg(
-             predict_net,
-             "combine_overlap_threshold",
-             "f",
-             _cast_to_f32(self._wrapped_model.combine_overlap_threshold),
-         )
-         check_set_pb_arg(
-             predict_net,
-             "combine_stuff_area_limit",
-             "i",
-             self._wrapped_model.combine_stuff_area_limit,
-         )
-         check_set_pb_arg(
-             predict_net,
-             "combine_instances_confidence_threshold",
-             "f",
-             _cast_to_f32(self._wrapped_model.combine_instances_confidence_threshold),
-         )
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         combine_on = get_pb_arg_vali(predict_net, "combine_on", None)
-         combine_overlap_threshold = get_pb_arg_valf(predict_net, "combine_overlap_threshold", None)
-         combine_stuff_area_limit = get_pb_arg_vali(predict_net, "combine_stuff_area_limit", None)
-         combine_instances_confidence_threshold = get_pb_arg_valf(
-             predict_net, "combine_instances_confidence_threshold", None
-         )
-
-         def f(batched_inputs, c2_inputs, c2_results):
-             image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]]
-             detector_results = assemble_rcnn_outputs_by_name(
-                 image_sizes, c2_results, force_mask_on=True
-             )
-             sem_seg_results = c2_results["sem_seg"]
-
-             # copied from meta_arch/panoptic_fpn.py ...
-             processed_results = []
-             for sem_seg_result, detector_result, input_per_image, image_size in zip(
-                 sem_seg_results, detector_results, batched_inputs, image_sizes
-             ):
-                 height = input_per_image.get("height", image_size[0])
-                 width = input_per_image.get("width", image_size[1])
-                 sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
-                 detector_r = detector_postprocess(detector_result, height, width)
-
-                 processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r})
-
-                 if combine_on:
-                     panoptic_r = combine_semantic_and_instance_outputs(
-                         detector_r,
-                         sem_seg_r.argmax(dim=0),
-                         combine_overlap_threshold,
-                         combine_stuff_area_limit,
-                         combine_instances_confidence_threshold,
-                     )
-                     processed_results[-1]["panoptic_seg"] = panoptic_r
-             return processed_results
-
-         return f
-
-
- class Caffe2RetinaNet(Caffe2MetaArch):
-     def __init__(self, cfg, torch_model):
-         assert isinstance(torch_model, meta_arch.RetinaNet)
-         super().__init__(cfg, torch_model)
-
-     @mock_torch_nn_functional_interpolate()
-     def forward(self, inputs):
-         assert self.tensor_mode
-         images = self._caffe2_preprocess_image(inputs)
-
-         # explicitly return the image sizes to avoid ONNX removing "im_info",
-         # since it's not used in the forward path
-         return_tensors = [images.image_sizes]
-
-         features = self._wrapped_model.backbone(images.tensor)
-         features = [features[f] for f in self._wrapped_model.in_features]
-         for i, feature_i in enumerate(features):
-             features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
-             return_tensors.append(features[i])
-
-         box_cls, box_delta = self._wrapped_model.head(features)
-         for i, (box_cls_i, box_delta_i) in enumerate(zip(box_cls, box_delta)):
-             return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
-             return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
-
-         return tuple(return_tensors)
-
-     def encode_additional_info(self, predict_net, init_net):
-         size_divisibility = self._wrapped_model.backbone.size_divisibility
-         check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
-         check_set_pb_arg(
-             predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
-         )
-         check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
-
-         # Inference parameters:
-         check_set_pb_arg(
-             predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.score_threshold)
-         )
-         check_set_pb_arg(predict_net, "topk_candidates", "i", self._wrapped_model.topk_candidates)
-         check_set_pb_arg(
-             predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.nms_threshold)
-         )
-         check_set_pb_arg(
-             predict_net,
-             "max_detections_per_image",
-             "i",
-             self._wrapped_model.max_detections_per_image,
-         )
-
-         check_set_pb_arg(
-             predict_net,
-             "bbox_reg_weights",
-             "floats",
-             [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
-         )
-         self._encode_anchor_generator_cfg(predict_net)
-
-     def _encode_anchor_generator_cfg(self, predict_net):
-         # serialize the anchor_generator for future use
-         serialized_anchor_generator = io.BytesIO()
-         torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
-         # Ideally we could put anchor generation inside the model; then we wouldn't
-         # need to store this information.
-         bytes = serialized_anchor_generator.getvalue()
-         check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
-
-     @staticmethod
-     def get_outputs_converter(predict_net, init_net):
-         self = types.SimpleNamespace()
-         serialized_anchor_generator = io.BytesIO(
-             get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
-         )
-         self.anchor_generator = torch.load(serialized_anchor_generator)
-         bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
-         self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
-         self.score_threshold = get_pb_arg_valf(predict_net, "score_threshold", None)
-         self.topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
-         self.nms_threshold = get_pb_arg_valf(predict_net, "nms_threshold", None)
-         self.max_detections_per_image = get_pb_arg_vali(
-             predict_net, "max_detections_per_image", None
-         )
-
-         # hack to reuse inference code from RetinaNet
-         self.inference = functools.partial(meta_arch.RetinaNet.inference, self)
-         self.inference_single_image = functools.partial(
-             meta_arch.RetinaNet.inference_single_image, self
-         )
-
-         def f(batched_inputs, c2_inputs, c2_results):
-             image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]]
-
-             num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
-             box_cls = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
-             box_delta = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
-
-             # For each feature level, the feature map should have the same batch size
-             # and spatial dimensions as box_cls and box_delta.
-             dummy_features = [box_delta[i].clone()[:, 0:0, :, :] for i in range(num_features)]
-             anchors = self.anchor_generator(dummy_features)
-
-             # self.num_classes can be inferred
-             self.num_classes = box_cls[0].shape[1] // (box_delta[0].shape[1] // 4)
-
-             results = self.inference(box_cls, box_delta, anchors, image_sizes)
-             return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
-
-         return f
-
-
- META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
-     "GeneralizedRCNN": Caffe2GeneralizedRCNN,
-     "PanopticFPN": Caffe2PanopticFPN,
-     "RetinaNet": Caffe2RetinaNet,
- }
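
Editor's note: the `META_ARCH_CAFFE2_EXPORT_TYPE_MAP` at the end of this file is the dispatch point for export. A hedged usage sketch follows; that `cfg.MODEL.META_ARCHITECTURE` names the meta-architecture is an assumption about detectron2's config schema, not something this file states.

```python
def build_caffe2_compatible_model(cfg, torch_model):
    """Wrap a detectron2 model in its caffe2-compatible, traceable twin.

    Sketch only: assumes `cfg.MODEL.META_ARCHITECTURE` holds one of the
    keys of META_ARCH_CAFFE2_EXPORT_TYPE_MAP ("GeneralizedRCNN", ...).
    """
    wrapper_cls = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
    c2_model = wrapper_cls(cfg, torch_model)
    # The wrapper exposes get_caffe2_inputs() to build the (data, im_info)
    # tuple, and its forward() is the traceable graph to export via ONNX.
    return c2_model
```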
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/__init__.py DELETED
@@ -1,11 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm
- from .deform_conv import DeformConv, ModulatedDeformConv
- from .mask_ops import paste_masks_in_image
- from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated
- from .roi_align import ROIAlign, roi_align
- from .roi_align_rotated import ROIAlignRotated, roi_align_rotated
- from .shape_spec import ShapeSpec
- from .wrappers import BatchNorm2d, Conv2d, ConvTranspose2d, cat, interpolate, Linear
-
- __all__ = [k for k in globals().keys() if not k.startswith("_")]
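
Editor's note: this `__init__.py` only re-exports the custom layers. A small hedged example of the surface it provides; the `norm=` keyword on the `Conv2d` wrapper and the `ShapeSpec` fields are assumptions about the detectron2 API that this file alone does not pin down, and running it requires detectron2 to be installed.

```python
import torch
from detectron2.layers import Conv2d, ShapeSpec, get_norm

# Describe an input feature map, then build a conv with GroupNorm attached.
spec = ShapeSpec(channels=3, height=64, width=64)
conv = Conv2d(spec.channels, 32, kernel_size=3, padding=1, norm=get_norm("GN", 32))
out = conv(torch.randn(1, spec.channels, spec.height, spec.width))
print(out.shape)  # torch.Size([1, 32, 64, 64])
```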
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docker/README.md DELETED
@@ -1,24 +0,0 @@
- ## Run the container
- Change to the *docker* directory of this repository:
- ```
- cd docker
- USER_ID=$UID docker-compose run detectron2
- ```
-
- #### Using a persistent cache directory
- Prevents models from being re-downloaded on every run by storing them in a cache directory.
-
- `docker-compose run --volume=/path/to/cache:/tmp:rw detectron2`
-
- ## Rebuild the container
- Rebuild the container with `USER_ID=$UID docker-compose build detectron2`.
- This is only necessary when the `Dockerfile` has been changed. The initial build is done automatically.
-
- ## Install new dependencies
- Add the following to the `Dockerfile` to make persistent changes.
- ```
- RUN sudo apt-get update && sudo apt-get install -y \
-   nano vim emacs
- RUN pip install --user pandas
- ```
- Or run them in the container to make temporary changes.
spaces/CVPR/LIVE/filter.h DELETED
@@ -1,106 +0,0 @@
- #pragma once
-
- #include "diffvg.h"
- #include "atomic.h"
-
- enum class FilterType {
-     Box,
-     Tent,
-     RadialParabolic, // 4/3 * (1 - (d/r)^2)
-     Hann // https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
- };
-
- struct Filter {
-     FilterType type;
-     float radius;
- };
-
- struct DFilter {
-     float radius;
- };
-
- DEVICE
- inline
- float compute_filter_weight(const Filter &filter,
-                             float dx,
-                             float dy) {
-     if (fabs(dx) > filter.radius || fabs(dy) > filter.radius) {
-         return 0;
-     }
-     if (filter.type == FilterType::Box) {
-         return 1.f / square(2 * filter.radius);
-     } else if (filter.type == FilterType::Tent) {
-         return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) /
-                square(square(filter.radius));
-     } else if (filter.type == FilterType::RadialParabolic) {
-         return (4.f / 3.f) * (1 - square(dx / filter.radius)) *
-                (4.f / 3.f) * (1 - square(dy / filter.radius));
-     } else {
-         assert(filter.type == FilterType::Hann);
-         // normalize dx, dy to [0, 1]
-         auto ndx = (dx / (2 * filter.radius)) + 0.5f;
-         auto ndy = (dy / (2 * filter.radius)) + 0.5f;
-         // the normalization factor is R^2
-         return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) *
-                0.5f * (1.f - cos(float(2 * M_PI) * ndy)) /
-                square(filter.radius);
-     }
- }
-
- DEVICE
- inline
- void d_compute_filter_weight(const Filter &filter,
-                              float dx,
-                              float dy,
-                              float d_return,
-                              DFilter *d_filter) {
-     if (filter.type == FilterType::Box) {
-         // return 1.f / square(2 * filter.radius);
-         atomic_add(d_filter->radius,
-                    d_return * (-2) * 2 * filter.radius / cubic(2 * filter.radius));
-     } else if (filter.type == FilterType::Tent) {
-         // return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) /
-         //        square(square(filter.radius));
-         auto fx = filter.radius - fabs(dx);
-         auto fy = filter.radius - fabs(dy);
-         auto norm = 1 / square(filter.radius);
-         auto d_fx = d_return * fy * norm;
-         auto d_fy = d_return * fx * norm;
-         auto d_norm = d_return * fx * fy;
-         atomic_add(d_filter->radius,
-                    d_fx + d_fy + (-4) * d_norm / pow(filter.radius, 5));
-     } else if (filter.type == FilterType::RadialParabolic) {
-         // return (4.f / 3.f) * (1 - square(dx / filter.radius)) *
-         //        (4.f / 3.f) * (1 - square(dy / filter.radius));
-         // auto d_square_x = d_return * (-4.f / 3.f);
-         // auto d_square_y = d_return * (-4.f / 3.f);
-         auto r3 = filter.radius * filter.radius * filter.radius;
-         auto d_radius = -(2 * square(dx) + 2 * square(dy)) / r3;
-         atomic_add(d_filter->radius, d_radius);
-     } else {
-         assert(filter.type == FilterType::Hann);
-         // // normalize dx, dy to [0, 1]
-         // auto ndx = (dx / (2*filter.radius)) + 0.5f;
-         // auto ndy = (dy / (2*filter.radius)) + 0.5f;
-         // // the normalization factor is R^2
-         // return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) *
-         //        0.5f * (1.f - cos(float(2 * M_PI) * ndy)) /
-         //        square(filter.radius);
-
-         // normalize dx, dy to [0, 1]
-         auto ndx = (dx / (2 * filter.radius)) + 0.5f;
-         auto ndy = (dy / (2 * filter.radius)) + 0.5f;
-         auto fx = 0.5f * (1.f - cos(float(2 * M_PI) * ndx));
-         auto fy = 0.5f * (1.f - cos(float(2 * M_PI) * ndy));
-         auto norm = 1 / square(filter.radius);
-         auto d_fx = d_return * fy * norm;
-         auto d_fy = d_return * fx * norm;
-         auto d_norm = d_return * fx * fy;
-         auto d_ndx = d_fx * 0.5f * sin(float(2 * M_PI) * ndx) * float(2 * M_PI);
-         auto d_ndy = d_fy * 0.5f * sin(float(2 * M_PI) * ndy) * float(2 * M_PI);
-         atomic_add(d_filter->radius,
-                    d_ndx * (-2 * dx / square(2 * filter.radius)) +
-                    d_ndy * (-2 * dy / square(2 * filter.radius)) +
-                    (-2) * d_norm / cubic(filter.radius));
-     }
- }
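
Editor's note: each branch of `compute_filter_weight` above is a separable 2D kernel normalized to integrate to 1 over its `[-radius, radius]^2` support. A Python transcription of the box and tent branches (a sketch for checking that normalization, not the CUDA source):

```python
def filter_weight(kind: str, radius: float, dx: float, dy: float) -> float:
    # Mirrors the Box and Tent branches of compute_filter_weight above.
    if abs(dx) > radius or abs(dy) > radius:
        return 0.0
    if kind == "box":
        return 1.0 / (2 * radius) ** 2
    if kind == "tent":
        return (radius - abs(dx)) * (radius - abs(dy)) / radius ** 4
    raise ValueError(kind)

# Midpoint-rule check that the tent kernel integrates to ~1:
r, n = 1.5, 400
h = 2 * r / n
total = sum(
    filter_weight("tent", r, -r + (i + 0.5) * h, -r + (j + 0.5) * h) * h * h
    for i in range(n)
    for j in range(n)
)
print(round(total, 3))  # ~1.0
```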
spaces/CVPR/LIVE/thrust/thrust/random/detail/random_core_access.h DELETED
@@ -1,57 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
- #pragma once
-
- namespace thrust
- {
-
- namespace random
- {
-
- namespace detail
- {
-
- struct random_core_access
- {
-
- template<typename OStream, typename EngineOrDistribution>
- static OStream &stream_out(OStream &os, const EngineOrDistribution &x)
- {
-   return x.stream_out(os);
- }
-
- template<typename IStream, typename EngineOrDistribution>
- static IStream &stream_in(IStream &is, EngineOrDistribution &x)
- {
-   return x.stream_in(is);
- }
-
- template<typename EngineOrDistribution>
- __host__ __device__
- static bool equal(const EngineOrDistribution &lhs, const EngineOrDistribution &rhs)
- {
-   return lhs.equal(rhs);
- }
-
- }; // end random_core_access
-
- } // end detail
-
- } // end random
-
- } // end thrust
-
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/fill.h DELETED
@@ -1,94 +0,0 @@
- /******************************************************************************
-  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
-
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
- #include <thrust/system/cuda/detail/util.h>
- #include <thrust/system/cuda/detail/parallel_for.h>
- #include <thrust/distance.h>
-
- namespace thrust
- {
- namespace cuda_cub {
-
- namespace __fill {
-
-   // fill functor
-   template <class Iterator, class T>
-   struct functor
-   {
-     Iterator it;
-     T value;
-
-     THRUST_FUNCTION
-     functor(Iterator it, T value)
-         : it(it), value(value) {}
-
-     template <class Size>
-     THRUST_DEVICE_FUNCTION void operator()(Size idx)
-     {
-       it[idx] = value;
-     }
-   }; // struct functor
-
- } // namespace __fill
-
- template <class Derived, class OutputIterator, class Size, class T>
- OutputIterator __host__ __device__
- fill_n(execution_policy<Derived>& policy,
-        OutputIterator first,
-        Size count,
-        const T& value)
- {
-   cuda_cub::parallel_for(policy,
-                          __fill::functor<OutputIterator, T>(
-                              first,
-                              value),
-                          count);
-
-   cuda_cub::throw_on_error(
-       cuda_cub::synchronize(policy),
-       "fill_n: failed to synchronize");
-
-   return first + count;
- } // func fill_n
-
- template <class Derived, class ForwardIterator, class T>
- void __host__ __device__
- fill(execution_policy<Derived>& policy,
-      ForwardIterator first,
-      ForwardIterator last,
-      const T& value)
- {
-   cuda_cub::fill_n(policy, first, thrust::distance(first, last), value);
- } // func fill
-
-
- } // namespace cuda_cub
- } // end namespace thrust
- #endif
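
Editor's note: the point of this header is that `fill` reduces to `fill_n`, which reduces to a `parallel_for` over indices with a functor that writes `value` at each position. A Python sketch of that shape (a thread pool standing in for the CUDA launch; illustrative only):

```python
from concurrent.futures import ThreadPoolExecutor

def fill_n(seq, count, value):
    # Mirrors __fill::functor: capture the sequence and value, write by index.
    def functor(idx):
        seq[idx] = value
    with ThreadPoolExecutor() as pool:          # "parallel_for"
        list(pool.map(functor, range(count)))   # drain = "synchronize"
    return count                                # position past the last write

buf = [0] * 8
fill_n(buf, 5, 7)
print(buf)  # [7, 7, 7, 7, 7, 0, 0, 0]
```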