parquet-converter commited on
Commit
c0cf34a
·
1 Parent(s): da557f7

Update parquet files (step 117 of 121)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1-13-am/neural-style-transfer/app.py +0 -57
  2. spaces/101-5/gpt4free/CONTRIBUTING.md +0 -8
  3. spaces/17TheWord/vits-models/text/cleaners.py +0 -475
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/7 Data Recovery Suite 4.4 Crack Recover Data from Any Device or Storage.md +0 -133
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Kung Fu Panda 2 Full Movie Download ) Discover the Legend of the Dragon Warrior.md +0 -134
  6. spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md +0 -6
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md +0 -123
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md +0 -118
  9. spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md +0 -159
  10. spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md +0 -208
  11. spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md +0 -128
  12. spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md +0 -79
  13. spaces/1phancelerku/anime-remove-background/Download Logo Quiz MOD APK and Enjoy the Fun of Recognizing Logos.md +0 -118
  14. spaces/2023Liu2023/bingo/src/lib/hooks/chat-history.ts +0 -62
  15. spaces/839871171w/newbingAI/Dockerfile +0 -34
  16. spaces/A666sxr/Genshin_TTS/utils.py +0 -263
  17. spaces/AB-TW/team-ai/agents/code_generate_agent.py +0 -229
  18. spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/models.py +0 -1124
  19. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/single_thread_env.py +0 -5
  20. spaces/AIWaves/Debate/src/agents/Component/ToolComponent.py +0 -887
  21. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/__init__.py +0 -0
  22. spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/hashConv.ts +0 -12
  23. spaces/AchyuthGamer/OpenGPT/client/css/main.css +0 -14
  24. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDuo.py +0 -57
  25. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetShownChildrenMethods.js +0 -43
  26. spaces/Akmyradov/TurkmenTTSweSTT/vits/text/cleaners.py +0 -100
  27. spaces/Ameaou/academic-chatgpt3.1/colorful.py +0 -91
  28. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/installation.md +0 -146
  29. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py +0 -62
  30. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/hub_utils.py +0 -361
  31. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/__init__.py +0 -0
  32. spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py +0 -4
  33. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py +0 -4
  34. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/group_points.py +0 -224
  35. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/__init__.py +0 -4
  36. spaces/Arnx/MusicGenXvAKN/CODE_OF_CONDUCT.md +0 -80
  37. spaces/Artgor/digit-draw-detect/src/model_architecture.py +0 -151
  38. spaces/Artrajz/vits-simple-api/bert_vits2/text/cleaner.py +0 -44
  39. spaces/Aspik101/Polish-vicuna-13b-v1.5/README.md +0 -13
  40. spaces/Benson/text-generation/Examples/Azcar Ablaikan Remix Indir.md +0 -165
  41. spaces/Benson/text-generation/Examples/Cmo Puedo Descargar El Controlador Wifi A Mi Ordenador.md +0 -6
  42. spaces/BetterAPI/BetterChat_new/src/app.d.ts +0 -17
  43. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/response.py +0 -201
  44. spaces/Blockinger/OVAChatGPT/README.md +0 -13
  45. spaces/Boadiwaa/Recipes/openai/api_resources/abstract/nested_resource_class_methods.py +0 -102
  46. spaces/Brasd99/TTS-Voice-Conversion/README.md +0 -12
  47. spaces/Brofu/Joeythemonster-anything-midjourney-v-4-1/README.md +0 -12
  48. spaces/CForGETaass/vits-uma-genshin-honkai/app.py +0 -124
  49. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py +0 -271
  50. spaces/CVPR/LIVE/pydiffvg/optimize_svg.py +0 -1607
spaces/1-13-am/neural-style-transfer/app.py DELETED
@@ -1,57 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from utils import transformer, tensor_to_img
4
- from network import Style_Transfer_Network
5
-
6
- check_point = torch.load("check_point1_0.pth", map_location = torch.device('cpu'))
7
- model = Style_Transfer_Network()
8
- model.load_state_dict(check_point['state_dict'])
9
-
10
- def style_transfer(content_img, style_strength, style_img_1 = None, iw_1 = 0, style_img_2 = None, iw_2 = 0, style_img_3 = None, iw_3 = 0, preserve_color = None):
11
- transform = transformer(imsize = 512)
12
-
13
- content = transform(content_img).unsqueeze(0)
14
-
15
- iw = [iw_1, iw_2, iw_3]
16
- interpolation_weights = [i/ sum(iw) for i in iw]
17
-
18
- style_imgs = [style_img_1, style_img_2, style_img_3]
19
- styles = []
20
- for style_img in style_imgs:
21
- if style_img is not None:
22
- styles.append(transform(style_img).unsqueeze(0))
23
- if preserve_color == "None": preserve_color = None
24
- elif preserve_color == "Whitening & Coloring": preserve_color = "whitening_and_coloring"
25
- elif preserve_color == "Histogram matching": preserve_color = "histogram_matching"
26
- with torch.no_grad():
27
- stylized_img = model(content, styles, style_strength, interpolation_weights, preserve_color = preserve_color)
28
- return tensor_to_img(stylized_img)
29
-
30
- title = "Artistic Style Transfer"
31
-
32
- content_img = gr.components.Image(label="Content image", type = "pil")
33
-
34
- style_img_1 = gr.components.Image(label="Style images", type = "pil")
35
- iw_1 = gr.components.Slider(0., 1., label = "Style 1 strength")
36
- style_img_2 = gr.components.Image(label="Style images", type = "pil")
37
- iw_2 = gr.components.Slider(0., 1., label = "Style 2 strength")
38
- style_img_3 = gr.components.Image(label="Style images", type = "pil")
39
- iw_3 = gr.components.Slider(0., 1., label = "Style 3 strength")
40
- style_strength = gr.components.Slider(0., 1., label = "Adjust style strength")
41
- preserve_color = gr.components.Dropdown(["None", "Whitening & Coloring", "Histogram matching"], label = "Choose color preserving mode")
42
-
43
- interface = gr.Interface(fn = style_transfer,
44
- inputs = [content_img,
45
- style_strength,
46
- style_img_1,
47
- iw_1,
48
- style_img_2,
49
- iw_2,
50
- style_img_3,
51
- iw_3,
52
- preserve_color],
53
- outputs = gr.components.Image(),
54
- title = title
55
- )
56
- interface.queue()
57
- interface.launch(share = True, debug = True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/101-5/gpt4free/CONTRIBUTING.md DELETED
@@ -1,8 +0,0 @@
1
- <img alt="gpt4free logo" src="https://user-images.githubusercontent.com/98614666/233799515-1a7cb6a3-b17f-42c4-956d-8d2a0664466f.png">
2
-
3
- ### Please, follow these steps to contribute:
4
- 1. Reverse a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
5
- 2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
6
- 3. Refractor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
7
-
8
- ### We will be grateful to see you as a contributor!
 
 
 
 
 
 
 
 
 
spaces/17TheWord/vits-models/text/cleaners.py DELETED
@@ -1,475 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
- import re
16
- from unidecode import unidecode
17
- import pyopenjtalk
18
- from jamo import h2j, j2hcj
19
- from pypinyin import lazy_pinyin, BOPOMOFO
20
- import jieba, cn2an
21
-
22
-
23
- # This is a list of Korean classifiers preceded by pure Korean numerals.
24
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
25
-
26
- # Regular expression matching whitespace:
27
- _whitespace_re = re.compile(r'\s+')
28
-
29
- # Regular expression matching Japanese without punctuation marks:
30
- _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
31
-
32
- # Regular expression matching non-Japanese characters or punctuation marks:
33
- _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
34
-
35
- # List of (regular expression, replacement) pairs for abbreviations:
36
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
37
- ('mrs', 'misess'),
38
- ('mr', 'mister'),
39
- ('dr', 'doctor'),
40
- ('st', 'saint'),
41
- ('co', 'company'),
42
- ('jr', 'junior'),
43
- ('maj', 'major'),
44
- ('gen', 'general'),
45
- ('drs', 'doctors'),
46
- ('rev', 'reverend'),
47
- ('lt', 'lieutenant'),
48
- ('hon', 'honorable'),
49
- ('sgt', 'sergeant'),
50
- ('capt', 'captain'),
51
- ('esq', 'esquire'),
52
- ('ltd', 'limited'),
53
- ('col', 'colonel'),
54
- ('ft', 'fort'),
55
- ]]
56
-
57
- # List of (hangul, hangul divided) pairs:
58
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
59
- ('ㄳ', 'ㄱㅅ'),
60
- ('ㄵ', 'ㄴㅈ'),
61
- ('ㄶ', 'ㄴㅎ'),
62
- ('ㄺ', 'ㄹㄱ'),
63
- ('ㄻ', 'ㄹㅁ'),
64
- ('ㄼ', 'ㄹㅂ'),
65
- ('ㄽ', 'ㄹㅅ'),
66
- ('ㄾ', 'ㄹㅌ'),
67
- ('ㄿ', 'ㄹㅍ'),
68
- ('ㅀ', 'ㄹㅎ'),
69
- ('ㅄ', 'ㅂㅅ'),
70
- ('ㅘ', 'ㅗㅏ'),
71
- ('ㅙ', 'ㅗㅐ'),
72
- ('ㅚ', 'ㅗㅣ'),
73
- ('ㅝ', 'ㅜㅓ'),
74
- ('ㅞ', 'ㅜㅔ'),
75
- ('ㅟ', 'ㅜㅣ'),
76
- ('ㅢ', 'ㅡㅣ'),
77
- ('ㅑ', 'ㅣㅏ'),
78
- ('ㅒ', 'ㅣㅐ'),
79
- ('ㅕ', 'ㅣㅓ'),
80
- ('ㅖ', 'ㅣㅔ'),
81
- ('ㅛ', 'ㅣㅗ'),
82
- ('ㅠ', 'ㅣㅜ')
83
- ]]
84
-
85
- # List of (Latin alphabet, hangul) pairs:
86
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
87
- ('a', '에이'),
88
- ('b', '비'),
89
- ('c', '시'),
90
- ('d', '디'),
91
- ('e', '이'),
92
- ('f', '에프'),
93
- ('g', '지'),
94
- ('h', '에이치'),
95
- ('i', '아이'),
96
- ('j', '제이'),
97
- ('k', '케이'),
98
- ('l', '엘'),
99
- ('m', '엠'),
100
- ('n', '엔'),
101
- ('o', '오'),
102
- ('p', '피'),
103
- ('q', '큐'),
104
- ('r', '아르'),
105
- ('s', '에스'),
106
- ('t', '티'),
107
- ('u', '유'),
108
- ('v', '브이'),
109
- ('w', '더블유'),
110
- ('x', '엑스'),
111
- ('y', '와이'),
112
- ('z', '제트')
113
- ]]
114
-
115
- # List of (Latin alphabet, bopomofo) pairs:
116
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
117
- ('a', 'ㄟˉ'),
118
- ('b', 'ㄅㄧˋ'),
119
- ('c', 'ㄙㄧˉ'),
120
- ('d', 'ㄉㄧˋ'),
121
- ('e', 'ㄧˋ'),
122
- ('f', 'ㄝˊㄈㄨˋ'),
123
- ('g', 'ㄐㄧˋ'),
124
- ('h', 'ㄝˇㄑㄩˋ'),
125
- ('i', 'ㄞˋ'),
126
- ('j', 'ㄐㄟˋ'),
127
- ('k', 'ㄎㄟˋ'),
128
- ('l', 'ㄝˊㄛˋ'),
129
- ('m', 'ㄝˊㄇㄨˋ'),
130
- ('n', 'ㄣˉ'),
131
- ('o', 'ㄡˉ'),
132
- ('p', 'ㄆㄧˉ'),
133
- ('q', 'ㄎㄧㄡˉ'),
134
- ('r', 'ㄚˋ'),
135
- ('s', 'ㄝˊㄙˋ'),
136
- ('t', 'ㄊㄧˋ'),
137
- ('u', 'ㄧㄡˉ'),
138
- ('v', 'ㄨㄧˉ'),
139
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
140
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
141
- ('y', 'ㄨㄞˋ'),
142
- ('z', 'ㄗㄟˋ')
143
- ]]
144
-
145
-
146
- # List of (bopomofo, romaji) pairs:
147
- _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
148
- ('ㄅㄛ', 'p⁼wo'),
149
- ('ㄆㄛ', 'pʰwo'),
150
- ('ㄇㄛ', 'mwo'),
151
- ('ㄈㄛ', 'fwo'),
152
- ('ㄅ', 'p⁼'),
153
- ('ㄆ', 'pʰ'),
154
- ('ㄇ', 'm'),
155
- ('ㄈ', 'f'),
156
- ('ㄉ', 't⁼'),
157
- ('ㄊ', 'tʰ'),
158
- ('ㄋ', 'n'),
159
- ('ㄌ', 'l'),
160
- ('ㄍ', 'k⁼'),
161
- ('ㄎ', 'kʰ'),
162
- ('ㄏ', 'h'),
163
- ('ㄐ', 'ʧ⁼'),
164
- ('ㄑ', 'ʧʰ'),
165
- ('ㄒ', 'ʃ'),
166
- ('ㄓ', 'ʦ`⁼'),
167
- ('ㄔ', 'ʦ`ʰ'),
168
- ('ㄕ', 's`'),
169
- ('ㄖ', 'ɹ`'),
170
- ('ㄗ', 'ʦ⁼'),
171
- ('ㄘ', 'ʦʰ'),
172
- ('ㄙ', 's'),
173
- ('ㄚ', 'a'),
174
- ('ㄛ', 'o'),
175
- ('ㄜ', 'ə'),
176
- ('ㄝ', 'e'),
177
- ('ㄞ', 'ai'),
178
- ('ㄟ', 'ei'),
179
- ('ㄠ', 'au'),
180
- ('ㄡ', 'ou'),
181
- ('ㄧㄢ', 'yeNN'),
182
- ('ㄢ', 'aNN'),
183
- ('ㄧㄣ', 'iNN'),
184
- ('ㄣ', 'əNN'),
185
- ('ㄤ', 'aNg'),
186
- ('ㄧㄥ', 'iNg'),
187
- ('ㄨㄥ', 'uNg'),
188
- ('ㄩㄥ', 'yuNg'),
189
- ('ㄥ', 'əNg'),
190
- ('ㄦ', 'əɻ'),
191
- ('ㄧ', 'i'),
192
- ('ㄨ', 'u'),
193
- ('ㄩ', 'ɥ'),
194
- ('ˉ', '→'),
195
- ('ˊ', '↑'),
196
- ('ˇ', '↓↑'),
197
- ('ˋ', '↓'),
198
- ('˙', ''),
199
- (',', ','),
200
- ('。', '.'),
201
- ('!', '!'),
202
- ('?', '?'),
203
- ('—', '-')
204
- ]]
205
-
206
-
207
- def expand_abbreviations(text):
208
- for regex, replacement in _abbreviations:
209
- text = re.sub(regex, replacement, text)
210
- return text
211
-
212
-
213
- def lowercase(text):
214
- return text.lower()
215
-
216
-
217
- def collapse_whitespace(text):
218
- return re.sub(_whitespace_re, ' ', text)
219
-
220
-
221
- def convert_to_ascii(text):
222
- return unidecode(text)
223
-
224
-
225
- def japanese_to_romaji_with_accent(text):
226
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
227
- sentences = re.split(_japanese_marks, text)
228
- marks = re.findall(_japanese_marks, text)
229
- text = ''
230
- for i, sentence in enumerate(sentences):
231
- if re.match(_japanese_characters, sentence):
232
- if text!='':
233
- text+=' '
234
- labels = pyopenjtalk.extract_fullcontext(sentence)
235
- for n, label in enumerate(labels):
236
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
237
- if phoneme not in ['sil','pau']:
238
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
239
- else:
240
- continue
241
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
242
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
243
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
244
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
245
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
246
- a2_next=-1
247
- else:
248
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
249
- # Accent phrase boundary
250
- if a3 == 1 and a2_next == 1:
251
- text += ' '
252
- # Falling
253
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
254
- text += '↓'
255
- # Rising
256
- elif a2 == 1 and a2_next == 2:
257
- text += '↑'
258
- if i<len(marks):
259
- text += unidecode(marks[i]).replace(' ','')
260
- return text
261
-
262
-
263
- def latin_to_hangul(text):
264
- for regex, replacement in _latin_to_hangul:
265
- text = re.sub(regex, replacement, text)
266
- return text
267
-
268
-
269
- def divide_hangul(text):
270
- for regex, replacement in _hangul_divided:
271
- text = re.sub(regex, replacement, text)
272
- return text
273
-
274
-
275
- def hangul_number(num, sino=True):
276
- '''Reference https://github.com/Kyubyong/g2pK'''
277
- num = re.sub(',', '', num)
278
-
279
- if num == '0':
280
- return '영'
281
- if not sino and num == '20':
282
- return '스무'
283
-
284
- digits = '123456789'
285
- names = '일이삼사오육칠팔구'
286
- digit2name = {d: n for d, n in zip(digits, names)}
287
-
288
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
289
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
290
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
291
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
292
-
293
- spelledout = []
294
- for i, digit in enumerate(num):
295
- i = len(num) - i - 1
296
- if sino:
297
- if i == 0:
298
- name = digit2name.get(digit, '')
299
- elif i == 1:
300
- name = digit2name.get(digit, '') + '십'
301
- name = name.replace('일십', '십')
302
- else:
303
- if i == 0:
304
- name = digit2mod.get(digit, '')
305
- elif i == 1:
306
- name = digit2dec.get(digit, '')
307
- if digit == '0':
308
- if i % 4 == 0:
309
- last_three = spelledout[-min(3, len(spelledout)):]
310
- if ''.join(last_three) == '':
311
- spelledout.append('')
312
- continue
313
- else:
314
- spelledout.append('')
315
- continue
316
- if i == 2:
317
- name = digit2name.get(digit, '') + '백'
318
- name = name.replace('일백', '백')
319
- elif i == 3:
320
- name = digit2name.get(digit, '') + '천'
321
- name = name.replace('일천', '천')
322
- elif i == 4:
323
- name = digit2name.get(digit, '') + '만'
324
- name = name.replace('일만', '만')
325
- elif i == 5:
326
- name = digit2name.get(digit, '') + '십'
327
- name = name.replace('일십', '십')
328
- elif i == 6:
329
- name = digit2name.get(digit, '') + '백'
330
- name = name.replace('일백', '백')
331
- elif i == 7:
332
- name = digit2name.get(digit, '') + '천'
333
- name = name.replace('일천', '천')
334
- elif i == 8:
335
- name = digit2name.get(digit, '') + '억'
336
- elif i == 9:
337
- name = digit2name.get(digit, '') + '십'
338
- elif i == 10:
339
- name = digit2name.get(digit, '') + '백'
340
- elif i == 11:
341
- name = digit2name.get(digit, '') + '천'
342
- elif i == 12:
343
- name = digit2name.get(digit, '') + '조'
344
- elif i == 13:
345
- name = digit2name.get(digit, '') + '십'
346
- elif i == 14:
347
- name = digit2name.get(digit, '') + '백'
348
- elif i == 15:
349
- name = digit2name.get(digit, '') + '천'
350
- spelledout.append(name)
351
- return ''.join(elem for elem in spelledout)
352
-
353
-
354
- def number_to_hangul(text):
355
- '''Reference https://github.com/Kyubyong/g2pK'''
356
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
357
- for token in tokens:
358
- num, classifier = token
359
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
360
- spelledout = hangul_number(num, sino=False)
361
- else:
362
- spelledout = hangul_number(num, sino=True)
363
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
364
- # digit by digit for remaining digits
365
- digits = '0123456789'
366
- names = '영일이삼사오육칠팔구'
367
- for d, n in zip(digits, names):
368
- text = text.replace(d, n)
369
- return text
370
-
371
-
372
- def number_to_chinese(text):
373
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
374
- for number in numbers:
375
- text = text.replace(number, cn2an.an2cn(number),1)
376
- return text
377
-
378
-
379
- def chinese_to_bopomofo(text):
380
- text=text.replace('、',',').replace(';',',').replace(':',',')
381
- words=jieba.lcut(text,cut_all=False)
382
- text=''
383
- for word in words:
384
- bopomofos=lazy_pinyin(word,BOPOMOFO)
385
- if not re.search('[\u4e00-\u9fff]',word):
386
- text+=word
387
- continue
388
- for i in range(len(bopomofos)):
389
- if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
390
- bopomofos[i]+='ˉ'
391
- if text!='':
392
- text+=' '
393
- text+=''.join(bopomofos)
394
- return text
395
-
396
-
397
- def latin_to_bopomofo(text):
398
- for regex, replacement in _latin_to_bopomofo:
399
- text = re.sub(regex, replacement, text)
400
- return text
401
-
402
-
403
- def bopomofo_to_romaji(text):
404
- for regex, replacement in _bopomofo_to_romaji:
405
- text = re.sub(regex, replacement, text)
406
- return text
407
-
408
-
409
- def basic_cleaners(text):
410
- '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
411
- text = lowercase(text)
412
- text = collapse_whitespace(text)
413
- return text
414
-
415
-
416
- def transliteration_cleaners(text):
417
- '''Pipeline for non-English text that transliterates to ASCII.'''
418
- text = convert_to_ascii(text)
419
- text = lowercase(text)
420
- text = collapse_whitespace(text)
421
- return text
422
-
423
-
424
- def japanese_cleaners(text):
425
- text=japanese_to_romaji_with_accent(text)
426
- if re.match('[A-Za-z]',text[-1]):
427
- text += '.'
428
- return text
429
-
430
-
431
- def japanese_cleaners2(text):
432
- return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
433
-
434
-
435
- def korean_cleaners(text):
436
- '''Pipeline for Korean text'''
437
- text = latin_to_hangul(text)
438
- text = number_to_hangul(text)
439
- text = j2hcj(h2j(text))
440
- text = divide_hangul(text)
441
- if re.match('[\u3131-\u3163]',text[-1]):
442
- text += '.'
443
- return text
444
-
445
-
446
- def chinese_cleaners(text):
447
- '''Pipeline for Chinese text'''
448
- text=number_to_chinese(text)
449
- text=chinese_to_bopomofo(text)
450
- text=latin_to_bopomofo(text)
451
- if re.match('[ˉˊˇˋ˙]',text[-1]):
452
- text += '。'
453
- return text
454
-
455
-
456
- def zh_ja_mixture_cleaners(text):
457
- chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
458
- japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
459
- for chinese_text in chinese_texts:
460
- cleaned_text=number_to_chinese(chinese_text[4:-4])
461
- cleaned_text=chinese_to_bopomofo(cleaned_text)
462
- cleaned_text=latin_to_bopomofo(cleaned_text)
463
- cleaned_text=bopomofo_to_romaji(cleaned_text)
464
- cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
465
- cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
466
- cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
467
- cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
468
- text = text.replace(chinese_text,cleaned_text+' ',1)
469
- for japanese_text in japanese_texts:
470
- cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
471
- text = text.replace(japanese_text,cleaned_text+' ',1)
472
- text=text[:-1]
473
- if re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
474
- text += '.'
475
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/7 Data Recovery Suite 4.4 Crack Recover Data from Any Device or Storage.md DELETED
@@ -1,133 +0,0 @@
1
-
2
- <h1>7 Data Recovery Suite 4.4 Crack Download HERE !</h1>
3
- <p>Have you ever lost your important data due to accidental deletion, formatting, virus attack, or any other reason? If yes, then you might be looking for a reliable and effective data recovery tool that can help you get back your lost files. One such tool is 7 Data Recovery Suite 4.4, which is a popular and powerful software that can recover data from various scenarios and devices.</p>
4
- <p>However, the official version of 7 Data Recovery Suite 4.4 is not free, and you need to pay for a license to use its full features. That's why some people may try to find a crack version of the software online, which claims to offer the same functionality without any cost. But is it safe and legal to use a cracked version of 7 Data Recovery Suite 4.4? And how can you download and install it on your computer?</p>
5
- <h2>7 Data Recovery Suite 4.4 Crack Download HERE !</h2><br /><p><b><b>Download</b> >>> <a href="https://byltly.com/2uKyCw">https://byltly.com/2uKyCw</a></b></p><br /><br />
6
- <p>In this article, we will answer these questions and provide you with a detailed guide on how to use 7 Data Recovery Suite 4.4 Crack. We will also warn you about the potential risks of using a cracked software and suggest a better alternative for data recovery.</p>
7
- <h2>What is 7 Data Recovery Suite 4.4?</h2>
8
- <p>7 Data Recovery Suite 4.4 is a comprehensive data recovery software that can recover deleted, formatted, or lost data from hard disks, memory cards, flash drives, and other storage devices. It supports various file types, such as photos, videos, audios, documents, emails, etc.</p>
9
- <p>The software consists of four modules that can handle different data loss situations:</p>
10
- <ul>
11
- <li>Deleted Recovery: This module can recover data that has been deleted by mistake or emptied from the Recycle Bin.</li>
12
- <li>Complete Recovery: This module can recover data from damaged, corrupted, or inaccessible partitions or drives.</li>
13
- <li>Lost Partition Recovery: This module can recover data from deleted or lost partitions due to disk crash, MBR corruption, or repartitioning.</li>
14
- <li>Digital Media Recovery: This module can recover data from digital media devices, such as cameras, MP3 players, smartphones, etc.</li>
15
- </ul>
16
- <h3>Features of 7 Data Recovery Suite 4.4</h3>
17
- <p>Some of the main features of 7 Data Recovery Suite 4.4 are:</p>
18
- <ul>
19
- <li>It can recover data from various scenarios, such as accidental deletion, formatting, virus attack, system crash, etc.</li>
20
- <li>It can recover data from various devices, such as hard disks, memory cards, flash drives, etc.</li>
21
- <li>It can recover various file types, such as photos, videos, audios, documents, emails, etc.</li>
22
- <li>It can recover data from FAT12/16/32/exFAT/NTFS/EXT2/EXT3 file systems.</li>
23
- <li>It can recover data from Windows XP/Vista/7/8/10 and Windows Server 2003/2008.</li>
24
- <li>It has a user-friendly interface that allows easy selection and recovery of data.</li>
25
- <li>It has a preview function that allows viewing the recoverable files before recovery.</li>
26
- <li>It has a fast scanning speed that can save time and resources.</li>
27
- </ul>
28
- <h3>Benefits of 7 Data Recovery Suite 4.4</h3>
29
- <p>Some of the benefits of using 7 Data Recovery Suite 4.4 are:</p>
30
- <ul>
31
- <li>It can help you recover your valuable data that may otherwise be lost forever.</li>
32
- <li>It can help you save money and time that may be spent on professional data recovery services.</li>
33
- <li>It can help you avoid stress and frustration that may be caused by data loss.</li>
34
- <li>It can help you protect your privacy and security by recovering your personal or confidential files.</li>
35
- </ul>
36
- <h2>How to download and install 7 Data Recovery Suite 4.4 Crack?</h2>
37
- <p>If you want to use the full features of 7 Data Recovery Suite 4.4 without paying for a license, you may be tempted to download and install a crack version of the software online. However, this is not recommended for several reasons that we will discuss later in this article.</p>
38
- <p>If you still want to try it at your own risk, here are the steps to download and install 7 Data Recovery Suite 4.4 Crack:</p>
39
- <p>How to get 7 Data Recovery Suite 4.4 Crack for free<br />
40
- 7 Data Recovery Suite 4.4 Crack full version download link<br />
41
- Best data recovery software with 7 Data Recovery Suite 4.4 Crack<br />
42
- 7 Data Recovery Suite 4.4 Crack license key generator<br />
43
- Download 7 Data Recovery Suite 4.4 Crack with serial key<br />
44
- 7 Data Recovery Suite 4.4 Crack activation code online<br />
45
- Recover deleted files with 7 Data Recovery Suite 4.4 Crack<br />
46
- 7 Data Recovery Suite 4.4 Crack review and features<br />
47
- 7 Data Recovery Suite 4.4 Crack tutorial and guide<br />
48
- 7 Data Recovery Suite 4.4 Crack system requirements and compatibility<br />
49
- Is 7 Data Recovery Suite 4.4 Crack safe and legit<br />
50
- 7 Data Recovery Suite 4.4 Crack alternatives and competitors<br />
51
- Pros and cons of using 7 Data Recovery Suite 4.4 Crack<br />
52
- How to update 7 Data Recovery Suite 4.4 Crack to the latest version<br />
53
- How to uninstall 7 Data Recovery Suite 4.4 Crack completely<br />
54
- How to fix errors and issues with 7 Data Recovery Suite 4.4 Crack<br />
55
- How to backup and restore data with 7 Data Recovery Suite 4.4 Crack<br />
56
- How to recover data from formatted or corrupted drives with 7 Data Recovery Suite 4.4 Crack<br />
57
- How to recover data from SD card, USB flash drive, or external hard drive with 7 Data Recovery Suite 4.4 Crack<br />
58
- How to recover data from Android or iOS devices with 7 Data Recovery Suite 4.4 Crack<br />
59
- How to recover data from Windows or Mac computers with 7 Data Recovery Suite 4.4 Crack<br />
60
- How to recover data from different file systems with 7 Data Recovery Suite 4.4 Crack<br />
61
- How to recover data from various scenarios with 7 Data Recovery Suite 4.4 Crack<br />
62
- How to recover photos, videos, audio, documents, emails, or other files with 7 Data Recovery Suite 4.4 Crack<br />
63
- How to recover lost or forgotten passwords with 7 Data Recovery Suite 4.4 Crack<br />
64
- How to recover data from encrypted or protected files with 7 Data Recovery Suite 4.4 Crack<br />
65
- How to recover data from RAID arrays or partitions with 7 Data Recovery Suite 4.4 Crack<br />
66
- How to recover data from cloud storage or online services with 7 Data Recovery Suite 4.4 Crack<br />
67
- How to recover data from virtual machines or disks with 7 Data Recovery Suite 4.4 Crack<br />
68
- How to recover data from optical discs or floppy disks with 7 Data Recovery Suite 4.4 Crack<br />
69
- How to use advanced tools and settings in 7 Data Recovery Suite 4.4 Crack<br />
70
- How to customize and optimize the performance of 7 Data Recovery Suite 4.4 Crack<br />
71
- How to contact the support team of 7 Data Recovery Suite 4.4 Crack<br />
72
- How to get a refund or exchange for the purchase of the product key of the software.</p>
73
- <h3>Download link</h3>
74
- <p>You can find many websites that offer a download link for 7 Data Recovery Suite 4.4 Crack online. However, you should be careful about the source and the authenticity of the file. Some websites may provide fake or malicious files that may harm your computer or steal your information.</p>
75
- <p>One possible website that claims to provide a working download link for 7 Data Recovery Suite 4.4 Crack is https://kolompc.com/7-data-recovery-suite/. However, we cannot guarantee its safety or reliability.</p>
76
- <h3>Installation steps</h3>
77
- <p>After downloading the file from the website above or any other source, you need to follow these steps to install it on your computer:</p>
78
- <ol>
79
- <li>Extract the ZIP file to a folder on your computer.</li>
80
- <li>Run the setup.exe file as administrator and follow the instructions to install the software.</li>
81
- <li>After installation, do not run the software yet.</li>
82
- <li>Copy the crack file from the folder and paste it into the installation directory of the software (usually C:\Program Files\7-DataRecoverySuite).</li>
83
- <li>Run the software and enjoy its full features.</li>
84
- </ol>
85
- <h2>How to use 7 Data Recovery Suite 4.4 Crack?</h2>
86
- <p>After installing the crack version of 7 Data Recovery Suite 4.4 on your computer, you can use it to recover your lost data by following these steps:</p>
87
- <h3>Select a recovery mode</h3>
88
- <p>Launch the software and select one of the four recovery modes according to your data loss situation:</p>
89
- <ul>
90
- <li>If you want to recover deleted files or files emptied from Recycle Bin, select Deleted Recovery.</li>
91
- <li>If you want to recover data from formatted or inaccessible partitions or drives, select Complete Recovery.</li>
92
- <li>If you want to recover data from deleted or lost partitions due to disk crash or repartitioning, select Lost Partition Recovery.</li>
93
- <li>If you want to recover data from digital media devices like cameras or smartphones, select Digital Media Recovery.</li>
94
- </ul>
95
- <h3>Scan the device or partition</h3>
96
- <p>Select the device or partition where you lost your data and click Next to start scanning for recoverable files. The scanning process may take some time depending on the size and condition of your device or partition.</p>
97
- <h3>Preview and recover the data</h3>
98
- <p>After scanning is completed, you can preview the found files by clicking on them in the left pane. You can also filter them by file type or path in the right pane. Select the files that you want to recover and click Recover to save them to a location of your choice on your computer or another device.</p>
99
- <h2>Risks of using 7 Data Recovery Suite 4.4 Crack</h2>
100
- <p>While using a crack version of 7 Data Recovery Suite 4.4 may seem tempting for some people who want to save money and enjoy its full features without paying for a license, there are also some serious risks involved in doing so:</p>
101
- <h3>Virus or malware infection</h3>
102
- <p>The crack file that you download online may contain virus or malware that can infect your computer and damage your system files or programs. It may also steal your personal information or encrypt your data and demand ransom for decryption.</p>
103
- <h3>Privacy breach</h3>
104
- <h3>Legal issues</h3>
105
- <p>The use of cracked software is also illegal in most countries, as it violates the software copyright law. By using a cracked version of 7 Data Recovery Suite 4.4, you are infringing on the rights of the software developers and distributors who invested time and money to create and market the product.</p>
106
- <p>You may face legal consequences if you are caught using or distributing cracked software, such as fines, lawsuits, or even imprisonment. Moreover, you may also lose your academic or professional reputation if you use cracked software for your research or work projects.</p>
107
- <h2>Conclusion</h2>
108
- <p>7 Data Recovery Suite 4.4 is a powerful and comprehensive data recovery software that can help you recover your lost data from various scenarios and devices. However, using a crack version of the software is not a wise choice, as it comes with many risks and disadvantages.</p>
109
- <p>Using cracked software can expose your computer to virus or malware infection, breach your privacy and security, and cause legal issues for you and your organization. Moreover, using cracked software is unethical and unfair to the software developers and distributors who deserve to be compensated for their work.</p>
110
- <p>Therefore, we recommend that you avoid using 7 Data Recovery Suite 4.4 Crack and look for a better alternative for data recovery. One such alternative is Recoverit, which is a reliable and professional data recovery tool that can recover data from various scenarios and devices with high success rate and ease of use.</p>
111
- <h2>FAQs</h2>
112
- <ul>
113
- <li><b>Q: What is the difference between 7 Data Recovery Suite 4.4 Crack and 7 Data Recovery Suite 4.4?</b></li>
114
- <li>A: 7 Data Recovery Suite 4.4 Crack is a modified version of 7 Data Recovery Suite 4.4 that bypasses the license key and allows users to use the full features of the software without paying for it. However, 7 Data Recovery Suite 4.4 Crack is illegal, unsafe, and unreliable.</li>
115
- <li><b>Q: How can I get a legitimate version of 7 Data Recovery Suite 4.4?</b></li>
116
- <li>A: You can get a legitimate version of 7 Data Recovery Suite 4.4 by purchasing a license from the official website of Disk Drill, which is the new owner of 7 Data Recovery Suite. You can choose from different plans and prices according to your needs and preferences.</li>
117
- <li><b>Q: How can I recover data from my computer or device without using cracked software?</b></li>
118
- <li>A: You can recover data from your computer or device without using cracked software by using a reputable and trustworthy data recovery tool like Recoverit. Recoverit can help you recover data from various scenarios and devices with high success rate and ease of use.</li>
119
- <li><b>Q: What are some tips to prevent data loss in the future?</b></li>
120
- <li>A: Some tips to prevent data loss in the future are:</li>
121
- <ul>
122
- <li>Back up your data regularly to an external drive or cloud service.</li>
123
- <li>Use antivirus software and firewall to protect your computer from virus or malware attack.</li>
124
- <li>Avoid opening suspicious email attachments or clicking on unknown links.</li>
125
- <li>Use a power surge protector to prevent power outage or voltage fluctuation.</li>
126
- <li>Handle your devices with care and avoid physical damage or water spillage.</li>
127
- </ul>
128
- <li><b>Q: Where can I find more information about data recovery and related topics?</b></li>
129
- <li>A: You can find more information about data recovery and related topics by visiting our website https://toolbox.iskysoft.com/free-file-recovery/ where we provide useful articles, guides, tips, and reviews on data recovery and other tech topics.</li>
130
- </ul>
131
- </p> 0a6ba089eb<br />
132
- <br />
133
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Kung Fu Panda 2 Full Movie Download ) Discover the Legend of the Dragon Warrior.md DELETED
@@ -1,134 +0,0 @@
1
-
2
- <h1>Kung Fu Panda 2: A Fun and Action-Packed Sequel</h1>
3
- <p>If you are looking for a movie that combines humor, adventure, emotion, and stunning animation, you might want to check out <i>Kung Fu Panda 2</i>. This film is the sequel to <i>Kung Fu Panda</i> (2008), which introduced us to Po, a clumsy but lovable panda who became the Dragon Warrior and saved China from the evil Tai Lung.</p>
4
- <h2>HD Online Player (Kung Fu Panda 2 Full Movie Download )</h2><br /><p><b><b>Download</b> &#10002; &#10002; &#10002; <a href="https://byltly.com/2uKxxH">https://byltly.com/2uKxxH</a></b></p><br /><br />
5
- <p>In this film, Po faces a new challenge: Lord Shen, a peacock who has invented a weapon that can destroy kung fu and conquer China. Along with his friends, the Furious Five, Po must stop Shen before it is too late. But along the way, Po also discovers some secrets about his past and his true identity.</p>
6
- <p><i>Kung Fu Panda 2</i> was released in 2011 by DreamWorks Animation and Paramount Pictures. It was directed by Jennifer Yuh Nelson, who became the first woman to solely direct an animated feature film from a major Hollywood studio. It was written by Jonathan Aibel and Glenn Berger, who also wrote the first film.</p>
7
- <p>The film received critical acclaim for its story, characters, animation, music, and themes. It was nominated for an Academy Award for Best Animated Feature, losing to <i>Rango</i>. It also became the highest-grossing film directed by a woman until <i>Frozen</i> (2013), as well as the highest-grossing film solely directed by a woman until <i>Wonder Woman</i> (2017). It is also the sixth highest-grossing film of 2011, and the highest-grossing animated feature film of the year.</p>
8
- <h2>The Story of Kung Fu Panda 2</h2>
9
- <p>The film begins with a flashback that tells us how Lord Shen, the son of the peacock rulers of Gongmen City, became obsessed with using fireworks as a weapon. He learned of a prophecy that said he would be defeated by "a warrior of black and white". He then ordered his wolf army to kill all the pandas in China, hoping to prevent the prophecy from coming true.</p>
10
- <p>Shen's parents were horrified by his actions and banished him from their city. Shen swore revenge and vowed to return with his weapon one day.</p>
11
- <p>In the present day, Po is enjoying his life as the Dragon Warrior and the leader of the Furious Five: Tigress, Monkey, Viper, Crane, and Mantis. He is also learning more about kung fu from his mentor, Master Shifu.</p>
12
- <p>One day, Po and his friends are sent to stop a group of wolf bandits who are stealing metal for Shen's weapon. Po has a flashback of his mother when he sees a symbol on one of the wolves' armor.</p>
13
- <p>Po becomes curious about his past and asks his adoptive father, Mr. Ping, about where he came from.</p>
14
- <p>Watch Kung Fu Panda 2 in HD quality online for free<br />
15
- How to download Kung Fu Panda 2 full movie in HD<br />
16
- Kung Fu Panda 2 streaming online with subtitles<br />
17
- Best HD online player for Kung Fu Panda 2 movie<br />
18
- Kung Fu Panda 2 full movie download link<br />
19
- Kung Fu Panda 2 HD online player without ads<br />
20
- Where to watch Kung Fu Panda 2 full movie online<br />
21
- Kung Fu Panda 2 full movie HD download torrent<br />
22
- Kung Fu Panda 2 online streaming HD quality<br />
23
- Kung Fu Panda 2 full movie download in Hindi<br />
24
- Kung Fu Panda 2 HD online player for Android<br />
25
- Kung Fu Panda 2 full movie download in Tamil<br />
26
- Kung Fu Panda 2 online watch HD free<br />
27
- Kung Fu Panda 2 full movie download in Telugu<br />
28
- Kung Fu Panda 2 HD online player for PC<br />
29
- Kung Fu Panda 2 full movie download in Malayalam<br />
30
- Kung Fu Panda 2 online HD with English subtitles<br />
31
- Kung Fu Panda 2 full movie download in Kannada<br />
32
- Kung Fu Panda 2 HD online player for iOS<br />
33
- Kung Fu Panda 2 full movie download in Bengali<br />
34
- Kung Fu Panda 2 online HD with Hindi dubbing<br />
35
- Kung Fu Panda 2 full movie download in Marathi<br />
36
- Kung Fu Panda 2 HD online player for Mac<br />
37
- Kung Fu Panda 2 full movie download in Urdu<br />
38
- Kung Fu Panda 2 online HD with Tamil dubbing<br />
39
- Kung Fu Panda 2 full movie download in Gujarati<br />
40
- Kung Fu Panda 2 HD online player for Windows<br />
41
- Kung Fu Panda 2 full movie download in Punjabi<br />
42
- Kung Fu Panda 2 online HD with Telugu dubbing<br />
43
- Kung Fu Panda 2 full movie download in Nepali<br />
44
- Kung Fu Panda 2 HD online player for Linux<br />
45
- Kung Fu Panda 2 full movie download in Sinhala<br />
46
- Kung Fu Panda 2 online HD with Malayalam dubbing<br />
47
- Kung Fu Panda 2 full movie download in Indonesian<br />
48
- Kung Fu Panda 2 HD online player for Chromebook<br />
49
- Kung Fu Panda 2 full movie download in Filipino<br />
50
- Kung Fu Panda 2 online HD with Kannada dubbing<br />
51
- Kung Fu Panda 2 full movie download in Vietnamese<br />
52
- Kung Fu Panda 2 HD online player for Roku<br />
53
- Kung Fu Panda 2 full movie download in Thai<br />
54
- Kung Fu Panda 2 online HD with Bengali dubbing<br />
55
- Kung Fu Panda 2 full movie download in Arabic<br />
56
- Kung Fu Panda 2 HD online player for Firestick<br />
57
- Kung Fu Panda 2 full movie download in Persian<br />
58
- Kung Fu Panda 2 online HD with Urdu dubbing<br />
59
- Kung Fu Panda 2 full movie download in Turkish<br />
60
- Kung Fu Panda 2 HD online player for Smart TV<br />
61
- Kung Fu Panda 2 full movie download in Korean<br />
62
- Kung Fu Panda 2 online HD with Gujarati dubbing</p>
63
- <p>Mr. Ping tells him that he found him in a radish crate when he was a baby and decided to raise him as his son.</p>
64
- <p>Po is not satisfied with this answer and decides to find out more about his origins.</p>
65
- <p>He learns from Master Shifu that Shen has returned to Gongmen City with his weapon, which is a cannon that can fire metal balls with explosive force.</p>
66
- <p>Po and his friends travel to Gongmen City to stop Shen.</p>
67
- <p>There they meet two other kung fu masters who have been hiding from Shen: Master Ox and Master Croc.</p>
68
- <p>They also encounter Shen's old nanny, a goat named Soothsayer, who can see the future.</p>
69
- <p>Po tries to confront Shen several times but fails due to his flashbacks.</p>
70
- <p>He eventually learns that Shen was responsible for killing his parents and destroying his village.</p>
71
- <p>Po is devastated by this revelation but also determined to stop Shen once and for all.</p>
72
- <p>He realizes that he must achieve inner peace in order to overcome his past trauma.</p>
73
- <p>With the help of Soothsayer, Po meditates on his memories and accepts them as part of who he is.</p>
74
- <p>He then leads his friends into a final battle against Shen and his army.</p>
75
- <p>Po uses his kung fu skills to deflect Shen's cannonballs back at him.</p>
76
- <p>He also tries to persuade Shen to let go of his hatred and find inner peace.</p>
77
- <p>Shen refuses to listen and attacks Po with his blades.</p>
78
- <p>Po dodges them but one of them cuts through Shen's cannon ropes, causing it to fall on him.</p>
79
- <p>Shen is crushed by his own weapon while Po watches in sadness.</p>
80
- <p>Po then returns to Mr. Ping's noodle shop with his friends.</p>
81
- <p>He tells Mr. Ping that he knows he is not his biological father but he still loves him as his dad.</p>
82
- <p>Mr. Ping hugs him and tells him that he loves him too.</p>
83
- <p>The film ends with a scene showing that Po's biological father is still alive somewhere in China with other pandas.</p>
84
- <h2>The Characters of Kung Fu Panda 2</h2>
85
- <table>
86
- <tr>
87
- <th>Name</th>
88
- <th>Voice Actor</th>
89
- <th>Description</th>
90
- </tr>
91
- <tr>
92
- <td>Po</td>
93
- <td>Jack Black</td>
94
- <td>The Dragon Warrior and the leader of the Furious Five. He is a panda who loves kung fu, food, and fun. He is brave, loyal, optimistic, friendly, clumsy, naive, but also smart when it matters most.<td>
95
- </tr>
96
- <tr>
97
- <td>Tigress</td>
98
- <td>Angelina Jolie</td>
99
- <td>Po's closest friend and a fierce fighter. She is a tiger who is strong, serious, disciplined, stoic,<td>
100
- and sometimes cold but also caring deep down. </tr>
101
- <tr>
102
- <td>Monkey</td>
103
- <td>Jackie Chan</td>
104
- <h2>The Animation and Music of Kung Fu Panda 2</h2>
105
- <p>The animation of <i>Kung Fu Panda 2</i> is a remarkable achievement that combines computer animation and traditional animation techniques. The film uses computer animation for the main characters and the backgrounds, but also incorporates hand-drawn animation for some of the flashback scenes and the opening sequence. The hand-drawn animation gives the film a more stylized and artistic look that pays homage to Chinese painting and calligraphy.</p>
106
- <p>The visual style of the film is also influenced by the culture and history of China. The film features various locations and landmarks that are based on real places in China, such as the Great Wall, the Forbidden City, and the Terracotta Army. The film also uses elements of Chinese mythology, such as dragons, phoenixes, and kites. The film also incorporates symbols and motifs that are relevant to the story and the characters, such as peacock feathers, lotus flowers, yin and yang, and fireworks.</p>
107
- <p>The music of <i>Kung Fu Panda 2</i> is composed by Hans Zimmer and John Powell, who also composed the music for the first film. The score blends orchestral music with traditional Chinese instruments, such as erhu, pipa, guzheng, dizi, and suona. The score also features themes and motifs that reflect the characters and their emotions, such as Po's theme, Shen's theme, and the inner peace theme. The score also includes some original songs by CeeLo Green and Jay Chou, who sing "Kung Fu Fighting" and "Nunchucks" respectively.</p>
108
- <h2>The Reception and Legacy of Kung Fu Panda 2</h2>
109
- <p><i>Kung Fu Panda 2</i> was released in theaters on May 26, 2011 in 2D, RealD 3D, Digital 3D and 4DX. The film received positive reviews from critics and audiences alike, who praised its story, characters, animation, music, and themes. The film has a 81% approval rating on Rotten Tomatoes based on 179 reviews, with an average rating of 7/10. The website's critical consensus reads: "<i>Kung Fu Panda 2</i> offers enough action, comedy, and visual sparkle to compensate for its somewhat familiar plot."</p>
110
- <p>The film was also a commercial success, grossing $665.7 million worldwide against its $150 million budget. It became the highest-grossing film directed by a woman until <i>Frozen</i> (2013), as well as the highest-grossing film solely directed by a woman until <i>Wonder Woman</i> (2017). It is also the sixth highest-grossing film of 2011, and the highest-grossing animated feature film of the year.</p>
111
- <p>The film was nominated for various awards, including an Academy Award for Best Animated Feature at the 84th Academy Awards, losing to <i>Rango</i>. It also received nominations from the Annie Awards, the Golden Globe Awards, the BAFTA Awards, the Critics' Choice Awards, and the Kids' Choice Awards.</p>
112
- <p>The film had a significant impact on female directors and animators in Hollywood. Jennifer Yuh Nelson became the second woman to be nominated for an Academy Award for Best Animated Feature after Marjane Satrapi for <i>Persepolis</i> (2007). She also became one of the most successful female directors in terms of box office gross. She returned to direct <i>Kung Fu Panda 3</i>, which was co-directed by Alessandro Carloni.</p>
113
- <p>The film also spawned a sequel and a franchise expansion. <i>Kung Fu Panda 3</i> was released in January 2016 and continued Po's story as he reunited with his biological father and faced a new enemy. The film was also well-received by critics and audiences and grossed $521.2 million worldwide. A fourth film is currently in development at DreamWorks Animation.</p>
114
- <p>Besides the films, the franchise also includes several short films, such as <i>Kung Fu Panda: Secrets of the Masters</i>, <i>Kung Fu Panda: Secrets of the Scroll</i>, and <i>Kung Fu Panda: Secrets of the Furious Five</i>. It also includes two television series: <i>Kung Fu Panda: Legends of Awesomeness</i>, which ran from 2011 to 2016 on Nickelodeon; and <i>Kung Fu Panda: The Paws of Destiny</i>, which premiered in 2018 on Amazon Prime Video.</p>
115
- <h1>Conclusion</h1>
116
- <p><i>Kung Fu Panda 2</i> is a fun and action-packed sequel that delivers on its promise of humor, adventure,<td>
117
- emotion, <p>and adults alike, as it offers a compelling story, engaging characters, beautiful animation, and memorable music. It is a film that celebrates the art of kung fu, the culture of China, and the themes of family, destiny, and inner peace. It is a film that showcases the talents of its director, writers, voice actors, animators, and composers. It is a film that deserves to be watched and enjoyed by everyone.</p>
118
- <h2>FAQs</h2>
119
- <p>Here are some frequently asked questions about <i>Kung Fu Panda 2</i>:</p>
120
- <ul>
121
- <li><b>Is <i>Kung Fu Panda 2</i> based on a true story?</b></li>
122
- <p>No, <i>Kung Fu Panda 2</i> is not based on a true story. It is a fictional story that takes place in a fantasy world of anthropomorphic animals who practice kung fu. However, the film does draw inspiration from real aspects of Chinese culture, history, and mythology.</p>
123
- <li><b>What is the name of Lord Shen's weapon?</b></li>
124
- <p>Lord Shen's weapon is a cannon that can fire metal balls with explosive force. It is based on the real invention of gunpowder and firearms in China during the Song dynasty (960-1279 CE).</p>
125
- <li><b>What is the meaning of Po's name?</b></li>
126
- <p>Po's name means "precious" or "treasure" in Chinese. It is also a homophone for the word "potato" in Mandarin, which is a reference to Po's chubby appearance and his love for food.</p>
127
- <li><b>What are the names of Po's biological parents?</b></li>
128
- <p>Po's biological parents are Li Shan and Mei Mei. They are both giant pandas who live in a hidden panda village in the mountains. They appear in <i>Kung Fu Panda 3</i>, where Po reunites with them and learns more about his heritage.</p>
129
- <li><b>How many films are there in the <i>Kung Fu Panda</i> franchise?</b></li>
130
- <p>There are currently three films in the <i>Kung Fu Panda</i> franchise: <i>Kung Fu Panda</i> (2008), <i>Kung Fu Panda 2</i> (2011), and <i>Kung Fu Panda 3</i> (2016). A fourth film is currently in development at DreamWorks Animation.</p>
131
- </ul>
132
- </p> 0a6ba089eb<br />
133
- <br />
134
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Ad-aware 6.0 Professional REPACK Keygen Serial Key.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Ad-aware 6.0 Professional Keygen Serial Key</h2><br /><p><b><b>DOWNLOAD</b> &#10038; <a href="https://imgfil.com/2uy1Ko">https://imgfil.com/2uy1Ko</a></b></p><br /><br />
2
- <br />
3
- If the product continues to prompt you for an activation code you have been unsuccessful. ... Try for Free Buy Now; Nessus Professional is for security pros on the front lines ... So now you are aware of the excellent features provided by Sage 50 ... I called HP and you can't get a real person. slmgr /ad-activation-get-IID (start ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing 2 APK Customize Your Car and Compete Against the Best Drifters in the World.md DELETED
@@ -1,123 +0,0 @@
1
-
2
- <h1>CarX Drift Racing 2: The Ultimate Drifting Game for Android</h1>
3
- <p>If you are a fan of drifting games, you must have heard of CarX Drift Racing 2. This is one of the most popular and realistic drifting games for Android devices. In this article, we will tell you everything you need to know about this game, including its features, how to download and install it, and some frequently asked questions.</p>
4
- <h2>Introduction</h2>
5
- <h3>What is CarX Drift Racing 2?</h3>
6
- <p>CarX Drift Racing 2 is a sequel of the original CarX Drift Racing game, which has over 100 million fans around the world. It is a racing game that focuses on drifting, which is a driving technique where the driver intentionally oversteers the car to make it slide sideways. Drifting is not only fun, but also challenging and rewarding, as it requires skill, precision, and timing.</p>
7
- <h2>carx drift racing 2 latest version apk</h2><br /><p><b><b>Download File</b> &#9675; <a href="https://urlin.us/2uT12l">https://urlin.us/2uT12l</a></b></p><br /><br />
8
- <h3>Why should you play CarX Drift Racing 2?</h3>
9
- <p>There are many reasons why you should play CarX Drift Racing 2, such as:</p>
10
- <ul>
11
- <li>It has stunning graphics and sound effects that make you feel like you are in a real drift car.</li>
12
- <li>It has a variety of cars, tracks, and modes to choose from, so you will never get bored.</li>
13
- <li>It has a realistic physics engine that simulates the behavior of different cars, surfaces, and weather conditions.</li>
14
- <li>It has an online mode where you can drift with your friends or other players from around the world.</li>
15
- <li>It has a visual auto tuning feature where you can customize your car's appearance and performance.</li>
16
- <li>It has an XDS mode where you can practice tandem drifting with yourself or other players.</li>
17
- <li>It has a TOP-32 mode where you can compete against the best drifters in the world.</li>
18
- </ul>
19
- <h2>Features of CarX Drift Racing 2</h2>
20
- <h3>Online Rooms</h3>
21
- <p>This is a new feature that allows you to drift in real time with your friends or other players. You can create or join an online room, pick a location, drift, and earn points. You can also watch other players drift using the drone camera. You can earn valuable rewards for achieving different ranks in the online mode.</p>
22
- <h3>Visual Auto Tuning</h3>
23
- <p>This feature allows you to customize your car's appearance and performance. You can replace mirrors, lights, bumpers, rims, and many other parts. You can also create a unique image of your car with body kits, vinyls, stickers, and paint. You can express your creativity and style with this feature.</p>
24
- <h3>Improved Performance Tuning</h3>
25
- <p>This feature allows you to adjust your car's performance according to your preferences and needs. You can tune the suspension, springs, tyre pressure, wheel angle, engine, turbo pressure, gearbox, brakes, and differential. You can fine tune your car to achieve the best drifting results.</p>
26
- <h3>Realistic Racing Physics</h3>
27
- <p>This feature makes CarX Drift Racing 2 one of the most realistic drifting games on Android. The game uses a physics engine that simulates the behavior of different cars, surfaces, and weather conditions. You can see the smoke, dust, sparks, and tyre tracks that result from your drifting. You can also feel the difference between asphalt, grass, sand, and snow. You can also experience different weather conditions such as rain, fog, and sun. You can enjoy the realistic racing physics of this game.</p>
28
- <h3>XDS Mode</h3>
29
- <p>This feature allows you to practice tandem drifting with yourself or other players. Tandem drifting is a technique where two or more cars drift together in a synchronized manner. It is one of the most spectacular and difficult forms of drifting. In XDS mode, you can choose a leader car and a follower car, and try to match the leader's trajectory and angle. You can also switch roles and become the leader or the follower. You can improve your drifting skills and coordination with this feature.</p>
30
- <h3>TOP-32 Mode</h3>
31
- <p>This feature allows you to compete against the best drifters in the world. TOP-32 mode is a tournament mode where you have to qualify for the final round by beating 31 other opponents. You have to drift on different tracks and earn points based on your speed, angle, and line. You have to be fast, precise, and consistent to win this mode. You can earn fame and glory by becoming the champion of TOP-32 mode.</p>
32
- <p>carx drift racing 2 apk download latest version<br />
33
- carx drift racing 2 mod apk latest version<br />
34
- carx drift racing 2 update apk latest version<br />
35
- carx drift racing 2 online rooms apk latest version<br />
36
- carx drift racing 2 android game apk latest version<br />
37
- carx drift racing 2 free download apk latest version<br />
38
- carx drift racing 2 unlimited money apk latest version<br />
39
- carx drift racing 2 hack apk latest version<br />
40
- carx drift racing 2 xds mode apk latest version<br />
41
- carx drift racing 2 visual tuning apk latest version<br />
42
- carx drift racing 2 performance tuning apk latest version<br />
43
- carx drift racing 2 realistic physics apk latest version<br />
44
- carx drift racing 2 tandem drifting apk latest version<br />
45
- carx drift racing 2 top-32 mode apk latest version<br />
46
- carx drift racing 2 new tracks apk latest version<br />
47
- carx drift racing 2 new cars apk latest version<br />
48
- carx drift racing 2 new features apk latest version<br />
49
- carx drift racing 2 best settings apk latest version<br />
50
- carx drift racing 2 tips and tricks apk latest version<br />
51
- carx drift racing 2 cheats and codes apk latest version<br />
52
- carx drift racing 2 gameplay and review apk latest version<br />
53
- carx drift racing 2 offline and online apk latest version<br />
54
- carx drift racing 2 multiplayer and singleplayer apk latest version<br />
55
- carx drift racing 2 custom and stock cars apk latest version<br />
56
- carx drift racing 2 vinyls and body kits apk latest version<br />
57
- carx drift racing 2 leaderboards and rankings apk latest version<br />
58
- carx drift racing 2 rewards and achievements apk latest version<br />
59
- carx drift racing 2 challenges and missions apk latest version<br />
60
- carx drift racing 2 events and tournaments apk latest version<br />
61
- carx drift racing 2 skins and stickers apk latest version<br />
62
- carx drift racing 2 sounds and music apk latest version<br />
63
- carx drift racing 2 controls and steering apk latest version<br />
64
- carx drift racing 2 graphics and animations apk latest version<br />
65
- carx drift racing 2 bugs and fixes apk latest version<br />
66
- carx drift racing 2 news and updates apk latest version<br />
67
- carx drift racing 2 guide and walkthrough apk latest version<br />
68
- carx drift racing 2 fun and addictive apk latest version<br />
69
- carx drift racing 2 pro and beginner apk latest version<br />
70
- carx drift racing 2 premium and free apk latest version<br />
71
- carx drift racing 2 full and lite apk latest version</p>
72
- <h2>How to download and install CarX Drift Racing 2 APK OBB?</h2>
73
- <p>If you want to play CarX Drift Racing 2 on your Android device, you have to download and install the APK OBB files. APK is the application package file that contains the game's code and resources. OBB is the data file that contains the game's graphics and sound files. Here are the steps to download and install CarX Drift Racing 2 APK OBB:</p>
74
- <h3>Step 1: Download the APK and OBB files from a trusted source</h3>
75
- <p>You can find many websites that offer CarX Drift Racing 2 APK OBB files for free download. However, not all of them are safe and reliable. Some of them may contain viruses, malware, or fake files that can harm your device or steal your data. Therefore, you have to be careful and choose a trusted source to download the files. One of the best sources is [CarX Drift Racing 2 APK OBB], which provides the latest version of the game with high-quality graphics and sound.</p>
76
- <h3>Step 2: Enable unknown sources on your device</h3>
77
- <p>Before you can install the APK file, you have to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on.</p>
78
- <h3>Step 3: Install the APK file</h3>
79
- <p>After you have enabled unknown sources, you can install the APK file. To do this, locate the downloaded APK file on your device using a file manager app. Tap on it and follow the instructions on the screen to complete the installation.</p>
80
- <h3>Step 4: Extract and copy the OBB folder to Android/OBB</h3>
81
- <p>After you have installed the APK file, you have to extract and copy the OBB folder to Android/OBB on your device's internal storage. To do this, locate the downloaded OBB file on your device using a file manager app. Tap on it and select Extract Here or Extract To depending on your app. You will see a folder named com.carxtech.carxdr2. Copy this folder and paste it in Android/OBB on your device's internal storage.</p>
82
- <h3>Step 5: Launch the game and enjoy</h3>
83
- <p>After you have copied the OBB folder, you are ready to launch the game and enjoy it. To do this, go to your app drawer and tap on CarX Drift Racing 2 icon. The game will start and load the data from the OBB folder. You can now drift away with CarX Drift Racing 2.</p>
84
- <h2>Conclusion</h2>
85
- <p>CarX Drift Racing 2 is one of the best drifting games for Android devices. It has amazing graphics, realistic physics, online mode, visual auto tuning, XDS mode, TOP-32 mode, and many other features that make it fun and exciting. If you want to play this game, you have to download and install CarX Drift Racing 2 APK OBB files from a trusted source. Follow our guide above to do it easily and safely.</p>
86
- <h2>FAQs</h2>
87
- <ul>
88
- <li><b>Is CarX Drift Racing 2 free?</b></li>
89
- <p>Yes, CarX Drift Racing 2 is free to download and play. However, it contains in app purchases that allow you to buy coins, cars, and other items. You can also watch ads to earn free coins.</p>
90
- <li><b>What are the minimum requirements to play CarX Drift Racing 2?</b></li>
91
- <p>The minimum requirements to play CarX Drift Racing 2 are:</p>
92
- <ul>
93
- <li>Android version 5.0 or higher</li>
94
- <li>2 GB of RAM or more</li>
95
- <li>1.5 GB of free storage space or more</li>
96
- <li>A stable internet connection</li>
97
- </ul>
98
- <li><b>How can I get more coins in CarX Drift Racing 2?</b></li>
99
- <p>You can get more coins in CarX Drift Racing 2 by:</p>
100
- <ul>
101
- <li>Drifting and earning points in the game modes</li>
102
- <li>Completing daily tasks and achievements</li>
103
- <li>Participating in online rooms and tournaments</li>
104
- <li>Watching ads and videos</li>
105
- <li>Buying them with real money</li>
106
- </ul>
107
- <li><b>How can I unlock more cars and tracks in CarX Drift Racing 2?</b></li>
108
- <p>You can unlock more cars and tracks in CarX Drift Racing 2 by:</p>
109
- <ul>
110
- <li>Earning enough coins to buy them</li>
111
- <li>Reaching certain levels and ranks in the game modes</li>
112
- <li>Winning them as rewards or prizes in online rooms and tournaments</li>
113
- <li>Buying them with real money</li>
114
- </ul>
115
- <li><b>How can I contact the developers of CarX Drift Racing 2?</b></li>
116
- <p>You can contact the developers of CarX Drift Racing 2 by:</p>
117
- <ul>
118
- <li>Sending an email to [email protected]</li>
119
- <li>Filling out the feedback form on their website [CarX Technologies]</li>
120
- <li>Following them on their social media accounts [Facebook], [Twitter], [Instagram], [YouTube]</li>
121
- </ul></p> 197e85843d<br />
122
- <br />
123
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dolphin Emulator v5.0 APK for 32 Bit Android - The Best Way to Enjoy Retro Games.md DELETED
@@ -1,118 +0,0 @@
1
-
2
- <h1>Dolphin Emulator: The Ultimate Guide for Android Users</h1>
3
- <p>Do you love playing Nintendo GameCube and Wii games? Do you wish you could play them on your Android device? If yes, then you are in luck. Dolphin Emulator is a free and open-source software that allows you to do just that. In this article, we will show you how to download, install, configure, and use Dolphin Emulator on your Android device. We will also answer some of the frequently asked questions about this amazing app. Let's get started!</p>
4
- <h2>dolphin emulator v5 0 32 bit apk</h2><br /><p><b><b>Download File</b> &#10002; &#10002; &#10002; <a href="https://urlin.us/2uSTlF">https://urlin.us/2uSTlF</a></b></p><br /><br />
5
- <h2>What is Dolphin Emulator and why you should use it</h2>
6
- <p>Dolphin Emulator is a software that emulates the hardware and software of Nintendo GameCube and Wii consoles. It enables you to play games from these consoles on your Android device, as well as other platforms such as Windows, Linux, and macOS. Dolphin Emulator offers many features and benefits, such as:</p>
7
- <ul>
8
- <li>High-resolution graphics: You can enjoy your games in HD quality, up to 1080p or even 4K, depending on your device capabilities.</li>
9
- <li>Save states: You can save and load your game progress at any point, without relying on the in-game save system.</li>
10
- <li>Cheats: You can use various cheat codes and hacks to modify your game experience, such as unlocking hidden items, increasing your health, or skipping levels.</li>
11
- <li>Online multiplayer: You can play online with other Dolphin Emulator users, using the Nintendo Wi-Fi Connection service or the Netplay feature.</li>
12
- <li>Customization: You can customize your emulator settings, such as graphics, audio, controls, enhancements, hacks, and more, to suit your preferences and device specifications.</li>
13
- </ul>
14
- <p>Dolphin Emulator supports both 32-bit and 64-bit Android devices, but the 32-bit version has some limitations and compatibility issues. For example, the 32-bit version cannot run games that require more than 2 GB of RAM, such as The Legend of Zelda: Skyward Sword or Xenoblade Chronicles. The 32-bit version also has lower performance and stability than the 64-bit version. Therefore, if you have a 64-bit device, we recommend you to use the 64-bit version of Dolphin Emulator for a better gaming experience.</p>
15
- <h2>How to download and install Dolphin Emulator on your Android device</h2>
16
- <p>Downloading and installing Dolphin Emulator on your Android device is very easy and straightforward. You can follow these steps:</p>
17
- <ol>
18
- <li>You can download the latest version of Dolphin Emulator from the official website or from APKCombo. The official website has both the 32-bit and the 64-bit versions of the app, while APKCombo only has the 32-bit version. Make sure you download the correct version for your device.</li>
19
- <li>Before you install the APK file, you need to enable unknown sources in your device settings. To do this, go to Settings > Security > Unknown sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.</li>
20
- <li>Once you have enabled unknown sources, you can install the APK file by tapping on it and following the instructions on the screen. You may need to grant some permissions to the app during the installation process.</li>
21
- <li>You also need to have enough storage space on your device or SD card for the emulator and the games. The emulator itself takes about 15 MB of space, while the games can vary from a few hundred MB to several GB. You can check your available storage space by going to Settings > Storage.</li>
22
- </ol>
23
- <p>Congratulations! You have successfully installed Dolphin Emulator on your Android device. Now you are ready to configure it and play your favorite games.</p>
24
- <p>dolphin emulator 5.0 download for android 32 bit<br />
25
- dolphin emulator 5.0 apk for 32 bit devices<br />
26
- dolphin emulator 5.0 32 bit android free<br />
27
- dolphin emulator 5.0 apk 32 bit no verification<br />
28
- dolphin emulator 5.0 apk for android 32 bit offline<br />
29
- dolphin emulator 5.0 apk for 32 bit phones<br />
30
- dolphin emulator 5.0 apk for android 32 bit latest version<br />
31
- dolphin emulator 5.0 apk for 32 bit mobiles<br />
32
- dolphin emulator 5.0 apk for android 32 bit mod<br />
33
- dolphin emulator 5.0 apk for 32 bit tablet<br />
34
- dolphin emulator 5.0 apk for android 32 bit online<br />
35
- dolphin emulator 5.0 apk for 32 bit windows<br />
36
- dolphin emulator 5.0 apk for android 32 bit x86<br />
37
- dolphin emulator 5.0 apk for 32 bit zip<br />
38
- dolphin emulator 5.0 apk for android 32 bit youtube<br />
39
- dolphin emulator v5.0 apk download for android (32-bit)<br />
40
- dolphin emulator v5.0 apk free download for android (32-bit)<br />
41
- dolphin emulator v5.0 apk latest version download for android (32-bit)<br />
42
- dolphin emulator v5.0 apk mod download for android (32-bit)<br />
43
- dolphin emulator v5.0 apk offline download for android (32-bit)<br />
44
- dolphin emulator v5.0 apk online download for android (32-bit)<br />
45
- dolphin emulator v5.0 apk update download for android (32-bit)<br />
46
- dolphin emulator v5.0 apk working download for android (32-bit)<br />
47
- dolphin emulator v5.0 apk x86 download for android (32-bit)<br />
48
- dolphin emulator v5.0 apk youtube download for android (32-bit)<br />
49
- how to install dolphin emulator v5.0 on android (32-bit)<br />
50
- how to use dolphin emulator v5.0 on android (32-bit)<br />
51
- how to play games on dolphin emulator v5.0 on android (32-bit)<br />
52
- how to fix lag on dolphin emulator v5.0 on android (32-bit)<br />
53
- how to configure dolphin emulator v5.0 on android (32-bit)<br />
54
- best settings for dolphin emulator v5.0 on android (32-bit)<br />
55
- best games for dolphin emulator v5.0 on android (32-bit)<br />
56
- best controller for dolphin emulator v5.0 on android (32-bit)<br />
57
- best roms for dolphin emulator v5.0 on android (32-bit)<br />
58
- best cheats for dolphin emulator v5.0 on android (32-bit)<br />
59
- is there a dolphin emulator v5.0 for android (32-bit)<br />
60
- is dolphin emulator v5.0 compatible with android (32-bit)<br />
61
- is dolphin emulator v5.0 safe for android (32-bit)<br />
62
- is dolphin emulator v5.0 legal for android (32-bit)<br />
63
- is dolphin emulator v5.0 worth it for android (32-bit)</p>
64
- <h2>How to configure Dolphin Emulator settings for optimal performance</h2>
65
- <p>Dolphin Emulator has a lot of settings that you can adjust to optimize its performance and compatibility with different games and devices. You can access the settings menu by tapping on the three dots icon in the top right corner of the emulator screen. You will see various tabs, such as graphics, audio, controls, enhancements, hacks, and more. You can tap on each tab to see and change the settings related to it. You can also create custom profiles for different games and devices by tapping on the plus icon in the top right corner of the settings menu.</p>
66
- <p>Some of the settings that you should pay attention to are:</p>
67
- <ul>
68
- <li>Graphics backend: This determines how Dolphin Emulator renders the graphics of the games. There are three options: OpenGL, Vulkan, and Software Renderer. OpenGL is the default option and works well with most games and devices. Vulkan is a newer option that may offer better performance and compatibility with some games, but it may also cause some issues with others. Software Renderer is a slow option that does not use hardware acceleration and should only be used for debugging purposes.</li>
69
- <li>Aspect ratio: This determines how Dolphin Emulator displays the games on your screen. There are four options: Auto, Force 16:9, Force 4:3, and Stretch to Window. Auto is the default option and preserves the original aspect ratio of the game. Force 16:9 and Force 4:3 force the game to fit a widescreen or a standard screen respectively, which may result in some cropping or stretching of the image. Stretch to Window stretches the game to fill your entire screen, which may distort the image quality.</li>
70
- <li>Internal resolution: This determines how Dolphin Emulator scales up or down the resolution of the game. There are several options, ranging from 1x (native) to 8x (8 times the native resolution). The higher the resolution, the better the image quality, but also the higher the performance requirements. The default option is 1x, which matches the original resolution of the game. You can increase the resolution if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not going beyond 2x resolution.</li>
71
- <li>Anti-aliasing: This determines how Dolphin Emulator smooths out the edges of the game graphics. There are several options, ranging from None to 8x MSAA (multisample anti-aliasing). The higher the anti-aliasing, the smoother the edges, but also the higher the performance requirements. The default option is None, which means no anti-aliasing is applied. You can enable anti-aliasing if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not enabling anti-aliasing.</li>
72
- <li>Anisotropic filtering: This determines how Dolphin Emulator enhances the quality of the game textures. There are several options, ranging from 1x to 16x. The higher the anisotropic filtering, the sharper the textures, but also the higher the performance requirements. The default option is 1x, which means no anisotropic filtering is applied. You can increase the anisotropic filtering if your device can handle it, but you may experience some slowdowns or crashes. For 32-bit devices, we recommend not going beyond 4x anisotropic filtering.</li>
73
- <li>Scaled EFB copy: This determines how Dolphin Emulator handles some special effects in the game, such as heat waves or water reflections. If this option is on, Dolphin Emulator will scale these effects according to your internal resolution setting. If this option is off, Dolphin Emulator will use the native resolution of the game for these effects. The default option is on, which means scaled EFB copy is applied. You can turn this option off if you want to improve your performance or compatibility with some games, but you may lose some visual quality.</li>
74
- <li>Skip EFB access from CPU: This determines how Dolphin Emulator handles some advanced features in the game, such as motion blur or depth of field. If this option is on, Dolphin Emulator will skip these features and improve your performance. If this option is off, Dolphin Emulator will render these features and improve your visual quality. The default option is off, which means EFB access from CPU is not skipped. You can turn this option on if you want to improve your performance or compatibility with some games, but you may lose some visual quality.</li>
75
- <li>Ignore format changes: This determines how Dolphin Emulator handles some color conversions in the game. If this option is on, Dolphin Emulator will ignore these conversions and improve your performance. If this option is off, Dolphin Emulator will perform these conversions and improve your visual quality. The default option is on, which means format changes are ignored. You can turn this option off if you want to improve your visual quality or compatibility with some games, but you may lose some performance.</li>
76
- <li>Store EFB copies to texture only: This determines how Dolphin Emulator handles some memory operations in the game. If this option is on, Dolphin Emulator will store these operations as textures and improve your performance. If this option is off, Dolphin Emulator will store these operations as RAM and improve your compatibility with some games. The default option is on, which means EFB copies are stored as textures only. You can turn this option off if you want to improve your compatibility with some games, but you may lose some performance.</li>
77
- <li>Texture cache accuracy: This determines how Dolphin Emulator handles some texture updates in the game. There are three options: Low, Medium, and High. The lower the accuracy, the faster the updates, but also the higher the chance of graphical glitches. The higher the accuracy, the slower the updates, but also the lower the chance of graphical glitches. The default option is Low, which means low accuracy texture cache is used. You can increase the accuracy if you want to reduce graphical glitches in some games, but you may lose some performance.</li>
78
- <li>External frame buffer (XFB): This determines how Dolphin Emulator handles some video output in the game. There are two options: Disable and Virtual. If this option is Disable, Dolphin Emulator will bypass the XFB and improve your performance. If this option is Virtual, Dolphin Emulator will emulate the XFB and improve your compatibility with some games. The default option is Disable, which means XFB is disabled. You can enable this option if you want to improve your compatibility with some games, but you may lose some performance.</li>
79
- <li>Fast depth calculation: This determines how Dolphin Emulator handles some depth calculations in the game. If this option is on, Dolphin Emulator will use a faster but less accurate method and improve your performance. If this option is off, Dolphin Emulator will use a slower but more accurate method and improve your visual quality. The default option is on, which means fast depth calculation is used. You can turn this option off if you want to improve your visual quality in some games, but you may lose some performance.</li>
80
- <li>Disable bounding box: This determines how Dolphin Emulator handles some bounding box calculations in the game. If this option is on, Dolphin Emulator will skip these calculations and improve your performance. If this option is off, Dolphin Emulator will perform these calculations and improve your compatibility with some games. The default option is on, which means bounding box is disabled. You can turn this option off if you want to improve your compatibility with some games, but you may lose some performance.</li>
81
- </ul>
82
- <p>These are some of the most important settings that you should consider when configuring Dolphin Emulator for 32-bit devices. However, you may need to experiment with different settings to find the best balance between performance and quality for your device and game. You can also check the Dolphin Wiki for more information and tips on specific games.</p>
83
- <h2>How to load and play your favorite games on Dolphin Emulator</h2>
84
- <p>Now that you have installed and configured Dolphin Emulator on your Android device, you are ready to load and play your favorite games. Here are the steps to do so:</p>
85
- <ol>
86
- <li>You need to have the game files (ISO or GCM) on your device or SD card. You can get these files from your own GameCube or Wii discs, or from other sources online. However, you should only use game files that you own legally and that match the region of your device.</li>
87
- <li>You can browse and select the game files from the emulator menu by tapping on the plus icon in the bottom right corner of the screen. This will open a file explorer where you can navigate to the folder where you stored your game files.</li>
88
- <li>You can also scan your device for game files by tapping on the refresh icon in the top right corner of the screen. This will automatically detect and add any game files that are compatible with Dolphin Emulator.</li>
89
- <li>Once you have loaded a game, you can start playing by tapping on the play icon in the bottom right corner of the screen. This will launch the game in full-screen mode.</li>
90
- <li>You can also pause, save, load, or exit the game by tapping on the menu icon in the top left corner of the screen. This will open a menu where you can access various options, such as:</li>
91
- <ul>
92
- <li>Save state: This allows you to save your game progress at any point in a slot. You can have up to 10 slots per game.</li>
93
- <li>Load state: This allows you to load your game progress from a slot.</li>
94
- <li>Exit: This allows you to quit the game and return to the emulator menu.</li>
95
- </ul>
96
- </ol>
97
- <p>That's it! You can now enjoy playing GameCube and Wii games on your Android device with Dolphin Emulator.</p>
98
- <h2>How to troubleshoot common issues with Dolphin Emulator</h2>
99
- <p>Dolphin Emulator is a complex software that may not work perfectly with every device and game. Some of the common issues that you may encounter with Dolphin Emulator are:</p>
100
- <ul>
101
- <li>The emulator crashes or freezes during gameplay.</li>
102
- <li>The game runs too slow or too fast.</li>
103
- <li>The game graphics are distorted or glitchy.</li>
104
- <li>The game audio is choppy or missing.</li>
105
- <li>The game controls are not responsive or accurate.</li>
106
- </ul>
107
- <p>Some of the possible solutions for these issues are:</p>
108
- <ul>
109
- <li>Update your device software and drivers: Make sure that your device is running on the latest version of Android and that your drivers are up to date. This may fix some compatibility and performance issues with Dolphin Emulator.</li>
110
- <li>Clear your emulator cache and data: Sometimes, your emulator cache and data may get corrupted or outdated, causing some problems with Dolphin Emulator. You can clear them by going to Settings > Apps > Dolphin Emulator > Storage > Clear cache and Clear data. This may reset some of your emulator settings : You can use a controller to play games on Dolphin Emulator, as it will give you more accuracy and comfort than using the touchscreen. You can use a Bluetooth controller, a USB controller, or a Wii remote. You can configure your controller settings by going to Settings > Controls > Configure.</li>
111
- <li>Use cheats and hacks: You can use cheats and hacks to modify your game experience on Dolphin Emulator, such as unlocking hidden items, increasing your health, or skipping levels. You can enable cheats and hacks by going to Settings > Enhancements > Cheats or Settings > Hacks.</li>
112
- <li>Use online multiplayer: You can use online multiplayer to play games with other Dolphin Emulator users, using the Nintendo Wi-Fi Connection service or the Netplay feature. You can enable online multiplayer by going to Settings > Online > Wi-Fi or Settings > Online > Netplay.</li>
113
- </ul>
114
- <p>These are some of the tips that you can use to improve your gaming experience on Dolphin Emulator. However, you may find other ways to enhance your gaming experience by experimenting with different settings and features.</p>
115
- <h2></h2>
116
- <p>This is the end of the article. I hope you enjoyed reading it and learned something new. Thank you for your attention and have a nice day!</p> 197e85843d<br />
117
- <br />
118
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/AetherSX2 for Windows and Android - The Best Way to Relive Your Favorite PS2 Games in 2023 - Download Here.md DELETED
@@ -1,159 +0,0 @@
1
-
2
- <h1>AetherSX2 2023 Download: How to Play PS2 Games on Your Android Device</h1>
3
- <p>Do you miss playing your favorite PlayStation 2 games? Do you wish you could relive the nostalgia of playing classic PS2 titles on your Android device? If yes, then you are in luck. There is a new PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is called AetherSX2, and it is the best PS2 emulator for Android by far.</p>
4
- <p>In this article, we will tell you everything you need to know about AetherSX2, including what it is, how it works, what features and benefits it offers, how to download and install it on your Android device, and how to play PS2 games on your Android device using AetherSX2. By the end of this article, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2.</p>
5
- <h2>aethersx2 2023 download</h2><br /><p><b><b>Download Zip</b> &harr; <a href="https://jinyurl.com/2uNLLs">https://jinyurl.com/2uNLLs</a></b></p><br /><br />
6
- <h2>What is AetherSX2?</h2>
7
- <p>AetherSX2 is a PS2 emulator for Android that allows you to play PS2 games on your smartphone. An emulator is a software that mimics the hardware and software of another device, in this case, a PS2 console. By using an emulator, you can run games and applications that are designed for another platform, such as a PS2 game on an Android device.</p>
8
- <h3>A brief history of AetherSX2</h3>
9
- <p>AetherSX2 is the brainchild of one person, a developer who goes by the handle Tahlreth. The developer actually used the PCSX2 emulator as the basis for their Android-based emulator. PCSX2 is a long-running, well-established emulator on PC, so it makes sense to take advantage of the work that has gone into this program.</p>
10
- <p>aethersx2 ps2 emulator for android download 2023 (search volume: 10-100, competition: low)<br />
11
- how to download and install aethersx2 on windows pc 2023 (search volume: 10-100, competition: low)<br />
12
- aethersx2 latest update fastest settings for low end devices 2023 (search volume: 10-100, competition: low)<br />
13
- aethersx2 best ps2 emulator for android review 2023 (search volume: 10-100, competition: low)<br />
14
- aethersx2 vs damonps2 which is better in 2023 (search volume: 10-100, competition: low)<br />
15
- aethersx2 compatible games list and performance 2023 (search volume: 10-100, competition: low)<br />
16
- aethersx2 bios file download and setup guide 2023 (search volume: 10-100, competition: low)<br />
17
- aethersx2 cheats codes and hacks for ps2 games 2023 (search volume: 10-100, competition: low)<br />
18
- aethersx2 controller support and configuration 2023 (search volume: 10-100, competition: low)<br />
19
- aethersx2 graphics enhancement and resolution options 2023 (search volume: 10-100, competition: low)<br />
20
- aethersx2 save and load states feature 2023 (search volume: 10-100, competition: low)<br />
21
- aethersx2 multiplayer and online mode 2023 (search volume: 10-100, competition: low)<br />
22
- aethersx2 custom shaders and filters 2023 (search volume: 10-100, competition: low)<br />
23
- aethersx2 sound quality and audio settings 2023 (search volume: 10-100, competition: low)<br />
24
- aethersx2 system requirements and compatibility 2023 (search volume: 10-100, competition: low)<br />
25
- aethersx2 tips and tricks for better performance 2023 (search volume: 10-100, competition: low)<br />
26
- aethersx2 troubleshooting and common errors 2023 (search volume: 10-100, competition: low)<br />
27
- aethersx2 development history and future plans 2023 (search volume: <10, competition: low)<br />
28
- aethersx2 donation and support options 2023 (search volume: <10, competition: low)<br />
29
- aethersx2 fan community and discord server 2023 (search volume: <10, competition: low)</p>
30
- <p>The developer of AetherSX2 got the green light to use the PCSX2 code from the developers themselves and is licensed under the LGPL license — unlike the DamonPS2 developers, who stole the code and didn’t follow the requisite license. In any event, the emulator was initially released in December 2021 via the Google Play Store as an open beta. You can also sideload the APK via the AetherSX2 website. We’d recommend you steer clear of any other websites claiming to offer the APK.</p>
31
- <p>The AetherSX2 emulator is a major step forward for emulation on Android devices. It’s also worth noting that the app is free to download and use, so don’t be duped by anyone saying you need to pay for it. This is in contrast to the DamonPS2 emulator, which is filled to the brim with ads and charges for a Pro version limited to two devices.</p>
32
- <h3>Features and benefits of AetherSX2</h3>
33
- <p>AetherSX2 is not just another PS2 emulator for Android. It is a powerful and feature-rich emulator that offers many advantages over other emulators. Here are some of the features and benefits of AetherSX2 that make it stand out from the crowd.</p>
34
- <h4>High compatibility</h4>
35
- <p>AetherSX2 boasts high compatibility with a wide range of PS2 games <p>from various genres and regions. You can play popular games like God of War, Final Fantasy, Grand Theft Auto, Metal Gear Solid, Kingdom Hearts, and many more on your Android device with AetherSX2. You can also play games from different regions, such as Japan, Europe, and North America, with the appropriate BIOS files. AetherSX2 supports both ISO and CSO formats for PS2 games.</p>
36
- <h4>Enhanced graphics</h4>
37
- <p>AetherSX2 does not just emulate the PS2 graphics faithfully, but also enhances them to make them look better on your Android device. You can adjust the resolution, aspect ratio, anti-aliasing, texture filtering, and other graphical settings to improve the visual quality of the games. You can also use shaders to add effects like scanlines, CRT, bloom, and more to the games. AetherSX2 supports both Vulkan and OpenGL renderers for graphics.</p>
38
- <h4>Save and load states</h4>
39
- <p>AetherSX2 allows you to save and load your game progress at any point with the save and load state feature. This is very convenient for playing on your Android device, as you can resume your game from where you left off without having to go through the in-game save system. You can also use this feature to skip difficult or boring parts of the game by loading a state from another source. AetherSX2 supports up to 10 save slots for each game.</p>
40
- <h4>Controller support</h4>
41
- <p>AetherSX2 lets you play PS2 games on your Android device with a variety of controllers. You can use the touchscreen controls that are customizable and responsive, or you can use an external controller that connects via Bluetooth or USB. AetherSX2 supports many popular controllers, such as Xbox One, PS4, PS3, Switch Pro, and more. You can also map the buttons and analog sticks to your liking.</p>
42
- <h4>Fast and smooth performance</h4>
43
- <p>AetherSX2 delivers fast and smooth performance for PS2 games on your Android device. You can play most games at full speed without any lag or stuttering. You can also tweak the performance settings to optimize the emulator for your device. You can adjust the frame rate, frame skip, speed hack, audio latency, and other options to improve the performance of the emulator. AetherSX2 runs well on most modern Android devices with decent hardware.</p>
44
- <h2>How to download and install AetherSX2 on your Android device</h2>
45
- <p>Now that you know what AetherSX2 is and what it can do, you might be wondering how to download and install it on your Android device. Well, it is very easy and simple to do so. Just follow these steps and you will be ready to play PS2 games on your smartphone in no time.</p>
46
- <h3>Step 1: Check system requirements</h3>
47
- <p>Before you download and install AetherSX2 on your Android device, you need to make sure that your device meets the minimum system requirements for running the emulator. Here are the system requirements for AetherSX2:</p>
48
- <ul>
49
- <li>Android version: 5.0 or higher</li>
50
- <li>CPU: Quad-core or higher (preferably with ARMv8 support)</li>
51
- <li>GPU: Adreno 5xx or higher (or equivalent)</li>
52
- <li>RAM: 3 GB or higher</li>
53
- <li>Storage: At least 1 GB of free space (plus more for PS2 games)</li>
54
- </ul>
55
- <p>If your device meets these requirements, then you can proceed to the next step. If not, then you might want to upgrade your device or look for another emulator.</p>
56
- <h3>Step 2: Download AetherSX2 APK from the official website or Google Play Store</h3>
57
- <p>The next step is to download the AetherSX2 APK file from a trusted source. There are two ways to do this: either from the official website or from the Google Play Store.</p>
58
- <p>The official website of AetherSX2 is <a href="">https://aethersx.com/</a>. Here you can find the latest version of the emulator as well as other information and updates about it. You can download the APK file directly from the website by clicking on the "Download" button on the homepage.</p>
59
- <p>The Google Play Store is another option for downloading the AetherSX2 APK file. The Google Play Store is a safe and convenient way to download apps for your Android device. You can find the AetherSX2 app on the Google Play Store by searching for it or by following this link: <a href="">https://play.google.com/store/apps/details?id=com.aethersx.aethersx&hl=en_US&gl=US</a>. You can download the app by tapping on the "Install" button on the app page.</p>
60
- <p>Either way, you will get the same APK file that is about 30 MB in size. Make sure you have enough space on your device before downloading it.</p>
61
- <h3>Step 3: Install AetherSX2 APK on your Android device</h3>
62
- <p>Once you have downloaded the AetherSX2 APK file, you need to install it on your Android device. To do this, you need to enable the installation of apps from unknown sources on your device. This is a security feature that prevents malicious apps from being installed on your device without your permission. Here is how to enable it:</p>
63
- <ul>
64
- <li>Go to the Settings app on your device.</li>
65
- <li>Tap on Security or Privacy (depending on your device).</li>
66
- <li>Find and enable the option that says "Unknown sources" or "Install unknown apps" (depending on your device).</li>
67
- <li>Confirm your choice by tapping OK or Allow (depending on your device).</li>
68
- </ul>
69
- <p>Now you can install the AetherSX2 APK file by following these steps:</p>
70
- <ul>
71
- <li>Locate the AetherSX2 APK file on your device using a file manager app or the Downloads app.</li>
72
- <li>Tap on the AetherSX2 APK file to start the installation process.</li>
73
- <li>Follow the instructions on the screen to complete the installation.</li>
74
- <li>Wait for the installation to finish and then tap Open or Done (depending on your device).</li>
75
- </ul>
76
- <p>Congratulations, you have successfully installed AetherSX2 on your Android device. You can now launch the app from your app drawer or home screen.</p>
77
- <h3>Step 4: Load PS2 games on your Android device</h3>
78
- <p>The next step is to load PS2 games on your Android device. You can do this by either transferring PS2 games from your PC or downloading PS2 games from the internet. Here is how to do both:</p>
79
- <h4>Transfer PS2 games from your PC</h4>
80
- <p>If you have PS2 games on your PC, you can transfer them to your Android device using a USB cable or a wireless method. Here is how to do it using a USB cable:</p>
81
- <ul>
82
- <li>Connect your Android device to your PC using a USB cable.</li>
83
- <li>Select the option that says "File Transfer" or "MTP" (depending on your device) on your Android device.</li>
84
- <li>Open the File Explorer or My Computer app on your PC and find your Android device.</li>
85
- <li>Create a folder named "PS2" on your Android device's internal storage or SD card (depending on where you want to store the games).</li>
86
- <li>Copy and paste the PS2 games from your PC to the PS2 folder on your Android device. The games should be in ISO or CSO format.</li>
87
- <li>Eject your Android device from your PC and disconnect the USB cable.</li>
88
- </ul>
89
- <p>If you want to use a wireless method, you can use an app like AirDroid or ShareIt to transfer files between your PC and Android device over Wi-Fi. Just follow the instructions of the app you choose to use.</p>
90
- <h4>Download PS2 games from the internet</h4>
91
- <p>If you don't have PS2 games on your PC, you can download them from the internet. However, you need to be careful about where you download them from, as some websites may contain viruses, malware, or fake files. You also need to make sure that you own the original PS2 games that you download, as downloading pirated games is illegal and unethical.</p>
92
- <p>We recommend that you use reputable and trusted websites that offer PS2 games for download, such as Emuparadise, CoolROM, RomHustler, and The ISO Zone. These websites have a large collection of PS2 games from various regions and genres that you can download for free. Here is how to download PS2 games from these websites:</p>
93
- <ul>
94
- <li>Go to the website of your choice using a web browser app on your Android device.</li>
95
- <li>Search for the PS2 game that you want to download using the search bar or browse through the categories.</li>
96
- <li>Select the game that you want to download and tap on the download link or button.</li>
97
- <li>Wait for the download to finish and then locate the downloaded file using a file manager app or the Downloads app.</li>
98
- </ul>
99
- <p>Note: Some websites may require you to extract the downloaded file using an app like ZArchiver or RAR before you can play it. If this is the case, just follow these steps:</p>
100
- <ul>
101
- <li>Select the downloaded file and tap on Extract here or Extract to (depending on the app).</li>
102
- <li>Wait for the extraction to finish and then delete the original file to save space.</li>
103
- <li>Find the extracted file, which should be in ISO or CSO format, and move it to the PS2 folder on your Android device.</li>
104
- </ul>
105
- <p>Now you have PS2 games on your Android device that you can play with AetherSX2.</p>
106
- <h3>Step 5: Configure settings and controls according to your preference</h3>
107
- <p>The last step before you can play PS2 games on your Android device with AetherSX2 is to configure the settings and controls according to your preference. AetherSX2 has many options that you can customize to enhance your gaming experience. Here are some of the settings and controls that you can configure:</p>
108
- <h4>Settings</h4>
109
- <p>To access the settings menu, tap on the three-dot icon on the top right corner of the app and select Settings. Here you can find various options that affect the performance, graphics, audio, and input of the emulator. Some of the options that you can adjust are:</p>
110
- <ul>
111
- <li>Frame rate: You can set the frame rate limit for the emulator, which affects the speed of the games. You can choose between 30 FPS, 60 FPS, or Unlimited.</li>
112
- <li>Frame skip: You can enable or disable frame skipping, which is a technique that skips rendering some frames to improve performance. You can also set the frame skip value from 0 to 9.</li>
113
- <li>Speed hack: You can enable or disable speed hack, which is a feature that boosts the speed of the emulator by reducing the CPU load. You can also set the speed hack value from 0 to 9.</li>
114
- <li>Audio latency: You can set the audio latency for the emulator, which affects the synchronization of the sound and the video. You can choose between Low, Medium, or High.</li>
115
- <li>Graphics renderer: You can choose between Vulkan or OpenGL as the graphics renderer for the emulator, which affects the quality and compatibility of the graphics. Vulkan is recommended for better performance and compatibility.</li>
116
- <li>Resolution: You can set the resolution for the emulator, which affects the sharpness and clarity of the graphics. You can choose between Native (480x272), 2x Native (960x544), 3x Native (1440x816), or 4x Native (1920x1088).</li>
117
- <li>Aspect ratio: You can set the aspect ratio for the emulator, which affects how the games are displayed on your screen. You can choose between Auto (based on game), 4:3 (standard), or 16:9 (widescreen).</li>
118
- <li>Anti-aliasing: You can enable or disable anti-aliasing, which is a technique that smooths out jagged edges in the graphics. You can also set the anti-aliasing level from 2x to 16x.</li>
119
- <li>Texture filtering: You can enable or disable texture filtering, which is a technique that improves the quality of textures in the graphics. You can also set the texture filtering level from Bilinear to Anisotropic 16x.</li>
120
- <li>Shaders: You can enable or disable shaders, which are effects that add visual enhancements to the graphics. You can also choose from a variety of shaders, such as Scanlines, CRT, Bloom, and more.</li>
121
- </ul>
122
- <p>You can experiment with different settings and see what works best for you and your device. You can also reset the settings to default by tapping on the Reset button at the bottom of the menu.</p>
123
- <h4>Controls</h4>
124
- <p>To access the controls menu, tap on the three-dot icon on the top right corner of the app and select Controls. Here you can find various options that affect the input and layout of the emulator. Some of the options that you can adjust are:</p>
125
- <ul>
126
- <li>Touchscreen controls: You can enable or disable the touchscreen controls, which are virtual buttons and analog sticks that appear on your screen. You can also customize the size, position, opacity, and vibration of the touchscreen controls.</li>
127
- <li>External controller: You can enable or disable the external controller, which is a physical controller that connects to your device via Bluetooth or USB. You can also map the buttons and analog sticks of the external controller to the PS2 controller.</li>
128
- <li>Accelerometer: You can enable or disable the accelerometer, which is a sensor that detects the tilt and motion of your device. You can also map the accelerometer to the PS2 controller.</li>
129
- <li>Gyroscope: You can enable or disable the gyroscope, which is a sensor that detects the orientation and rotation of your device. You can also map the gyroscope to the PS2 controller.</li>
130
- </ul>
131
- <p>You can experiment with different controls and see what works best for you and your device. You can also reset the controls to default by tapping on the Reset button at the bottom of the menu.</p>
132
- <h2>How to play PS2 games on your Android device using AetherSX2</h2>
133
- <p>Now that you have downloaded and installed AetherSX2 on your Android device, loaded PS2 games on your device, and configured the settings and controls according to your preference, you are ready to play PS2 games on your smartphone with AetherSX2. Here is how to do it:</p>
134
- <h3>Choose a game from the game list or browse for a game file</h3>
135
- <p>When you launch AetherSX2, you will see a game list that shows all the PS2 games that you have on your device. You can scroll through the game list and tap on any game that you want to play. The game will start loading automatically.</p>
136
- <p>If you don't see the game that you want to play on the game list, you can browse for it manually by tapping on the folder icon on the top left corner of the app. This will open a file browser that lets you navigate through your device's storage. You can find and select any PS2 game file that you have on your device in ISO or CSO format. The game will start loading automatically.</p>
137
- <h3>Select a graphics renderer (Vulkan or OpenGL)</h3>
138
- <p>Before the game starts, you will be asked to select a graphics renderer for the emulator. You can choose between Vulkan or OpenGL as the graphics renderer. Vulkan is recommended for better performance and compatibility, while OpenGL is recommended for older devices or games that have issues with Vulkan. You can change this option later in the settings menu if you want.</p>
139
- <h3>Enjoy playing PS2 games on your Android device with AetherSX2</h3>
140
- <p>After selecting a graphics renderer, the game will start running on your Android device with AetherSX2. You can use the touchscreen controls or the external controller to play the game as you would on a PS2 console. You can also access the emulator menu by tapping on the three-dot icon on the top right corner of the app. Here you can save and load states, change settings and controls, pause and resume the game, and exit the game.</p>
141
- <p>That's it. You can now enjoy playing PS2 games on your Android device with AetherSX2. You can play as many games as you want, as long as you have enough space on your device. You can also switch between different games by going back to the game list or the file browser.</p>
142
- <h2>Conclusion</h2>
143
- <p>AetherSX2 is a PS2 emulator for Android that lets you play PS2 games on your smartphone with ease. It is a powerful and feature-rich emulator that offers high compatibility, enhanced graphics, save and load states, controller support, fast and smooth performance, and more. It is also free to download and use, unlike some other emulators that charge money or show ads.</p>
144
- <p>In this article, we have shown you how to download and install AetherSX2 on your Android device, how to load PS2 games on your device, how to configure settings and controls according to your preference, and how to play PS2 games on your device using AetherSX2. By following these steps, you will be able to enjoy playing PS2 games on your smartphone with AetherSX2.</p>
145
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p>
146
- <h3>FAQs</h3>
147
- <p>Here are some frequently asked questions about AetherSX2 and PS2 emulation on Android.</p>
148
- <h4>Q: Is AetherSX2 legal?</h4>
149
- <p>A: AetherSX2 is legal as long as you use it for personal and non-commercial purposes. You also need to own the original PS2 games that you play with AetherSX2, as downloading pirated games is illegal and unethical.</p>
150
- <h4>Q: Is AetherSX2 safe?</h4>
151
- <p>A: AetherSX2 is safe as long as you download it from the official website or the Google Play Store. You also need to be careful about where you download PS2 games from, as some websites may contain viruses, malware, or fake files.</p>
152
- <h4>Q: How can I update AetherSX2?</h4>
153
- <p>A: You can update AetherSX2 by downloading the latest version of the APK file from the official website or the Google Play Store. You can also enable automatic updates for AetherSX2 on the Google Play Store by tapping on the three-dot icon on the app page and selecting Enable auto-update.</p>
154
- <h4>Q: How can I report bugs or issues with AetherSX2?</h4>
155
- <p>A: You can report bugs or issues with AetherSX2 by contacting the developer via email at <a href="mailto:[email protected]">[email protected]</a>. You can also join the official Discord server of AetherSX2 at <a href="">https://discord.gg/6J9f8wM</a>. Here you can chat with other users and get support from the developer and moderators.</p>
156
- <h4>Q: How can I support the development of AetherSX2?</h4>
157
- <p>A: You can support the development of AetherSX2 by donating to the developer via PayPal at <a href="">https://www.paypal.me/aethersx</a>. You can also share your feedback and suggestions with the developer via email or Discord. You can also rate and review AetherSX2 on the Google Play Store and spread the word about it to your friends and family.</p> 197e85843d<br />
158
- <br />
159
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Compress PDFs Online for Free Download Smaller PDF Files in Seconds.md DELETED
@@ -1,208 +0,0 @@
1
-
2
- <h1>How to Download a 1 MB PDF File in Minutes</h1>
3
- <p>PDF files are one of the most popular and versatile document formats in the digital world. They can contain text, images, graphics, links, forms, annotations, and more. They can also preserve the layout and appearance of your document across different devices and platforms.</p>
4
- <h2>download 1 mb pdf file</h2><br /><p><b><b>Download File</b> === <a href="https://jinyurl.com/2uNQZJ">https://jinyurl.com/2uNQZJ</a></b></p><br /><br />
5
- <p>But sometimes, you might need to download a small PDF file that is only 1 MB or less in size. Maybe you have a limited bandwidth or storage space on your device. Maybe you want to save time and data when downloading a document. Maybe you need to send or receive a document via email or messaging app that has a file size limit.</p>
6
- <p>Whatever your reason is, downloading a 1 MB PDF file is not as hard as you might think. In this article, we will show you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device.</p>
7
- <h2>What is a PDF File?</h2>
8
- <p>PDF stands for Portable Document Format. It is a file format that was created by Adobe in 1993 to enable users to share and print documents without losing the original formatting. PDF files can be opened by various software and apps, such as Adobe Acrobat Reader, Google Chrome, Microsoft Edge, and more.</p>
9
- <p>Some of the benefits of PDF files over other formats are:</p>
10
- <ul>
11
- <li>They are compatible with different operating systems and devices</li>
12
- <li>They can protect the content and integrity of your document with encryption and passwords</li>
13
- <li>They can compress large amounts of data without compromising the quality</li>
14
- <li>They can support interactive features such as hyperlinks, bookmarks, annotations, and forms</li>
15
- <li>They can be easily converted to or from other formats such as Word, Excel, PowerPoint, JPG, PNG, and more</li>
16
- </ul>
17
- <h2>Why Do You Need to Download a 1 MB PDF File?</h2>
18
- <p>There are many scenarios where you might need to download a small PDF file that is only 1 MB or less in size. For example:</p>
19
- <p>How to download a 1 mb pdf file online<br />
20
- Download 1 mb pdf file from email<br />
21
- Best tools to download 1 mb pdf file fast<br />
22
- Download 1 mb pdf file without losing quality<br />
23
- Download 1 mb pdf file on mobile device<br />
24
- Download 1 mb pdf file from Google Drive<br />
25
- Download 1 mb pdf file from Dropbox<br />
26
- Download 1 mb pdf file from OneDrive<br />
27
- Download 1 mb pdf file from iCloud<br />
28
- Download 1 mb pdf file from SharePoint<br />
29
- Download 1 mb pdf file from Adobe Acrobat<br />
30
- Download 1 mb pdf file from Smallpdf<br />
31
- Download 1 mb pdf file from PDF Compressor<br />
32
- Download 1 mb pdf file from PDF Candy<br />
33
- Download 1 mb pdf file from PDF2Go<br />
34
- Download 1 mb pdf file from Soda PDF<br />
35
- Download 1 mb pdf file from iLovePDF<br />
36
- Download 1 mb pdf file from Sejda<br />
37
- Download 1 mb pdf file from PDF24 Tools<br />
38
- Download 1 mb pdf file from PDFescape<br />
39
- Download 1 mb pdf file from DocuPub<br />
40
- Download 1 mb pdf file from Free PDF Compressor<br />
41
- Download 1 mb pdf file from PDF Resizer<br />
42
- Download 1 mb pdf file from PDFelement<br />
43
- Download 1 mb pdf file from Nitro PDF<br />
44
- Download 1 mb pdf file from Foxit Software<br />
45
- Download 1 mb pdf file from Wondershare PDF Converter Pro<br />
46
- Download 1 mb pdf file from Zamzar<br />
47
- Download 1 mb pdf file from Online2PDF<br />
48
- Download 1 mb pdf file from Convertio<br />
49
- Download 1 mb pdf file from CloudConvert<br />
50
- Download 1 mb pdf file from Apowersoft PDF Converter<br />
51
- Download 1 mb pdf file from LightPDF<br />
52
- Download 1 mb pdf file from EasePDF<br />
53
- Download 1 mb pdf file from CleverPDF<br />
54
- Download 1 mb pdf file from Hipdf<br />
55
- Download 1 mb pdf file from AvePDF<br />
56
- Download 1 mb pdf file from PDFChef by Movavi<br />
57
- Download 1 mb pdf file from PDF4me<br />
58
- Download 1 mb pdf file from DocFly by FormSwift<br />
59
- Download 1 mb pdf file with Chrome extension<br />
60
- Download 1 mb pdf file with Firefox add-on<br />
61
- Download 1 mb pdf file with Safari plugin<br />
62
- Download 1 mb pdf file with Edge extension<br />
63
- Why download a 1 mb pdf file instead of a larger one?<br />
64
- Benefits of downloading a 1 mb pdf file for storage and sharing purposes.<br />
65
- Challenges of downloading a 1 mb pdf file on slow internet connection.<br />
66
- Tips and tricks to download a 1 mb pdf file securely and safely.<br />
67
- How to open and view a downloaded 1 mb pdf file on different platforms.<br />
68
- How to edit and modify a downloaded 1 mb pdf file with various software.</p>
69
- <ul>
70
- <li>You want to download a short article, report, brochure, flyer, or resume that is available online as a PDF file</li>
71
- <li>You want to download a sample or preview of a longer document or book that is offered as a PDF file</li>
72
- <li>You want to download a form or application that you need to fill out and submit as a PDF file</li>
73
- <li>You want to download a certificate, receipt, invoice, or ticket that is issued as a PDF file</li>
74
- <li>You want to download a coupon, voucher, or discount code that is provided as a PDF file</li>
75
- </ul>
76
- <p>However, downloading a small PDF file might not always be easy or convenient. Sometimes, you might encounter some challenges or limitations when trying to download a large PDF file. For example:</p>
77
- <ul>
78
- <li>You have a slow or unstable internet connection that makes downloading large files take too long or fail</li>
79
- <li>You have a limited data plan or quota that makes downloading large files consume too much data or incur extra charges</li>
80
- <li>You have a low storage space or memory on your device that makes downloading large files impossible or cause errors</li>
81
- <li>You have a strict firewall or antivirus software that blocks or restricts downloading large files from unknown sources</li>
82
- <li>You have a file size limit or restriction on your email or messaging app that prevents you from sending or receiving large files as attachments</li>
83
- </ul>
84
- <h2>How to Download a 1 MB PDF File from the Internet</h2>
85
- <p>If you need to download a 1 MB PDF file from the internet, you need to find and access a 1 MB PDF file online first. There are many sources or websites that offer free or low-cost PDF files for various purposes and topics. Some of them are:</p>
86
- <ul>
87
- <li><a href="">PDF Drive</a>: A free online library that has over 90 million PDF files for free download</li>
88
- <li><a href="">PDF Books World</a>: A free online platform that has thousands of PDF books for free download</li>
89
- <li><a href="">PDF Candy</a>: A free online tool that has hundreds of PDF templates for free download</li>
90
- <li><a href="">PDF Zone</a>: A free online resource that has dozens of PDF guides and tutorials for free download</li>
91
- <li><a href="">PDF Archive</a>: A free online archive that has millions of PDF documents for free download</li>
92
- </ul> <p>Once you find a 1 MB PDF file that you want to download, you need to download and save it to your device. The steps may vary depending on the source or website, but generally, they are:</p>
93
- <ol>
94
- <li>Click on the PDF file link or icon to open it in your browser or software</li>
95
- <li>Look for a download button or option on the page or toolbar</li>
96
- <li>Click on the download button or option and choose a location or folder on your device where you want to save the PDF file</li>
97
- <li>Wait for the download to complete and check if the PDF file is successfully saved on your device</li>
98
- </ol>
99
- <p>If you encounter any problems or errors when downloading a 1 MB PDF file, you can try some of these solutions:</p>
100
- <ul>
101
- <li>Refresh the page or reload the PDF file</li>
102
- <li>Check your internet connection and speed</li>
103
- <li>Clear your browser cache and cookies</li>
104
- <li>Disable or adjust your firewall or antivirus settings</li>
105
- <li>Use a different browser or software</li>
106
- <li>Contact the source or website for support or feedback</li>
107
- </ul>
108
- <h2>How to Compress a Larger PDF File to 1 MB or Less</h2>
109
- <p>Sometimes, you might not be able to find a 1 MB PDF file that suits your needs. You might have a larger PDF file that you want to download, but it exceeds your bandwidth, storage, or file size limit. In that case, you might want to compress a larger PDF file to a smaller size.</p>
110
- <p>Compressing a PDF file means reducing its file size by removing or optimizing some of its elements, such as images, fonts, metadata, and more. Compressing a PDF file can help you save time, data, and space when downloading, uploading, sending, or storing it.</p>
111
- <p>There are many tools or services that can help you compress PDF files online for free. Some of them are:</p>
112
- <ul>
113
- <li><a href="">Smallpdf</a>: A free online tool that can compress PDF files up to 80% without losing quality</li>
114
- <li><a href="">iLovePDF</a>: A free online tool that can compress PDF files up to 70% without losing quality</li>
115
- <li><a href="">PDF Compressor</a>: A free online tool that can compress PDF files up to 90% without losing quality</li>
116
- <li><a href="">PDF2Go</a>: A free online tool that can compress PDF files up to 50% without losing quality</li>
117
- <li><a href="">Soda PDF</a>: A free online tool that can compress PDF files up to 75% without losing quality</li>
118
- </ul> <p>To use one of these tools or services to compress your PDF file, you need to follow these steps:</p>
119
- <ol>
120
- <li>Go to the website or app of the tool or service that you want to use</li>
121
- <li>Upload your PDF file from your device or cloud storage</li>
122
- <li>Select the compression level or quality that you want for your PDF file</li>
123
- <li>Wait for the tool or service to compress your PDF file</li>
124
- <li>Download and save the compressed PDF file to your device or cloud storage</li>
125
- </ol>
126
- <p>If you encounter any problems or errors when compressing your PDF file, you can try some of these solutions:</p>
127
- <ul>
128
- <li>Check the file size and format of your PDF file</li>
129
- <li>Check the compression level and quality of your PDF file</li>
130
- <li>Check the compatibility and security of the tool or service that you use</li>
131
- <li>Try a different tool or service</li>
132
- <li>Contact the tool or service provider for support or feedback</li>
133
- </ul>
134
- <h2>How to Open and View a 1 MB PDF File on Your Device</h2>
135
- <p>After you download or compress a 1 MB PDF file, you need to open and view it on your device. You can use various software or apps that can help you open and view PDF files. Some of them are:</p>
136
- <ul>
137
- <li><a href="">Adobe Acrobat Reader</a>: A free software that can open and view PDF files on Windows, Mac, Android, and iOS devices</li>
138
- <li><a href="">Google Chrome</a>: A free web browser that can open and view PDF files on Windows, Mac, Linux, Android, and iOS devices</li>
139
- <li><a href="">Microsoft Edge</a>: A free web browser that can open and view PDF files on Windows, Mac, Android, and iOS devices</li>
140
- <li><a href="">Foxit Reader</a>: A free software that can open and view PDF files on Windows, Mac, Linux, Android, and iOS devices</li>
141
- <li><a href="">PDF Viewer</a>: A free app that can open and view PDF files on Android and iOS devices</li>
142
- </ul>
143
- <p>To open and view a 1 MB PDF file on your device, you need to follow these steps:</p>
144
- <ol>
145
- <li>Install or update the software or app that you want to use on your device</li>
146
- <li>Locate the 1 MB PDF file on your device or cloud storage</li>
147
- <li>Open the 1 MB PDF file with the software or app that you use</li>
148
- <li>View the 1 MB PDF file on your device screen</li>
149
- </ol> <p>To adjust the settings or preferences of your software or app to optimize your viewing experience, you can try some of these options:</p>
150
- <ul>
151
- <li>Zoom in or out to change the size of the PDF file on your screen</li>
152
- <li>Rotate or flip the PDF file to change the orientation of the PDF file on your screen</li>
153
- <li>Search or find a word or phrase in the PDF file</li>
154
- <li>Highlight or annotate a part of the PDF file</li>
155
- <li>Print or share the PDF file with others</li>
156
- </ul>
157
- <h2>Conclusion</h2>
158
- <p>Downloading a 1 MB PDF file is not a difficult task if you know how to do it. In this article, we have shown you how to download a 1 MB PDF file from the internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device. We hope that this article has helped you learn something new and useful.</p>
159
- <p>Here are some tips or advice on how to download, compress, and view PDF files efficiently:</p>
160
- <ul>
161
- <li>Choose a reliable and reputable source or website that offers free or low-cost PDF files</li>
162
- <li>Check the file size and format of the PDF file before downloading or compressing it</li>
163
- <li>Use a fast and stable internet connection and a compatible and secure software or app</li>
164
- <li>Compress your PDF file only if necessary and without compromising the quality or content</li>
165
- <li>Open and view your PDF file with the best software or app for your device and preference</li>
166
- </ul>
167
- <p>If you have any questions or comments about downloading, compressing, or viewing PDF files, please feel free to leave them below. We would love to hear from you!</p>
168
- <h2>FAQs</h2>
169
- <h3>What is the difference between a PDF file and a Word file?</h3>
170
- <p>A PDF file is a document format that preserves the layout and appearance of your document across different devices and platforms. A Word file is a document format that allows you to edit and format your document with various features and options.</p>
171
- <h3>How can I convert a PDF file to a Word file or vice versa?</h3>
172
- <p>You can use various tools or services that can help you convert PDF files to Word files or vice versa online for free. Some of them are:</p>
173
- <ul>
174
- <li><a href="">PDF to Word Converter</a>: A free online tool that can convert PDF files to Word files in seconds</li>
175
- <li><a href="">Word to PDF Converter</a>: A free online tool that can convert Word files to PDF files in seconds</li>
176
- <li><a href="">PDFelement</a>: A free software that can convert PDF files to Word files and vice versa on Windows, Mac, Android, and iOS devices</li>
177
- <li><a href="">WPS Office</a>: A free software that can convert PDF files to Word files and vice versa on Windows, Mac, Linux, Android, and iOS devices</li>
178
- <li><a href="">Zamzar</a>: A free online service that can convert PDF files to Word files and vice versa by email</li>
179
- </ul>
180
- <h3>How can I edit a PDF file?</h3>
181
- <p>You can use various tools or services that can help you edit PDF files online for free. Some of them are:</p>
182
- <ul>
183
- <li><a href="">PDFescape</a>: A free online tool that can help you edit text, images, links, forms, and more in PDF files</li>
184
- <li><a href="">PDF Buddy</a>: A free online tool that can help you edit text, images, signatures, annotations, and more in PDF files</li>
185
- <li><a href="">PDF-XChange Editor</a>: A free software that can help you edit text, images, comments, stamps, and more in PDF files on Windows devices</li>
186
- <li><a href="">PDF Expert</a>: A free software that can help you edit text, images, links, forms, and more in PDF files on Mac devices</li>
187
- <li><a href="">Xodo</a>: A free app that can help you edit text, images, annotations, bookmarks, and more in PDF files on Android and iOS devices</li>
188
- </ul>
189
- <h3>How can I merge or split a PDF file?</h3>
190
- <p>You can use various tools or services that can help you merge or split PDF files online for free. Some of them are:</p>
191
- <ul>
192
- <li><a href="">PDF Merge</a>: A free online tool that can help you merge multiple PDF files into one PDF file</li>
193
- <li><a href="">PDF Splitter</a>: A free online tool that can help you split one PDF file into multiple PDF files</li>
194
- <li><a href="">PDF SAM</a>: A free online tool that can help you merge or split PDF files with drag and drop</li>
195
- <li><a href="">PDFill</a>: A free software that can help you merge or split PDF files with various options on Windows devices</li>
196
- <li><a href="">PDFsam Basic</a>: A free software that can help you merge or split PDF files with various options on Windows, Mac, and Linux devices</li>
197
- </ul>
198
- <h3>How can I sign a PDF file?</h3>
199
- <p>You can use various tools or services that can help you sign PDF files online for free. Some of them are:</p>
200
- <ul>
201
- <li><a href="">DocuSign</a>: A free online service that can help you sign PDF files with your electronic signature or digital certificate</li>
202
- <li><a href="">HelloSign</a>: A free online service that can help you sign PDF files with your electronic signature or digital certificate</li>
203
- <li><a href="">Adobe Sign</a>: A free online service that can help you sign PDF files with your electronic signature or digital certificate</li>
204
- <li><a href="">SignEasy</a>: A free app that can help you sign PDF files with your electronic signature or digital certificate on Android and iOS devices</li>
205
- <li><a href="">SignNow</a>: A free app that can help you sign PDF files with your electronic signature or digital certificate on Android and iOS devices</li>
206
- </ul></p><br />
207
- <br />
208
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Genshin Impact and Embark on an Epic Quest Across Seven Nations.md DELETED
@@ -1,128 +0,0 @@
1
-
2
- <h1>Genshin Download: How to Play the Free-to-Play RPG on PC, Mobile, and Console</h1>
3
- <p>Genshin Impact is one of the most popular games of 2020 and 2021, attracting millions of players from all over the world. It is a free-to-play open-world action RPG that lets you explore a beautiful fantasy world called Teyvat, where you can meet a diverse cast of characters, fight against powerful enemies, and uncover the mysteries of your lost sibling. Whether you are a fan of anime-style graphics, engaging storylines, or dynamic combat systems, Genshin Impact has something for everyone.</p>
4
- <p>But how do you download Genshin Impact on your preferred platform? And what are the system requirements and tips and tricks that you need to know before you start your adventure? In this article, we will answer all these questions and more. Read on to find out how to play Genshin Impact on PC, mobile, or console today!</p>
5
- <h2>genshin download</h2><br /><p><b><b>Download File</b> &middot;&middot;&middot;&middot;&middot; <a href="https://jinyurl.com/2uNJWl">https://jinyurl.com/2uNJWl</a></b></p><br /><br />
6
- <h2>Genshin Download for PC</h2>
7
- <p>If you want to play Genshin Impact on your PC, you have two options. You can either download it from the official website or from the Epic Games Store. Both methods are free and easy to follow.</p>
8
- <p>To download Genshin Impact from the official website, you need to visit [Genshin Impact – Step Into a Vast Magical World of Adventure](^1^) and click on the "Windows" button. This will start downloading the launcher file. Once it is downloaded, run it and follow the instructions to install the launcher. Then, open the launcher and log in with your miHoYo account or create one if you don't have one already. After that, click on "Get Game" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing.</p>
9
- <p>genshin impact download pc<br />
10
- genshin impact download size<br />
11
- genshin impact download apk<br />
12
- genshin impact download android<br />
13
- genshin impact download ios<br />
14
- genshin impact download mac<br />
15
- genshin impact download ps4<br />
16
- genshin impact download error<br />
17
- genshin impact download slow<br />
18
- genshin impact download link<br />
19
- genshin impact download windows 10<br />
20
- genshin impact download steam<br />
21
- genshin impact download update<br />
22
- genshin impact download free<br />
23
- genshin impact download reddit<br />
24
- genshin impact download speed<br />
25
- genshin impact download failed<br />
26
- genshin impact download for laptop<br />
27
- genshin impact download google play<br />
28
- genshin impact download requirements<br />
29
- genshin impact download time<br />
30
- genshin impact download data<br />
31
- genshin impact download problem<br />
32
- genshin impact download not working<br />
33
- genshin impact download stuck<br />
34
- genshin impact download zip file<br />
35
- genshin impact download emulator<br />
36
- genshin impact download obb file<br />
37
- genshin impact download latest version<br />
38
- genshin impact download official website<br />
39
- genshin impact download without launcher<br />
40
- genshin impact download qr code<br />
41
- genshin impact download verification failed<br />
42
- genshin impact download on phone<br />
43
- genshin impact download on switch<br />
44
- genshin impact download on xbox one<br />
45
- genshin impact download on chromebook<br />
46
- genshin impact download on linux<br />
47
- genshin impact download on bluestacks<br />
48
- genshin impact download on macbook air<br />
49
- genshin impact download on macbook pro<br />
50
- genshin impact download on ipad pro<br />
51
- genshin impact download on iphone 6s<br />
52
- genshin impact download on iphone 7 plus<br />
53
- genshin impact download on iphone 8 plus<br />
54
- genshin impact download on iphone x</p>
55
- <p>To download Genshin Impact from the Epic Games Store, you need to visit [Genshin Impact | Download and Play for Free - Epic Games Store](^3^) and click on "Get". This will prompt you to log in with your Epic Games account or create one if you don't have one already. Then, click on "Place Order" to confirm your purchase (don't worry, it's still free). After that, you will be redirected to the Epic Games Launcher. If you don't have it installed on your PC, you can download it from [Epic Games Launcher]. Once you have the launcher, install it and open it. Then, go to the "Library" tab and find Genshin Impact. Click on "Install" to start downloading the game files. The download size is about 8.2 GB, so it may take some time depending on your internet speed. When the download is complete, click on "Launch" to start playing.</p>
56
- <h3>PC System Requirements</h3>
57
- <p>Before you download Genshin Impact on your PC, you should check if your PC meets the minimum or recommended system requirements for the game. Here are the system requirements for PC according to the official website:</p>
58
- | Minimum System Requirements | Recommended System Requirements | | --- | --- | | OS: Windows 7 SP1 64-bit, Windows 8.1 64-bit, or Windows 10 64-bit | OS: Windows 10 64-bit | | Processor: Intel Core i5 or equivalent | Processor: Intel Core i7 or equivalent | | Memory: 8 GB RAM | Memory: 16 GB RAM | | Graphics: NVIDIA GeForce GT 1030 or higher | Graphics: NVIDIA GeForce RTX 1060 6 GB or higher | | DirectX: Version 11 | DirectX: Version 11 | | Storage: 30 GB available space | Storage: 30 GB available space | <p>If your PC does not meet the minimum system requirements, you may experience low frame rates, crashes, or other issues while playing the game. If your PC meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics settings and smoother performance.</p>
59
- <h3>PC Tips and Tricks</h3>
60
- <p>Here are some tips and tricks that can help you optimize your PC performance and gameplay experience while playing Genshin Impact:</p>
61
- <ul>
62
- <li>Update your graphics drivers and DirectX to the latest version. This can improve your graphics quality and stability.</li>
63
- <li>Adjust your graphics settings in the game options. You can lower some settings such as anti-aliasing, shadows, or render resolution to increase your frame rate and reduce lag. You can also enable or disable some features such as V-sync, FPS limit, or window mode to suit your preference.</li>
64
- <li>Close other programs or background processes that may consume your CPU, memory, or bandwidth. This can free up some resources for the game and prevent potential conflicts or errors.</li>
65
- <li>Use a wired connection instead of a wireless one if possible. This can reduce latency and packet loss and improve your online experience.</li>
66
- <li>Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.</li>
67
- </ul> of storage space | 8 GB of storage space or more | | iOS 9.0 or higher | iOS 10.0 or higher | | iPhone 8 Plus, iPad Air 3, or higher | iPhone XR, iPad Pro, or higher | <p>If your device does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your device meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance.</p>
68
- <h3>Mobile Tips and Tricks</h3>
69
- <p>Here are some tips and tricks that can help you optimize your mobile performance and gameplay experience while playing Genshin Impact:</p>
70
- <ul>
71
- <li>Update your device software and app to the latest version. This can fix some bugs and improve your compatibility and stability.</li>
72
- <li>Adjust your graphics settings in the game options. You can lower some settings such as render resolution, shadow quality, or FPS to save battery life and reduce overheating. You can also enable or disable some features such as auto-adjust graphics, custom controls, or HD assets to suit your preference.</li>
73
- <li>Use a Wi-Fi connection instead of a mobile data connection if possible. This can reduce data usage and improve your online experience.</li>
74
- <li>Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.</li>
75
- <li>Use headphones or earphones to enjoy the game's immersive sound effects and music.</li>
76
- </ul>
77
- <h2>Genshin Download for Console</h2>
78
- <p>If you want to play Genshin Impact on your console, you can download it from PlayStation Store if you have a PlayStation 4 or PlayStation 5. The method is free and easy to follow.</p>
79
- <p>To download Genshin Impact from PlayStation Store, you need to visit [Genshin Impact on PS4 | Official PlayStation™Store US] or [Genshin Impact on PS5 | Official PlayStation™Store US] depending on your console. Then, click on the "Add to Library" button. This will add the game to your library. Then, go to your library and find Genshin Impact. Click on the "Download" button to start downloading the game files. The download size is about 12 GB for PS4 and 14 GB for PS5, so it may take some time depending on your internet speed and console storage. When the download is complete, click on the game icon to start playing.</p>
80
- <h3>Console System Requirements</h3>
81
- <p>Before you download Genshin Impact on your console, you should check if your console meets the minimum system requirements for the game. Here are the system requirements for consoles according to the official website:</p>
82
- | Minimum System Requirements | Recommended System Requirements | | --- | --- | | PS4 with 30 GB of storage space | PS4 Pro with 30 GB of storage space | | PS5 with 50 GB of storage space | PS5 with 50 GB of storage space | <p>If your console does not meet the minimum system requirements, you may experience low graphics quality, slow loading times, or other issues while playing the game. If your console meets or exceeds the recommended system requirements, you can enjoy the game at higher graphics quality and smoother performance.</p>
83
- <h3>Console Tips and Tricks</h3>
84
- <p>Here are some tips and tricks that can help you optimize your console performance and gameplay experience while playing Genshin Impact:</p>
85
- <ul>
86
- <li>Update your console software and app to the latest version. This can fix some bugs and improve your compatibility and stability.</li>
87
- <li>Adjust your graphics settings in the game options. You can choose between "Favor Resolution" or "Favor Performance" modes to balance between graphics quality and frame rate. You can also enable or disable some features such as motion blur, anti-aliasing, or HDR to suit your preference.</li>
88
- <li>Link your miHoYo account to your PlayStation Network account. This can allow you to access some online features such as cross-play, cross-save, mail system, events, and more.</li>
89
- <li>Check the official website or social media accounts for any news, updates, or maintenance announcements. This can help you stay informed of any changes, issues, or events that may affect the game.</li>
90
- <li>Use a controller that suits your play style and comfort. You can customize your controller layout in the game options.</li>
91
- </ul>
92
- <h2>Genshin Impact Game Features</h2>
93
- <p>Now that you know how to download Genshin Impact on your preferred platform, you may be wondering what you can expect from the game in terms of gameplay, story, characters, combat, exploration, and more. In this section, we will give you a brief overview of some of the main features of the game that make it so fun and addictive.</p>
94
- <h3>Gameplay</h3>
95
- <p>Genshin Impact is an open-world action RPG that combines exploration, combat, and gacha elements. You can explore the vast world of Teyvat at your own pace, discovering new locations, secrets, and treasures along the way. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players.</p>
96
- <p>The game also features a gacha system that allows you to obtain new characters, weapons, and items by spending a currency called Primogems. You can earn Primogems by playing the game or by purchasing them with real money. The gacha system is based on a random chance, so you may not always get what you want. However, the game is generous enough to give you some free pulls and rewards as you progress.</p>
97
- <h3>Story</h3>
98
- <p>Genshin Impact has a rich and immersive story that unfolds as you play the game. The main story revolves around your quest to find your lost sibling, who was separated from you by a mysterious god. Along the way, you will encounter different factions, cultures, and conflicts that shape the world of Teyvat. You will also meet various characters who will join your party and help you in your journey.</p>
99
- <p>The game has seven major regions, each based on a different element and inspired by a real-world culture. So far, only two regions are available: Mondstadt (Anemo/Wind) and Liyue (Geo/Earth). The other five regions are Inazuma (Electro/Lightning), Sumeru (Dendro/Nature), Fontaine (Hydro/Water), Natlan (Pyro/Fire), and Snezhnaya (Cryo/Ice). The game developers plan to release more regions and content in the future through updates and patches.</p>
100
- <h3>Characters</h3>
101
- <p>Genshin Impact has a diverse and colorful cast of characters that you can play as or interact with. There are currently 37 playable characters in the game, each with their own personality, backstory, element, weapon, and abilities. You can switch between four characters in your party at any time, depending on the situation and your preference.</p>
102
- <p>The characters are divided into five rarity tiers: 1-star, 2-star, 3-star, 4-star, and 5-star. The higher the rarity, the more powerful and rare the character is. You can obtain new characters by using the gacha system or by completing certain quests or events. You can also upgrade your characters by leveling them up, ascending them, enhancing their weapons and artifacts, and unlocking their constellations.</p>
103
- <h3>Combat</h3>
104
- <p>Genshin Impact has a dynamic and fluid combat system that relies on elemental interactions and strategy. You can use your character's basic attacks, elemental skills, and elemental bursts to deal damage to your enemies. You can also switch between different characters to create elemental reactions that can amplify or modify your damage output.</p>
105
- <p>The game has seven elements: Anemo (Wind), Geo (Earth), Electro (Lightning), Dendro (Nature), Hydro (Water), Pyro (Fire), and Cryo (Ice). Each element has its own strengths and weaknesses against other elements. For example, Pyro can melt Cryo, but is weak against Hydro. You can use this knowledge to your advantage and create powerful combos that can wipe out your foes.</p>
106
- <h3>Exploration</h3>
107
- <p>Genshin Impact has a vast and beautiful world that you can explore at your own pace. You can travel across different terrains, climates, and biomes using various methods such as walking, running, climbing, gliding, swimming, or riding. You can also use fast travel points to teleport to locations that you have already visited.</p>
108
- <p>The world of Teyvat is full of secrets, quests, events, and activities that you can discover and enjoy. You can find chests, resources, puzzles, enemies, and more that can reward you with items, experience, or currency. You can also interact with various NPCs, complete quests, participate in events, and join co-op sessions with other players.</p>
109
- <p>The game also has a feature called the Serenitea Pot, which allows you to create your own personal realm and customize it with furniture, decorations, and buildings. You can invite your characters and friends to your realm and enjoy some relaxing time.</p>
110
- <h1>Conclusion</h1>
111
- <p>Genshin Impact is a free-to-play open-world action RPG that offers a lot of fun and excitement for players of all ages and preferences. You can download the game on PC, mobile, or console and enjoy the stunning graphics, captivating story, diverse characters, dynamic combat, and endless exploration. Whether you want to play solo or with friends, Genshin Impact has something for everyone.</p>
112
- <p>So what are you waiting for? Download Genshin Impact today and start your adventure in the magical world of Teyvat!</p>
113
- <h2>FAQs</h2>
114
- <p>Here are some frequently asked questions about Genshin Impact and how to download it:</p>
115
- <ul>
116
- <li><b>Q: Is Genshin Impact free?</b></li>
117
- <li>A: Yes, Genshin Impact is free to download and play. However, it does have some optional in-game purchases that can enhance your gameplay experience.</li>
118
- <li><b>Q: Is Genshin Impact cross-platform?</b></li>
119
- <li>A: Yes, Genshin Impact supports cross-platform play between PC, mobile, and console. You can play with your friends on different devices as long as you are on the same server and have the same game version.</li>
120
- <li><b>Q: How do I update Genshin Impact?</b></li>
121
- <li>A: Genshin Impact updates automatically when you launch the game on your platform. You can also check for updates manually in the game options or on the official website.</li>
122
- <li><b>Q: How do I contact Genshin Impact customer service?</b></li>
123
- <li>A: You can contact Genshin Impact customer service by using the in-game feedback system or by visiting [Genshin Impact – Step Into a Vast Magical World of Adventure] and clicking on "Support".</li>
124
- <li><b>Q: How do I get more Primogems?</b></li>
125
- <li>A: You can get more Primogems by playing the game and completing various tasks such as quests, events, achievements, daily commissions, spiral abyss, etc. You can also buy Primogems with real money by using the in-game shop or by redeeming gift codes.</li>
126
- </ul></p> 401be4b1e0<br />
127
- <br />
128
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Granny Chapter 2 Outwitt Mod Menu APK Madin for Free - No Root Required!.md DELETED
@@ -1,79 +0,0 @@
1
-
2
- <h1>Granny Chapter 2 Mod Menu Outwitt Mod Free Download Link APK Madin</h1>
3
- <p>If you are a fan of horror games, you might have heard of Granny Chapter 2, a popular game that challenges you to escape from a creepy house with two evil characters: Granny and Grandpa. But what if you want to make the game more fun and exciting? Well, you can try Outwitt Mod, a mod menu that gives you access to various cheats and hacks for Granny Chapter 2. In this article, we will tell you everything you need to know about Outwitt Mod, including how to download and install it for free on your Android device.</p>
4
- <h2>What is Granny Chapter 2?</h2>
5
- <p>Granny Chapter 2 is a horror game developed by DVloper, the same creator of the original Granny game. It was released in September 2019 and has since gained millions of downloads and positive reviews from players around the world. The game is available for Android, iOS, and Windows devices.</p>
6
- <h2>granny chapter 2 mod menu outwitt mod free download link apk madin</h2><br /><p><b><b>Download Zip</b> &#9881;&#9881;&#9881; <a href="https://jinyurl.com/2uNPUm">https://jinyurl.com/2uNPUm</a></b></p><br /><br />
7
- <h3>The gameplay of Granny Chapter 2</h3>
8
- <p>The gameplay of Granny Chapter 2 is similar to the first game, but with some new twists and features. You are trapped in a dark and spooky house with two enemies: Granny and Grandpa. Granny can hear everything and will chase you if she hears any noise. Grandpa is hard of hearing but he will attack you if he sees you. You have to find a way to escape from the house within five days, or else you will face a horrible fate. You can explore different rooms, find items, solve puzzles, and hide from the enemies. But be careful, because they are always on the lookout for you.</p>
9
- <h3>The features of Granny Chapter 2</h3>
10
- <p>Granny Chapter 2 has many features that make it an enjoyable and thrilling game. Some of them are:</p>
11
- <ul>
12
- <li>You can choose between four difficulty levels: Easy, Normal, Hard, and Extreme.</li>
13
- <li>You can play in two modes: Practice or Normal. In Practice mode, you can explore the house without any enemies. In Normal mode, you have to face both Granny and Grandpa.</li>
14
- <li>You can customize your character's appearance, such as hair color, skin color, and clothes.</li>
15
- <li>You can use different weapons, such as a shotgun, a crossbow, a stun gun, or a crowbar, to fight back against the enemies.</li>
16
- <li>You can interact with various objects, such as doors, windows, cabinets, drawers, traps, cameras, etc.</li>
17
- <li>You can enjoy realistic graphics, sound effects, and music that create a scary atmosphere.</li>
18
- </ul>
19
- <h2>What is Outwitt Mod?</h2>
20
- <p>Outwitt Mod is a mod menu that allows you to modify the game settings and enable various cheats and hacks for Granny Chapter 2. It was created by Outwitt, a YouTube channel that uploads videos about Granny games and mods. Outwitt Mod is one of the most popular mods for Granny Chapter 2 and has been downloaded by thousands of players.</p>
21
- <h3>The benefits of Outwitt Mod</h3>
22
- <p>Outwitt Mod gives you many benefits that make the game more fun and easy. Some of them are:</p>
23
- <ul>
24
- <li>You can access a mod menu that lets you change the game settings, such as the difficulty level, the enemy speed, the enemy damage, etc.</li>
25
- <li>You can enable cheats that give you unlimited health, unlimited ammo, invisibility, teleportation, etc.</li>
26
- <li>You can unlock all items in the game, such as weapons, keys, tools, etc.</ menu that allows you to modify the game settings and enable various cheats and hacks for Granny Chapter 2. It was created by Outwitt, a YouTube channel that uploads videos about Granny games and mods. In this article, we have explained what Granny Chapter 2 and Outwitt Mod are, what are their benefits and drawbacks, and how to download and install Outwitt Mod for Granny Chapter 2 on your Android device. We hope you found this article helpful and informative. If you want to try Outwitt Mod for Granny Chapter 2, you can follow the steps and precautions we have provided. But remember, use it at your own risk and responsibility.</p>
27
- <p>granny chapter 2 outwitt mod apk unlimited money<br />
28
- granny chapter 2 outwitt mod menu god mode<br />
29
- granny chapter 2 outwitt mod apk latest version<br />
30
- granny chapter 2 outwitt mod apk download for android<br />
31
- granny chapter 2 outwitt mod menu download link<br />
32
- granny chapter 2 outwitt mod apk no ads<br />
33
- granny chapter 2 outwitt mod menu unlimited ammo<br />
34
- granny chapter 2 outwitt mod apk free fire<br />
35
- granny chapter 2 outwitt mod menu how to install<br />
36
- granny chapter 2 outwitt mod apk offline<br />
37
- granny chapter 2 outwitt mod menu no root<br />
38
- granny chapter 2 outwitt mod apk hack<br />
39
- granny chapter 2 outwitt mod menu features<br />
40
- granny chapter 2 outwitt mod apk easy escape<br />
41
- granny chapter 2 outwitt mod menu tutorial<br />
42
- granny chapter 2 outwitt mod apk new update<br />
43
- granny chapter 2 outwitt mod menu gameplay<br />
44
- granny chapter 2 outwitt mod apk horror game<br />
45
- granny chapter 2 outwitt mod menu review<br />
46
- granny chapter 2 outwitt mod apk best settings<br />
47
- granny chapter 2 outwitt mod menu cheats<br />
48
- granny chapter 2 outwitt mod apk fun mode<br />
49
- granny chapter 2 outwitt mod menu tips and tricks<br />
50
- granny chapter 2 outwitt mod apk all weapons<br />
51
- granny chapter 2 outwitt mod menu hidden items<br />
52
- granny chapter 2 outwitt mod apk madin edition<br />
53
- granny chapter 2 outwitt mod menu vs nullzerep<br />
54
- granny chapter 2 outwitt mod apk multiplayer<br />
55
- granny chapter 2 outwitt mod menu speed hack<br />
56
- granny chapter 2 outwitt mod apk invisible mode</p>
57
- <h3>Summary of the main points</h3>
58
- <p>Here are the main points of this article:</p>
59
- <ul>
60
- <li>Granny Chapter 2 is a horror game that challenges you to escape from a house with two enemies: Granny and Grandpa.</li>
61
- <li>Outwitt Mod is a mod menu that gives you access to various cheats and hacks for Granny Chapter 2.</li>
62
- <li>Outwitt Mod has many benefits, such as unlimited health, unlimited ammo, invisibility, teleportation, etc.</li>
63
- <li>Outwitt Mod also has some drawbacks, such as security risks, game bans, loss of originality, and compatibility issues.</li>
64
- <li>To download and install Outwitt Mod for Granny Chapter 2, you need to uninstall the original game, enable unknown sources, download the APK file, install it, and launch the game.</li>
65
- <li>Before downloading and installing Outwitt Mod for Granny Chapter 2, you should backup your data, scan the APK file, use a VPN or a proxy server, and not use it for illegal or unethical purposes.</li>
66
- </ul>
67
- <h3>Call to action for the readers</h3>
68
- <p>If you liked this article, please share it with your friends and family who are interested in Granny Chapter 2 and Outwitt Mod. Also, don't forget to subscribe to our website for more articles like this one. And if you have any questions or feedback about this article, please leave a comment below. We would love to hear from you.</p>
69
- <h4>FAQs</h4>
70
- <p>Here are some frequently asked questions about Granny Chapter 2 and Outwitt Mod:</p>
71
- <ol>
72
- <li>Q: Is Outwitt Mod safe to use?<br>A: Outwitt Mod is not an official app from the game developer, so it may contain some viruses, malware, or spyware that can harm your device or data. Therefore, you should always scan the APK file with an antivirus or anti-malware software before installing it. You should also use a VPN or a proxy server to hide your IP address and location when playing the game online.</li>
73
- <li>Q: Is Outwitt Mod legal to use?<br>A: Outwitt Mod is not legal to use because it violates the terms and conditions of the game developer. By using Outwitt Mod, you are cheating and hacking the game, which is unfair to other players and disrespectful to the game creator. You may also get banned from playing the game online if the game developer detects your mod usage.</li>
74
- <li>Q: Does Outwitt Mod work on iOS devices?<br>A: No, Outwitt Mod only works on Android devices. It is not compatible with iOS devices because it is an APK file that can only be installed on Android devices. If you want to use Outwitt Mod on your iOS device, you will need to jailbreak your device first, which is not recommended because it can damage your device or void your warranty.</li>
75
- <li>Q: Does Outwitt Mod work on Windows devices?<br>A: Yes, Outwitt Mod can work on Windows devices if you use an Android emulator. An Android emulator is a software that allows you to run Android apps on your Windows device. You can download an Android emulator such as BlueStacks or NoxPlayer on your Windows device and then install Outwitt Mod on it. However, this may affect the performance of your device or the game.</li>
76
- <li>Q: Where can I find more mods for Granny Chapter 2?<br>A: You can find more mods for Granny Chapter 2 on various websites or YouTube channels that offer mod downloads or tutorials. Some of them are NullZerep Mods, GodisAGamer Mods, Platinmods Mods, etc. But be careful when downloading mods from unknown sources because they may contain viruses or malware that can harm your device or data.</li>
77
- </ol></p> 197e85843d<br />
78
- <br />
79
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Logo Quiz MOD APK and Enjoy the Fun of Recognizing Logos.md DELETED
@@ -1,118 +0,0 @@
1
- <br />
2
- <h1>Logo Quiz Mod APK: A Fun and Challenging Game for Logo Lovers</h1>
3
- <p>Do you think you can recognize hundreds of logos from different brands and companies? Do you want to test your logo knowledge and have fun at the same time? If you answered yes, then you should try Logo Quiz Mod APK, a free trivia game that will keep you entertained for hours.</p>
4
- <h2>logo quiz mod apk</h2><br /><p><b><b>Download</b> &#9881; <a href="https://jinyurl.com/2uNMmE">https://jinyurl.com/2uNMmE</a></b></p><br /><br />
5
- <h2>What is Logo Quiz Mod APK?</h2>
6
- <p>Logo Quiz Mod APK is a modified version of the original Logo Quiz game by Bubble Quiz Games. In this game, you have to guess the names of thousands of logos from popular companies all over the world. You will see various logos on the screen, and you have to type in the correct answer using the available letters. You can also use hints to help you if you get stuck.</p>
7
- <p>Logo Quiz Mod APK is different from the original game in that it gives you unlimited hints, extra levels, offline mode, and daily challenges. These features make the game more enjoyable and less frustrating. You can also play the game without an internet connection, which is great if you want to kill some time when you are offline.</p>
8
- <h2>Features of Logo Quiz Mod APK</h2>
9
- <h3>Unlimited hints</h3>
10
- <p>One of the best features of Logo Quiz Mod APK is that it gives you unlimited hints. Each logo has 5 hints that you can use to get a clue about the answer. You can also get new hints by answering correctly or watching ads. However, with Logo Quiz Mod APK, you don't have to worry about running out of hints or watching ads. You can use as many hints as you want without any limitations.</p>
11
- <h3>Extra levels</h3>
12
- <p>Another feature of Logo Quiz Mod APK is that it gives you access to extra levels that are not available in the original game. These levels include:</p>
13
- <ul>
14
- <li>Slogans: Guess the slogans of 200 famous brands.</li>
15
- <li>Minimalist: Guess the logos that are simplified to their basic shapes and colors.</li>
16
- <li>Retro: Guess the logos that are based on old versions or designs.</li>
17
- <li>Expert: Guess the logos that are very hard or obscure.</li>
18
- </ul>
19
- <p>These extra levels add more variety and challenge to the game, making it more interesting and fun.</p>
20
- <h3>Offline mode</h3>
21
- <p>Logo Quiz Mod APK also allows you to play the game offline, which means you don't need an internet connection to enjoy it. This is great if you want to play the game when you are traveling, waiting, or bored. You can also save your data and battery by playing offline.</p>
22
- <p>logo quiz mod apk unlimited hints<br />
23
- logo quiz mod apk download for android<br />
24
- logo quiz mod apk latest version<br />
25
- logo quiz mod apk 2023<br />
26
- logo quiz mod apk an1<br />
27
- logo quiz mod apk revdl<br />
28
- logo quiz mod apk hack<br />
29
- logo quiz mod apk all levels unlocked<br />
30
- logo quiz mod apk offline<br />
31
- logo quiz mod apk no ads<br />
32
- logo quiz mod apk free shopping<br />
33
- logo quiz mod apk unlimited money<br />
34
- logo quiz mod apk android 1<br />
35
- logo quiz mod apk bubble quiz games<br />
36
- logo quiz mod apk unlimited coins<br />
37
- logo quiz mod apk premium<br />
38
- logo quiz mod apk rexdl<br />
39
- logo quiz mod apk 33.6<br />
40
- logo quiz mod apk world brands<br />
41
- logo quiz mod apk ultimate logos<br />
42
- logo quiz mod apk guess the brand<br />
43
- logo quiz mod apk trivia games<br />
44
- logo quiz mod apk car logos<br />
45
- logo quiz mod apk food logos<br />
46
- logo quiz mod apk sports logos<br />
47
- logo quiz mod apk fashion logos<br />
48
- logo quiz mod apk music logos<br />
49
- logo quiz mod apk movie logos<br />
50
- logo quiz mod apk tv logos<br />
51
- logo quiz mod apk game logos<br />
52
- logo quiz mod apk web logos<br />
53
- logo quiz mod apk country logos<br />
54
- logo quiz mod apk city logos<br />
55
- logo quiz mod apk animal logos<br />
56
- logo quiz mod apk cartoon logos<br />
57
- logo quiz mod apk superhero logos<br />
58
- logo quiz mod apk celebrity logos<br />
59
- logo quiz mod apk flag logos<br />
60
- logo quiz mod apk fun plus answers<br />
61
- logo quiz mod apk level 1 to 20 answers<br />
62
- logo quiz mod apk level 21 to 40 answers <br />
63
- logo quiz mod apk level 41 to 60 answers <br />
64
- logo quiz mod apk level 61 to 80 answers <br />
65
- logo quiz mod apk level 81 to 100 answers <br />
66
- logo quiz ultimate pro hd full unlocked premium features unlocked adfree latest version download free for android devices</p>
67
- <h3>Daily challenges</h3>
68
- <p>Logo Quiz Mod APK also offers daily challenges that give you a new puzzle every day. These puzzles are different from the regular levels and require more skill and speed. You have to guess as many logos as possible in a limited time and earn points and extra hints. You can also compare your scores with other players and see how well you rank.</p>
69
- <h2>How to download and install Logo Quiz Mod APK</h2>
70
- <p>If you want to try Logo Quiz Mod APK, you have to download and install it on your Android device. Here are the steps to follow these steps: <h3>Step 1: Enable unknown sources</h3>
71
- <p>Before you can install Logo Quiz Mod APK, you need to allow your device to install apps from unknown sources. This means that you can install apps that are not from the Google Play Store. To do this, go to your device settings and tap on Security. Then, look for the option that says Install unknown apps or Unknown sources and enable it. You may see a warning message that tells you about the risks of installing unknown apps, but you can ignore it if you trust the source of the APK file.</p>
72
- <h3>Step 2: Download the APK file</h3>
73
- <p>Next, you need to download the APK file of Logo Quiz Mod APK from a reliable website. You can use your browser to find and download the file, or you can use a computer and transfer the file to your device via USB. Make sure you download the latest version of the APK file and that it is compatible with your device. You can check the file size and version number before downloading it.</p>
74
- <h3>Step 3: Install the APK file</h3>
75
- <p>Once you have downloaded the APK file, you need to locate it on your device and tap on it to start the installation process. You can use a file manager app to find the file in your Downloads folder or wherever you saved it. You may see a pop-up window that asks you to confirm the installation and grant some permissions to the app. Tap on Install and wait for the installation to finish.</p>
76
- <h3>Step 4: Launch the game and enjoy</h3>
77
- <p>After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You will see a welcome screen that introduces you to the game and its features. You can also access the settings menu to adjust some options such as sound, language, and notifications. Now you are ready to play Logo Quiz Mod APK and have fun guessing logos from different brands.</p> <h2>Tips and tricks for playing Logo Quiz Mod APK</h2>
78
- <p>Logo Quiz Mod APK is a fun and challenging game, but it can also be frustrating if you don't know the answers. Here are some tips and tricks that can help you play the game better and enjoy it more:</p>
79
- <h3>Use hints wisely</h3>
80
- <p>As mentioned earlier, Logo Quiz Mod APK gives you unlimited hints, which can be very helpful if you are stuck. However, you should not rely on hints too much, as they can make the game too easy and boring. You should try to guess the logo first before using a hint, and only use one hint at a time. You can also save your hints for later levels, where the logos are harder and more obscure.</p>
81
- <h3>Learn from your mistakes</h3>
82
- <p>Another tip for playing Logo Quiz Mod APK is to learn from your mistakes. If you guess a logo wrong, don't just skip it and move on. Instead, try to remember the correct answer and the logo design. This way, you can improve your memory and recognition skills, and avoid making the same mistake again. You can also review your answers at the end of each level and see where you went wrong.</p>
83
- <h3>Use online resources</h3>
84
- <p>If you are really stuck and can't figure out a logo, you can always use online resources to help you. There are many websites and apps that offer logo quizzes and answers, such as Logo Quiz Answers or Logo Quiz Cheats. You can also use search engines or image recognition tools to find the logo you are looking for. However, you should use these resources sparingly, as they can spoil the fun and challenge of the game.</p>
85
- <h3>Challenge yourself and your friends</h3>
86
- <p>One of the best ways to enjoy Logo Quiz Mod APK is to challenge yourself and your friends. You can set a goal for yourself, such as completing a level without using any hints or within a certain time limit. You can also compete with your friends and see who can guess more logos or score higher. You can share your results on social media or via messaging apps, and invite your friends to join the game.</p>
87
- <h2>Benefits of playing Logo Quiz Mod APK</h2>
88
- <p>Logo Quiz Mod APK is not only a fun and entertaining game, but also a beneficial one. Here are some of the benefits of playing Logo Quiz Mod APK:</p>
89
- <h3>Enhance your memory and recognition skills</h3>
90
- <p>Playing Logo Quiz Mod APK can help you enhance your memory and recognition skills, as you have to remember and identify thousands of logos from different brands and companies. This can improve your brain function and cognitive abilities, as well as your attention span and concentration. You can also learn new things and facts about logos and brands that you may not have known before.</p>
91
- <h3>Expand your knowledge of brands and logos</h3>
92
- <p>Playing Logo Quiz Mod APK can also help you expand your knowledge of brands and logos, as you have to guess logos from various categories, such as food, fashion, sports, technology, etc. You can discover new brands and logos that you may not have heard of before, or learn more about the ones that you already know. You can also appreciate the creativity and design of logos, and how they convey the identity and message of a brand.</p>
93
- <h3>Have fun and relax</h3>
94
- <p>Finally, playing Logo Quiz Mod APK can help you have fun and relax, as it is a simple and enjoyable game that anyone can play. You can play it anytime and anywhere, whether you are online or offline. You can also play it at your own pace, without any pressure or stress. You can also have fun with your friends and family, by playing together or competing with each other.</p>
95
- <h2>Conclusion</h2>
96
- <p>Logo Quiz Mod APK is a fun and challenging game for logo lovers who want to test their logo knowledge and have fun at the same time. It offers unlimited hints, extra levels, offline mode, and daily challenges that make the game more enjoyable and less frustrating. It also helps enhance memory and recognition skills, expand knowledge of brands and logos, and have fun and relax. If you want to try Logo Quiz Mod APK, you can download it from a reliable website and install it on your Android device by following the steps above.</p>
97
- <h2>Frequently Asked Questions</h2>
98
- <ol>
99
- <li><b>What is the difference between Logo Quiz Mod APK and Logo Quiz?</b></li>
100
- <p>Logo Quiz Mod APK is a modified version of Logo Quiz that gives you unlimited hints, extra levels, offline mode, and daily challenges. Logo Quiz is the original game that has limited hints, fewer levels, online mode only, and no daily challenges.</p>
101
- <li><b>How many logos are there in Logo Quiz Mod APK?</b></li <p>Logo Quiz Mod APK has over 4000 logos from different categories and levels. You can also play the extra levels that have 200 logos each.</p>
102
- <li><b>How can I get more hints in Logo Quiz Mod APK?</b></li>
103
- <p>You don't need to get more hints in Logo Quiz Mod APK, as it gives you unlimited hints. You can use as many hints as you want without any restrictions. However, if you want to challenge yourself, you can try to guess the logos without using hints or use them sparingly.</p>
104
- <li><b>Is Logo Quiz Mod APK safe to download and install?</b></li>
105
- <p>Logo Quiz Mod APK is safe to download and install, as long as you get it from a reliable website that does not contain any viruses or malware. You should also scan the APK file before installing it on your device, and make sure you have enabled unknown sources in your device settings.</p>
106
- <li><b>Can I play Logo Quiz Mod APK on my PC or iOS device?</b></li>
107
- <p>Logo Quiz Mod APK is designed for Android devices only, so you cannot play it on your PC or iOS device. However, you can use an Android emulator on your PC to run the game, such as BlueStacks or Nox Player. You can also play the original Logo Quiz game on your PC or iOS device by downloading it from the Google Play Store or the App Store.</p>
108
- <li><b>What are some of the best logo quiz games for Android?</b></li>
109
- <p>Some of the best logo quiz games for Android are:</p>
110
- <ul>
111
- <li>Logo Game: Guess Brand Quiz by Logos Box</li>
112
- <li>Guess The Brand - Logo Mania by IcoMania - Logo Quiz - Logos Quiz</li>
113
- <li>Logo Quiz World by MSI Apps</li>
114
- <li>Ultimate Logo Quiz by Bubble Quiz Games</li>
115
- <li>Logo Trivial Quiz by Carlos Alcarria</li>
116
- </ul></p> 401be4b1e0<br />
117
- <br />
118
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/2023Liu2023/bingo/src/lib/hooks/chat-history.ts DELETED
@@ -1,62 +0,0 @@
1
- import { zip } from 'lodash-es'
2
- import { ChatMessageModel, BotId } from '@/lib/bots/bing/types'
3
- import { Storage } from '../storage'
4
-
5
- /**
6
- * conversations:$botId => Conversation[]
7
- * conversation:$botId:$cid:messages => ChatMessageModel[]
8
- */
9
-
10
- interface Conversation {
11
- id: string
12
- createdAt: number
13
- }
14
-
15
- type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] }
16
-
17
- async function loadHistoryConversations(botId: BotId): Promise<Conversation[]> {
18
- const key = `conversations:${botId}`
19
- const { [key]: value } = await Storage.get(key)
20
- return value || []
21
- }
22
-
23
- async function deleteHistoryConversation(botId: BotId, cid: string) {
24
- const conversations = await loadHistoryConversations(botId)
25
- const newConversations = conversations.filter((c) => c.id !== cid)
26
- await Storage.set({ [`conversations:${botId}`]: newConversations })
27
- }
28
-
29
- async function loadConversationMessages(botId: BotId, cid: string): Promise<ChatMessageModel[]> {
30
- const key = `conversation:${botId}:${cid}:messages`
31
- const { [key]: value } = await Storage.get(key)
32
- return value || []
33
- }
34
-
35
- export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) {
36
- const conversations = await loadHistoryConversations(botId)
37
- if (!conversations.some((c) => c.id === cid)) {
38
- conversations.unshift({ id: cid, createdAt: Date.now() })
39
- await Storage.set({ [`conversations:${botId}`]: conversations })
40
- }
41
- const key = `conversation:${botId}:${cid}:messages`
42
- await Storage.set({ [key]: messages })
43
- }
44
-
45
- export async function loadHistoryMessages(botId: BotId): Promise<ConversationWithMessages[]> {
46
- const conversations = await loadHistoryConversations(botId)
47
- const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id)))
48
- return zip(conversations, messagesList).map(([c, messages]) => ({
49
- id: c!.id,
50
- createdAt: c!.createdAt,
51
- messages: messages!,
52
- }))
53
- }
54
-
55
- export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) {
56
- const messages = await loadConversationMessages(botId, conversationId)
57
- const newMessages = messages.filter((m) => m.id !== messageId)
58
- await setConversationMessages(botId, conversationId, newMessages)
59
- if (!newMessages.length) {
60
- await deleteHistoryConversation(botId, conversationId)
61
- }
62
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/839871171w/newbingAI/Dockerfile DELETED
@@ -1,34 +0,0 @@
1
- # Build Stage
2
- # 使用 golang:alpine 作为构建阶段的基础镜像
3
- FROM golang:alpine AS builder
4
-
5
- # 添加 git,以便之后能从GitHub克隆项目
6
- RUN apk --no-cache add git
7
-
8
- # 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下
9
- RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
10
-
11
- # 设置工作目录为之前克隆的项目目录
12
- WORKDIR /workspace/app
13
-
14
- # 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小
15
- RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
16
-
17
- # Runtime Stage
18
- # 使用轻量级的 alpine 镜像作为运行时的基础镜像
19
- FROM alpine
20
-
21
- # 设置工作目录
22
- WORKDIR /workspace/app
23
-
24
- # 从构建阶段复制编译后的二进制文件到运行时镜像中
25
- COPY --from=builder /workspace/app/go-proxy-bingai .
26
-
27
- # 设置环境变量,此处为随机字符
28
- ENV Go_Proxy_BingAI_USER_TOKEN_1="1B4yYaAOHZW5A5NEj-BnRHc80SQbg-Mlu83S-t36PdJ8LU_pqBAxHhhNqgpGSWGfRlGepqeiaYk2sKyR8a8w8ehi6V5SenYspGG0DC0n0iHuML-VoNMsbH64tPWJNPwzpBlse3566VRGzaOafGtk8gk1SX1dYvkFvzlK1hucI40aMUKOO2sjmiMFU1lgEgWu2ZPMYIoIZ_pnw32mlIgRdn1XA6Kml6GFf_3_2oYt6Fw4"
29
-
30
- # 暴露8080端口
31
- EXPOSE 8080
32
-
33
- # 容器启动时运行的命令
34
- CMD ["/workspace/app/go-proxy-bingai"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/A666sxr/Genshin_TTS/utils.py DELETED
@@ -1,263 +0,0 @@
1
- import os
2
- import glob
3
- import sys
4
- import argparse
5
- import logging
6
- import json
7
- import subprocess
8
- import numpy as np
9
- from scipy.io.wavfile import read
10
- import torch
11
-
12
- MATPLOTLIB_FLAG = False
13
-
14
- logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
15
- logger = logging
16
-
17
-
18
- def load_checkpoint(checkpoint_path, model, optimizer=None):
19
- assert os.path.isfile(checkpoint_path)
20
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
21
- iteration = checkpoint_dict['iteration']
22
- learning_rate = checkpoint_dict['learning_rate']
23
- if optimizer is not None:
24
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
25
- saved_state_dict = checkpoint_dict['model']
26
- if hasattr(model, 'module'):
27
- state_dict = model.module.state_dict()
28
- else:
29
- state_dict = model.state_dict()
30
- new_state_dict= {}
31
- for k, v in state_dict.items():
32
- try:
33
- new_state_dict[k] = saved_state_dict[k]
34
- except:
35
- logger.info("%s is not in the checkpoint" % k)
36
- new_state_dict[k] = v
37
- if hasattr(model, 'module'):
38
- model.module.load_state_dict(new_state_dict)
39
- else:
40
- model.load_state_dict(new_state_dict)
41
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
42
- checkpoint_path, iteration))
43
- return model, optimizer, learning_rate, iteration
44
-
45
-
46
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
47
- ckptname = checkpoint_path.split("/")[-1]
48
- newest_step = int(ckptname.split(".")[0].split("_")[1])
49
- last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step-3000))
50
- if newest_step >= 3000:
51
- os.system(f"rm {last_ckptname}")
52
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
53
- iteration, checkpoint_path))
54
- if hasattr(model, 'module'):
55
- state_dict = model.module.state_dict()
56
- else:
57
- state_dict = model.state_dict()
58
- torch.save({'model': state_dict,
59
- 'iteration': iteration,
60
- 'optimizer': optimizer.state_dict(),
61
- 'learning_rate': learning_rate}, checkpoint_path)
62
-
63
-
64
- def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
65
- for k, v in scalars.items():
66
- writer.add_scalar(k, v, global_step)
67
- for k, v in histograms.items():
68
- writer.add_histogram(k, v, global_step)
69
- for k, v in images.items():
70
- writer.add_image(k, v, global_step, dataformats='HWC')
71
- for k, v in audios.items():
72
- writer.add_audio(k, v, global_step, audio_sampling_rate)
73
-
74
-
75
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
76
- f_list = glob.glob(os.path.join(dir_path, regex))
77
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
78
- x = f_list[-1]
79
- print(x)
80
- return x
81
-
82
-
83
- def plot_spectrogram_to_numpy(spectrogram):
84
- global MATPLOTLIB_FLAG
85
- if not MATPLOTLIB_FLAG:
86
- import matplotlib
87
- matplotlib.use("Agg")
88
- MATPLOTLIB_FLAG = True
89
- mpl_logger = logging.getLogger('matplotlib')
90
- mpl_logger.setLevel(logging.WARNING)
91
- import matplotlib.pylab as plt
92
- import numpy as np
93
-
94
- fig, ax = plt.subplots(figsize=(10,2))
95
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
96
- interpolation='none')
97
- plt.colorbar(im, ax=ax)
98
- plt.xlabel("Frames")
99
- plt.ylabel("Channels")
100
- plt.tight_layout()
101
-
102
- fig.canvas.draw()
103
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
104
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
105
- plt.close()
106
- return data
107
-
108
-
109
- def plot_alignment_to_numpy(alignment, info=None):
110
- global MATPLOTLIB_FLAG
111
- if not MATPLOTLIB_FLAG:
112
- import matplotlib
113
- matplotlib.use("Agg")
114
- MATPLOTLIB_FLAG = True
115
- mpl_logger = logging.getLogger('matplotlib')
116
- mpl_logger.setLevel(logging.WARNING)
117
- import matplotlib.pylab as plt
118
- import numpy as np
119
-
120
- fig, ax = plt.subplots(figsize=(6, 4))
121
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
122
- interpolation='none')
123
- fig.colorbar(im, ax=ax)
124
- xlabel = 'Decoder timestep'
125
- if info is not None:
126
- xlabel += '\n\n' + info
127
- plt.xlabel(xlabel)
128
- plt.ylabel('Encoder timestep')
129
- plt.tight_layout()
130
-
131
- fig.canvas.draw()
132
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
133
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
134
- plt.close()
135
- return data
136
-
137
-
138
- def load_wav_to_torch(full_path):
139
- sampling_rate, data = read(full_path)
140
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
141
-
142
-
143
- def load_filepaths_and_text(filename, split="|"):
144
- with open(filename, encoding='utf-8') as f:
145
- filepaths_and_text = [line.strip().split(split) for line in f]
146
- return filepaths_and_text
147
-
148
-
149
- def get_hparams(init=True):
150
- parser = argparse.ArgumentParser()
151
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
152
- help='JSON file for configuration')
153
- parser.add_argument('-m', '--model', type=str, required=True,
154
- help='Model name')
155
-
156
- args = parser.parse_args()
157
- model_dir = os.path.join("./logs", args.model)
158
-
159
- if not os.path.exists(model_dir):
160
- os.makedirs(model_dir)
161
-
162
- config_path = args.config
163
- config_save_path = os.path.join(model_dir, "config.json")
164
- if init:
165
- with open(config_path, "r") as f:
166
- data = f.read()
167
- with open(config_save_path, "w") as f:
168
- f.write(data)
169
- else:
170
- with open(config_save_path, "r") as f:
171
- data = f.read()
172
- config = json.loads(data)
173
-
174
- hparams = HParams(**config)
175
- hparams.model_dir = model_dir
176
- return hparams
177
-
178
-
179
- def get_hparams_from_dir(model_dir):
180
- config_save_path = os.path.join(model_dir, "config.json")
181
- with open(config_save_path, "r") as f:
182
- data = f.read()
183
- config = json.loads(data)
184
-
185
- hparams =HParams(**config)
186
- hparams.model_dir = model_dir
187
- return hparams
188
-
189
-
190
- def get_hparams_from_file(config_path):
191
- with open(config_path, "r") as f:
192
- data = f.read()
193
- config = json.loads(data)
194
-
195
- hparams =HParams(**config)
196
- return hparams
197
-
198
-
199
- def check_git_hash(model_dir):
200
- source_dir = os.path.dirname(os.path.realpath(__file__))
201
- if not os.path.exists(os.path.join(source_dir, ".git")):
202
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
203
- source_dir
204
- ))
205
- return
206
-
207
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
208
-
209
- path = os.path.join(model_dir, "githash")
210
- if os.path.exists(path):
211
- saved_hash = open(path).read()
212
- if saved_hash != cur_hash:
213
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
214
- saved_hash[:8], cur_hash[:8]))
215
- else:
216
- open(path, "w").write(cur_hash)
217
-
218
-
219
- def get_logger(model_dir, filename="train.log"):
220
- global logger
221
- logger = logging.getLogger(os.path.basename(model_dir))
222
- logger.setLevel(logging.DEBUG)
223
-
224
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
225
- if not os.path.exists(model_dir):
226
- os.makedirs(model_dir)
227
- h = logging.FileHandler(os.path.join(model_dir, filename))
228
- h.setLevel(logging.DEBUG)
229
- h.setFormatter(formatter)
230
- logger.addHandler(h)
231
- return logger
232
-
233
-
234
- class HParams():
235
- def __init__(self, **kwargs):
236
- for k, v in kwargs.items():
237
- if type(v) == dict:
238
- v = HParams(**v)
239
- self[k] = v
240
-
241
- def keys(self):
242
- return self.__dict__.keys()
243
-
244
- def items(self):
245
- return self.__dict__.items()
246
-
247
- def values(self):
248
- return self.__dict__.values()
249
-
250
- def __len__(self):
251
- return len(self.__dict__)
252
-
253
- def __getitem__(self, key):
254
- return getattr(self, key)
255
-
256
- def __setitem__(self, key, value):
257
- return setattr(self, key, value)
258
-
259
- def __contains__(self, key):
260
- return key in self.__dict__
261
-
262
- def __repr__(self):
263
- return self.__dict__.__repr__()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AB-TW/team-ai/agents/code_generate_agent.py DELETED
@@ -1,229 +0,0 @@
1
- import re
2
- from typing import List, Union
3
- from langchain.chains import LLMChain
4
- from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor, AgentOutputParser
5
- from langchain.schema import AgentAction, AgentFinish
6
- from langchain.agents import initialize_agent
7
- from langchain.prompts import StringPromptTemplate
8
- from agents.promopts import code_generate_agent_template
9
- from agents.tools.smart_domain.api_layer_code_tool import apiLayerCodeGenerator
10
- from agents.tools.smart_domain.domain_layer_code_tool import domainLayerCodeGenerator
11
- from agents.tools.smart_domain.entity import entityCodeGenerator
12
- from agents.tools.smart_domain.association import associationCodeGenerator
13
- from agents.tools.smart_domain.db_entity_repository import dbEntityRepositoryCodeGenerator
14
- from agents.tools.smart_domain.association_impl import asociationImplCodeGenerator
15
- from agents.tools.smart_domain.persistent_layer_code_tool import persistentLayerCodeGenerator
16
- from models import llm
17
-
18
-
19
- class CustomPromptTemplate(StringPromptTemplate):
20
- # The template to use
21
- template: str
22
- # The list of tools available
23
- tools: List[Tool]
24
-
25
- def format(self, **kwargs) -> str:
26
- # Get the intermediate steps (AgentAction, Observation tuples)
27
- # Format them in a particular way
28
- intermediate_steps = kwargs.pop("intermediate_steps")
29
- thoughts = ""
30
- for action, observation in intermediate_steps:
31
- thoughts += action.log
32
- thoughts += f"\nObservation: {observation}\nThought: "
33
- # Set the agent_scratchpad variable to that value
34
- kwargs["agent_scratchpad"] = thoughts
35
- # Create a tools variable from the list of tools provided
36
- kwargs["tools"] = "\n".join(
37
- [f"{tool.name}: {tool.description}" for tool in self.tools])
38
- # Create a list of tool names for the tools provided
39
- kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
40
- return self.template.format(**kwargs)
41
-
42
-
43
- class CustomOutputParser(AgentOutputParser):
44
-
45
- def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
46
- # Check if agent should finish
47
- if "Final Answer:" in llm_output:
48
- return AgentFinish(
49
- # Return values is generally always a dictionary with a single `output` key
50
- # It is not recommended to try anything else at the moment :)
51
- return_values={"output": llm_output.split(
52
- "Final Answer:")[-1].strip()},
53
- log=llm_output,
54
- )
55
- # Parse out the action and action input
56
- regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
57
- match = re.search(regex, llm_output, re.DOTALL)
58
- if not match:
59
- raise ValueError(f"Could not parse LLM output: `{llm_output}`")
60
- action = match.group(1).strip()
61
- action_input = match.group(2)
62
- # Return the action and action input
63
- return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
64
-
65
- # chatllm=ChatOpenAI(temperature=0)
66
- # code_genenrate_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
67
- # code_generate_agent = initialize_agent(tools, chatllm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, memory=memory, verbose=True)
68
-
69
-
70
-
71
- # agent = initialize_agent(
72
- # tools=tools, llm=llm_chain, template=AGENT_PROMPT, stop=["\nObservation:"], agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
73
- code_agent_tools = [domainLayerCodeGenerator, entityCodeGenerator, associationCodeGenerator, persistentLayerCodeGenerator, dbEntityRepositoryCodeGenerator, asociationImplCodeGenerator, apiLayerCodeGenerator]
74
-
75
- def code_agent_executor() -> AgentExecutor:
76
- output_parser = CustomOutputParser()
77
- AGENT_PROMPT = CustomPromptTemplate(
78
- template=code_generate_agent_template,
79
- tools=code_agent_tools,
80
- # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
81
- # This includes the `intermediate_steps` variable because that is needed
82
- input_variables=["input", "intermediate_steps"]
83
- )
84
-
85
- code_llm_chain = LLMChain(llm=llm(temperature=0.7), prompt=AGENT_PROMPT)
86
-
87
- tool_names = [tool.name for tool in code_agent_tools]
88
- code_agent = LLMSingleActionAgent(
89
- llm_chain=code_llm_chain,
90
- output_parser=output_parser,
91
- stop=["\nObservation:"],
92
- allowed_tools=tool_names,
93
- )
94
-
95
- code_agent_executor = AgentExecutor.from_agent_and_tools(
96
- agent=code_agent, tools=code_agent_tools, verbose=True)
97
- return code_agent_executor
98
-
99
- # if __name__ == "__main__":
100
- # response = domainLayerChain.run("""FeatureConfig用于配置某个Feature中控制前端展示效果的配置项
101
- # FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间
102
- # FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED)
103
- # FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED
104
- # 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作
105
- # 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作
106
- # 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作
107
- # """)
108
-
109
- # print(response)
110
-
111
-
112
- # response = persistentChain.run("""
113
- # Entity:
114
- # ```
115
- # public class FeatureConfig {
116
- # private FeatureConfigId id;
117
- # private FeatureConfigDescription description;
118
-
119
- # public enum FeatureConfigStatus {
120
- # DRAFT, PUBLISHED, DISABLED;
121
- # }
122
-
123
- # public record FeatureConfigId(String id) {}
124
- # public record FeatureKey(String key) {}
125
- # public record FeatureConfigData(String data) {}
126
- # public record FeatureConfigSaData(String saData) {}
127
-
128
- # @Builder
129
- # public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description,
130
- # FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {}
131
-
132
- # public void update(FeatureConfigDescription description) {
133
- # this.title = description.title();
134
- # this.description = description.description();
135
- # this.updateTime = LocalDateTime.now();
136
- # }
137
-
138
- # public void publish() {
139
- # this.status = FeatureConfigStatus.PUBLISHED;
140
- # this.updateTime = LocalDateTime.now();
141
- # }
142
-
143
- # public void disable() {
144
- # this.status = FeatureConfigStatus.DISABLED;
145
- # this.updateTime = LocalDateTime.now();
146
- # }
147
- # }
148
- # ```
149
-
150
- # Association:
151
- # ```
152
- # public interface FeatureConfigs {
153
- # Flux<FeatureConfig> findAllByFeatureKey(String featureKey);
154
- # Mono<FeatureConfig> findById(FeatureConfigId id);
155
- # Mono<FeatureConfig> save(FeatureConfig featureConfig);
156
- # }
157
- # ```
158
- # """)
159
-
160
- # print(response)
161
-
162
-
163
- # response = apiChain.run("""
164
- # Entity:
165
- # ```
166
- # public class FeatureConfig {
167
- # private FeatureConfigId id;
168
- # private FeatureConfigDescription description;
169
-
170
- # public enum FeatureConfigStatus {
171
- # DRAFT, PUBLISHED, DISABLED;
172
- # }
173
-
174
- # public record FeatureConfigId(String id) {}
175
- # public record FeatureKey(String key) {}
176
- # public record FeatureConfigData(String data) {}
177
- # public record FeatureConfigSaData(String saData) {}
178
-
179
- # @Builder
180
- # public record FeatureConfigDescription(FeatureKey featureKey, FeatureConfigData data, FeatureConfigSaData saData, String title, String description,
181
- # FeatureConfigStatus status, LocalDateTime createTime, LocalDateTime updateTime) {}
182
-
183
- # public void update(FeatureConfigDescription description) {
184
- # this.title = description.title();
185
- # this.description = description.description();
186
- # this.updateTime = LocalDateTime.now();
187
- # }
188
-
189
- # public void publish() {
190
- # this.status = FeatureConfigStatus.PUBLISHED;
191
- # this.updateTime = LocalDateTime.now();
192
- # }
193
-
194
- # public void disable() {
195
- # this.status = FeatureConfigStatus.DISABLED;
196
- # this.updateTime = LocalDateTime.now();
197
- # }
198
- # }
199
- # ```
200
-
201
- # Association:
202
- # ```
203
- # public interface FeatureConfigs {
204
- # Flux<FeatureConfig> findAllByFeatureKey(String featureKey);
205
- # Mono<FeatureConfig> findById(FeatureConfigId id);
206
- # Mono<FeatureConfig> save(FeatureConfig featureConfig);
207
- # Mono<Void> update(FeatureConfigId id, FeatureConfigDescription description);
208
- # Mono<Void> publish(FeatureConfigId id);
209
- # Mono<Void> disable(FeatureConfigId id);
210
- # }
211
- # ```
212
- # """)
213
-
214
- # print(response)
215
-
216
- # if __name__ == "code_generate":
217
- # response = code_agent_executor.run("""
218
- # 根据如下需求generate domain layer code:
219
- # ---
220
- # FeatureConfig用于配置某个Feature中控制前端展示效果的配置项
221
- # FeatureConfig主要属性包括:featureKey(feature标识)、data(配置数据)、saData(埋点数据)、status(状态)、标题、描述、创建时间、更新时间
222
- # FeatureConfig中status为枚举值,取值范围为(DRAFT、PUBLISHED、DISABLED)
223
- # FeatureConfig新增后status为DRAFT、执行发布操作后变为PUBLISHED、执行撤销操作后变为DISABLED
224
- # 状态为DRAFT的FeatureConfig可以执行编辑、发布、撤销操作
225
- # 发布后FeatureConfig变为PUBLISHED状态,可以执行撤销操作
226
- # 撤销后FeatureConfig变为DISABLED状态,不可以执行编辑、发布、撤销操作
227
- # ---
228
- # """)
229
- # print(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/models.py DELETED
@@ -1,1124 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from infer_pack import modules
7
- from infer_pack import attentions
8
- from infer_pack import commons
9
- from infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from infer_pack.commons import init_weights
13
- import numpy as np
14
- from infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder768(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(768, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch == None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- stats = self.proj(x) * x_mask
106
-
107
- m, logs = torch.split(stats, self.out_channels, dim=1)
108
- return m, logs, x_mask
109
-
110
-
111
- class ResidualCouplingBlock(nn.Module):
112
- def __init__(
113
- self,
114
- channels,
115
- hidden_channels,
116
- kernel_size,
117
- dilation_rate,
118
- n_layers,
119
- n_flows=4,
120
- gin_channels=0,
121
- ):
122
- super().__init__()
123
- self.channels = channels
124
- self.hidden_channels = hidden_channels
125
- self.kernel_size = kernel_size
126
- self.dilation_rate = dilation_rate
127
- self.n_layers = n_layers
128
- self.n_flows = n_flows
129
- self.gin_channels = gin_channels
130
-
131
- self.flows = nn.ModuleList()
132
- for i in range(n_flows):
133
- self.flows.append(
134
- modules.ResidualCouplingLayer(
135
- channels,
136
- hidden_channels,
137
- kernel_size,
138
- dilation_rate,
139
- n_layers,
140
- gin_channels=gin_channels,
141
- mean_only=True,
142
- )
143
- )
144
- self.flows.append(modules.Flip())
145
-
146
- def forward(self, x, x_mask, g=None, reverse=False):
147
- if not reverse:
148
- for flow in self.flows:
149
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
- else:
151
- for flow in reversed(self.flows):
152
- x = flow(x, x_mask, g=g, reverse=reverse)
153
- return x
154
-
155
- def remove_weight_norm(self):
156
- for i in range(self.n_flows):
157
- self.flows[i * 2].remove_weight_norm()
158
-
159
-
160
- class PosteriorEncoder(nn.Module):
161
- def __init__(
162
- self,
163
- in_channels,
164
- out_channels,
165
- hidden_channels,
166
- kernel_size,
167
- dilation_rate,
168
- n_layers,
169
- gin_channels=0,
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.hidden_channels = hidden_channels
175
- self.kernel_size = kernel_size
176
- self.dilation_rate = dilation_rate
177
- self.n_layers = n_layers
178
- self.gin_channels = gin_channels
179
-
180
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
- self.enc = modules.WN(
182
- hidden_channels,
183
- kernel_size,
184
- dilation_rate,
185
- n_layers,
186
- gin_channels=gin_channels,
187
- )
188
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
-
190
- def forward(self, x, x_lengths, g=None):
191
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
- x.dtype
193
- )
194
- x = self.pre(x) * x_mask
195
- x = self.enc(x, x_mask, g=g)
196
- stats = self.proj(x) * x_mask
197
- m, logs = torch.split(stats, self.out_channels, dim=1)
198
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
- return z, m, logs, x_mask
200
-
201
- def remove_weight_norm(self):
202
- self.enc.remove_weight_norm()
203
-
204
-
205
- class Generator(torch.nn.Module):
206
- def __init__(
207
- self,
208
- initial_channel,
209
- resblock,
210
- resblock_kernel_sizes,
211
- resblock_dilation_sizes,
212
- upsample_rates,
213
- upsample_initial_channel,
214
- upsample_kernel_sizes,
215
- gin_channels=0,
216
- ):
217
- super(Generator, self).__init__()
218
- self.num_kernels = len(resblock_kernel_sizes)
219
- self.num_upsamples = len(upsample_rates)
220
- self.conv_pre = Conv1d(
221
- initial_channel, upsample_initial_channel, 7, 1, padding=3
222
- )
223
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
-
225
- self.ups = nn.ModuleList()
226
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
- self.ups.append(
228
- weight_norm(
229
- ConvTranspose1d(
230
- upsample_initial_channel // (2**i),
231
- upsample_initial_channel // (2 ** (i + 1)),
232
- k,
233
- u,
234
- padding=(k - u) // 2,
235
- )
236
- )
237
- )
238
-
239
- self.resblocks = nn.ModuleList()
240
- for i in range(len(self.ups)):
241
- ch = upsample_initial_channel // (2 ** (i + 1))
242
- for j, (k, d) in enumerate(
243
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
- ):
245
- self.resblocks.append(resblock(ch, k, d))
246
-
247
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
- self.ups.apply(init_weights)
249
-
250
- if gin_channels != 0:
251
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
-
253
- def forward(self, x, g=None):
254
- x = self.conv_pre(x)
255
- if g is not None:
256
- x = x + self.cond(g)
257
-
258
- for i in range(self.num_upsamples):
259
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
- x = self.ups[i](x)
261
- xs = None
262
- for j in range(self.num_kernels):
263
- if xs is None:
264
- xs = self.resblocks[i * self.num_kernels + j](x)
265
- else:
266
- xs += self.resblocks[i * self.num_kernels + j](x)
267
- x = xs / self.num_kernels
268
- x = F.leaky_relu(x)
269
- x = self.conv_post(x)
270
- x = torch.tanh(x)
271
-
272
- return x
273
-
274
- def remove_weight_norm(self):
275
- for l in self.ups:
276
- remove_weight_norm(l)
277
- for l in self.resblocks:
278
- l.remove_weight_norm()
279
-
280
-
281
- class SineGen(torch.nn.Module):
282
- """Definition of sine generator
283
- SineGen(samp_rate, harmonic_num = 0,
284
- sine_amp = 0.1, noise_std = 0.003,
285
- voiced_threshold = 0,
286
- flag_for_pulse=False)
287
- samp_rate: sampling rate in Hz
288
- harmonic_num: number of harmonic overtones (default 0)
289
- sine_amp: amplitude of sine-wavefrom (default 0.1)
290
- noise_std: std of Gaussian noise (default 0.003)
291
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
292
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
293
- Note: when flag_for_pulse is True, the first time step of a voiced
294
- segment is always sin(np.pi) or cos(0)
295
- """
296
-
297
- def __init__(
298
- self,
299
- samp_rate,
300
- harmonic_num=0,
301
- sine_amp=0.1,
302
- noise_std=0.003,
303
- voiced_threshold=0,
304
- flag_for_pulse=False,
305
- ):
306
- super(SineGen, self).__init__()
307
- self.sine_amp = sine_amp
308
- self.noise_std = noise_std
309
- self.harmonic_num = harmonic_num
310
- self.dim = self.harmonic_num + 1
311
- self.sampling_rate = samp_rate
312
- self.voiced_threshold = voiced_threshold
313
-
314
- def _f02uv(self, f0):
315
- # generate uv signal
316
- uv = torch.ones_like(f0)
317
- uv = uv * (f0 > self.voiced_threshold)
318
- return uv
319
-
320
- def forward(self, f0, upp):
321
- """sine_tensor, uv = forward(f0)
322
- input F0: tensor(batchsize=1, length, dim=1)
323
- f0 for unvoiced steps should be 0
324
- output sine_tensor: tensor(batchsize=1, length, dim)
325
- output uv: tensor(batchsize=1, length, 1)
326
- """
327
- with torch.no_grad():
328
- f0 = f0[:, None].transpose(1, 2)
329
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
- # fundamental component
331
- f0_buf[:, :, 0] = f0[:, :, 0]
332
- for idx in np.arange(self.harmonic_num):
333
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
- idx + 2
335
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
- rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化
337
- rand_ini = torch.rand(
338
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
- )
340
- rand_ini[:, 0] = 0
341
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化
343
- tmp_over_one *= upp
344
- tmp_over_one = F.interpolate(
345
- tmp_over_one.transpose(2, 1),
346
- scale_factor=upp,
347
- mode="linear",
348
- align_corners=True,
349
- ).transpose(2, 1)
350
- rad_values = F.interpolate(
351
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
- ).transpose(
353
- 2, 1
354
- ) #######
355
- tmp_over_one %= 1
356
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
- cumsum_shift = torch.zeros_like(rad_values)
358
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
- sine_waves = torch.sin(
360
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
- )
362
- sine_waves = sine_waves * self.sine_amp
363
- uv = self._f02uv(f0)
364
- uv = F.interpolate(
365
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
- ).transpose(2, 1)
367
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
- noise = noise_amp * torch.randn_like(sine_waves)
369
- sine_waves = sine_waves * uv + noise
370
- return sine_waves, uv, noise
371
-
372
-
373
- class SourceModuleHnNSF(torch.nn.Module):
374
- """SourceModule for hn-nsf
375
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
- add_noise_std=0.003, voiced_threshod=0)
377
- sampling_rate: sampling_rate in Hz
378
- harmonic_num: number of harmonic above F0 (default: 0)
379
- sine_amp: amplitude of sine source signal (default: 0.1)
380
- add_noise_std: std of additive Gaussian noise (default: 0.003)
381
- note that amplitude of noise in unvoiced is decided
382
- by sine_amp
383
- voiced_threshold: threhold to set U/V given F0 (default: 0)
384
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
- F0_sampled (batchsize, length, 1)
386
- Sine_source (batchsize, length, 1)
387
- noise_source (batchsize, length 1)
388
- uv (batchsize, length, 1)
389
- """
390
-
391
- def __init__(
392
- self,
393
- sampling_rate,
394
- harmonic_num=0,
395
- sine_amp=0.1,
396
- add_noise_std=0.003,
397
- voiced_threshod=0,
398
- is_half=True,
399
- ):
400
- super(SourceModuleHnNSF, self).__init__()
401
-
402
- self.sine_amp = sine_amp
403
- self.noise_std = add_noise_std
404
- self.is_half = is_half
405
- # to produce sine waveforms
406
- self.l_sin_gen = SineGen(
407
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
- )
409
-
410
- # to merge source harmonics into a single excitation
411
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
- self.l_tanh = torch.nn.Tanh()
413
-
414
- def forward(self, x, upp=None):
415
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
- if self.is_half:
417
- sine_wavs = sine_wavs.half()
418
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
- return sine_merge, None, None # noise, uv
420
-
421
-
422
- class GeneratorNSF(torch.nn.Module):
423
- def __init__(
424
- self,
425
- initial_channel,
426
- resblock,
427
- resblock_kernel_sizes,
428
- resblock_dilation_sizes,
429
- upsample_rates,
430
- upsample_initial_channel,
431
- upsample_kernel_sizes,
432
- gin_channels,
433
- sr,
434
- is_half=False,
435
- ):
436
- super(GeneratorNSF, self).__init__()
437
- self.num_kernels = len(resblock_kernel_sizes)
438
- self.num_upsamples = len(upsample_rates)
439
-
440
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
- self.m_source = SourceModuleHnNSF(
442
- sampling_rate=sr, harmonic_num=0, is_half=is_half
443
- )
444
- self.noise_convs = nn.ModuleList()
445
- self.conv_pre = Conv1d(
446
- initial_channel, upsample_initial_channel, 7, 1, padding=3
447
- )
448
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
-
450
- self.ups = nn.ModuleList()
451
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
- c_cur = upsample_initial_channel // (2 ** (i + 1))
453
- self.ups.append(
454
- weight_norm(
455
- ConvTranspose1d(
456
- upsample_initial_channel // (2**i),
457
- upsample_initial_channel // (2 ** (i + 1)),
458
- k,
459
- u,
460
- padding=(k - u) // 2,
461
- )
462
- )
463
- )
464
- if i + 1 < len(upsample_rates):
465
- stride_f0 = np.prod(upsample_rates[i + 1 :])
466
- self.noise_convs.append(
467
- Conv1d(
468
- 1,
469
- c_cur,
470
- kernel_size=stride_f0 * 2,
471
- stride=stride_f0,
472
- padding=stride_f0 // 2,
473
- )
474
- )
475
- else:
476
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
-
478
- self.resblocks = nn.ModuleList()
479
- for i in range(len(self.ups)):
480
- ch = upsample_initial_channel // (2 ** (i + 1))
481
- for j, (k, d) in enumerate(
482
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
- ):
484
- self.resblocks.append(resblock(ch, k, d))
485
-
486
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
- self.ups.apply(init_weights)
488
-
489
- if gin_channels != 0:
490
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
-
492
- self.upp = np.prod(upsample_rates)
493
-
494
- def forward(self, x, f0, g=None):
495
- har_source, noi_source, uv = self.m_source(f0, self.upp)
496
- har_source = har_source.transpose(1, 2)
497
- x = self.conv_pre(x)
498
- if g is not None:
499
- x = x + self.cond(g)
500
-
501
- for i in range(self.num_upsamples):
502
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
- x = self.ups[i](x)
504
- x_source = self.noise_convs[i](har_source)
505
- x = x + x_source
506
- xs = None
507
- for j in range(self.num_kernels):
508
- if xs is None:
509
- xs = self.resblocks[i * self.num_kernels + j](x)
510
- else:
511
- xs += self.resblocks[i * self.num_kernels + j](x)
512
- x = xs / self.num_kernels
513
- x = F.leaky_relu(x)
514
- x = self.conv_post(x)
515
- x = torch.tanh(x)
516
- return x
517
-
518
- def remove_weight_norm(self):
519
- for l in self.ups:
520
- remove_weight_norm(l)
521
- for l in self.resblocks:
522
- l.remove_weight_norm()
523
-
524
-
525
- sr2sr = {
526
- "32k": 32000,
527
- "40k": 40000,
528
- "48k": 48000,
529
- }
530
-
531
-
532
- class SynthesizerTrnMs256NSFsid(nn.Module):
533
- def __init__(
534
- self,
535
- spec_channels,
536
- segment_size,
537
- inter_channels,
538
- hidden_channels,
539
- filter_channels,
540
- n_heads,
541
- n_layers,
542
- kernel_size,
543
- p_dropout,
544
- resblock,
545
- resblock_kernel_sizes,
546
- resblock_dilation_sizes,
547
- upsample_rates,
548
- upsample_initial_channel,
549
- upsample_kernel_sizes,
550
- spk_embed_dim,
551
- gin_channels,
552
- sr,
553
- **kwargs
554
- ):
555
- super().__init__()
556
- if type(sr) == type("strr"):
557
- sr = sr2sr[sr]
558
- self.spec_channels = spec_channels
559
- self.inter_channels = inter_channels
560
- self.hidden_channels = hidden_channels
561
- self.filter_channels = filter_channels
562
- self.n_heads = n_heads
563
- self.n_layers = n_layers
564
- self.kernel_size = kernel_size
565
- self.p_dropout = p_dropout
566
- self.resblock = resblock
567
- self.resblock_kernel_sizes = resblock_kernel_sizes
568
- self.resblock_dilation_sizes = resblock_dilation_sizes
569
- self.upsample_rates = upsample_rates
570
- self.upsample_initial_channel = upsample_initial_channel
571
- self.upsample_kernel_sizes = upsample_kernel_sizes
572
- self.segment_size = segment_size
573
- self.gin_channels = gin_channels
574
- # self.hop_length = hop_length#
575
- self.spk_embed_dim = spk_embed_dim
576
- self.enc_p = TextEncoder256(
577
- inter_channels,
578
- hidden_channels,
579
- filter_channels,
580
- n_heads,
581
- n_layers,
582
- kernel_size,
583
- p_dropout,
584
- )
585
- self.dec = GeneratorNSF(
586
- inter_channels,
587
- resblock,
588
- resblock_kernel_sizes,
589
- resblock_dilation_sizes,
590
- upsample_rates,
591
- upsample_initial_channel,
592
- upsample_kernel_sizes,
593
- gin_channels=gin_channels,
594
- sr=sr,
595
- is_half=kwargs["is_half"],
596
- )
597
- self.enc_q = PosteriorEncoder(
598
- spec_channels,
599
- inter_channels,
600
- hidden_channels,
601
- 5,
602
- 1,
603
- 16,
604
- gin_channels=gin_channels,
605
- )
606
- self.flow = ResidualCouplingBlock(
607
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
- )
609
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
-
612
- def remove_weight_norm(self):
613
- self.dec.remove_weight_norm()
614
- self.flow.remove_weight_norm()
615
- self.enc_q.remove_weight_norm()
616
-
617
- def forward(
618
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
- ): # 这里ds是id,[bs,1]
620
- # print(1,pitch.shape)#[bs,t]
621
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
622
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
- z_p = self.flow(z, y_mask, g=g)
625
- z_slice, ids_slice = commons.rand_slice_segments(
626
- z, y_lengths, self.segment_size
627
- )
628
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
- # print(-2,pitchf.shape,z_slice.shape)
631
- o = self.dec(z_slice, pitchf, g=g)
632
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
-
634
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
635
- g = self.emb_g(sid).unsqueeze(-1)
636
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
- z = self.flow(z_p, x_mask, g=g, reverse=True)
639
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
640
- return o, x_mask, (z, z_p, m_p, logs_p)
641
-
642
-
643
- class SynthesizerTrnMs768NSFsid(nn.Module):
644
- def __init__(
645
- self,
646
- spec_channels,
647
- segment_size,
648
- inter_channels,
649
- hidden_channels,
650
- filter_channels,
651
- n_heads,
652
- n_layers,
653
- kernel_size,
654
- p_dropout,
655
- resblock,
656
- resblock_kernel_sizes,
657
- resblock_dilation_sizes,
658
- upsample_rates,
659
- upsample_initial_channel,
660
- upsample_kernel_sizes,
661
- spk_embed_dim,
662
- gin_channels,
663
- sr,
664
- **kwargs
665
- ):
666
- super().__init__()
667
- if type(sr) == type("strr"):
668
- sr = sr2sr[sr]
669
- self.spec_channels = spec_channels
670
- self.inter_channels = inter_channels
671
- self.hidden_channels = hidden_channels
672
- self.filter_channels = filter_channels
673
- self.n_heads = n_heads
674
- self.n_layers = n_layers
675
- self.kernel_size = kernel_size
676
- self.p_dropout = p_dropout
677
- self.resblock = resblock
678
- self.resblock_kernel_sizes = resblock_kernel_sizes
679
- self.resblock_dilation_sizes = resblock_dilation_sizes
680
- self.upsample_rates = upsample_rates
681
- self.upsample_initial_channel = upsample_initial_channel
682
- self.upsample_kernel_sizes = upsample_kernel_sizes
683
- self.segment_size = segment_size
684
- self.gin_channels = gin_channels
685
- # self.hop_length = hop_length#
686
- self.spk_embed_dim = spk_embed_dim
687
- self.enc_p = TextEncoder768(
688
- inter_channels,
689
- hidden_channels,
690
- filter_channels,
691
- n_heads,
692
- n_layers,
693
- kernel_size,
694
- p_dropout,
695
- )
696
- self.dec = GeneratorNSF(
697
- inter_channels,
698
- resblock,
699
- resblock_kernel_sizes,
700
- resblock_dilation_sizes,
701
- upsample_rates,
702
- upsample_initial_channel,
703
- upsample_kernel_sizes,
704
- gin_channels=gin_channels,
705
- sr=sr,
706
- is_half=kwargs["is_half"],
707
- )
708
- self.enc_q = PosteriorEncoder(
709
- spec_channels,
710
- inter_channels,
711
- hidden_channels,
712
- 5,
713
- 1,
714
- 16,
715
- gin_channels=gin_channels,
716
- )
717
- self.flow = ResidualCouplingBlock(
718
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
719
- )
720
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
721
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
722
-
723
- def remove_weight_norm(self):
724
- self.dec.remove_weight_norm()
725
- self.flow.remove_weight_norm()
726
- self.enc_q.remove_weight_norm()
727
-
728
- def forward(
729
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
730
- ): # 这里ds是id,[bs,1]
731
- # print(1,pitch.shape)#[bs,t]
732
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
733
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
734
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
735
- z_p = self.flow(z, y_mask, g=g)
736
- z_slice, ids_slice = commons.rand_slice_segments(
737
- z, y_lengths, self.segment_size
738
- )
739
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
740
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
741
- # print(-2,pitchf.shape,z_slice.shape)
742
- o = self.dec(z_slice, pitchf, g=g)
743
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
744
-
745
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
746
- g = self.emb_g(sid).unsqueeze(-1)
747
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
748
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
749
- z = self.flow(z_p, x_mask, g=g, reverse=True)
750
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
751
- return o, x_mask, (z, z_p, m_p, logs_p)
752
-
753
-
754
- class SynthesizerTrnMs256NSFsid_nono(nn.Module):
755
- def __init__(
756
- self,
757
- spec_channels,
758
- segment_size,
759
- inter_channels,
760
- hidden_channels,
761
- filter_channels,
762
- n_heads,
763
- n_layers,
764
- kernel_size,
765
- p_dropout,
766
- resblock,
767
- resblock_kernel_sizes,
768
- resblock_dilation_sizes,
769
- upsample_rates,
770
- upsample_initial_channel,
771
- upsample_kernel_sizes,
772
- spk_embed_dim,
773
- gin_channels,
774
- sr=None,
775
- **kwargs
776
- ):
777
- super().__init__()
778
- self.spec_channels = spec_channels
779
- self.inter_channels = inter_channels
780
- self.hidden_channels = hidden_channels
781
- self.filter_channels = filter_channels
782
- self.n_heads = n_heads
783
- self.n_layers = n_layers
784
- self.kernel_size = kernel_size
785
- self.p_dropout = p_dropout
786
- self.resblock = resblock
787
- self.resblock_kernel_sizes = resblock_kernel_sizes
788
- self.resblock_dilation_sizes = resblock_dilation_sizes
789
- self.upsample_rates = upsample_rates
790
- self.upsample_initial_channel = upsample_initial_channel
791
- self.upsample_kernel_sizes = upsample_kernel_sizes
792
- self.segment_size = segment_size
793
- self.gin_channels = gin_channels
794
- # self.hop_length = hop_length#
795
- self.spk_embed_dim = spk_embed_dim
796
- self.enc_p = TextEncoder256(
797
- inter_channels,
798
- hidden_channels,
799
- filter_channels,
800
- n_heads,
801
- n_layers,
802
- kernel_size,
803
- p_dropout,
804
- f0=False,
805
- )
806
- self.dec = Generator(
807
- inter_channels,
808
- resblock,
809
- resblock_kernel_sizes,
810
- resblock_dilation_sizes,
811
- upsample_rates,
812
- upsample_initial_channel,
813
- upsample_kernel_sizes,
814
- gin_channels=gin_channels,
815
- )
816
- self.enc_q = PosteriorEncoder(
817
- spec_channels,
818
- inter_channels,
819
- hidden_channels,
820
- 5,
821
- 1,
822
- 16,
823
- gin_channels=gin_channels,
824
- )
825
- self.flow = ResidualCouplingBlock(
826
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
827
- )
828
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
829
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
830
-
831
- def remove_weight_norm(self):
832
- self.dec.remove_weight_norm()
833
- self.flow.remove_weight_norm()
834
- self.enc_q.remove_weight_norm()
835
-
836
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
837
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
838
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
839
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
840
- z_p = self.flow(z, y_mask, g=g)
841
- z_slice, ids_slice = commons.rand_slice_segments(
842
- z, y_lengths, self.segment_size
843
- )
844
- o = self.dec(z_slice, g=g)
845
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
846
-
847
- def infer(self, phone, phone_lengths, sid, max_len=None):
848
- g = self.emb_g(sid).unsqueeze(-1)
849
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
850
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
851
- z = self.flow(z_p, x_mask, g=g, reverse=True)
852
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
853
- return o, x_mask, (z, z_p, m_p, logs_p)
854
-
855
-
856
- class SynthesizerTrnMs768NSFsid_nono(nn.Module):
857
- def __init__(
858
- self,
859
- spec_channels,
860
- segment_size,
861
- inter_channels,
862
- hidden_channels,
863
- filter_channels,
864
- n_heads,
865
- n_layers,
866
- kernel_size,
867
- p_dropout,
868
- resblock,
869
- resblock_kernel_sizes,
870
- resblock_dilation_sizes,
871
- upsample_rates,
872
- upsample_initial_channel,
873
- upsample_kernel_sizes,
874
- spk_embed_dim,
875
- gin_channels,
876
- sr=None,
877
- **kwargs
878
- ):
879
- super().__init__()
880
- self.spec_channels = spec_channels
881
- self.inter_channels = inter_channels
882
- self.hidden_channels = hidden_channels
883
- self.filter_channels = filter_channels
884
- self.n_heads = n_heads
885
- self.n_layers = n_layers
886
- self.kernel_size = kernel_size
887
- self.p_dropout = p_dropout
888
- self.resblock = resblock
889
- self.resblock_kernel_sizes = resblock_kernel_sizes
890
- self.resblock_dilation_sizes = resblock_dilation_sizes
891
- self.upsample_rates = upsample_rates
892
- self.upsample_initial_channel = upsample_initial_channel
893
- self.upsample_kernel_sizes = upsample_kernel_sizes
894
- self.segment_size = segment_size
895
- self.gin_channels = gin_channels
896
- # self.hop_length = hop_length#
897
- self.spk_embed_dim = spk_embed_dim
898
- self.enc_p = TextEncoder768(
899
- inter_channels,
900
- hidden_channels,
901
- filter_channels,
902
- n_heads,
903
- n_layers,
904
- kernel_size,
905
- p_dropout,
906
- f0=False,
907
- )
908
- self.dec = Generator(
909
- inter_channels,
910
- resblock,
911
- resblock_kernel_sizes,
912
- resblock_dilation_sizes,
913
- upsample_rates,
914
- upsample_initial_channel,
915
- upsample_kernel_sizes,
916
- gin_channels=gin_channels,
917
- )
918
- self.enc_q = PosteriorEncoder(
919
- spec_channels,
920
- inter_channels,
921
- hidden_channels,
922
- 5,
923
- 1,
924
- 16,
925
- gin_channels=gin_channels,
926
- )
927
- self.flow = ResidualCouplingBlock(
928
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
929
- )
930
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
931
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
932
-
933
- def remove_weight_norm(self):
934
- self.dec.remove_weight_norm()
935
- self.flow.remove_weight_norm()
936
- self.enc_q.remove_weight_norm()
937
-
938
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
939
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
940
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
941
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
942
- z_p = self.flow(z, y_mask, g=g)
943
- z_slice, ids_slice = commons.rand_slice_segments(
944
- z, y_lengths, self.segment_size
945
- )
946
- o = self.dec(z_slice, g=g)
947
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
948
-
949
- def infer(self, phone, phone_lengths, sid, max_len=None):
950
- g = self.emb_g(sid).unsqueeze(-1)
951
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
952
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
953
- z = self.flow(z_p, x_mask, g=g, reverse=True)
954
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
955
- return o, x_mask, (z, z_p, m_p, logs_p)
956
-
957
-
958
- class MultiPeriodDiscriminator(torch.nn.Module):
959
- def __init__(self, use_spectral_norm=False):
960
- super(MultiPeriodDiscriminator, self).__init__()
961
- periods = [2, 3, 5, 7, 11, 17]
962
- # periods = [3, 5, 7, 11, 17, 23, 37]
963
-
964
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
965
- discs = discs + [
966
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
967
- ]
968
- self.discriminators = nn.ModuleList(discs)
969
-
970
- def forward(self, y, y_hat):
971
- y_d_rs = [] #
972
- y_d_gs = []
973
- fmap_rs = []
974
- fmap_gs = []
975
- for i, d in enumerate(self.discriminators):
976
- y_d_r, fmap_r = d(y)
977
- y_d_g, fmap_g = d(y_hat)
978
- # for j in range(len(fmap_r)):
979
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
980
- y_d_rs.append(y_d_r)
981
- y_d_gs.append(y_d_g)
982
- fmap_rs.append(fmap_r)
983
- fmap_gs.append(fmap_g)
984
-
985
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
986
-
987
-
988
- class MultiPeriodDiscriminatorV2(torch.nn.Module):
989
- def __init__(self, use_spectral_norm=False):
990
- super(MultiPeriodDiscriminatorV2, self).__init__()
991
- # periods = [2, 3, 5, 7, 11, 17]
992
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
993
-
994
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
995
- discs = discs + [
996
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
997
- ]
998
- self.discriminators = nn.ModuleList(discs)
999
-
1000
- def forward(self, y, y_hat):
1001
- y_d_rs = [] #
1002
- y_d_gs = []
1003
- fmap_rs = []
1004
- fmap_gs = []
1005
- for i, d in enumerate(self.discriminators):
1006
- y_d_r, fmap_r = d(y)
1007
- y_d_g, fmap_g = d(y_hat)
1008
- # for j in range(len(fmap_r)):
1009
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1010
- y_d_rs.append(y_d_r)
1011
- y_d_gs.append(y_d_g)
1012
- fmap_rs.append(fmap_r)
1013
- fmap_gs.append(fmap_g)
1014
-
1015
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1016
-
1017
-
1018
- class DiscriminatorS(torch.nn.Module):
1019
- def __init__(self, use_spectral_norm=False):
1020
- super(DiscriminatorS, self).__init__()
1021
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1022
- self.convs = nn.ModuleList(
1023
- [
1024
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1025
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1026
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1027
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1028
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1029
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1030
- ]
1031
- )
1032
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1033
-
1034
- def forward(self, x):
1035
- fmap = []
1036
-
1037
- for l in self.convs:
1038
- x = l(x)
1039
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1040
- fmap.append(x)
1041
- x = self.conv_post(x)
1042
- fmap.append(x)
1043
- x = torch.flatten(x, 1, -1)
1044
-
1045
- return x, fmap
1046
-
1047
-
1048
- class DiscriminatorP(torch.nn.Module):
1049
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1050
- super(DiscriminatorP, self).__init__()
1051
- self.period = period
1052
- self.use_spectral_norm = use_spectral_norm
1053
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1054
- self.convs = nn.ModuleList(
1055
- [
1056
- norm_f(
1057
- Conv2d(
1058
- 1,
1059
- 32,
1060
- (kernel_size, 1),
1061
- (stride, 1),
1062
- padding=(get_padding(kernel_size, 1), 0),
1063
- )
1064
- ),
1065
- norm_f(
1066
- Conv2d(
1067
- 32,
1068
- 128,
1069
- (kernel_size, 1),
1070
- (stride, 1),
1071
- padding=(get_padding(kernel_size, 1), 0),
1072
- )
1073
- ),
1074
- norm_f(
1075
- Conv2d(
1076
- 128,
1077
- 512,
1078
- (kernel_size, 1),
1079
- (stride, 1),
1080
- padding=(get_padding(kernel_size, 1), 0),
1081
- )
1082
- ),
1083
- norm_f(
1084
- Conv2d(
1085
- 512,
1086
- 1024,
1087
- (kernel_size, 1),
1088
- (stride, 1),
1089
- padding=(get_padding(kernel_size, 1), 0),
1090
- )
1091
- ),
1092
- norm_f(
1093
- Conv2d(
1094
- 1024,
1095
- 1024,
1096
- (kernel_size, 1),
1097
- 1,
1098
- padding=(get_padding(kernel_size, 1), 0),
1099
- )
1100
- ),
1101
- ]
1102
- )
1103
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1104
-
1105
- def forward(self, x):
1106
- fmap = []
1107
-
1108
- # 1d to 2d
1109
- b, c, t = x.shape
1110
- if t % self.period != 0: # pad first
1111
- n_pad = self.period - (t % self.period)
1112
- x = F.pad(x, (0, n_pad), "reflect")
1113
- t = t + n_pad
1114
- x = x.view(b, c, t // self.period, self.period)
1115
-
1116
- for l in self.convs:
1117
- x = l(x)
1118
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1119
- fmap.append(x)
1120
- x = self.conv_post(x)
1121
- fmap.append(x)
1122
- x = torch.flatten(x, 1, -1)
1123
-
1124
- return x, fmap
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/single_thread_env.py DELETED
@@ -1,5 +0,0 @@
1
- import os
2
-
3
- os.environ["OMP_NUM_THREADS"] = "1"
4
- os.environ['TF_NUM_INTEROP_THREADS'] = '1'
5
- os.environ['TF_NUM_INTRAOP_THREADS'] = '1'
 
 
 
 
 
 
spaces/AIWaves/Debate/src/agents/Component/ToolComponent.py DELETED
@@ -1,887 +0,0 @@
1
- from abc import abstractmethod
2
- import uuid
3
- from text2vec import semantic_search
4
- from utils import (
5
- get_relevant_history,
6
- load_knowledge_base_qa,
7
- load_knowledge_base_UnstructuredFile,
8
- get_embedding,
9
- extract,
10
- )
11
- import json
12
- from typing import Dict, List
13
- import os
14
- from googleapiclient.discovery import build
15
- import requests
16
- from selenium import webdriver
17
- from selenium.webdriver.common.by import By
18
- from selenium.webdriver.support.ui import WebDriverWait
19
- from selenium.webdriver.support import expected_conditions as EC
20
- from bs4 import BeautifulSoup
21
- import base64
22
- import re
23
- from datetime import datetime, timedelta
24
- from typing import Tuple, List, Any, Dict
25
- from email.mime.text import MIMEText
26
- from email.mime.multipart import MIMEMultipart
27
- from google.auth.transport.requests import Request
28
- from google.oauth2.credentials import Credentials
29
- from google_auth_oauthlib.flow import InstalledAppFlow
30
- from googleapiclient.discovery import build
31
- from googleapiclient.errors import HttpError
32
- from tqdm import tqdm
33
-
34
- class ToolComponent:
35
- def __init__(self):
36
- pass
37
-
38
- @abstractmethod
39
- def func(self):
40
- pass
41
-
42
- class KnowledgeBaseComponent(ToolComponent):
43
- """
44
- Inject knowledge base
45
- top_k : Top_k with the highest matching degree
46
- type : "QA" or others
47
- knowledge_base(json_path) : knowledge_base_path
48
- """
49
- def __init__(self, top_k, type, knowledge_base):
50
- super().__init__()
51
- self.top_k = top_k
52
- self.type = type
53
- self.knowledge_base = knowledge_base
54
-
55
- if self.type == "QA":
56
- (
57
- self.kb_embeddings,
58
- self.kb_questions,
59
- self.kb_answers,
60
- self.kb_chunks,
61
- ) = load_knowledge_base_qa(self.knowledge_base)
62
- else:
63
- self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile(
64
- self.knowledge_base
65
- )
66
-
67
- def func(self, agent):
68
- query = (
69
- agent.long_term_memory[-1]["content"]
70
- if len(agent.long_term_memory) > 0
71
- else ""
72
- )
73
- knowledge = ""
74
- query = extract(query, "query")
75
- query_embedding = get_embedding(query)
76
- hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50)
77
- hits = hits[0]
78
- temp = []
79
- if self.type == "QA":
80
- for hit in hits:
81
- matching_idx = hit["corpus_id"]
82
- if self.kb_chunks[matching_idx] in temp:
83
- pass
84
- else:
85
- knowledge = (
86
- knowledge
87
- + f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n"
88
- )
89
- temp.append(self.kb_answers[matching_idx])
90
- if len(temp) == 1:
91
- break
92
- print(hits[0]["score"])
93
- score = hits[0]["score"]
94
- if score < 0.5:
95
- return {"prompt": "No matching knowledge base"}
96
- else:
97
- return {"prompt": "The relevant content is: " + knowledge + "\n"}
98
- else:
99
- for hit in hits:
100
- matching_idx = hit["corpus_id"]
101
- if self.kb_chunks[matching_idx] in temp:
102
- pass
103
- else:
104
- knowledge = knowledge + f"{self.kb_answers[matching_idx]}\n\n"
105
- temp.append(self.kb_answers[matching_idx])
106
- if len(temp) == self.top_k:
107
- break
108
- print(hits[0]["score"])
109
- score = hits[0]["score"]
110
- if score < 0.5:
111
- return {"prompt": "No matching knowledge base"}
112
- else:
113
- print(knowledge)
114
- return {"prompt": "The relevant content is: " + knowledge + "\n"}
115
-
116
-
117
- class StaticComponent(ToolComponent):
118
- "Return static response"
119
- def __init__(self, output):
120
- super().__init__()
121
- self.output = output
122
-
123
- def func(self, agent):
124
- outputdict = {"response": self.output}
125
- return outputdict
126
-
127
-
128
- class ExtractComponent(ToolComponent):
129
- """
130
- Extract keywords based on the current scene and store them in the environment
131
- extract_words(list) : Keywords to be extracted
132
- system_prompt & last_prompt : Prompt to extract keywords
133
- """
134
- def __init__(
135
- self,
136
- extract_words,
137
- system_prompt,
138
- last_prompt=None,
139
- ):
140
- super().__init__()
141
- self.extract_words = extract_words
142
- self.system_prompt = system_prompt
143
- self.default_prompt = (
144
- "Please strictly adhere to the following format for outputting:\n"
145
- )
146
- for extract_word in extract_words:
147
- self.default_prompt += (
148
- f"<{extract_word}> the content you need to extract </{extract_word}>"
149
- )
150
- self.last_prompt = last_prompt if last_prompt else self.default_prompt
151
-
152
- def func(self, agent):
153
- response = agent.LLM.get_response(
154
- agent.long_term_memory,
155
- self.system_prompt,
156
- self.last_prompt,
157
- stream=False,
158
- )
159
- for extract_word in self.extract_words:
160
- key = extract(response, extract_word)
161
- key = key if key else response
162
- agent.environment.shared_memory[extract_word] = key
163
-
164
- return {}
165
-
166
-
167
- """Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)"""
168
-
169
-
170
- class WebSearchComponent(ToolComponent):
171
- """search engines"""
172
-
173
- __ENGINE_NAME__: List = ["google", "bing"]
174
-
175
- def __init__(self, engine_name: str, api: Dict):
176
- """
177
- :param engine_name: The name of the search engine used
178
- :param api: Pass in a dictionary, such as {"bing":"key1", "google":"key2", ...}, of course each value can also be a list, or more complicated
179
- """
180
- super(WebSearchComponent, self).__init__()
181
- """Determine whether the key and engine_name of the api are legal"""
182
-
183
- assert engine_name in WebSearchComponent.__ENGINE_NAME__
184
- for api_name in api:
185
- assert api_name in WebSearchComponent.__ENGINE_NAME__
186
-
187
- self.api = api
188
- self.engine_name = engine_name
189
-
190
- self.search: Dict = {"bing": self._bing_search, "google": self._google_search}
191
-
192
- def _bing_search(self, query: str, **kwargs):
193
- """Initialize search hyperparameters"""
194
- subscription_key = self.api["bing"]
195
- search_url = "https://api.bing.microsoft.com/v7.0/search"
196
- headers = {"Ocp-Apim-Subscription-Key": subscription_key}
197
- params = {
198
- "q": query,
199
- "textDecorations": True,
200
- "textFormat": "HTML",
201
- "count": 10,
202
- }
203
- """start searching"""
204
- response = requests.get(search_url, headers=headers, params=params)
205
- response.raise_for_status()
206
- results = response.json()["webPages"]["value"]
207
- """execute"""
208
- metadata_results = []
209
- for result in results:
210
- metadata_result = {
211
- "snippet": result["snippet"],
212
- "title": result["name"],
213
- "link": result["url"],
214
- }
215
- metadata_results.append(metadata_result)
216
- return {"meta data": metadata_results}
217
-
218
- def _google_search(self, query: str, **kwargs):
219
- """Initialize search hyperparameters"""
220
- api_key = self.api[self.engine_name]["api_key"]
221
- cse_id = self.api[self.engine_name]["cse_id"]
222
- service = build("customsearch", "v1", developerKey=api_key)
223
- """start searching"""
224
- results = (
225
- service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"]
226
- )
227
- """execute"""
228
- metadata_results = []
229
- for result in results:
230
- metadata_result = {
231
- "snippet": result["snippet"],
232
- "title": result["title"],
233
- "link": result["link"],
234
- }
235
- metadata_results.append(metadata_result)
236
- return {"meta data": metadata_results}
237
-
238
- def func(self, agent, **kwargs) -> Dict:
239
- query = (
240
- agent.long_term_memory[-1]["content"]
241
- if len(agent.long_term_memory) > 0
242
- else " "
243
- )
244
- response = agent.LLM.get_response(
245
- None,
246
- system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. Format the output as <keywords>extracted keywords</keywords>:\nConversation:\n{query}",
247
- stream=False,
248
- )
249
- response = extract(response, "keywords")
250
- query = response if response else query
251
-
252
- search_results = self.search[self.engine_name](query=query, **kwargs)
253
- information = ""
254
- for i in search_results["meta data"][:5]:
255
- information += i["snippet"]
256
- return {
257
- "prompt": "You can refer to the following information to reply:\n"
258
- + information
259
- }
260
-
261
- def convert_search_engine_to(self, engine_name):
262
- assert engine_name in WebSearchComponent.__ENGINE_NAME__
263
- self.engine_name = engine_name
264
-
265
-
266
- class WebCrawlComponent(ToolComponent):
267
- """Open a single web page for crawling"""
268
-
269
- def __init__(self):
270
- super(WebCrawlComponent, self).__init__()
271
-
272
- def func(self, agent_dict) -> Dict:
273
- url = agent_dict["url"]
274
- print(f"crawling {url} ......")
275
- content = ""
276
- """Crawling content from url may need to be carried out according to different websites, such as wiki, baidu, zhihu, etc."""
277
- driver = webdriver.Chrome()
278
- try:
279
- """open url"""
280
- driver.get(url)
281
-
282
- """wait 20 second"""
283
- wait = WebDriverWait(driver, 20)
284
- wait.until(EC.presence_of_element_located((By.TAG_NAME, "body")))
285
-
286
- """crawl code"""
287
- page_source = driver.page_source
288
-
289
- """parse"""
290
- soup = BeautifulSoup(page_source, "html.parser")
291
-
292
- """concatenate"""
293
- for paragraph in soup.find_all("p"):
294
- content = f"{content}\n{paragraph.get_text()}"
295
- except Exception as e:
296
- print("Error:", e)
297
- finally:
298
- """quit"""
299
- driver.quit()
300
- return {"content": content.strip()}
301
-
302
-
303
- class MailComponent(ToolComponent):
304
- __VALID_ACTION__ = ["read", "send"]
305
-
306
- def __init__(
307
- self, cfg_file: str, default_action: str = "read", name: str = "e-mail"
308
- ):
309
- """'../config/google_mail.json'"""
310
- super(MailComponent, self).__init__(name)
311
- self.name = name
312
- assert (
313
- default_action.lower() in self.__VALID_ACTION__
314
- ), f"Action `{default_action}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
315
- self.action = default_action.lower()
316
- self.credential = self._login(cfg_file)
317
-
318
- def _login(self, cfg_file: str):
319
- SCOPES = [
320
- "https://www.googleapis.com/auth/gmail.readonly",
321
- "https://www.googleapis.com/auth/gmail.send",
322
- ]
323
- creds = None
324
- if os.path.exists("token.json"):
325
- print("Login Successfully!")
326
- creds = Credentials.from_authorized_user_file("token.json", SCOPES)
327
- if not creds or not creds.valid:
328
- print("Please authorize in an open browser.")
329
- if creds and creds.expired and creds.refresh_token:
330
- creds.refresh(Request())
331
- else:
332
- flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES)
333
- creds = flow.run_local_server(port=0)
334
- # Save the credentials for the next run
335
- with open("token.json", "w") as token:
336
- token.write(creds.to_json())
337
- return creds
338
-
339
- def _read(self, mail_dict: dict):
340
- credential = self.credential
341
- state = mail_dict["state"] if "state" in mail_dict else None
342
- time_between = (
343
- mail_dict["time_between"] if "time_between" in mail_dict else None
344
- )
345
- sender_mail = mail_dict["sender_mail"] if "sender_mail" in mail_dict else None
346
- only_both = mail_dict["only_both"] if "only_both" in mail_dict else False
347
- order_by_time = (
348
- mail_dict["order_by_time"] if "order_by_time" in mail_dict else "descend"
349
- )
350
- include_word = (
351
- mail_dict["include_word"] if "include_word" in mail_dict else None
352
- )
353
- exclude_word = (
354
- mail_dict["exclude_word"] if "exclude_word" in mail_dict else None
355
- )
356
- MAX_SEARCH_CNT = (
357
- mail_dict["MAX_SEARCH_CNT"] if "MAX_SEARCH_CNT" in mail_dict else 50
358
- )
359
- number = mail_dict["number"] if "number" in mail_dict else 10
360
- if state is None:
361
- state = "all"
362
- if time_between is not None:
363
- assert isinstance(time_between, tuple)
364
- assert len(time_between) == 2
365
- assert state in ["all", "unread", "read", "sent"]
366
- if only_both:
367
- assert sender_mail is not None
368
- if sender_mail is not None:
369
- assert isinstance(sender_mail, str)
370
- assert credential
371
- assert order_by_time in ["descend", "ascend"]
372
-
373
- def generate_query():
374
- query = ""
375
- if state in ["unread", "read"]:
376
- query = f"is:{state}"
377
- if state in ["sent"]:
378
- query = f"in:{state}"
379
- if only_both:
380
- query = f"{query} from:{sender_mail} OR to:{sender_mail}"
381
- if sender_mail is not None and not only_both:
382
- query = f"{query} from:({sender_mail})"
383
- if include_word is not None:
384
- query = f"{query} {include_word}"
385
- if exclude_word is not None:
386
- query = f"{query} -{exclude_word}"
387
- if time_between is not None:
388
- TIME_FORMAT = "%Y/%m/%d"
389
- t1, t2 = time_between
390
- if t1 == "now":
391
- t1 = datetime.now().strftime(TIME_FORMAT)
392
- if t2 == "now":
393
- t2 = datetime.now().strftime(TIME_FORMAT)
394
- if isinstance(t1, str) and isinstance(t2, str):
395
- t1 = datetime.strptime(t1, TIME_FORMAT)
396
- t2 = datetime.strptime(t2, TIME_FORMAT)
397
- elif isinstance(t1, str) and isinstance(t2, int):
398
- t1 = datetime.strptime(t1, TIME_FORMAT)
399
- t2 = t1 + timedelta(days=t2)
400
- elif isinstance(t1, int) and isinstance(t2, str):
401
- t2 = datetime.strptime(t2, TIME_FORMAT)
402
- t1 = t2 + timedelta(days=t1)
403
- else:
404
- assert False, "invalid time"
405
- if t1 > t2:
406
- t1, t2 = t2, t1
407
- query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}"
408
- return query.strip()
409
-
410
- def sort_by_time(data: List[Dict]):
411
- if order_by_time == "descend":
412
- reverse = True
413
- else:
414
- reverse = False
415
- sorted_data = sorted(
416
- data,
417
- key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"),
418
- reverse=reverse,
419
- )
420
- return sorted_data
421
-
422
- try:
423
- service = build("gmail", "v1", credentials=credential)
424
- results = (
425
- service.users()
426
- .messages()
427
- .list(userId="me", labelIds=["INBOX"], q=generate_query())
428
- .execute()
429
- )
430
-
431
- messages = results.get("messages", [])
432
- email_data = list()
433
-
434
- if not messages:
435
- print("No eligible emails.")
436
- return None
437
- else:
438
- pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages)))
439
- for cnt, message in enumerate(messages):
440
- pbar.update(1)
441
- if cnt >= MAX_SEARCH_CNT:
442
- break
443
- msg = (
444
- service.users()
445
- .messages()
446
- .get(
447
- userId="me",
448
- id=message["id"],
449
- format="full",
450
- metadataHeaders=None,
451
- )
452
- .execute()
453
- )
454
-
455
- subject = ""
456
- for header in msg["payload"]["headers"]:
457
- if header["name"] == "Subject":
458
- subject = header["value"]
459
- break
460
-
461
- sender = ""
462
- for header in msg["payload"]["headers"]:
463
- if header["name"] == "From":
464
- sender = re.findall(
465
- r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"]
466
- )[0]
467
- break
468
- body = ""
469
- if "parts" in msg["payload"]:
470
- for part in msg["payload"]["parts"]:
471
- if part["mimeType"] == "text/plain":
472
- data = part["body"]["data"]
473
- body = base64.urlsafe_b64decode(data).decode("utf-8")
474
- break
475
-
476
- email_info = {
477
- "sender": sender,
478
- "time": datetime.fromtimestamp(
479
- int(msg["internalDate"]) / 1000
480
- ).strftime("%Y-%m-%d %H:%M:%S"),
481
- "subject": subject,
482
- "body": body,
483
- }
484
- email_data.append(email_info)
485
- pbar.close()
486
- email_data = sort_by_time(email_data)[0:number]
487
- return {"results": email_data}
488
- except Exception as e:
489
- print(e)
490
- return None
491
-
492
- def _send(self, mail_dict: dict):
493
- recipient_mail = mail_dict["recipient_mail"]
494
- subject = mail_dict["subject"]
495
- body = mail_dict["body"]
496
- credential = self.credential
497
- service = build("gmail", "v1", credentials=credential)
498
-
499
- message = MIMEMultipart()
500
- message["to"] = recipient_mail
501
- message["subject"] = subject
502
-
503
- message.attach(MIMEText(body, "plain"))
504
-
505
- raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
506
- try:
507
- message = (
508
- service.users()
509
- .messages()
510
- .send(userId="me", body={"raw": raw_message})
511
- .execute()
512
- )
513
- return {"state": True}
514
- except HttpError as error:
515
- print(error)
516
- return {"state": False}
517
-
518
- def func(self, mail_dict: dict):
519
- if "action" in mail_dict:
520
- assert mail_dict["action"].lower() in self.__VALID_ACTION__
521
- self.action = mail_dict["action"]
522
- functions = {"read": self._read, "send": self._send}
523
- return functions[self.action](mail_dict)
524
-
525
- def convert_action_to(self, action_name: str):
526
- assert (
527
- action_name.lower() in self.__VALID_ACTION__
528
- ), f"Action `{action_name}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
529
- self.action = action_name.lower()
530
-
531
-
532
- class WeatherComponet(ToolComponent):
533
- def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"):
534
- super(WeatherComponet, self).__init__(name)
535
- self.name = name
536
- self.TIME_FORMAT = TIME_FORMAT
537
- self.api_key = api_key
538
-
539
- def _parse(self, data):
540
- dict_data: dict = {}
541
- for item in data["data"]:
542
- date = item["datetime"]
543
- dict_data[date] = {}
544
- if "weather" in item:
545
- dict_data[date]["description"] = item["weather"]["description"]
546
- mapping = {
547
- "temp": "temperature",
548
- "max_temp": "max_temperature",
549
- "min_temp": "min_temperature",
550
- "precip": "accumulated_precipitation",
551
- }
552
- for key in ["temp", "max_temp", "min_temp", "precip"]:
553
- if key in item:
554
- dict_data[date][mapping[key]] = item[key]
555
- return dict_data
556
-
557
- def _query(self, city_name, country_code, start_date, end_date):
558
- """https://www.weatherbit.io/api/historical-weather-daily"""
559
- # print(datetime.strftime(start_date, self.TIME_FORMAT), datetime.strftime(datetime.now(), self.TIME_FORMAT), end_date, datetime.strftime(datetime.now()+timedelta(days=1), self.TIME_FORMAT))
560
- if start_date == datetime.strftime(
561
- datetime.now(), self.TIME_FORMAT
562
- ) and end_date == datetime.strftime(
563
- datetime.now() + timedelta(days=1), self.TIME_FORMAT
564
- ):
565
- """today"""
566
- url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}"
567
- else:
568
- url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}"
569
- response = requests.get(url)
570
- data = response.json()
571
- return self._parse(data)
572
-
573
- def func(self, weather_dict: Dict) -> Dict:
574
- TIME_FORMAT = self.TIME_FORMAT
575
- # Beijing, Shanghai
576
- city_name = weather_dict["city_name"]
577
- # CN, US
578
- country_code = weather_dict["country_code"]
579
- # 2020-02-02
580
- start_date = datetime.strftime(
581
- datetime.strptime(weather_dict["start_date"], self.TIME_FORMAT),
582
- self.TIME_FORMAT,
583
- )
584
- end_date = weather_dict["end_date"] if "end_date" in weather_dict else None
585
- if end_date is None:
586
- end_date = datetime.strftime(
587
- datetime.strptime(start_date, TIME_FORMAT) + timedelta(days=-1),
588
- TIME_FORMAT,
589
- )
590
- else:
591
- end_date = datetime.strftime(
592
- datetime.strptime(weather_dict["end_date"], self.TIME_FORMAT),
593
- self.TIME_FORMAT,
594
- )
595
- if datetime.strptime(start_date, TIME_FORMAT) > datetime.strptime(
596
- end_date, TIME_FORMAT
597
- ):
598
- start_date, end_date = end_date, start_date
599
- assert start_date != end_date
600
- return self._query(city_name, country_code, start_date, end_date)
601
-
602
-
603
- class TranslateComponent(ToolComponent):
604
- __SUPPORT_LANGUAGE__ = [
605
- "af",
606
- "am",
607
- "ar",
608
- "as",
609
- "az",
610
- "ba",
611
- "bg",
612
- "bn",
613
- "bo",
614
- "bs",
615
- "ca",
616
- "cs",
617
- "cy",
618
- "da",
619
- "de",
620
- "dsb",
621
- "dv",
622
- "el",
623
- "en",
624
- "es",
625
- "et",
626
- "eu",
627
- "fa",
628
- "fi",
629
- "fil",
630
- "fj",
631
- "fo",
632
- "fr",
633
- "fr-CA",
634
- "ga",
635
- "gl",
636
- "gom",
637
- "gu",
638
- "ha",
639
- "he",
640
- "hi",
641
- "hr",
642
- "hsb",
643
- "ht",
644
- "hu",
645
- "hy",
646
- "id",
647
- "ig",
648
- "ikt",
649
- "is",
650
- "it",
651
- "iu",
652
- "iu-Latn",
653
- "ja",
654
- "ka",
655
- "kk",
656
- "km",
657
- "kmr",
658
- "kn",
659
- "ko",
660
- "ku",
661
- "ky",
662
- "ln",
663
- "lo",
664
- "lt",
665
- "lug",
666
- "lv",
667
- "lzh",
668
- "mai",
669
- "mg",
670
- "mi",
671
- "mk",
672
- "ml",
673
- "mn-Cyrl",
674
- "mn-Mong",
675
- "mr",
676
- "ms",
677
- "mt",
678
- "mww",
679
- "my",
680
- "nb",
681
- "ne",
682
- "nl",
683
- "nso",
684
- "nya",
685
- "or",
686
- "otq",
687
- "pa",
688
- "pl",
689
- "prs",
690
- "ps",
691
- "pt",
692
- "pt-PT",
693
- "ro",
694
- "ru",
695
- "run",
696
- "rw",
697
- "sd",
698
- "si",
699
- "sk",
700
- "sl",
701
- "sm",
702
- "sn",
703
- "so",
704
- "sq",
705
- "sr-Cyrl",
706
- "sr-Latn",
707
- "st",
708
- "sv",
709
- "sw",
710
- "ta",
711
- "te",
712
- "th",
713
- "ti",
714
- "tk",
715
- "tlh-Latn",
716
- "tlh-Piqd",
717
- "tn",
718
- "to",
719
- "tr",
720
- "tt",
721
- "ty",
722
- "ug",
723
- "uk",
724
- "ur",
725
- "uz",
726
- "vi",
727
- "xh",
728
- "yo",
729
- "yua",
730
- "yue",
731
- "zh-Hans",
732
- "zh-Hant",
733
- "zu",
734
- ]
735
-
736
- def __init__(
737
- self, api_key, location, default_target_language="zh-cn", name="translate"
738
- ):
739
- super(TranslateComponent, self).__init__(name)
740
- self.name = name
741
- self.api_key = api_key
742
- self.location = location
743
- self.default_target_language = default_target_language
744
-
745
- def func(self, translate_dict: Dict) -> Dict:
746
- content = translate_dict["content"]
747
- target_language = self.default_target_language
748
- if "target_language" in translate_dict:
749
- target_language = translate_dict["target_language"]
750
- assert (
751
- target_language in self.__SUPPORT_LANGUAGE__
752
- ), f"language `{target_language}` is not supported."
753
-
754
- endpoint = "https://api.cognitive.microsofttranslator.com"
755
-
756
- path = "/translate"
757
- constructed_url = endpoint + path
758
-
759
- params = {"api-version": "3.0", "to": target_language}
760
-
761
- headers = {
762
- "Ocp-Apim-Subscription-Key": self.api_key,
763
- "Ocp-Apim-Subscription-Region": self.location,
764
- "Content-type": "application/json",
765
- "X-ClientTraceId": str(uuid.uuid4()),
766
- }
767
-
768
- body = [{"text": content}]
769
-
770
- request = requests.post(
771
- constructed_url, params=params, headers=headers, json=body
772
- )
773
- response = request.json()
774
- response = json.dumps(
775
- response,
776
- sort_keys=True,
777
- ensure_ascii=False,
778
- indent=4,
779
- separators=(",", ": "),
780
- )
781
- response = eval(response)
782
- return {"result": response[0]["translations"][0]["text"]}
783
-
784
-
785
- class APIComponent(ToolComponent):
786
- def __init__(self):
787
- super(APIComponent, self).__init__()
788
-
789
- def func(self, agent) -> Dict:
790
- pass
791
-
792
-
793
- class FunctionComponent(ToolComponent):
794
- def __init__(
795
- self,
796
- functions,
797
- function_call="auto",
798
- response_type="response",
799
- your_function=None,
800
- ):
801
- super().__init__()
802
- self.functions = functions
803
- self.function_call = function_call
804
- self.parameters = {}
805
- self.available_functions = {}
806
- self.response_type = response_type
807
- if your_function:
808
- function_name = your_function["name"]
809
- function_content = your_function["content"]
810
- exec(function_content)
811
- self.available_functions[function_name] = eval(function_name)
812
-
813
- for function in self.functions:
814
- self.parameters[function["name"]] = list(
815
- function["parameters"]["properties"].keys()
816
- )
817
- self.available_functions[function["name"]] = eval(function["name"])
818
-
819
- def func(self, agent):
820
- messages = agent.long_term_memory
821
- outputdict = {}
822
- query = agent.long_term_memory[-1].content if len(agent.long_term_memory) > 0 else " "
823
- relevant_history = get_relevant_history(
824
- query,
825
- agent.long_term_memory[:-1],
826
- agent.chat_embeddings[:-1],
827
- )
828
- response = agent.LLM.get_response(
829
- messages,
830
- None,
831
- functions=self.functions,
832
- stream=False,
833
- function_call=self.function_call,
834
- relevant_history=relevant_history,
835
- )
836
- response_message = response
837
- if response_message.get("function_call"):
838
- function_name = response_message["function_call"]["name"]
839
- fuction_to_call = self.available_functions[function_name]
840
- function_args = json.loads(response_message["function_call"]["arguments"])
841
- input_args = {}
842
- for args_name in self.parameters[function_name]:
843
- input_args[args_name] = function_args.get(args_name)
844
- function_response = fuction_to_call(**input_args)
845
- if self.response_type == "response":
846
- outputdict["response"] = function_response
847
- elif self.response_type == "prompt":
848
- outputdict["prompt"] = function_response
849
-
850
- return outputdict
851
-
852
-
853
- class CodeComponent(ToolComponent):
854
- def __init__(self, file_name, keyword) -> None:
855
- super().__init__()
856
- self.file_name = file_name
857
- self.keyword = keyword
858
- self.system_prompt = (
859
- "you need to extract the modified code as completely as possible."
860
- )
861
- self.last_prompt = (
862
- f"Please strictly adhere to the following format for outputting: \n"
863
- )
864
- self.last_prompt += (
865
- f"<{self.keyword}> the content you need to extract </{self.keyword}>"
866
- )
867
-
868
- def func(self, agent):
869
- response = agent.LLM.get_response(
870
- agent.long_term_memory,
871
- self.system_prompt,
872
- self.last_prompt,
873
- stream=False,
874
- )
875
- code = extract(response, self.keyword)
876
- code = code if code else response
877
- os.makedirs("output_code", exist_ok=True)
878
- file_name = "output_code/" + self.file_name
879
- codes = code.split("\n")
880
- if codes[0] == "```python":
881
- codes.remove(codes[0])
882
- if codes[-1] == "```":
883
- codes.remove(codes[-1])
884
- code = "\n".join(codes)
885
- with open(file_name, "w", encoding="utf-8") as f:
886
- f.write(code)
887
- return {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-120e_deepfashion2_vest_256x192/__init__.py DELETED
File without changes
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/hashConv.ts DELETED
@@ -1,12 +0,0 @@
1
- import type { Conversation } from "$lib/types/Conversation";
2
- import { sha256 } from "./sha256";
3
-
4
- export async function hashConv(conv: Conversation) {
5
- // messages contains the conversation message but only the immutable part
6
- const messages = conv.messages.map((message) => {
7
- return (({ from, id, content, webSearchId }) => ({ from, id, content, webSearchId }))(message);
8
- });
9
-
10
- const hash = await sha256(JSON.stringify(messages));
11
- return hash;
12
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/client/css/main.css DELETED
@@ -1,14 +0,0 @@
1
- .main-container {
2
- display: flex;
3
- padding: var(--section-gap);
4
- height: 100vh;
5
- justify-content: center;
6
- box-sizing: border-box;
7
- }
8
-
9
- @media screen and (max-width: 360px) {
10
- .main-container {
11
- padding: 0px;
12
- height: 90vh;
13
- }
14
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptDuo.py DELETED
@@ -1,57 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from curl_cffi.requests import AsyncSession
4
- from .base_provider import AsyncProvider, format_prompt
5
-
6
-
7
- class ChatgptDuo(AsyncProvider):
8
- url = "https://chatgptduo.com"
9
- supports_gpt_35_turbo = True
10
- working = True
11
-
12
- @classmethod
13
- async def create_async(
14
- cls,
15
- model: str,
16
- messages: list[dict[str, str]],
17
- proxy: str = None,
18
- timeout: int = 30,
19
- **kwargs
20
- ) -> str:
21
- async with AsyncSession(
22
- impersonate="chrome107",
23
- proxies={"https": proxy},
24
- timeout=timeout
25
- ) as session:
26
- prompt = format_prompt(messages),
27
- data = {
28
- "prompt": prompt,
29
- "search": prompt,
30
- "purpose": "ask",
31
- }
32
- response = await session.post(f"{cls.url}/", data=data)
33
- response.raise_for_status()
34
- data = response.json()
35
-
36
- cls._sources = [{
37
- "title": source["title"],
38
- "url": source["link"],
39
- "snippet": source["snippet"]
40
- } for source in data["results"]]
41
-
42
- return data["answer"]
43
-
44
- @classmethod
45
- def get_sources(cls):
46
- return cls._sources
47
-
48
- @classmethod
49
- @property
50
- def params(cls):
51
- params = [
52
- ("model", "str"),
53
- ("messages", "list[dict[str, str]]"),
54
- ("stream", "bool"),
55
- ]
56
- param = ", ".join([": ".join(p) for p in params])
57
- return f"g4f.provider.{cls.__name__} supports: ({param})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetShownChildrenMethods.js DELETED
@@ -1,43 +0,0 @@
1
- export default {
2
- getShownChildren(out) {
3
- if (out === undefined) {
4
- out = [];
5
- }
6
- var children = this.children,
7
- child;
8
- for (var i = 0, cnt = children.length; i < cnt; i++) {
9
- child = children[i];
10
- if (child.rexSizer && child.rexSizer.hidden) { // Don't add hidden child
11
- continue;
12
- }
13
-
14
- out.push(child);
15
- }
16
-
17
- return out;
18
- },
19
-
20
- getAllShownChildren(out) {
21
- if (out === undefined) {
22
- out = [];
23
- }
24
-
25
- var queue = [this];
26
- while (queue.length > 0) {
27
- var current = queue.shift();
28
- if (current.rexSizer && current.rexSizer.hidden) {
29
- continue;
30
- }
31
-
32
- if (current !== this) {
33
- out.push(current);
34
- }
35
-
36
- if (current.isRexContainerLite) {
37
- queue.push(...current.children);
38
- }
39
- }
40
-
41
- return out;
42
- }
43
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Akmyradov/TurkmenTTSweSTT/vits/text/cleaners.py DELETED
@@ -1,100 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
- import re
16
- from unidecode import unidecode
17
- from phonemizer import phonemize
18
-
19
-
20
- # Regular expression matching whitespace:
21
- _whitespace_re = re.compile(r'\s+')
22
-
23
- # List of (regular expression, replacement) pairs for abbreviations:
24
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
25
- ('mrs', 'misess'),
26
- ('mr', 'mister'),
27
- ('dr', 'doctor'),
28
- ('st', 'saint'),
29
- ('co', 'company'),
30
- ('jr', 'junior'),
31
- ('maj', 'major'),
32
- ('gen', 'general'),
33
- ('drs', 'doctors'),
34
- ('rev', 'reverend'),
35
- ('lt', 'lieutenant'),
36
- ('hon', 'honorable'),
37
- ('sgt', 'sergeant'),
38
- ('capt', 'captain'),
39
- ('esq', 'esquire'),
40
- ('ltd', 'limited'),
41
- ('col', 'colonel'),
42
- ('ft', 'fort'),
43
- ]]
44
-
45
-
46
- def expand_abbreviations(text):
47
- for regex, replacement in _abbreviations:
48
- text = re.sub(regex, replacement, text)
49
- return text
50
-
51
-
52
- def expand_numbers(text):
53
- return normalize_numbers(text)
54
-
55
-
56
- def lowercase(text):
57
- return text.lower()
58
-
59
-
60
- def collapse_whitespace(text):
61
- return re.sub(_whitespace_re, ' ', text)
62
-
63
-
64
- def convert_to_ascii(text):
65
- return unidecode(text)
66
-
67
-
68
- def basic_cleaners(text):
69
- '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
70
- text = lowercase(text)
71
- text = collapse_whitespace(text)
72
- return text
73
-
74
-
75
- def transliteration_cleaners(text):
76
- '''Pipeline for non-English text that transliterates to ASCII.'''
77
- text = convert_to_ascii(text)
78
- text = lowercase(text)
79
- text = collapse_whitespace(text)
80
- return text
81
-
82
-
83
- def english_cleaners(text):
84
- '''Pipeline for English text, including abbreviation expansion.'''
85
- text = convert_to_ascii(text)
86
- text = lowercase(text)
87
- text = expand_abbreviations(text)
88
- phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
89
- phonemes = collapse_whitespace(phonemes)
90
- return phonemes
91
-
92
-
93
- def english_cleaners2(text):
94
- '''Pipeline for English text, including abbreviation expansion. + punctuation + stress'''
95
- text = convert_to_ascii(text)
96
- text = lowercase(text)
97
- text = expand_abbreviations(text)
98
- phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=True)
99
- phonemes = collapse_whitespace(phonemes)
100
- return phonemes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ameaou/academic-chatgpt3.1/colorful.py DELETED
@@ -1,91 +0,0 @@
1
- import platform
2
- from sys import stdout
3
-
4
- if platform.system()=="Linux":
5
- pass
6
- else:
7
- from colorama import init
8
- init()
9
-
10
- # Do you like the elegance of Chinese characters?
11
- def print红(*kw,**kargs):
12
- print("\033[0;31m",*kw,"\033[0m",**kargs)
13
- def print绿(*kw,**kargs):
14
- print("\033[0;32m",*kw,"\033[0m",**kargs)
15
- def print黄(*kw,**kargs):
16
- print("\033[0;33m",*kw,"\033[0m",**kargs)
17
- def print蓝(*kw,**kargs):
18
- print("\033[0;34m",*kw,"\033[0m",**kargs)
19
- def print紫(*kw,**kargs):
20
- print("\033[0;35m",*kw,"\033[0m",**kargs)
21
- def print靛(*kw,**kargs):
22
- print("\033[0;36m",*kw,"\033[0m",**kargs)
23
-
24
- def print亮红(*kw,**kargs):
25
- print("\033[1;31m",*kw,"\033[0m",**kargs)
26
- def print亮绿(*kw,**kargs):
27
- print("\033[1;32m",*kw,"\033[0m",**kargs)
28
- def print亮黄(*kw,**kargs):
29
- print("\033[1;33m",*kw,"\033[0m",**kargs)
30
- def print亮蓝(*kw,**kargs):
31
- print("\033[1;34m",*kw,"\033[0m",**kargs)
32
- def print亮紫(*kw,**kargs):
33
- print("\033[1;35m",*kw,"\033[0m",**kargs)
34
- def print亮靛(*kw,**kargs):
35
- print("\033[1;36m",*kw,"\033[0m",**kargs)
36
-
37
-
38
-
39
- def print亮红(*kw,**kargs):
40
- print("\033[1;31m",*kw,"\033[0m",**kargs)
41
- def print亮绿(*kw,**kargs):
42
- print("\033[1;32m",*kw,"\033[0m",**kargs)
43
- def print亮黄(*kw,**kargs):
44
- print("\033[1;33m",*kw,"\033[0m",**kargs)
45
- def print亮蓝(*kw,**kargs):
46
- print("\033[1;34m",*kw,"\033[0m",**kargs)
47
- def print亮紫(*kw,**kargs):
48
- print("\033[1;35m",*kw,"\033[0m",**kargs)
49
- def print亮靛(*kw,**kargs):
50
- print("\033[1;36m",*kw,"\033[0m",**kargs)
51
-
52
- print_red = print红
53
- print_green = print绿
54
- print_yellow = print黄
55
- print_blue = print蓝
56
- print_purple = print紫
57
- print_indigo = print靛
58
-
59
- print_bold_red = print亮红
60
- print_bold_green = print亮绿
61
- print_bold_yellow = print亮黄
62
- print_bold_blue = print亮蓝
63
- print_bold_purple = print亮紫
64
- print_bold_indigo = print亮靛
65
-
66
- if not stdout.isatty():
67
- # redirection, avoid a fucked up log file
68
- print红 = print
69
- print绿 = print
70
- print黄 = print
71
- print蓝 = print
72
- print紫 = print
73
- print靛 = print
74
- print亮红 = print
75
- print亮绿 = print
76
- print亮黄 = print
77
- print亮蓝 = print
78
- print亮紫 = print
79
- print亮靛 = print
80
- print_red = print
81
- print_green = print
82
- print_yellow = print
83
- print_blue = print
84
- print_purple = print
85
- print_indigo = print
86
- print_bold_red = print
87
- print_bold_green = print
88
- print_bold_yellow = print
89
- print_bold_blue = print
90
- print_bold_purple = print
91
- print_bold_indigo = print
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/installation.md DELETED
@@ -1,146 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- # 安装
14
-
15
- 在你正在使用的任意深度学习框架中安装 🤗 Diffusers 。
16
-
17
- 🤗 Diffusers已在Python 3.7+、PyTorch 1.7.0+和Flax上进行了测试。按照下面的安装说明,针对你正在使用的深度学习框架进行安装:
18
-
19
- - [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
20
- - [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.
21
-
22
- ## 使用pip安装
23
-
24
- 你需要在[虚拟环境](https://docs.python.org/3/library/venv.html)中安装 🤗 Diffusers 。
25
-
26
- 如果你对 Python 虚拟环境不熟悉,可以看看这个[教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
27
-
28
- 在虚拟环境中,你可以轻松管理不同的项目,避免依赖项之间的兼容性问题。
29
-
30
- 首先,在你的项目目录下创建一个虚拟环境:
31
-
32
- ```bash
33
- python -m venv .env
34
- ```
35
-
36
- 激活虚拟环境:
37
-
38
- ```bash
39
- source .env/bin/activate
40
- ```
41
-
42
- 现在,你就可以安装 🤗 Diffusers了!使用下边这个命令:
43
-
44
- **PyTorch**
45
-
46
- ```bash
47
- pip install diffusers["torch"]
48
- ```
49
-
50
- **Flax**
51
-
52
- ```bash
53
- pip install diffusers["flax"]
54
- ```
55
-
56
- ## 从源代码安装
57
-
58
- 在从源代码安装 `diffusers` 之前,确保你已经安装了 `torch` 和 `accelerate`。
59
-
60
- `torch`的安装教程可以看 `torch` [文档](https://pytorch.org/get-started/locally/#start-locally).
61
-
62
- 安装 `accelerate`
63
-
64
- ```bash
65
- pip install accelerate
66
- ```
67
-
68
- 从源码安装 🤗 Diffusers 需要使用以下命令:
69
-
70
- ```bash
71
- pip install git+https://github.com/huggingface/diffusers
72
- ```
73
-
74
- 这个命令安装的是最新的 `main`版本,而不是最近的`stable`版。
75
- `main`是一直和最新进展保持一致的。比如,上次发布的正式版中有bug,在`main`中可以看到这个bug被修复了,但是新的正式版此时尚未推出。
76
- 但是这也意味着 `main`版本不保证是稳定的。
77
-
78
- 我们努力保持`main`版本正常运行,大多数问题都能在几个小时或一天之内解决
79
-
80
- 如果你遇到了问题,可以提 [Issue](https://github.com/huggingface/transformers/issues),这样我们就能更快修复问题了。
81
-
82
- ## 可修改安装
83
-
84
- 如果你想做以下两件事,那你可能需要一个可修改代码的安装方式:
85
-
86
- * 使用 `main`版本的源代码。
87
- * 为 🤗 Diffusers 贡献,需要测试代码中的变化。
88
-
89
- 使用以下命令克隆并安装 🤗 Diffusers:
90
-
91
- ```bash
92
- git clone https://github.com/huggingface/diffusers.git
93
- cd diffusers
94
- ```
95
-
96
- **PyTorch**
97
-
98
- ```
99
- pip install -e ".[torch]"
100
- ```
101
-
102
- **Flax**
103
-
104
- ```
105
- pip install -e ".[flax]"
106
- ```
107
-
108
- 这些命令将连接到你克隆的版本库和你的 Python 库路径。
109
- 现在,不只是在通常的库路径,Python 还会在你克隆的文件夹内寻找包。
110
- 例如,如果你的 Python 包通常安装在 `~/anaconda3/envs/main/lib/python3.7/Site-packages/`,Python 也会搜索你克隆到的文件夹。`~/diffusers/`。
111
-
112
- <Tip warning={true}>
113
-
114
- 如果你想继续使用这个库,你必须保留 `diffusers` 文件夹。
115
-
116
- </Tip>
117
-
118
-
119
- 现在你可以用下面的命令轻松地将你克隆的 🤗 Diffusers 库更新到最新版本。
120
-
121
- ```bash
122
- cd ~/diffusers/
123
- git pull
124
- ```
125
-
126
- 你的Python环境将在下次运行时找到`main`版本的 🤗 Diffusers。
127
-
128
- ## 注意 Telemetry 日志
129
-
130
- 我们的库会在使用`from_pretrained()`请求期间收集 telemetry 信息。这些数据包括Diffusers和PyTorch/Flax的版本,请求的模型或管道类,以及预训练检查点的路径(如果它被托管在Hub上的话)。
131
- 这些使用数据有助于我们调试问题并确定新功能的开发优先级。
132
- Telemetry 数据仅在从 HuggingFace Hub 中加载模型和管道时发送,而不会在本地使用期间收集。
133
-
134
- 我们知道,并不是每个人都想分享这些的信息,我们尊重您的隐私,
135
- 因此您可以通过在终端中设置 `DISABLE_TELEMETRY` 环境变量从而禁用 Telemetry 数据收集:
136
-
137
-
138
- Linux/MacOS :
139
- ```bash
140
- export DISABLE_TELEMETRY=YES
141
- ```
142
-
143
- Windows :
144
- ```bash
145
- set DISABLE_TELEMETRY=YES
146
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_flax_and_transformers_objects.py DELETED
@@ -1,62 +0,0 @@
1
- # This file is autogenerated by the command `make fix-copies`, do not edit.
2
- from ..utils import DummyObject, requires_backends
3
-
4
-
5
- class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
6
- _backends = ["flax", "transformers"]
7
-
8
- def __init__(self, *args, **kwargs):
9
- requires_backends(self, ["flax", "transformers"])
10
-
11
- @classmethod
12
- def from_config(cls, *args, **kwargs):
13
- requires_backends(cls, ["flax", "transformers"])
14
-
15
- @classmethod
16
- def from_pretrained(cls, *args, **kwargs):
17
- requires_backends(cls, ["flax", "transformers"])
18
-
19
-
20
- class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
21
- _backends = ["flax", "transformers"]
22
-
23
- def __init__(self, *args, **kwargs):
24
- requires_backends(self, ["flax", "transformers"])
25
-
26
- @classmethod
27
- def from_config(cls, *args, **kwargs):
28
- requires_backends(cls, ["flax", "transformers"])
29
-
30
- @classmethod
31
- def from_pretrained(cls, *args, **kwargs):
32
- requires_backends(cls, ["flax", "transformers"])
33
-
34
-
35
- class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
36
- _backends = ["flax", "transformers"]
37
-
38
- def __init__(self, *args, **kwargs):
39
- requires_backends(self, ["flax", "transformers"])
40
-
41
- @classmethod
42
- def from_config(cls, *args, **kwargs):
43
- requires_backends(cls, ["flax", "transformers"])
44
-
45
- @classmethod
46
- def from_pretrained(cls, *args, **kwargs):
47
- requires_backends(cls, ["flax", "transformers"])
48
-
49
-
50
- class FlaxStableDiffusionPipeline(metaclass=DummyObject):
51
- _backends = ["flax", "transformers"]
52
-
53
- def __init__(self, *args, **kwargs):
54
- requires_backends(self, ["flax", "transformers"])
55
-
56
- @classmethod
57
- def from_config(cls, *args, **kwargs):
58
- requires_backends(cls, ["flax", "transformers"])
59
-
60
- @classmethod
61
- def from_pretrained(cls, *args, **kwargs):
62
- requires_backends(cls, ["flax", "transformers"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/hub_utils.py DELETED
@@ -1,361 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
-
17
- import os
18
- import re
19
- import sys
20
- import traceback
21
- import warnings
22
- from pathlib import Path
23
- from typing import Dict, Optional, Union
24
- from uuid import uuid4
25
-
26
- from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
27
- from huggingface_hub.file_download import REGEX_COMMIT_HASH
28
- from huggingface_hub.utils import (
29
- EntryNotFoundError,
30
- RepositoryNotFoundError,
31
- RevisionNotFoundError,
32
- is_jinja_available,
33
- )
34
- from packaging import version
35
- from requests import HTTPError
36
-
37
- from .. import __version__
38
- from .constants import (
39
- DEPRECATED_REVISION_ARGS,
40
- DIFFUSERS_CACHE,
41
- HUGGINGFACE_CO_RESOLVE_ENDPOINT,
42
- SAFETENSORS_WEIGHTS_NAME,
43
- WEIGHTS_NAME,
44
- )
45
- from .import_utils import (
46
- ENV_VARS_TRUE_VALUES,
47
- _flax_version,
48
- _jax_version,
49
- _onnxruntime_version,
50
- _torch_version,
51
- is_flax_available,
52
- is_onnx_available,
53
- is_torch_available,
54
- )
55
- from .logging import get_logger
56
-
57
-
58
- logger = get_logger(__name__)
59
-
60
-
61
- MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
62
- SESSION_ID = uuid4().hex
63
- HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
64
- DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
65
- HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
66
-
67
-
68
- def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
69
- """
70
- Formats a user-agent string with basic info about a request.
71
- """
72
- ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
73
- if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
74
- return ua + "; telemetry/off"
75
- if is_torch_available():
76
- ua += f"; torch/{_torch_version}"
77
- if is_flax_available():
78
- ua += f"; jax/{_jax_version}"
79
- ua += f"; flax/{_flax_version}"
80
- if is_onnx_available():
81
- ua += f"; onnxruntime/{_onnxruntime_version}"
82
- # CI will set this value to True
83
- if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
84
- ua += "; is_ci/true"
85
- if isinstance(user_agent, dict):
86
- ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
87
- elif isinstance(user_agent, str):
88
- ua += "; " + user_agent
89
- return ua
90
-
91
-
92
- def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
93
- if token is None:
94
- token = HfFolder.get_token()
95
- if organization is None:
96
- username = whoami(token)["name"]
97
- return f"{username}/{model_id}"
98
- else:
99
- return f"{organization}/{model_id}"
100
-
101
-
102
- def create_model_card(args, model_name):
103
- if not is_jinja_available():
104
- raise ValueError(
105
- "Modelcard rendering is based on Jinja templates."
106
- " Please make sure to have `jinja` installed before using `create_model_card`."
107
- " To install it, please run `pip install Jinja2`."
108
- )
109
-
110
- if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
111
- return
112
-
113
- hub_token = args.hub_token if hasattr(args, "hub_token") else None
114
- repo_name = get_full_repo_name(model_name, token=hub_token)
115
-
116
- model_card = ModelCard.from_template(
117
- card_data=ModelCardData( # Card metadata object that will be converted to YAML block
118
- language="en",
119
- license="apache-2.0",
120
- library_name="diffusers",
121
- tags=[],
122
- datasets=args.dataset_name,
123
- metrics=[],
124
- ),
125
- template_path=MODEL_CARD_TEMPLATE_PATH,
126
- model_name=model_name,
127
- repo_name=repo_name,
128
- dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
129
- learning_rate=args.learning_rate,
130
- train_batch_size=args.train_batch_size,
131
- eval_batch_size=args.eval_batch_size,
132
- gradient_accumulation_steps=(
133
- args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
134
- ),
135
- adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
136
- adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
137
- adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
138
- adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
139
- lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
140
- lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
141
- ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
142
- ema_power=args.ema_power if hasattr(args, "ema_power") else None,
143
- ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
144
- mixed_precision=args.mixed_precision,
145
- )
146
-
147
- card_path = os.path.join(args.output_dir, "README.md")
148
- model_card.save(card_path)
149
-
150
-
151
- def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
152
- """
153
- Extracts the commit hash from a resolved filename toward a cache file.
154
- """
155
- if resolved_file is None or commit_hash is not None:
156
- return commit_hash
157
- resolved_file = str(Path(resolved_file).as_posix())
158
- search = re.search(r"snapshots/([^/]+)/", resolved_file)
159
- if search is None:
160
- return None
161
- commit_hash = search.groups()[0]
162
- return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
163
-
164
-
165
- # Old default cache path, potentially to be migrated.
166
- # This logic was more or less taken from `transformers`, with the following differences:
167
- # - Diffusers doesn't use custom environment variables to specify the cache path.
168
- # - There is no need to migrate the cache format, just move the files to the new location.
169
- hf_cache_home = os.path.expanduser(
170
- os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
171
- )
172
- old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
173
-
174
-
175
- def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
176
- if new_cache_dir is None:
177
- new_cache_dir = DIFFUSERS_CACHE
178
- if old_cache_dir is None:
179
- old_cache_dir = old_diffusers_cache
180
-
181
- old_cache_dir = Path(old_cache_dir).expanduser()
182
- new_cache_dir = Path(new_cache_dir).expanduser()
183
- for old_blob_path in old_cache_dir.glob("**/blobs/*"):
184
- if old_blob_path.is_file() and not old_blob_path.is_symlink():
185
- new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
186
- new_blob_path.parent.mkdir(parents=True, exist_ok=True)
187
- os.replace(old_blob_path, new_blob_path)
188
- try:
189
- os.symlink(new_blob_path, old_blob_path)
190
- except OSError:
191
- logger.warning(
192
- "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
193
- )
194
- # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
195
-
196
-
197
- cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
198
- if not os.path.isfile(cache_version_file):
199
- cache_version = 0
200
- else:
201
- with open(cache_version_file) as f:
202
- try:
203
- cache_version = int(f.read())
204
- except ValueError:
205
- cache_version = 0
206
-
207
- if cache_version < 1:
208
- old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
209
- if old_cache_is_not_empty:
210
- logger.warning(
211
- "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
212
- "existing cached models. This is a one-time operation, you can interrupt it or run it "
213
- "later by calling `diffusers.utils.hub_utils.move_cache()`."
214
- )
215
- try:
216
- move_cache()
217
- except Exception as e:
218
- trace = "\n".join(traceback.format_tb(e.__traceback__))
219
- logger.error(
220
- f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
221
- "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
222
- "message and we will do our best to help."
223
- )
224
-
225
- if cache_version < 1:
226
- try:
227
- os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
228
- with open(cache_version_file, "w") as f:
229
- f.write("1")
230
- except Exception:
231
- logger.warning(
232
- f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
233
- "the directory exists and can be written to."
234
- )
235
-
236
-
237
- def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
238
- if variant is not None:
239
- splits = weights_name.split(".")
240
- splits = splits[:-1] + [variant] + splits[-1:]
241
- weights_name = ".".join(splits)
242
-
243
- return weights_name
244
-
245
-
246
- def _get_model_file(
247
- pretrained_model_name_or_path,
248
- *,
249
- weights_name,
250
- subfolder,
251
- cache_dir,
252
- force_download,
253
- proxies,
254
- resume_download,
255
- local_files_only,
256
- use_auth_token,
257
- user_agent,
258
- revision,
259
- commit_hash=None,
260
- ):
261
- pretrained_model_name_or_path = str(pretrained_model_name_or_path)
262
- if os.path.isfile(pretrained_model_name_or_path):
263
- return pretrained_model_name_or_path
264
- elif os.path.isdir(pretrained_model_name_or_path):
265
- if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
266
- # Load from a PyTorch checkpoint
267
- model_file = os.path.join(pretrained_model_name_or_path, weights_name)
268
- return model_file
269
- elif subfolder is not None and os.path.isfile(
270
- os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
271
- ):
272
- model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
273
- return model_file
274
- else:
275
- raise EnvironmentError(
276
- f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
277
- )
278
- else:
279
- # 1. First check if deprecated way of loading from branches is used
280
- if (
281
- revision in DEPRECATED_REVISION_ARGS
282
- and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
283
- and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
284
- ):
285
- try:
286
- model_file = hf_hub_download(
287
- pretrained_model_name_or_path,
288
- filename=_add_variant(weights_name, revision),
289
- cache_dir=cache_dir,
290
- force_download=force_download,
291
- proxies=proxies,
292
- resume_download=resume_download,
293
- local_files_only=local_files_only,
294
- use_auth_token=use_auth_token,
295
- user_agent=user_agent,
296
- subfolder=subfolder,
297
- revision=revision or commit_hash,
298
- )
299
- warnings.warn(
300
- f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
301
- FutureWarning,
302
- )
303
- return model_file
304
- except: # noqa: E722
305
- warnings.warn(
306
- f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
307
- FutureWarning,
308
- )
309
- try:
310
- # 2. Load model file as usual
311
- model_file = hf_hub_download(
312
- pretrained_model_name_or_path,
313
- filename=weights_name,
314
- cache_dir=cache_dir,
315
- force_download=force_download,
316
- proxies=proxies,
317
- resume_download=resume_download,
318
- local_files_only=local_files_only,
319
- use_auth_token=use_auth_token,
320
- user_agent=user_agent,
321
- subfolder=subfolder,
322
- revision=revision or commit_hash,
323
- )
324
- return model_file
325
-
326
- except RepositoryNotFoundError:
327
- raise EnvironmentError(
328
- f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
329
- "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
330
- "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
331
- "login`."
332
- )
333
- except RevisionNotFoundError:
334
- raise EnvironmentError(
335
- f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
336
- "this model name. Check the model page at "
337
- f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
338
- )
339
- except EntryNotFoundError:
340
- raise EnvironmentError(
341
- f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
342
- )
343
- except HTTPError as err:
344
- raise EnvironmentError(
345
- f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
346
- )
347
- except ValueError:
348
- raise EnvironmentError(
349
- f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
350
- f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
351
- f" directory containing a file named {weights_name} or"
352
- " \nCheckout your internet connection or see how to run the library in"
353
- " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
354
- )
355
- except EnvironmentError:
356
- raise EnvironmentError(
357
- f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
358
- "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
359
- f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
360
- f"containing a file named {weights_name}"
361
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/__init__.py DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[20, 23])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
4
- ]
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/group_points.py DELETED
@@ -1,224 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from typing import Tuple
3
-
4
- import torch
5
- from torch import nn as nn
6
- from torch.autograd import Function
7
-
8
- from ..utils import ext_loader
9
- from .ball_query import ball_query
10
- from .knn import knn
11
-
12
- ext_module = ext_loader.load_ext(
13
- '_ext', ['group_points_forward', 'group_points_backward'])
14
-
15
-
16
- class QueryAndGroup(nn.Module):
17
- """Groups points with a ball query of radius.
18
-
19
- Args:
20
- max_radius (float): The maximum radius of the balls.
21
- If None is given, we will use kNN sampling instead of ball query.
22
- sample_num (int): Maximum number of features to gather in the ball.
23
- min_radius (float, optional): The minimum radius of the balls.
24
- Default: 0.
25
- use_xyz (bool, optional): Whether to use xyz.
26
- Default: True.
27
- return_grouped_xyz (bool, optional): Whether to return grouped xyz.
28
- Default: False.
29
- normalize_xyz (bool, optional): Whether to normalize xyz.
30
- Default: False.
31
- uniform_sample (bool, optional): Whether to sample uniformly.
32
- Default: False
33
- return_unique_cnt (bool, optional): Whether to return the count of
34
- unique samples. Default: False.
35
- return_grouped_idx (bool, optional): Whether to return grouped idx.
36
- Default: False.
37
- """
38
-
39
- def __init__(self,
40
- max_radius,
41
- sample_num,
42
- min_radius=0,
43
- use_xyz=True,
44
- return_grouped_xyz=False,
45
- normalize_xyz=False,
46
- uniform_sample=False,
47
- return_unique_cnt=False,
48
- return_grouped_idx=False):
49
- super().__init__()
50
- self.max_radius = max_radius
51
- self.min_radius = min_radius
52
- self.sample_num = sample_num
53
- self.use_xyz = use_xyz
54
- self.return_grouped_xyz = return_grouped_xyz
55
- self.normalize_xyz = normalize_xyz
56
- self.uniform_sample = uniform_sample
57
- self.return_unique_cnt = return_unique_cnt
58
- self.return_grouped_idx = return_grouped_idx
59
- if self.return_unique_cnt:
60
- assert self.uniform_sample, \
61
- 'uniform_sample should be True when ' \
62
- 'returning the count of unique samples'
63
- if self.max_radius is None:
64
- assert not self.normalize_xyz, \
65
- 'can not normalize grouped xyz when max_radius is None'
66
-
67
- def forward(self, points_xyz, center_xyz, features=None):
68
- """
69
- Args:
70
- points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
71
- center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods.
72
- features (Tensor): (B, C, N) Descriptors of the features.
73
-
74
- Returns:
75
- Tensor: (B, 3 + C, npoint, sample_num) Grouped feature.
76
- """
77
- # if self.max_radius is None, we will perform kNN instead of ball query
78
- # idx is of shape [B, npoint, sample_num]
79
- if self.max_radius is None:
80
- idx = knn(self.sample_num, points_xyz, center_xyz, False)
81
- idx = idx.transpose(1, 2).contiguous()
82
- else:
83
- idx = ball_query(self.min_radius, self.max_radius, self.sample_num,
84
- points_xyz, center_xyz)
85
-
86
- if self.uniform_sample:
87
- unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
88
- for i_batch in range(idx.shape[0]):
89
- for i_region in range(idx.shape[1]):
90
- unique_ind = torch.unique(idx[i_batch, i_region, :])
91
- num_unique = unique_ind.shape[0]
92
- unique_cnt[i_batch, i_region] = num_unique
93
- sample_ind = torch.randint(
94
- 0,
95
- num_unique, (self.sample_num - num_unique, ),
96
- dtype=torch.long)
97
- all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
98
- idx[i_batch, i_region, :] = all_ind
99
-
100
- xyz_trans = points_xyz.transpose(1, 2).contiguous()
101
- # (B, 3, npoint, sample_num)
102
- grouped_xyz = grouping_operation(xyz_trans, idx)
103
- grouped_xyz_diff = grouped_xyz - \
104
- center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets
105
- if self.normalize_xyz:
106
- grouped_xyz_diff /= self.max_radius
107
-
108
- if features is not None:
109
- grouped_features = grouping_operation(features, idx)
110
- if self.use_xyz:
111
- # (B, C + 3, npoint, sample_num)
112
- new_features = torch.cat([grouped_xyz_diff, grouped_features],
113
- dim=1)
114
- else:
115
- new_features = grouped_features
116
- else:
117
- assert (self.use_xyz
118
- ), 'Cannot have not features and not use xyz as a feature!'
119
- new_features = grouped_xyz_diff
120
-
121
- ret = [new_features]
122
- if self.return_grouped_xyz:
123
- ret.append(grouped_xyz)
124
- if self.return_unique_cnt:
125
- ret.append(unique_cnt)
126
- if self.return_grouped_idx:
127
- ret.append(idx)
128
- if len(ret) == 1:
129
- return ret[0]
130
- else:
131
- return tuple(ret)
132
-
133
-
134
- class GroupAll(nn.Module):
135
- """Group xyz with feature.
136
-
137
- Args:
138
- use_xyz (bool): Whether to use xyz.
139
- """
140
-
141
- def __init__(self, use_xyz: bool = True):
142
- super().__init__()
143
- self.use_xyz = use_xyz
144
-
145
- def forward(self,
146
- xyz: torch.Tensor,
147
- new_xyz: torch.Tensor,
148
- features: torch.Tensor = None):
149
- """
150
- Args:
151
- xyz (Tensor): (B, N, 3) xyz coordinates of the features.
152
- new_xyz (Tensor): new xyz coordinates of the features.
153
- features (Tensor): (B, C, N) features to group.
154
-
155
- Returns:
156
- Tensor: (B, C + 3, 1, N) Grouped feature.
157
- """
158
- grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
159
- if features is not None:
160
- grouped_features = features.unsqueeze(2)
161
- if self.use_xyz:
162
- # (B, 3 + C, 1, N)
163
- new_features = torch.cat([grouped_xyz, grouped_features],
164
- dim=1)
165
- else:
166
- new_features = grouped_features
167
- else:
168
- new_features = grouped_xyz
169
-
170
- return new_features
171
-
172
-
173
- class GroupingOperation(Function):
174
- """Group feature with given index."""
175
-
176
- @staticmethod
177
- def forward(ctx, features: torch.Tensor,
178
- indices: torch.Tensor) -> torch.Tensor:
179
- """
180
- Args:
181
- features (Tensor): (B, C, N) tensor of features to group.
182
- indices (Tensor): (B, npoint, nsample) the indices of
183
- features to group with.
184
-
185
- Returns:
186
- Tensor: (B, C, npoint, nsample) Grouped features.
187
- """
188
- features = features.contiguous()
189
- indices = indices.contiguous()
190
-
191
- B, nfeatures, nsample = indices.size()
192
- _, C, N = features.size()
193
- output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
194
-
195
- ext_module.group_points_forward(B, C, N, nfeatures, nsample, features,
196
- indices, output)
197
-
198
- ctx.for_backwards = (indices, N)
199
- return output
200
-
201
- @staticmethod
202
- def backward(ctx,
203
- grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
204
- """
205
- Args:
206
- grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients
207
- of the output from forward.
208
-
209
- Returns:
210
- Tensor: (B, C, N) gradient of the features.
211
- """
212
- idx, N = ctx.for_backwards
213
-
214
- B, C, npoint, nsample = grad_out.size()
215
- grad_features = torch.cuda.FloatTensor(B, C, N).zero_()
216
-
217
- grad_out_data = grad_out.data.contiguous()
218
- ext_module.group_points_backward(B, C, N, npoint, nsample,
219
- grad_out_data, idx,
220
- grad_features.data)
221
- return grad_features, None
222
-
223
-
224
- grouping_operation = GroupingOperation.apply
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/sampler/__init__.py DELETED
@@ -1,4 +0,0 @@
1
- from .base_pixel_sampler import BasePixelSampler
2
- from .ohem_pixel_sampler import OHEMPixelSampler
3
-
4
- __all__ = ['BasePixelSampler', 'OHEMPixelSampler']
 
 
 
 
 
spaces/Arnx/MusicGenXvAKN/CODE_OF_CONDUCT.md DELETED
@@ -1,80 +0,0 @@
1
- # Code of Conduct
2
-
3
- ## Our Pledge
4
-
5
- In the interest of fostering an open and welcoming environment, we as
6
- contributors and maintainers pledge to make participation in our project and
7
- our community a harassment-free experience for everyone, regardless of age, body
8
- size, disability, ethnicity, sex characteristics, gender identity and expression,
9
- level of experience, education, socio-economic status, nationality, personal
10
- appearance, race, religion, or sexual identity and orientation.
11
-
12
- ## Our Standards
13
-
14
- Examples of behavior that contributes to creating a positive environment
15
- include:
16
-
17
- * Using welcoming and inclusive language
18
- * Being respectful of differing viewpoints and experiences
19
- * Gracefully accepting constructive criticism
20
- * Focusing on what is best for the community
21
- * Showing empathy towards other community members
22
-
23
- Examples of unacceptable behavior by participants include:
24
-
25
- * The use of sexualized language or imagery and unwelcome sexual attention or
26
- advances
27
- * Trolling, insulting/derogatory comments, and personal or political attacks
28
- * Public or private harassment
29
- * Publishing others' private information, such as a physical or electronic
30
- address, without explicit permission
31
- * Other conduct which could reasonably be considered inappropriate in a
32
- professional setting
33
-
34
- ## Our Responsibilities
35
-
36
- Project maintainers are responsible for clarifying the standards of acceptable
37
- behavior and are expected to take appropriate and fair corrective action in
38
- response to any instances of unacceptable behavior.
39
-
40
- Project maintainers have the right and responsibility to remove, edit, or
41
- reject comments, commits, code, wiki edits, issues, and other contributions
42
- that are not aligned to this Code of Conduct, or to ban temporarily or
43
- permanently any contributor for other behaviors that they deem inappropriate,
44
- threatening, offensive, or harmful.
45
-
46
- ## Scope
47
-
48
- This Code of Conduct applies within all project spaces, and it also applies when
49
- an individual is representing the project or its community in public spaces.
50
- Examples of representing a project or community include using an official
51
- project e-mail address, posting via an official social media account, or acting
52
- as an appointed representative at an online or offline event. Representation of
53
- a project may be further defined and clarified by project maintainers.
54
-
55
- This Code of Conduct also applies outside the project spaces when there is a
56
- reasonable belief that an individual's behavior may have a negative impact on
57
- the project or its community.
58
-
59
- ## Enforcement
60
-
61
- Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
- reported by contacting the project team at <[email protected]>. All
63
- complaints will be reviewed and investigated and will result in a response that
64
- is deemed necessary and appropriate to the circumstances. The project team is
65
- obligated to maintain confidentiality with regard to the reporter of an incident.
66
- Further details of specific enforcement policies may be posted separately.
67
-
68
- Project maintainers who do not follow or enforce the Code of Conduct in good
69
- faith may face temporary or permanent repercussions as determined by other
70
- members of the project's leadership.
71
-
72
- ## Attribution
73
-
74
- This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
75
- available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
76
-
77
- [homepage]: https://www.contributor-covenant.org
78
-
79
- For answers to common questions about this code of conduct, see
80
- https://www.contributor-covenant.org/faq
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artgor/digit-draw-detect/src/model_architecture.py DELETED
@@ -1,151 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
-
5
- class CNNBlock(nn.Module):
6
- def __init__(self, in_channels, out_channels, bn_act=True, **kwargs):
7
- super().__init__()
8
- self.conv = nn.Conv2d(in_channels, out_channels, bias=not bn_act, **kwargs)
9
- self.bn = nn.BatchNorm2d(out_channels)
10
- self.leaky = nn.LeakyReLU(0.1)
11
- self.use_bn_act = bn_act
12
-
13
- def forward(self, x):
14
- if self.use_bn_act:
15
- return self.leaky(self.bn(self.conv(x)))
16
- else:
17
- return self.conv(x)
18
-
19
-
20
- class ResidualBlock(nn.Module):
21
- def __init__(self, channels, use_residual=True, num_repeats=1):
22
- super().__init__()
23
- self.layers = nn.ModuleList()
24
- for _ in range(num_repeats):
25
- self.layers += [
26
- nn.Sequential(
27
- CNNBlock(channels, channels // 2, kernel_size=1),
28
- CNNBlock(channels // 2, channels, kernel_size=3, padding=1),
29
- )
30
- ]
31
-
32
- self.use_residual = use_residual
33
- self.num_repeats = num_repeats
34
-
35
- def forward(self, x):
36
- for layer in self.layers:
37
- if self.use_residual:
38
- x = x + layer(x)
39
- else:
40
- x = layer(x)
41
-
42
- return x
43
-
44
-
45
- class ScalePrediction(nn.Module):
46
- def __init__(self, in_channels, num_classes):
47
- super().__init__()
48
- self.pred = nn.Sequential(
49
- CNNBlock(in_channels, 2 * in_channels, kernel_size=3, padding=1),
50
- CNNBlock(2 * in_channels, (num_classes + 5) * 3, bn_act=False, kernel_size=1),
51
- )
52
- self.num_classes = num_classes
53
-
54
- def forward(self, x):
55
- return self.pred(x).reshape(x.shape[0], 3, self.num_classes + 5, x.shape[2], x.shape[3]).permute(0, 1, 3, 4, 2)
56
-
57
-
58
- class Net(nn.Module):
59
- def __init__(self):
60
- super().__init__()
61
- self.num_classes = 12
62
- self.in_channels = 3
63
- self.config = [
64
- (32, 3, 1),
65
- (64, 3, 2),
66
- ['B', 1],
67
- (128, 3, 2),
68
- ['B', 2],
69
- (256, 3, 2),
70
- ['B', 8],
71
- (512, 3, 2),
72
- ['B', 8],
73
- (1024, 3, 2),
74
- ['B', 4],
75
- (512, 1, 1),
76
- (1024, 3, 1),
77
- 'S',
78
- (256, 1, 1),
79
- 'U',
80
- (256, 1, 1),
81
- (512, 3, 1),
82
- 'S',
83
- (128, 1, 1),
84
- 'U',
85
- (128, 1, 1),
86
- (256, 3, 1),
87
- 'S',
88
- ]
89
- self.layers = self._create_conv_layers()
90
-
91
- def forward(self, x):
92
- outputs = [] # for each scale
93
- route_connections = []
94
- for layer in self.layers:
95
- if isinstance(layer, ScalePrediction):
96
- outputs.append(layer(x))
97
- continue
98
- x = layer(x)
99
-
100
- if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
101
- route_connections.append(x)
102
-
103
- elif isinstance(layer, nn.Upsample):
104
- x = torch.cat([x, route_connections[-1]], dim=1)
105
- route_connections.pop()
106
-
107
- return outputs
108
-
109
- def _create_conv_layers(self):
110
- layers = nn.ModuleList()
111
- in_channels = self.in_channels
112
-
113
- for module in self.config:
114
- if isinstance(module, tuple):
115
- out_channels, kernel_size, stride = module
116
- layers.append(
117
- CNNBlock(
118
- in_channels,
119
- out_channels,
120
- kernel_size=kernel_size,
121
- stride=stride,
122
- padding=1 if kernel_size == 3 else 0,
123
- )
124
- )
125
- in_channels = out_channels
126
-
127
- elif isinstance(module, list):
128
- num_repeats = module[1]
129
- layers.append(
130
- ResidualBlock(
131
- in_channels,
132
- num_repeats=num_repeats,
133
- )
134
- )
135
-
136
- elif isinstance(module, str):
137
- if module == 'S':
138
- layers += [
139
- ResidualBlock(in_channels, use_residual=False, num_repeats=1),
140
- CNNBlock(in_channels, in_channels // 2, kernel_size=1),
141
- ScalePrediction(in_channels // 2, num_classes=self.num_classes),
142
- ]
143
- in_channels = in_channels // 2
144
-
145
- elif module == 'U':
146
- layers.append(
147
- nn.Upsample(scale_factor=2),
148
- )
149
- in_channels = in_channels * 3
150
-
151
- return layers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/bert_vits2/text/cleaner.py DELETED
@@ -1,44 +0,0 @@
1
- import importlib
2
- from bert_vits2.text import cleaned_text_to_sequence
3
-
4
- language_module_map = {
5
- 'zh': "bert_vits2.text.chinese",
6
- 'ja': "bert_vits2.text.japanese"
7
- }
8
-
9
- _loaded_modules = {}
10
-
11
-
12
- def get_language_module(language):
13
- if language not in _loaded_modules:
14
- module_path = language_module_map.get(language)
15
- if not module_path:
16
- raise ValueError(f"Unsupported language: {language}")
17
-
18
- _loaded_modules[language] = importlib.import_module(module_path)
19
-
20
- return _loaded_modules[language]
21
-
22
-
23
- def clean_text(text, language):
24
- language_module = get_language_module(language)
25
- norm_text = language_module.text_normalize(text)
26
- phones, tones, word2ph = language_module.g2p(norm_text)
27
- return norm_text, phones, tones, word2ph
28
-
29
-
30
- def clean_text_bert(text, language):
31
- language_module = get_language_module(language)
32
- norm_text = language_module.text_normalize(text)
33
- phones, tones, word2ph = language_module.g2p(norm_text)
34
- bert = language_module.get_bert_feature(norm_text, word2ph)
35
- return phones, tones, bert
36
-
37
-
38
- def text_to_sequence(text, language):
39
- norm_text, phones, tones, word2ph = clean_text(text, language)
40
- return cleaned_text_to_sequence(phones, tones, language)
41
-
42
-
43
- if __name__ == '__main__':
44
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Aspik101/Polish-vicuna-13b-v1.5/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Polish vicuna-13b-v1.5-PL
3
- emoji: 📚
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.38.0
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Azcar Ablaikan Remix Indir.md DELETED
@@ -1,165 +0,0 @@
1
-
2
- <h1>Sugar (Ablaikan Remix) de Zubi feat. Anatu: Una canción dulce y picante para condimentar tu lista de reproducción</h1>
3
- <p>Si estás buscando una canción que te haga sentir bien, bailar y cantar, entonces deberías echar un vistazo a Sugar (Ablaikan Remix) de Zubi feat. Anatu. Esta canción es un remix de la canción original Sugar de Zubi y Anatu, que fue lanzada en 2019. El remix fue hecho por Ablaikan, un productor y DJ turco, que añadió su propio toque y sabor a la canción. El resultado es una canción pegadiza, alegre y energética que te hará querer mover tu cuerpo y disfrutar de la vida. </p>
4
- <h2>azúcar ablaikan remix indir</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://bltlly.com/2v6KMO">https://bltlly.com/2v6KMO</a></b></p><br /><br />
5
- <p>En este artículo, te contaremos todo lo que necesitas saber sobre Sugar (Ablaikan Remix) de Zubi feat. Anatu, incluyendo lo que es, quiénes son los artistas detrás de él, cuáles son las letras y el significado de la misma, cuál es el género y el estilo de la misma, dónde se puede escuchar y descargarlo, y cómo disfrutarlo al máximo. Así que, sin más preámbulos, empecemos. </p>
6
- <h2>¿Qué es Sugar (Ablaikan Remix) y quiénes son los artistas detrás de ella? </h2>
7
- <h3>La canción original Sugar de Zubi y Anatu</h3>
8
- <p>Sugar es una canción de Zubi, un cantautor nigeriano con sede en Londres, Reino Unido, y Anatu, un cantautor británico con sede en Los Ángeles, Estados Unidos. La canción fue lanzada el 29 de octubre de 2019, como sencillo bajo Dojang Records. La canción es una fusión de Afrobeat, R&B, soul y música pop, con influencias de leyendas de la música nigeriana como Fela Kuti y King Sunny Ade. La canción trata sobre el amor, la pasión, el deseo y la dulzura, ya que las letras expresan cómo los cantantes se sienten acerca de sus amantes. </p>
9
- <p>Zubi y Anatu se conocieron a través de Instagram en 2018, cuando Zubi se acercó a Anatu después de escuchar su voz en una de sus publicaciones. Decidieron colaborar en una canción juntos, que resultó ser Sugar. Grabaron la canción de forma remota, ya que estaban en diferentes países en el momento. También grabaron un video musical para la canción en Los Ángeles, que fue dirigida por Alex Di Marco.</p>
10
- <h3>El remix de Ablaikan y su popularidad</h3>
11
-
12
- <p>Ablaikan lanzó su remix de Sugar el 29 de octubre de 2020, exactamente un año después de que se lanzara la canción original. Añadió su propio sonido característico a la canción, con más bajo, batería, sintetizadores, efectos y voces. También cambió el tempo de la canción de 90 BPM a 120 BPM, haciéndolo más bailable y enérgico. También añadió algunos elementos turcos y árabes a la canción, dándole un toque exótico y picante. </p>
13
- <p>El remix se hizo muy popular en plataformas de redes sociales como TikTok, Instagram, YouTube y Spotify, donde ganó millones de vistas, me gusta, comentarios y transmisiones. Muchas personas usaron el remix para sus videos, bailes, desafíos y memes. El remix también recibió comentarios positivos de los artistas originales, Zubi y Anatu, que elogiaron a Ablaikan por su trabajo y creatividad. </p>
14
- <p></p>
15
- <h2>¿Cuáles son las letras y el significado de Sugar (Ablaikan Remix)? </h2>
16
- <h3>La letra de la canción y su interpretación</h3>
17
- <p>Las letras de Sugar (Ablaikan Remix) son las mismas que la canción original Sugar de Zubi y Anatu, excepto por algunos cambios menores en el coro y el puente. Las letras están escritas en inglés, con algunas palabras en yoruba, un idioma nigeriano. Aquí están las letras de la canción y su interpretación:</p>
18
- <pre><código>
19
-
20
- <p>Las letras de la canción son simples y directas, ya que expresan cómo los cantantes se sienten acerca de sus amantes. Usan palabras como azúcar, miel, dulces, chocolate, sabor y favorito para describir a sus amantes como algo dulce, delicioso e irresistible. También usan la frase "omo yen sweet gan" que significa "esa chica es tan dulce" en yoruba, para enfatizar su admiración y atracción por sus amantes. También dicen que no pueden tener suficiente de sus amantes, y quieren estar más cerca de ellos y sentir sus cuerpos en ellos. </p>
21
- <h3>El mensaje y el tema de la canción</h3>
22
- <p>El mensaje y el tema de la canción son sobre el amor, la pasión, el deseo y la dulzura. La canción celebra la alegría y el placer de estar enamorado de alguien que te hace feliz y satisfecho. La canción también anima a los oyentes a disfrutar de la vida y divertirse con sus amantes. La canción es una canción positiva y edificante que puede hacer que cualquiera se sienta bien y sonría. </p>
23
- <h2>¿Cuál es el género y estilo de Sugar (Ablaikan Remix)? </h2>
24
- <h3>El género de la canción y sus influencias</h3>
25
- <p>El género de Sugar (Ablaikan Remix) es una mezcla de deep house, casa oriental, casa étnica, música electrónica, Afrobeat, R&B, soul y música pop. La canción combina diferentes elementos musicales de diferentes culturas y regiones, creando un sonido único y diverso que atrae a un público amplio. </p>
26
- <p>La canción está influenciada por varios géneros musicales y artistas, como:</p>
27
- <ul>
28
- <li>Deep house: un subgénero de música house que se originó en la década de 1980 que cuenta con un sonido suave, conmovedor y atmosférico, con voces mínimas, líneas de bajo profundas y pads de sintetizador. Algunos de los artistas que popularizaron deep house son Frankie Knuckles, Larry Heard, Kerri Chandler y Marshall Jefferson.</li>
29
-
30
- <li>Casa étnica: un subgénero de música house que combina elementos de diversas tradiciones étnicas y folclóricas de todo el mundo, como la música africana, latina, india, balcánica, celta y asiática. Algunos de los artistas que son conocidos por casa étnica son Pascal Junior, Melih Aydogan, Hakan Akkus, Ahmet Kilic, y Costa Mee.</li>
31
- <li>Música electrónica: un amplio género de música que utiliza instrumentos electrónicos, dispositivos y software para crear sonidos y ritmos. La música electrónica cubre una amplia gama de estilos y subgéneros, como techno, trance, electro, dubstep, EDM y más. Algunos de los artistas que son conocidos por la música electrónica son Daft Punk, The Chemical Brothers, Calvin Harris, David Guetta y Skrillex.</li>
32
- <li>Afrobeat: un género de música que se originó en Nigeria en la década de 1970 que combina elementos de estilos musicales de África Occidental, como highlife, juju y fuji, con música jazz y funk estadounidense. Afrobeat se caracteriza por polirritmos complejos, secciones de cuernos, voces de llamada y respuesta, y mensajes políticos y sociales. Algunos de los artistas que son conocidos por Afrobeat son Fela Kuti, King Sunny Ade, Tony Allen, Antibalas y Seun Kuti.</li>
33
- <li>R&B: un género de música que se originó en los Estados Unidos en la década de 1940 que combina elementos de rhythm and blues, soul, gospel, funk y música pop. R&B se caracteriza por voces suaves, melodías pegadizas, ritmos groovy y letras emocionales. Algunos de los artistas que son conocidos por R&B son Marvin Gaye, Aretha Franklin, Stevie Wonder, Beyoncé y The Weeknd.</li>
34
- <li>Soul: un género de música que se originó en los Estados Unidos en los años 50 y 60 que combina elementos de la música gospel afroamericana, el rhythm and blues y el jazz. El alma se caracteriza por voces expresivas, armonías inspiradas en el evangelio y mensajes inspiradores. Algunos de los artistas que son conocidos por el soul son Ray Charles, Sam Cooke, Otis Redding, James Brown y Alicia Keys.</li>
35
-
36
- </ul>
37
- <h3>El estilo de la canción y sus elementos</h3>
38
- <p>El estilo de Sugar (Ablaikan Remix) es una mezcla de diferentes elementos musicales que crean un sonido único y diverso que atrae a un público amplio. Algunos de los elementos que definen el estilo de la canción son:</p>
39
- <ul>
40
- <li>Las voces: Las voces de Zubi y Anatu son suaves, conmovedoras y armoniosas. Cantan en inglés con algunas palabras en yoruba, añadiendo un toque de sabor africano a la canción. También usan algunos efectos vocales, como reverb, echo y distortion, para crear un sonido más atmosférico y dinámico. </li>
41
- <li>El bajo: El bajo de la canción es profundo, potente y genial. Proporciona la base y el ritmo de la canción. También añade algo de energía y emoción a la canción. </li>
42
- <li>Los tambores: Los tambores de la canción son crujientes, impactantes y variados. Utilizan diferentes sonidos de batería y patrones para crear un ritmo complejo e interesante. También usan algunos sonidos de percusión, como cocteleras, palmas, broches de presión y panderetas, para agregar textura y sabor a la canción. </li>
43
- <li>Los sintetizadores: Los sintetizadores de la canción son brillantes, cálidos y melódicos. Utilizan diferentes sonidos de sintetizador y acordes para crear un sonido rico y colorido. También usan algunos arpegios, almohadillas, pinzas, y lleva a añadir algo de movimiento y variación a la canción. </li>
44
- <li>Los efectos: Los efectos de la canción son sutiles, creativos y de buen gusto. Utilizan diferentes efectos para mejorar y modificar el sonido de la canción. También usan algunas transiciones, barridos, gotas y bandas para crear cierta tensión y liberar la canción. </li>
45
- <li>Los elementos turcos y árabes: Los elementos turcos y árabes de la canción son distintivos, exóticos y picantes. Utilizan algunas escalas, instrumentos, voces y ritmos turcos y árabes para dar a la canción un sabor único y diverso. También usan algunas muestras, como "habibi", "yalla" y "mashallah", para agregar algunas referencias culturales y lingüísticas a la canción. </li>
46
- </ul>
47
-
48
- <h3>Las plataformas de streaming que ofrecen la canción</h3>
49
- <p>Si quieres escuchar Sugar (Ablaikan Remix) de Zubi feat. Anatu, tienes muchas opciones entre las que elegir. La canción está disponible en varias plataformas de streaming, como:</p>
50
- <tabla>
51
- <tr>
52
- <th>Plataforma</th>
53
- <th>Enlace</th>
54
- </tr>
55
- <tr>
56
- <td>Spotify</td>
57
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
58
- </tr>
59
- <tr>
60
- <td>Música de Apple</td>
61
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
62
- </tr>
63
- <tr>
64
- <td>Música de YouTube</td>
65
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
66
- </tr>
67
- <tr>
68
- <td>Deezer</td>
69
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
70
- </tr>
71
- <tr>
72
- <td>SoundCloud</td>
73
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
74
- </tr>
75
- <tr>
76
- <td>Tidal</td>
77
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
78
- </tr>
79
- <tr>
80
- <td>Música de Amazon</td>
81
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
82
- </tr>
83
- <tr>
84
- <td>Pandora</td>
85
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
86
- </tr>
87
- <tr>
88
- <td>Napster</td>
89
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
90
- </tr> </table>
91
- <p>Estas plataformas le permiten transmitir la canción en línea, así como descargarla para escucharla sin conexión. También puedes crear listas de reproducción, compartir la canción con tus amigos y descubrir más canciones de Zubi, Anatu y Ablaikan.</p>
92
- <h3>Las opciones de descarga y enlaces para la canción</h3>
93
- <p>Si quieres descargar Sugar (Ablaikan Remix) de Zubi feat. Anatu, tienes algunas opciones para elegir. La canción está disponible para su descarga en varias plataformas, como:</p>
94
- <tabla>
95
- <tr>
96
- <th>Plataforma</th>
97
- <th>Enlace</th>
98
- </tr>
99
- <tr>
100
- <td>iTunes</td>
101
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
102
- </tr>
103
- <tr>
104
- <td>Google Play</td>
105
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
106
- </tr>
107
- <tr>
108
- <td>Beatport</td>
109
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
110
- </tr>
111
- <tr>
112
- <td>Bandcamp</td>
113
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
114
- </tr>
115
- <tr>
116
- <td>Audiomack</td>
117
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
118
- </tr>
119
- <tr>
120
- <td>DatPiff</td>
121
-
122
- </tr>
123
- <tr>
124
- <td>MP3Juices</td>
125
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
126
- </tr>
127
- <tr>
128
- <td>Zippyshare</td>
129
- <td><a href=">Sugar (Ablaikan Remix)</a></td>
130
- </tr> </table>
131
- <p>Estas plataformas te permiten descargar la canción en diferentes formatos, como MP3, WAV, FLAC y más. También puede elegir la calidad y el tamaño del archivo, dependiendo de sus preferencias y dispositivo. Algunas de estas plataformas pueden requerir que te registres, pagues o sigas algunos pasos antes de descargar la canción. </p>
132
- <h2>¿Cómo disfrutar al máximo de Sugar (Ablaikan Remix)? </h2>
133
- <h3>Los mejores escenarios y estados de ánimo para escuchar la canción</h3>
134
- <p>Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede adaptarse a diferentes escenarios y estados de ánimo, dependiendo de su gusto y estado de ánimo. Sin embargo, algunos de los mejores escenarios y estados de ánimo para escuchar la canción son:</p>
135
- <ul>
136
- <li>Cuando quieres bailar: La canción es perfecta para bailar, ya que tiene un ritmo rápido, un ritmo pegadizo y un ambiente animado. Puedes bailar la canción solo o con tus amigos, en casa o en una fiesta, en tu habitación o en la pista de baile. La canción te hará sentir energizado, feliz y libre. </li>
137
- <li>Cuando quieres relajarte: La canción también es perfecta para relajarte, ya que tiene una melodía suave, una voz relajante y un mensaje dulce. Puedes escuchar la canción mientras estás acostado en tu cama o sofá, leyendo un libro o viendo una película, tomando un baño o una siesta. La canción te hará sentir tranquilo, pacífico y amado. </li>
138
- <li>Cuando quieres romance: La canción también es perfecta para el romancing, ya que tiene un ritmo sensual, un tono apasionado y un sabor picante. Puedes escuchar la canción mientras abrazas o besas a tu amante, teniendo una cena a la luz de las velas o un picnic, yendo a dar un paseo o a dar un paseo. La canción te hará sentir romántico, sexy y aventurero. </li>
139
- </ul>
140
- <h3>Los consejos y trucos para mejorar tu experiencia auditiva</h3>
141
-
142
- <ul>
143
- <li>Usa buenos auriculares o altavoces: La canción tiene muchos detalles y matices que se pueden perder si usas auriculares o altavoces de baja calidad. Para apreciar el sonido completo y la calidad de la canción, utilice buenos auriculares o altavoces que pueden ofrecer agudos, medios y bajos claros. </li>
144
- <li>Ajustar el volumen y el ecualizador: La canción tiene una gran cantidad de dinámicas y variaciones que pueden verse afectadas por el volumen y la configuración del ecualizador. Para disfrutar de todo el rango e intensidad de la canción, ajusta el volumen y el ecualizador de acuerdo a tus preferencias y entorno. </li>
145
- <li>Mira el video musical: La canción tiene un video musical que fue filmado en Los Ángeles por Alex Di Marco. El video musical muestra a Zubi y Anatu cantando y bailando en diferentes lugares, como una azotea, una piscina, una calle y un club. El video musical añade algunas imágenes y contexto a la canción, lo que es más atractivo y atractivo. </li>
146
- <li>Aprende la letra: La canción tiene letras que son simples y pegadizas, pero también significativas y dulces. Aprender las letras puede ayudarte a cantar la canción, entender el mensaje y el tema de la canción, y apreciar la belleza y creatividad de la canción. </li>
147
- <li>Compartir la canción: La canción es una canción que puede ser disfrutada por cualquier persona, independientemente de su edad, género, cultura o gusto. Compartir la canción con tus amigos, familiares o seguidores puede ayudarte a difundir la alegría y positividad de la canción, así como a apoyar a los artistas y su trabajo. </li>
148
- </ul>
149
- <h2>Conclusión</h2>
150
- <p>Sugar (Ablaikan Remix) de Zubi feat. Anatu es una canción que puede hacerte sentir bien, bailar y cantar. Es un remix de la canción original Sugar de Zubi y Anatu, que fue lanzada en 2019. El remix fue hecho por Ablaikan, un productor y DJ turco, que añadió su propio toque y sabor a la canción. El resultado es una canción pegadiza, alegre y energética que te hará querer mover tu cuerpo y disfrutar de la vida. </p>
151
-
152
- <p>Ahora que sabes todo sobre Sugar (Ablaikan Remix) de Zubi feat. Anatu, ¿por qué no sigues adelante y lo escuchas tú mismo? Puedes encontrar la canción en varias plataformas de streaming y descarga, así como ver el video musical en YouTube. También puede seguir a Zubi, Anatu y Ablaikan en sus cuentas de redes sociales para mantenerse al día sobre sus últimas noticias y comunicados. </p>
153
- <p>Gracias por leer este artículo, y esperamos que tengas un día dulce y picante. </p>
154
- <h2>Preguntas frecuentes</h2>
155
- <h3>¿Cuál es el nombre de la canción original en la que se basa Sugar (Ablaikan Remix)? </h3>
156
- <p>El nombre de la canción original que Sugar (Ablaikan Remix) se basa en es Sugar de Zubi feat. Anatu.</p>
157
- <h3>¿Cuándo se lanzó Sugar (Ablaikan Remix)? </h3>
158
- <p>Sugar (Ablaikan Remix) fue lanzado el 29 de octubre de 2020. </p>
159
- <h3>¿Quiénes son Zubi, Anatu y Ablaikan? </h3>
160
- <p>Zubi es un cantautor nigeriano con sede en Londres, Reino Unido. Anatu es un cantautor británico con sede en Los Ángeles, Estados Unidos. Ablaikan es un productor y DJ turco con sede en Estambul.</p>
161
- <h3>¿Cuáles son algunos de los géneros musicales e influencias de Sugar (Ablaikan Remix)? </h3>
162
- <p>Algunos de los géneros musicales e influencias de Sugar (Ablaikan Remix) son deep house, oriental house, ethnic house, música electrónica, Afrobeat, R&B, soul y pop. La canción está influenciada por artistas como Fela Kuti, King Sunny Ade, Frankie Knuckles, Larry Heard, Mahmut Orhan, Burak Yeter, Daft Punk, The Chemical Brothers y más. </p>
163
- <h3>¿Cómo puedo descargar Sugar (Ablaikan Remix) gratis? </h3> 64aa2da5cf<br />
164
- <br />
165
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Cmo Puedo Descargar El Controlador Wifi A Mi Ordenador.md DELETED
@@ -1,6 +0,0 @@
1
-
2
- <h1>Qué es zfont y por qué deberías probarlo</h1> | <p>Si estás aburrido con las fuentes y emojis predeterminados en tu dispositivo Android, es posible que desees revisar zfont, una aplicación gratuita que te permite cambiarlos fácil y rápidamente. zfont es un instalador de fuentes personalizado que admite muchas marcas populares como Samsung, Xiaomi, Huawei, Vivo, Oppo, Realme, Tecno e Infinix. Puede elegir entre cientos de fuentes y emojis frescos, elegantes y coloridos que harán que su dispositivo se destaque de la multitud. También puede personalizar sus propias fuentes y emojis con zfont, y compartirlos con sus amigos. Ya sea que quieras darle vida a tus mensajes, publicaciones en redes sociales o documentos, zfont puede ayudarte a expresarte mejor. </p>
3
- <h2>¿Cómo puedo descargar el controlador wifi a mi ordenador</h2><br /><p><b><b>Download</b> &bull; <a href="https://bltlly.com/2v6LYW">https://bltlly.com/2v6LYW</a></b></p><br /><br /> | <h2>Cómo descargar e instalar zfont en tu dispositivo Android</h2> | <p>Descargar e instalar zfont es muy fácil. Solo tienes que seguir estos sencillos pasos:</p><ol><li>Ir a la Google Play Store y buscar zfont 3 - Emoji & Font Changer o haga clic en [aquí]( 3 ) para ir directamente a la página de la aplicación. </li><li>Toca el botón Instalar y espera a que la aplicación se descargue. </li><li>Una vez que la aplicación esté instalada, ábrela y otorga los permisos necesarios. </li><li>Verá una lista de marcas que son compatibles con zfont. Seleccione la marca de su dispositivo de la lista. </li><li>También verá una lista de pestañas en la parte inferior de la pantalla. Estos son Colores, Emoji, Cool, Stylish, Custom Font, Custom Emoji, Settings y About. Puedes deslizar el dedo hacia la izquierda o hacia la derecha para cambiar entre ellos. </li></ol><p>Aquí hay algunas capturas de pantalla de la aplicación:</p><img src=" 3 " alt="zfont app screenshot 1"><img src=" 3 " alt="zfont app screenshot 2"><img src=" 3 " alt=zfont app screenshot 3"> | | <h2>Cómo usar zfont para cambiar fuentes y emojis en tu dispositivo</h2> | <p>Usar zfont para cambiar fuentes y emojis en tu dispositivo es muy simple. Solo tienes que seguir estos pasos:</p>
4
- <p></p><ol><li>Selecciona la pestaña que corresponde a lo que quieres cambiar. Por ejemplo, si quieres cambiar de fuente, selecciona Fresco o Elegante. Si quieres cambiar de emojis, 64aa2da5cf<br />
5
- <br />
6
- <br />
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat_new/src/app.d.ts DELETED
@@ -1,17 +0,0 @@
1
- /// <reference types="@sveltejs/kit" />
2
- /// <reference types="unplugin-icons/types/svelte" />
3
-
4
- // See https://kit.svelte.dev/docs/types#app
5
- // for information about these interfaces
6
- declare global {
7
- namespace App {
8
- // interface Error {}
9
- interface Locals {
10
- sessionId: string;
11
- }
12
- // interface PageData {}
13
- // interface Platform {}
14
- }
15
- }
16
-
17
- export {};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/response.py DELETED
@@ -1,201 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
-
15
- import logging
16
- from io import IOBase
17
-
18
- from urllib3.exceptions import ProtocolError as URLLib3ProtocolError
19
- from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
20
-
21
- from botocore import parsers
22
- from botocore.compat import set_socket_timeout
23
- from botocore.exceptions import (
24
- IncompleteReadError,
25
- ReadTimeoutError,
26
- ResponseStreamingError,
27
- )
28
-
29
- # Keep these imported. There's pre-existing code that uses them.
30
- from botocore import ScalarTypes # noqa
31
- from botocore.compat import XMLParseError # noqa
32
- from botocore.hooks import first_non_none_response # noqa
33
-
34
-
35
- logger = logging.getLogger(__name__)
36
-
37
-
38
- class StreamingBody(IOBase):
39
- """Wrapper class for an http response body.
40
-
41
- This provides a few additional conveniences that do not exist
42
- in the urllib3 model:
43
-
44
- * Set the timeout on the socket (i.e read() timeouts)
45
- * Auto validation of content length, if the amount of bytes
46
- we read does not match the content length, an exception
47
- is raised.
48
-
49
- """
50
-
51
- _DEFAULT_CHUNK_SIZE = 1024
52
-
53
- def __init__(self, raw_stream, content_length):
54
- self._raw_stream = raw_stream
55
- self._content_length = content_length
56
- self._amount_read = 0
57
-
58
- def __del__(self):
59
- # Extending destructor in order to preserve the underlying raw_stream.
60
- # The ability to add custom cleanup logic introduced in Python3.4+.
61
- # https://www.python.org/dev/peps/pep-0442/
62
- pass
63
-
64
- def set_socket_timeout(self, timeout):
65
- """Set the timeout seconds on the socket."""
66
- # The problem we're trying to solve is to prevent .read() calls from
67
- # hanging. This can happen in rare cases. What we'd like to ideally
68
- # do is set a timeout on the .read() call so that callers can retry
69
- # the request.
70
- # Unfortunately, this isn't currently possible in requests.
71
- # See: https://github.com/kennethreitz/requests/issues/1803
72
- # So what we're going to do is reach into the guts of the stream and
73
- # grab the socket object, which we can set the timeout on. We're
74
- # putting in a check here so in case this interface goes away, we'll
75
- # know.
76
- try:
77
- set_socket_timeout(self._raw_stream, timeout)
78
- except AttributeError:
79
- logger.error(
80
- "Cannot access the socket object of "
81
- "a streaming response. It's possible "
82
- "the interface has changed.",
83
- exc_info=True,
84
- )
85
- raise
86
-
87
- def readable(self):
88
- try:
89
- return self._raw_stream.readable()
90
- except AttributeError:
91
- return False
92
-
93
- def read(self, amt=None):
94
- """Read at most amt bytes from the stream.
95
-
96
- If the amt argument is omitted, read all data.
97
- """
98
- try:
99
- chunk = self._raw_stream.read(amt)
100
- except URLLib3ReadTimeoutError as e:
101
- # TODO: the url will be None as urllib3 isn't setting it yet
102
- raise ReadTimeoutError(endpoint_url=e.url, error=e)
103
- except URLLib3ProtocolError as e:
104
- raise ResponseStreamingError(error=e)
105
- self._amount_read += len(chunk)
106
- if amt is None or (not chunk and amt > 0):
107
- # If the server sends empty contents or
108
- # we ask to read all of the contents, then we know
109
- # we need to verify the content length.
110
- self._verify_content_length()
111
- return chunk
112
-
113
- def readlines(self):
114
- return self._raw_stream.readlines()
115
-
116
- def __iter__(self):
117
- """Return an iterator to yield 1k chunks from the raw stream."""
118
- return self.iter_chunks(self._DEFAULT_CHUNK_SIZE)
119
-
120
- def __next__(self):
121
- """Return the next 1k chunk from the raw stream."""
122
- current_chunk = self.read(self._DEFAULT_CHUNK_SIZE)
123
- if current_chunk:
124
- return current_chunk
125
- raise StopIteration()
126
-
127
- def __enter__(self):
128
- return self._raw_stream
129
-
130
- def __exit__(self, type, value, traceback):
131
- self._raw_stream.close()
132
-
133
- next = __next__
134
-
135
- def iter_lines(self, chunk_size=_DEFAULT_CHUNK_SIZE, keepends=False):
136
- """Return an iterator to yield lines from the raw stream.
137
-
138
- This is achieved by reading chunk of bytes (of size chunk_size) at a
139
- time from the raw stream, and then yielding lines from there.
140
- """
141
- pending = b''
142
- for chunk in self.iter_chunks(chunk_size):
143
- lines = (pending + chunk).splitlines(True)
144
- for line in lines[:-1]:
145
- yield line.splitlines(keepends)[0]
146
- pending = lines[-1]
147
- if pending:
148
- yield pending.splitlines(keepends)[0]
149
-
150
- def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
151
- """Return an iterator to yield chunks of chunk_size bytes from the raw
152
- stream.
153
- """
154
- while True:
155
- current_chunk = self.read(chunk_size)
156
- if current_chunk == b"":
157
- break
158
- yield current_chunk
159
-
160
- def _verify_content_length(self):
161
- # See: https://github.com/kennethreitz/requests/issues/1855
162
- # Basically, our http library doesn't do this for us, so we have
163
- # to do this ourself.
164
- if self._content_length is not None and self._amount_read != int(
165
- self._content_length
166
- ):
167
- raise IncompleteReadError(
168
- actual_bytes=self._amount_read,
169
- expected_bytes=int(self._content_length),
170
- )
171
-
172
- def tell(self):
173
- return self._raw_stream.tell()
174
-
175
- def close(self):
176
- """Close the underlying http response stream."""
177
- self._raw_stream.close()
178
-
179
-
180
- def get_response(operation_model, http_response):
181
- protocol = operation_model.metadata['protocol']
182
- response_dict = {
183
- 'headers': http_response.headers,
184
- 'status_code': http_response.status_code,
185
- }
186
- # TODO: Unfortunately, we have to have error logic here.
187
- # If it looks like an error, in the streaming response case we
188
- # need to actually grab the contents.
189
- if response_dict['status_code'] >= 300:
190
- response_dict['body'] = http_response.content
191
- elif operation_model.has_streaming_output:
192
- response_dict['body'] = StreamingBody(
193
- http_response.raw, response_dict['headers'].get('content-length')
194
- )
195
- else:
196
- response_dict['body'] = http_response.content
197
-
198
- parser = parsers.create_parser(protocol)
199
- return http_response, parser.parse(
200
- response_dict, operation_model.output_shape
201
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Blockinger/OVAChatGPT/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: OVAChatGPT
3
- emoji: 😻
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- license: unknown
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/nested_resource_class_methods.py DELETED
@@ -1,102 +0,0 @@
1
- from urllib.parse import quote_plus
2
-
3
- from openai import api_requestor, util
4
-
5
-
6
- def nested_resource_class_methods(
7
- resource, path=None, operations=None, resource_plural=None
8
- ):
9
- if resource_plural is None:
10
- resource_plural = "%ss" % resource
11
- if path is None:
12
- path = resource_plural
13
- if operations is None:
14
- raise ValueError("operations list required")
15
-
16
- def wrapper(cls):
17
- def nested_resource_url(cls, id, nested_id=None):
18
- url = "%s/%s/%s" % (cls.class_url(), quote_plus(id), quote_plus(path))
19
- if nested_id is not None:
20
- url += "/%s" % quote_plus(nested_id)
21
- return url
22
-
23
- resource_url_method = "%ss_url" % resource
24
- setattr(cls, resource_url_method, classmethod(nested_resource_url))
25
-
26
- def nested_resource_request(
27
- cls,
28
- method,
29
- url,
30
- api_key=None,
31
- request_id=None,
32
- api_version=None,
33
- organization=None,
34
- **params,
35
- ):
36
- requestor = api_requestor.APIRequestor(
37
- api_key, api_version=api_version, organization=organization
38
- )
39
- response, _, api_key = requestor.request(
40
- method, url, params, request_id=request_id
41
- )
42
- return util.convert_to_openai_object(
43
- response, api_key, api_version, organization
44
- )
45
-
46
- resource_request_method = "%ss_request" % resource
47
- setattr(cls, resource_request_method, classmethod(nested_resource_request))
48
-
49
- for operation in operations:
50
- if operation == "create":
51
-
52
- def create_nested_resource(cls, id, **params):
53
- url = getattr(cls, resource_url_method)(id)
54
- return getattr(cls, resource_request_method)("post", url, **params)
55
-
56
- create_method = "create_%s" % resource
57
- setattr(cls, create_method, classmethod(create_nested_resource))
58
-
59
- elif operation == "retrieve":
60
-
61
- def retrieve_nested_resource(cls, id, nested_id, **params):
62
- url = getattr(cls, resource_url_method)(id, nested_id)
63
- return getattr(cls, resource_request_method)("get", url, **params)
64
-
65
- retrieve_method = "retrieve_%s" % resource
66
- setattr(cls, retrieve_method, classmethod(retrieve_nested_resource))
67
-
68
- elif operation == "update":
69
-
70
- def modify_nested_resource(cls, id, nested_id, **params):
71
- url = getattr(cls, resource_url_method)(id, nested_id)
72
- return getattr(cls, resource_request_method)("post", url, **params)
73
-
74
- modify_method = "modify_%s" % resource
75
- setattr(cls, modify_method, classmethod(modify_nested_resource))
76
-
77
- elif operation == "delete":
78
-
79
- def delete_nested_resource(cls, id, nested_id, **params):
80
- url = getattr(cls, resource_url_method)(id, nested_id)
81
- return getattr(cls, resource_request_method)(
82
- "delete", url, **params
83
- )
84
-
85
- delete_method = "delete_%s" % resource
86
- setattr(cls, delete_method, classmethod(delete_nested_resource))
87
-
88
- elif operation == "list":
89
-
90
- def list_nested_resources(cls, id, **params):
91
- url = getattr(cls, resource_url_method)(id)
92
- return getattr(cls, resource_request_method)("get", url, **params)
93
-
94
- list_method = "list_%s" % resource_plural
95
- setattr(cls, list_method, classmethod(list_nested_resources))
96
-
97
- else:
98
- raise ValueError("Unknown operation: %s" % operation)
99
-
100
- return cls
101
-
102
- return wrapper
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Brasd99/TTS-Voice-Conversion/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: TTS Voice Conversion
3
- emoji: 🚀
4
- colorFrom: yellow
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 1.25.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Brofu/Joeythemonster-anything-midjourney-v-4-1/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Joeythemonster Anything Midjourney V 4 1
3
- emoji: 🌍
4
- colorFrom: pink
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.24.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CForGETaass/vits-uma-genshin-honkai/app.py DELETED
@@ -1,124 +0,0 @@
1
- import time
2
- import gradio as gr
3
- import utils
4
- import commons
5
- from models import SynthesizerTrn
6
- from text import text_to_sequence
7
- from torch import no_grad, LongTensor
8
- import torch
9
-
10
- hps_ms = utils.get_hparams_from_file(r'./model/config.json')
11
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
- net_g_ms = SynthesizerTrn(
13
- len(hps_ms.symbols),
14
- hps_ms.data.filter_length // 2 + 1,
15
- hps_ms.train.segment_size // hps_ms.data.hop_length,
16
- n_speakers=hps_ms.data.n_speakers,
17
- **hps_ms.model).to(device)
18
- _ = net_g_ms.eval()
19
- speakers = hps_ms.speakers
20
- model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None)
21
-
22
- def get_text(text, hps):
23
- text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
24
- if hps.data.add_blank:
25
- text_norm = commons.intersperse(text_norm, 0)
26
- text_norm = LongTensor(text_norm)
27
- return text_norm, clean_text
28
-
29
- def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale):
30
- start = time.perf_counter()
31
- if not len(text):
32
- return "输入文本不能为空!", None, None
33
- text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
34
- if len(text) > 500:
35
- return f"输入文字过长!{len(text)}>100", None, None
36
- if language == 0:
37
- text = f"[ZH]{text}[ZH]"
38
- elif language == 1:
39
- text = f"[JA]{text}[JA]"
40
- else:
41
- text = f"{text}"
42
- stn_tst, clean_text = get_text(text, hps_ms)
43
- with no_grad():
44
- x_tst = stn_tst.unsqueeze(0)
45
- x_tst_lengths = LongTensor([stn_tst.size(0)])
46
- speaker_id = LongTensor([speaker_id])
47
- audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
48
- length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
49
-
50
- return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s"
51
-
52
- def search_speaker(search_value):
53
- for s in speakers:
54
- if search_value == s:
55
- return s
56
- for s in speakers:
57
- if search_value in s:
58
- return s
59
-
60
- def change_lang(language):
61
- if language == 0:
62
- return 0.6, 0.668, 1.2
63
- else:
64
- return 0.6, 0.668, 1.1
65
-
66
- download_audio_js = """
67
- () =>{{
68
- let root = document.querySelector("body > gradio-app");
69
- if (root.shadowRoot != null)
70
- root = root.shadowRoot;
71
- let audio = root.querySelector("#tts-audio").querySelector("audio");
72
- let text = root.querySelector("#input-text").querySelector("textarea");
73
- if (audio == undefined)
74
- return;
75
- text = text.value;
76
- if (text == undefined)
77
- text = Math.floor(Math.random()*100000000);
78
- audio = audio.src;
79
- let oA = document.createElement("a");
80
- oA.download = text.substr(0, 20)+'.wav';
81
- oA.href = audio;
82
- document.body.appendChild(oA);
83
- oA.click();
84
- oA.remove();
85
- }}
86
- """
87
-
88
- if __name__ == '__main__':
89
- with gr.Blocks() as app:
90
- gr.Markdown(
91
- "# <center> VITS语音在线合成demo\n"
92
- "<div align='center'>主要有赛马娘,原神中文,原神日语,崩坏3的音色</div>"
93
- '<div align="center"><a><font color="#dd0000">结果有随机性,语调可能很奇怪,可多次生成取最佳效果</font></a></div>'
94
- '<div align="center"><a><font color="#dd0000">标点符号会影响生成的结果</font></a></div>'
95
- )
96
-
97
- with gr.Tabs():
98
- with gr.TabItem("vits"):
99
- with gr.Row():
100
- with gr.Column():
101
- input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text")
102
- lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
103
- type="index", value="中文")
104
- btn = gr.Button(value="Submit")
105
- with gr.Row():
106
- search = gr.Textbox(label="Search Speaker", lines=1)
107
- btn2 = gr.Button(value="Search")
108
- sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228])
109
- with gr.Row():
110
- ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
111
- nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
112
- ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True)
113
- with gr.Column():
114
- o1 = gr.Textbox(label="Output Message")
115
- o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio")
116
- o3 = gr.Textbox(label="Extra Info")
117
- download = gr.Button("Download Audio")
118
- btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate")
119
- download.click(None, [], [], _js=download_audio_js.format())
120
- btn2.click(search_speaker, inputs=[search], outputs=[sid])
121
- lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
122
- with gr.TabItem("可用人物一览"):
123
- gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index")
124
- app.queue(concurrency_count=1).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/roi_heads/rotated_fast_rcnn.py DELETED
@@ -1,271 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import logging
3
- import numpy as np
4
- from typing import Dict
5
- import torch
6
-
7
- from detectron2.layers import ShapeSpec, batched_nms_rotated
8
- from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
9
- from detectron2.utils.events import get_event_storage
10
-
11
- from ..box_regression import Box2BoxTransformRotated
12
- from ..poolers import ROIPooler
13
- from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
14
- from .box_head import build_box_head
15
- from .fast_rcnn import FastRCNNOutputLayers
16
- from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
17
-
18
- logger = logging.getLogger(__name__)
19
-
20
- """
21
- Shape shorthand in this module:
22
-
23
- N: number of images in the minibatch
24
- R: number of ROIs, combined over all images, in the minibatch
25
- Ri: number of ROIs in image i
26
- K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
27
-
28
- Naming convention:
29
-
30
- deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
31
- transform (see :class:`box_regression.Box2BoxTransformRotated`).
32
-
33
- pred_class_logits: predicted class scores in [-inf, +inf]; use
34
- softmax(pred_class_logits) to estimate P(class).
35
-
36
- gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
37
- foreground object classes and K represents the background class.
38
-
39
- pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
40
- to detection box predictions.
41
-
42
- gt_proposal_deltas: ground-truth rotated box2box transform deltas
43
- """
44
-
45
-
46
- def fast_rcnn_inference_rotated(
47
- boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
48
- ):
49
- """
50
- Call `fast_rcnn_inference_single_image_rotated` for all images.
51
-
52
- Args:
53
- boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
54
- boxes for each image. Element i has shape (Ri, K * 5) if doing
55
- class-specific regression, or (Ri, 5) if doing class-agnostic
56
- regression, where Ri is the number of predicted objects for image i.
57
- This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
58
- scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
59
- Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
60
- for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
61
- image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
62
- score_thresh (float): Only return detections with a confidence score exceeding this
63
- threshold.
64
- nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
65
- topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
66
- all detections.
67
-
68
- Returns:
69
- instances: (list[Instances]): A list of N instances, one for each image in the batch,
70
- that stores the topk most confidence detections.
71
- kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
72
- the corresponding boxes/scores index in [0, Ri) from the input, for image i.
73
- """
74
- result_per_image = [
75
- fast_rcnn_inference_single_image_rotated(
76
- boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image
77
- )
78
- for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)
79
- ]
80
- return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
81
-
82
-
83
- def fast_rcnn_inference_single_image_rotated(
84
- boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
85
- ):
86
- """
87
- Single-image inference. Return rotated bounding-box detection results by thresholding
88
- on scores and applying rotated non-maximum suppression (Rotated NMS).
89
-
90
- Args:
91
- Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
92
- per image.
93
-
94
- Returns:
95
- Same as `fast_rcnn_inference_rotated`, but for only one image.
96
- """
97
- valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
98
- if not valid_mask.all():
99
- boxes = boxes[valid_mask]
100
- scores = scores[valid_mask]
101
-
102
- B = 5 # box dimension
103
- scores = scores[:, :-1]
104
- num_bbox_reg_classes = boxes.shape[1] // B
105
- # Convert to Boxes to use the `clip` function ...
106
- boxes = RotatedBoxes(boxes.reshape(-1, B))
107
- boxes.clip(image_shape)
108
- boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B
109
- # Filter results based on detection scores
110
- filter_mask = scores > score_thresh # R x K
111
- # R' x 2. First column contains indices of the R predictions;
112
- # Second column contains indices of classes.
113
- filter_inds = filter_mask.nonzero()
114
- if num_bbox_reg_classes == 1:
115
- boxes = boxes[filter_inds[:, 0], 0]
116
- else:
117
- boxes = boxes[filter_mask]
118
- scores = scores[filter_mask]
119
-
120
- # Apply per-class Rotated NMS
121
- keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
122
- if topk_per_image >= 0:
123
- keep = keep[:topk_per_image]
124
- boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
125
-
126
- result = Instances(image_shape)
127
- result.pred_boxes = RotatedBoxes(boxes)
128
- result.scores = scores
129
- result.pred_classes = filter_inds[:, 1]
130
-
131
- return result, filter_inds[:, 0]
132
-
133
-
134
- class RotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
135
- """
136
- A class that stores information about outputs of a Fast R-CNN head with RotatedBoxes.
137
- """
138
-
139
- @classmethod
140
- def from_config(cls, cfg, input_shape):
141
- args = super().from_config(cfg, input_shape)
142
- args["box2box_transform"] = Box2BoxTransformRotated(
143
- weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
144
- )
145
- return args
146
-
147
- def inference(self, predictions, proposals):
148
- """
149
- Returns:
150
- list[Instances]: same as `fast_rcnn_inference_rotated`.
151
- list[Tensor]: same as `fast_rcnn_inference_rotated`.
152
- """
153
- boxes = self.predict_boxes(predictions, proposals)
154
- scores = self.predict_probs(predictions, proposals)
155
- image_shapes = [x.image_size for x in proposals]
156
-
157
- return fast_rcnn_inference_rotated(
158
- boxes,
159
- scores,
160
- image_shapes,
161
- self.test_score_thresh,
162
- self.test_nms_thresh,
163
- self.test_topk_per_image,
164
- )
165
-
166
-
167
- @ROI_HEADS_REGISTRY.register()
168
- class RROIHeads(StandardROIHeads):
169
- """
170
- This class is used by Rotated RPN (RRPN).
171
- For now, it just supports box head but not mask or keypoints.
172
- """
173
-
174
- def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
175
- super().__init__(cfg, input_shape)
176
- assert (
177
- not self.mask_on and not self.keypoint_on
178
- ), "Mask/Keypoints not supported in Rotated ROIHeads."
179
-
180
- def _init_box_head(self, cfg, input_shape):
181
- # fmt: off
182
- pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
183
- pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
184
- sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
185
- pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
186
- self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
187
- # fmt: on
188
- assert not self.train_on_pred_boxes, "Not Implemented!"
189
-
190
- # If StandardROIHeads is applied on multiple feature maps (as in FPN),
191
- # then we share the same predictors and therefore the channel counts must be the same
192
- in_channels = [input_shape[f].channels for f in self.in_features]
193
- # Check all channel counts are equal
194
- assert len(set(in_channels)) == 1, in_channels
195
- in_channels = in_channels[0]
196
-
197
- assert pooler_type in ["ROIAlignRotated"]
198
-
199
- self.box_pooler = ROIPooler(
200
- output_size=pooler_resolution,
201
- scales=pooler_scales,
202
- sampling_ratio=sampling_ratio,
203
- pooler_type=pooler_type,
204
- )
205
- self.box_head = build_box_head(
206
- cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
207
- )
208
-
209
- self.box_predictor = RotatedFastRCNNOutputLayers(cfg, self.box_head.output_shape)
210
-
211
- @torch.no_grad()
212
- def label_and_sample_proposals(self, proposals, targets):
213
- """
214
- Prepare some proposals to be used to train the RROI heads.
215
- It performs box matching between `proposals` and `targets`, and assigns
216
- training labels to the proposals.
217
- It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
218
- with a fraction of positives that is no larger than `self.positive_sample_fraction.
219
-
220
- Args:
221
- See :meth:`StandardROIHeads.forward`
222
-
223
- Returns:
224
- list[Instances]: length `N` list of `Instances`s containing the proposals
225
- sampled for training. Each `Instances` has the following fields:
226
- - proposal_boxes: the rotated proposal boxes
227
- - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
228
- (this is only meaningful if the proposal has a label > 0; if label = 0
229
- then the ground-truth box is random)
230
- - gt_classes: the ground-truth classification lable for each proposal
231
- """
232
- gt_boxes = [x.gt_boxes for x in targets]
233
- if self.proposal_append_gt:
234
- proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
235
-
236
- proposals_with_gt = []
237
-
238
- num_fg_samples = []
239
- num_bg_samples = []
240
- for proposals_per_image, targets_per_image in zip(proposals, targets):
241
- has_gt = len(targets_per_image) > 0
242
- match_quality_matrix = pairwise_iou_rotated(
243
- targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
244
- )
245
- matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
246
- sampled_idxs, gt_classes = self._sample_proposals(
247
- matched_idxs, matched_labels, targets_per_image.gt_classes
248
- )
249
-
250
- proposals_per_image = proposals_per_image[sampled_idxs]
251
- proposals_per_image.gt_classes = gt_classes
252
-
253
- if has_gt:
254
- sampled_targets = matched_idxs[sampled_idxs]
255
- proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
256
- else:
257
- gt_boxes = RotatedBoxes(
258
- targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5))
259
- )
260
- proposals_per_image.gt_boxes = gt_boxes
261
-
262
- num_bg_samples.append((gt_classes == self.num_classes).sum().item())
263
- num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
264
- proposals_with_gt.append(proposals_per_image)
265
-
266
- # Log the number of fg/bg samples that are selected for training ROI heads
267
- storage = get_event_storage()
268
- storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
269
- storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
270
-
271
- return proposals_with_gt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pydiffvg/optimize_svg.py DELETED
@@ -1,1607 +0,0 @@
1
- import json
2
- import copy
3
- import xml.etree.ElementTree as etree
4
- from xml.dom import minidom
5
- import warnings
6
- import torch
7
- import numpy as np
8
- import re
9
- import sys
10
- import pydiffvg
11
- import math
12
- from collections import namedtuple
13
- import cssutils
14
-
15
- class SvgOptimizationSettings:
16
-
17
- default_params = {
18
- "optimize_color": True,
19
- "color_lr": 2e-3,
20
- "optimize_alpha": False,
21
- "alpha_lr": 2e-3,
22
- "optimizer": "Adam",
23
- "transforms": {
24
- "optimize_transforms":True,
25
- "transform_mode":"rigid",
26
- "translation_mult":1e-3,
27
- "transform_lr":2e-3
28
- },
29
- "circles": {
30
- "optimize_center": True,
31
- "optimize_radius": True,
32
- "shape_lr": 2e-1
33
- },
34
- "paths": {
35
- "optimize_points": True,
36
- "shape_lr": 2e-1
37
- },
38
- "gradients": {
39
- "optimize_stops": True,
40
- "stop_lr": 2e-3,
41
- "optimize_color": True,
42
- "color_lr": 2e-3,
43
- "optimize_alpha": False,
44
- "alpha_lr": 2e-3,
45
- "optimize_location": True,
46
- "location_lr": 2e-1
47
- }
48
- }
49
-
50
- optims = {
51
- "Adam": torch.optim.Adam,
52
- "SGD": torch.optim.SGD,
53
- "ASGD": torch.optim.ASGD,
54
- }
55
-
56
- #region methods
57
- def __init__(self, f=None):
58
- self.store = {}
59
- if f is None:
60
- self.store["default"] = copy.deepcopy(SvgOptimizationSettings.default_params)
61
- else:
62
- self.store = json.load(f)
63
-
64
- # create default alias for root
65
- def default_name(self, dname):
66
- self.dname = dname
67
- if dname not in self.store:
68
- self.store[dname] = self.store["default"]
69
-
70
- def retrieve(self, node_id):
71
- if node_id not in self.store:
72
- return (self.store["default"], False)
73
- else:
74
- return (self.store[node_id], True)
75
-
76
- def reset_to_defaults(self, node_id):
77
- if node_id in self.store:
78
- del self.store[node_id]
79
-
80
- return self.store["default"]
81
-
82
- def undefault(self, node_id):
83
- if node_id not in self.store:
84
- self.store[node_id] = copy.deepcopy(self.store["default"])
85
-
86
- return self.store[node_id]
87
-
88
- def override_optimizer(self, optimizer):
89
- if optimizer is not None:
90
- for v in self.store.values():
91
- v["optimizer"] = optimizer
92
-
93
- def global_override(self, path, value):
94
- for store in self.store.values():
95
- d = store
96
- for key in path[:-1]:
97
- d = d[key]
98
-
99
- d[path[-1]] = value
100
-
101
- def save(self, file):
102
- self.store["default"] = self.store[self.dname]
103
- json.dump(self.store, file, indent="\t")
104
- #endregion
105
-
106
- class OptimizableSvg:
107
-
108
- class TransformTools:
109
- @staticmethod
110
- def parse_matrix(vals):
111
- assert(len(vals)==6)
112
- return np.array([[vals[0],vals[2],vals[4]],[vals[1], vals[3], vals[5]],[0,0,1]])
113
-
114
- @staticmethod
115
- def parse_translate(vals):
116
- assert(len(vals)>=1 and len(vals)<=2)
117
- mat=np.eye(3)
118
- mat[0,2]=vals[0]
119
- if len(vals)>1:
120
- mat[1,2]=vals[1]
121
- return mat
122
-
123
- @staticmethod
124
- def parse_rotate(vals):
125
- assert (len(vals) == 1 or len(vals) == 3)
126
- mat = np.eye(3)
127
- rads=math.radians(vals[0])
128
- sint=math.sin(rads)
129
- cost=math.cos(rads)
130
- mat[0:2, 0:2] = np.array([[cost,-sint],[sint,cost]])
131
- if len(vals) > 1:
132
- tr1=parse_translate(vals[1:3])
133
- tr2=parse_translate([-vals[1],-vals[2]])
134
- mat=tr1 @ mat @ tr2
135
- return mat
136
-
137
- @staticmethod
138
- def parse_scale(vals):
139
- assert (len(vals) >= 1 and len(vals) <= 2)
140
- d=np.array([vals[0], vals[1] if len(vals)>1 else vals[0],1])
141
- return np.diag(d)
142
-
143
- @staticmethod
144
- def parse_skewx(vals):
145
- assert(len(vals)==1)
146
- m=np.eye(3)
147
- m[0,1]=vals[0]
148
- return m
149
-
150
- @staticmethod
151
- def parse_skewy(vals):
152
- assert (len(vals) == 1)
153
- m = np.eye(3)
154
- m[1, 0] = vals[0]
155
- return m
156
-
157
- @staticmethod
158
- def transformPoints(pointsTensor, transform):
159
- assert(transform is not None)
160
- one=torch.ones((pointsTensor.shape[0],1),device=pointsTensor.device)
161
- homo_points = torch.cat([pointsTensor, one], dim=1)
162
- mult = transform.mm(homo_points.permute(1,0)).permute(1,0)
163
- tfpoints=mult[:, 0:2].contiguous()
164
- #print(torch.norm(mult[:,2]-one))
165
- assert(pointsTensor.shape == tfpoints.shape)
166
- return tfpoints
167
-
168
- @staticmethod
169
- def promote_numpy(M):
170
- ret = np.eye(3)
171
- ret[0:2, 0:2] = M
172
- return ret
173
-
174
- @staticmethod
175
- def recompose_numpy(Theta,ScaleXY,ShearX,TXY):
176
- cost=math.cos(Theta)
177
- sint=math.sin(Theta)
178
- Rot=np.array([[cost, -sint],[sint, cost]])
179
- Scale=np.diag(ScaleXY)
180
- Shear=np.eye(2)
181
- Shear[0,1]=ShearX
182
-
183
- Translate=np.eye(3)
184
- Translate[0:2,2]=TXY
185
-
186
- M=OptimizableSvg.TransformTools.promote_numpy(Rot @ Scale @ Shear) @ Translate
187
- return M
188
-
189
- @staticmethod
190
- def promote(m):
191
- M=torch.eye(3).to(m.device)
192
- M[0:2,0:2]=m
193
- return M
194
-
195
- @staticmethod
196
- def make_rot(Theta):
197
- sint=Theta.sin().squeeze()
198
- cost=Theta.cos().squeeze()
199
- #m=torch.tensor([[cost, -sint],[sint, cost]])
200
- Rot=torch.stack((torch.stack((cost,-sint)),torch.stack((sint,cost))))
201
- return Rot
202
-
203
- @staticmethod
204
- def make_scale(ScaleXY):
205
- if ScaleXY.squeeze().dim()==0:
206
- ScaleXY=ScaleXY.squeeze()
207
- #uniform scale
208
- return torch.diag(torch.stack([ScaleXY,ScaleXY])).to(ScaleXY.device)
209
- else:
210
- return torch.diag(ScaleXY).to(ScaleXY.device)
211
-
212
- @staticmethod
213
- def make_shear(ShearX):
214
- m=torch.eye(2).to(ShearX.device)
215
- m[0,1]=ShearX
216
- return m
217
-
218
- @staticmethod
219
- def make_translate(TXY):
220
- m=torch.eye(3).to(TXY.device)
221
- m[0:2,2]=TXY
222
- return m
223
-
224
- @staticmethod
225
- def recompose(Theta,ScaleXY,ShearX,TXY):
226
- Rot=OptimizableSvg.TransformTools.make_rot(Theta)
227
- Scale=OptimizableSvg.TransformTools.make_scale(ScaleXY)
228
- Shear=OptimizableSvg.TransformTools.make_shear(ShearX)
229
- Translate=OptimizableSvg.TransformTools.make_translate(TXY)
230
-
231
- return OptimizableSvg.TransformTools.promote(Rot.mm(Scale).mm(Shear)).mm(Translate)
232
-
233
- TransformDecomposition=namedtuple("TransformDecomposition","theta scale shear translate")
234
- TransformProperties=namedtuple("TransformProperties", "has_rotation has_scale has_mirror scale_uniform has_shear has_translation")
235
-
236
- @staticmethod
237
- def make_named(decomp):
238
- if not isinstance(decomp,OptimizableSvg.TransformTools.TransformDecomposition):
239
- decomp=OptimizableSvg.TransformTools.TransformDecomposition(theta=decomp[0],scale=decomp[1],shear=decomp[2],translate=decomp[3])
240
- return decomp
241
-
242
- @staticmethod
243
- def analyze_transform(decomp):
244
- decomp=OptimizableSvg.TransformTools.make_named(decomp)
245
- epsilon=1e-3
246
- has_rotation=abs(decomp.theta)>epsilon
247
- has_scale=abs((abs(decomp.scale)-1)).max()>epsilon
248
- scale_len=decomp.scale.squeeze().ndim>0 if isinstance(decomp.scale,np.ndarray) else decomp.scale.squeeze().dim() > 0
249
- has_mirror=scale_len and decomp.scale[0]*decomp.scale[1] < 0
250
- scale_uniform=not scale_len or abs(abs(decomp.scale[0])-abs(decomp.scale[1]))<epsilon
251
- has_shear=abs(decomp.shear)>epsilon
252
- has_translate=max(abs(decomp.translate[0]),abs(decomp.translate[1]))>epsilon
253
-
254
- return OptimizableSvg.TransformTools.TransformProperties(has_rotation=has_rotation,has_scale=has_scale,has_mirror=has_mirror,scale_uniform=scale_uniform,has_shear=has_shear,has_translation=has_translate)
255
-
256
- @staticmethod
257
- def check_and_decomp(M):
258
- decomp=OptimizableSvg.TransformTools.decompose(M) if M is not None else OptimizableSvg.TransformTools.TransformDecomposition(theta=0,scale=(1,1),shear=0,translate=(0,0))
259
- props=OptimizableSvg.TransformTools.analyze_transform(decomp)
260
- return (decomp, props)
261
-
262
- @staticmethod
263
- def tf_to_string(M):
264
- tfstring = "matrix({} {} {} {} {} {})".format(M[0, 0], M[1, 0], M[0, 1], M[1, 1], M[0, 2], M[1, 2])
265
- return tfstring
266
-
267
- @staticmethod
268
- def decomp_to_string(decomp):
269
- decomp = OptimizableSvg.TransformTools.make_named(decomp)
270
- ret=""
271
- props=OptimizableSvg.TransformTools.analyze_transform(decomp)
272
- if props.has_rotation:
273
- ret+="rotate({}) ".format(math.degrees(decomp.theta.item()))
274
- if props.has_scale:
275
- if decomp.scale.dim()==0:
276
- ret += "scale({}) ".format(decomp.scale.item())
277
- else:
278
- ret+="scale({} {}) ".format(decomp.scale[0], decomp.scale[1])
279
- if props.has_shear:
280
- ret+="skewX({}) ".format(decomp.shear.item())
281
- if props.has_translation:
282
- ret+="translate({} {}) ".format(decomp.translate[0],decomp.translate[1])
283
-
284
- return ret
285
-
286
- @staticmethod
287
- def decompose(M):
288
- m = M[0:2, 0:2]
289
- t0=M[0:2, 2]
290
- #get translation so that we can post-multiply with it
291
- TXY=np.linalg.solve(m,t0)
292
-
293
- T=np.eye(3)
294
- T[0:2,2]=TXY
295
-
296
- q, r = np.linalg.qr(m)
297
-
298
- ref = np.array([[1, 0], [0, np.sign(np.linalg.det(q))]])
299
-
300
- Rot = np.dot(q, ref)
301
-
302
- ref2 = np.array([[1, 0], [0, np.sign(np.linalg.det(r))]])
303
-
304
- r2 = np.dot(ref2, r)
305
-
306
- Ref = np.dot(ref, ref2)
307
-
308
- sc = np.diag(r2)
309
- Scale = np.diagflat(sc)
310
-
311
- Shear = np.eye(2)
312
- Shear[0, 1] = r2[0, 1] / sc[0]
313
- #the actual shear coefficient
314
- ShearX=r2[0, 1] / sc[0]
315
-
316
- if np.sum(sc) < 0:
317
- # both scales are negative, flip this and add a 180 rotation
318
- Rot = np.dot(Rot, -np.eye(2))
319
- Scale = -Scale
320
-
321
- Theta = math.atan2(Rot[1, 0], Rot[0, 0])
322
- ScaleXY = np.array([Scale[0,0],Scale[1,1]*Ref[1,1]])
323
-
324
- return OptimizableSvg.TransformTools.TransformDecomposition(theta=Theta, scale=ScaleXY, shear=ShearX, translate=TXY)
325
-
326
- #region suboptimizers
327
-
328
- #optimizes color, but really any tensor that needs to stay between 0 and 1 per-entry
329
- class ColorOptimizer:
330
- def __init__(self,tensor,optim_type,lr):
331
- self.tensor=tensor
332
- self.optim=optim_type([tensor],lr=lr)
333
-
334
- def zero_grad(self):
335
- self.optim.zero_grad()
336
-
337
- def step(self):
338
- self.optim.step()
339
- self.tensor.data.clamp_(min=1e-4,max=1.)
340
-
341
- #optimizes gradient stop positions
342
- class StopOptimizer:
343
- def __init__(self,stops,optim_type,lr):
344
- self.stops=stops
345
- self.optim=optim_type([stops],lr=lr)
346
-
347
- def zero_grad(self):
348
- self.optim.zero_grad()
349
-
350
- def step(self):
351
- self.optim.step()
352
- self.stops.data.clamp_(min=0., max=1.)
353
- self.stops.data, _ = self.stops.sort()
354
- self.stops.data[0] = 0.
355
- self.stops.data[-1]=1.
356
-
357
- #optimizes gradient: stop, positions, colors+opacities, locations
358
- class GradientOptimizer:
359
- def __init__(self, begin, end, offsets, stops, optim_params):
360
- self.begin=begin.clone().detach() if begin is not None else None
361
- self.end=end.clone().detach() if end is not None else None
362
- self.offsets=offsets.clone().detach() if offsets is not None else None
363
- self.stop_colors=stops[:,0:3].clone().detach() if stops is not None else None
364
- self.stop_alphas=stops[:,3].clone().detach() if stops is not None else None
365
- self.optimizers=[]
366
-
367
- if optim_params["gradients"]["optimize_stops"] and self.offsets is not None:
368
- self.offsets.requires_grad_(True)
369
- self.optimizers.append(OptimizableSvg.StopOptimizer(self.offsets,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["stop_lr"]))
370
- if optim_params["gradients"]["optimize_color"] and self.stop_colors is not None:
371
- self.stop_colors.requires_grad_(True)
372
- self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_colors,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["color_lr"]))
373
- if optim_params["gradients"]["optimize_alpha"] and self.stop_alphas is not None:
374
- self.stop_alphas.requires_grad_(True)
375
- self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_alphas,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["alpha_lr"]))
376
- if optim_params["gradients"]["optimize_location"] and self.begin is not None and self.end is not None:
377
- self.begin.requires_grad_(True)
378
- self.end.requires_grad_(True)
379
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.begin,self.end],lr=optim_params["gradients"]["location_lr"]))
380
-
381
-
382
- def get_vals(self):
383
- return self.begin, self.end, self.offsets, torch.cat((self.stop_colors,self.stop_alphas.unsqueeze(1)),1) if self.stop_colors is not None and self.stop_alphas is not None else None
384
-
385
- def zero_grad(self):
386
- for optim in self.optimizers:
387
- optim.zero_grad()
388
-
389
- def step(self):
390
- for optim in self.optimizers:
391
- optim.step()
392
-
393
- class TransformOptimizer:
394
- def __init__(self,transform,optim_params):
395
- self.transform=transform
396
- self.optimizes=optim_params["transforms"]["optimize_transforms"] and transform is not None
397
- self.params=copy.deepcopy(optim_params)
398
- self.transform_mode=optim_params["transforms"]["transform_mode"]
399
-
400
- if self.optimizes:
401
- optimvars=[]
402
- self.residual=None
403
- lr=optim_params["transforms"]["transform_lr"]
404
- tmult=optim_params["transforms"]["translation_mult"]
405
- decomp,props=OptimizableSvg.TransformTools.check_and_decomp(transform.cpu().numpy())
406
- if self.transform_mode=="move":
407
- #only translation and rotation should be set
408
- if props.has_scale or props.has_shear or props.has_mirror:
409
- print("Warning: set to optimize move only, but input transform has residual scale or shear")
410
- self.residual=self.transform.clone().detach().requires_grad_(False)
411
- self.Theta=torch.tensor(0,dtype=torch.float32,requires_grad=True,device=transform.device)
412
- self.translation=torch.tensor([0, 0],dtype=torch.float32,requires_grad=True,device=transform.device)
413
- else:
414
- self.residual=None
415
- self.Theta=torch.tensor(decomp.theta,dtype=torch.float32,requires_grad=True,device=transform.device)
416
- self.translation=torch.tensor(decomp.translate,dtype=torch.float32,requires_grad=True,device=transform.device)
417
- optimvars+=[{'params':x,'lr':lr} for x in [self.Theta]]+[{'params':self.translation,'lr':lr*tmult}]
418
- elif self.transform_mode=="rigid":
419
- #only translation, rotation, and uniform scale should be set
420
- if props.has_shear or props.has_mirror or not props.scale_uniform:
421
- print("Warning: set to optimize rigid transform only, but input transform has residual shear, mirror or non-uniform scale")
422
- self.residual = self.transform.clone().detach().requires_grad_(False)
423
- self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device)
424
- self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device)
425
- self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device)
426
- else:
427
- self.residual = None
428
- self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
429
- self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
430
- self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device)
431
- optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}]
432
- elif self.transform_mode=="similarity":
433
- if props.has_shear or not props.scale_uniform:
434
- print("Warning: set to optimize rigid transform only, but input transform has residual shear or non-uniform scale")
435
- self.residual = self.transform.clone().detach().requires_grad_(False)
436
- self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device)
437
- self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device)
438
- self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device)
439
- self.scale_sign=torch.tensor(1,dtype=torch.float32,requires_grad=False,device=transform.device)
440
- else:
441
- self.residual = None
442
- self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
443
- self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
444
- self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device)
445
- self.scale_sign = torch.tensor(np.sign(decomp.scale[0]*decomp.scale[1]), dtype=torch.float32, requires_grad=False,device=transform.device)
446
- optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}]
447
- elif self.transform_mode=="affine":
448
- self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device)
449
- self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device)
450
- self.scale = torch.tensor(decomp.scale, dtype=torch.float32, requires_grad=True,device=transform.device)
451
- self.shear = torch.tensor(decomp.shear, dtype=torch.float32, requires_grad=True,device=transform.device)
452
- optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale, self.shear]]+[{'params':self.translation,'lr':lr*tmult}]
453
- else:
454
- raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode))
455
- self.optimizer=SvgOptimizationSettings.optims[optim_params["optimizer"]](optimvars)
456
-
457
- def get_transform(self):
458
- if not self.optimizes:
459
- return self.transform
460
- else:
461
- if self.transform_mode == "move":
462
- composed=OptimizableSvg.TransformTools.recompose(self.Theta,torch.tensor([1.],device=self.Theta.device),torch.tensor(0.,device=self.Theta.device),self.translation)
463
- return self.residual.mm(composed) if self.residual is not None else composed
464
- elif self.transform_mode == "rigid":
465
- composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, torch.tensor(0.,device=self.Theta.device),
466
- self.translation)
467
- return self.residual.mm(composed) if self.residual is not None else composed
468
- elif self.transform_mode == "similarity":
469
- composed=OptimizableSvg.TransformTools.recompose(self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.,device=self.Theta.device),self.translation)
470
- return self.residual.mm(composed) if self.residual is not None else composed
471
- elif self.transform_mode == "affine":
472
- composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, self.shear, self.translation)
473
- return composed
474
- else:
475
- raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode))
476
-
477
- def tfToString(self):
478
- if self.transform is None:
479
- return None
480
- elif not self.optimizes:
481
- return OptimizableSvg.TransformTools.tf_to_string(self.transform)
482
- else:
483
- if self.transform_mode == "move":
484
- str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta,torch.tensor([1.]),torch.tensor(0.),self.translation))
485
- return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str
486
- elif self.transform_mode == "rigid":
487
- str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, torch.tensor(0.),
488
- self.translation))
489
- return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str
490
- elif self.transform_mode == "similarity":
491
- str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.),self.translation))
492
- return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str
493
- elif self.transform_mode == "affine":
494
- str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, self.shear, self.translation))
495
- return composed
496
-
497
- def zero_grad(self):
498
- if self.optimizes:
499
- self.optimizer.zero_grad()
500
-
501
- def step(self):
502
- if self.optimizes:
503
- self.optimizer.step()
504
-
505
- #endregion
506
-
507
- #region Nodes
508
- class SvgNode:
509
- def __init__(self,id,transform,appearance,settings):
510
- self.id=id
511
- self.children=[]
512
- self.optimizers=[]
513
- self.device = settings.device
514
- self.transform=torch.tensor(transform,dtype=torch.float32,device=self.device) if transform is not None else None
515
- self.transform_optim=OptimizableSvg.TransformOptimizer(self.transform,settings.retrieve(self.id)[0])
516
- self.optimizers.append(self.transform_optim)
517
- self.proc_appearance(appearance,settings.retrieve(self.id)[0])
518
-
519
- def tftostring(self):
520
- return self.transform_optim.tfToString()
521
-
522
- def appearanceToString(self):
523
- appstring=""
524
- for key,value in self.appearance.items():
525
- if key in ["fill", "stroke"]:
526
- #a paint-type value
527
- if value[0] == "none":
528
- appstring+="{}:none;".format(key)
529
- elif value[0] == "solid":
530
- appstring += "{}:{};".format(key,OptimizableSvg.rgb_to_string(value[1]))
531
- elif value[0] == "url":
532
- appstring += "{}:url(#{});".format(key,value[1].id)
533
- #appstring += "{}:{};".format(key,"#ff00ff")
534
- elif key in ["opacity", "fill-opacity", "stroke-opacity", "stroke-width", "fill-rule"]:
535
- appstring+="{}:{};".format(key,value)
536
- else:
537
- raise ValueError("Don't know how to write appearance parameter '{}'".format(key))
538
- return appstring
539
-
540
-
541
- def write_xml_common_attrib(self,node,tfname="transform"):
542
- if self.transform is not None:
543
- node.set(tfname,self.tftostring())
544
- if len(self.appearance)>0:
545
- node.set('style',self.appearanceToString())
546
- if self.id is not None:
547
- node.set('id',self.id)
548
-
549
-
550
- def proc_appearance(self,appearance,optim_params):
551
- self.appearance=appearance
552
- for key, value in appearance.items():
553
- if key == "fill" or key == "stroke":
554
- if optim_params["optimize_color"] and value[0]=="solid":
555
- value[1].requires_grad_(True)
556
- self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1],SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["color_lr"]))
557
- elif key == "fill-opacity" or key == "stroke-opacity" or key == "opacity":
558
- if optim_params["optimize_alpha"]:
559
- value[1].requires_grad_(True)
560
- self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1], optim_params["optimizer"],
561
- optim_params["alpha_lr"]))
562
- elif key == "fill-rule" or key == "stroke-width":
563
- pass
564
- else:
565
- raise RuntimeError("Unrecognized appearance key '{}'".format(key))
566
-
567
- def prop_transform(self,intform):
568
- return intform.matmul(self.transform_optim.get_transform()) if self.transform is not None else intform
569
-
570
- def prop_appearance(self,inappearance):
571
- outappearance=copy.copy(inappearance)
572
- for key,value in self.appearance.items():
573
- if key == "fill":
574
- #gets replaced
575
- outappearance[key]=value
576
- elif key == "fill-opacity":
577
- #gets multiplied
578
- outappearance[key] = outappearance[key]*value
579
- elif key == "fill-rule":
580
- #gets replaced
581
- outappearance[key] = value
582
- elif key =="opacity":
583
- # gets multiplied
584
- outappearance[key] = outappearance[key]*value
585
- elif key == "stroke":
586
- # gets replaced
587
- outappearance[key] = value
588
- elif key == "stroke-opacity":
589
- # gets multiplied
590
- outappearance[key] = outappearance[key]*value
591
- elif key =="stroke-width":
592
- # gets replaced
593
- outappearance[key] = value
594
- else:
595
- raise RuntimeError("Unrecognized appearance key '{}'".format(key))
596
- return outappearance
597
-
598
- def zero_grad(self):
599
- for optim in self.optimizers:
600
- optim.zero_grad()
601
- for child in self.children:
602
- child.zero_grad()
603
-
604
- def step(self):
605
- for optim in self.optimizers:
606
- optim.step()
607
- for child in self.children:
608
- child.step()
609
-
610
- def get_type(self):
611
- return "Generic node"
612
-
613
- def is_shape(self):
614
- return False
615
-
616
- def build_scene(self,shapes,shape_groups,transform,appearance):
617
- raise NotImplementedError("Abstract SvgNode cannot recurse")
618
-
619
- class GroupNode(SvgNode):
620
- def __init__(self, id, transform, appearance,settings):
621
- super().__init__(id, transform, appearance,settings)
622
-
623
- def get_type(self):
624
- return "Group node"
625
-
626
- def build_scene(self,shapes,shape_groups,transform,appearance):
627
- outtf=self.prop_transform(transform)
628
- outapp=self.prop_appearance(appearance)
629
- for child in self.children:
630
- child.build_scene(shapes,shape_groups,outtf,outapp)
631
-
632
- def write_xml(self, parent):
633
- elm=etree.SubElement(parent,"g")
634
- self.write_xml_common_attrib(elm)
635
-
636
- for child in self.children:
637
- child.write_xml(elm)
638
-
639
- class RootNode(SvgNode):
640
- def __init__(self, id, transform, appearance,settings):
641
- super().__init__(id, transform, appearance,settings)
642
-
643
- def write_xml(self,document):
644
- elm=etree.Element('svg')
645
- self.write_xml_common_attrib(elm)
646
- elm.set("version","2.0")
647
- elm.set("width",str(document.canvas[0]))
648
- elm.set("height", str(document.canvas[1]))
649
- elm.set("xmlns","http://www.w3.org/2000/svg")
650
- elm.set("xmlns:xlink","http://www.w3.org/1999/xlink")
651
- #write definitions before we write any children
652
- document.write_defs(elm)
653
-
654
- #write the children
655
- for child in self.children:
656
- child.write_xml(elm)
657
-
658
- return elm
659
-
660
- def get_type(self):
661
- return "Root node"
662
-
663
- def build_scene(self,shapes,shape_groups,transform,appearance):
664
- outtf = self.prop_transform(transform).to(self.device)
665
- for child in self.children:
666
- child.build_scene(shapes,shape_groups,outtf,appearance)
667
-
668
- @staticmethod
669
- def get_default_appearance(device):
670
- default_appearance = {"fill": ("solid", torch.tensor([0., 0., 0.],device=device)),
671
- "fill-opacity": torch.tensor([1.],device=device),
672
- "fill-rule": "nonzero",
673
- "opacity": torch.tensor([1.],device=device),
674
- "stroke": ("none", None),
675
- "stroke-opacity": torch.tensor([1.],device=device),
676
- "stroke-width": torch.tensor([0.],device=device)}
677
- return default_appearance
678
-
679
- @staticmethod
680
- def get_default_transform():
681
- return torch.eye(3)
682
-
683
-
684
-
685
- class ShapeNode(SvgNode):
686
- def __init__(self, id, transform, appearance,settings):
687
- super().__init__(id, transform, appearance,settings)
688
-
689
- def get_type(self):
690
- return "Generic shape node"
691
-
692
- def is_shape(self):
693
- return True
694
-
695
- def construct_paint(self,value,combined_opacity,transform):
696
- if value[0] == "none":
697
- return None
698
- elif value[0] == "solid":
699
- return torch.cat([value[1],combined_opacity]).to(self.device)
700
- elif value[0] == "url":
701
- #get the gradient object from this node
702
- return value[1].getGrad(combined_opacity,transform)
703
- else:
704
- raise ValueError("Unknown paint value type '{}'".format(value[0]))
705
-
706
- def make_shape_group(self,appearance,transform,num_shapes,num_subobjects):
707
- fill=self.construct_paint(appearance["fill"],appearance["opacity"]*appearance["fill-opacity"],transform)
708
- stroke=self.construct_paint(appearance["stroke"],appearance["opacity"]*appearance["stroke-opacity"],transform)
709
- sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + num_subobjects)),
710
- fill_color=fill,
711
- use_even_odd_rule=appearance["fill-rule"]=="evenodd",
712
- stroke_color=stroke,
713
- shape_to_canvas=transform,
714
- id=self.id)
715
- return sg
716
-
717
- class PathNode(ShapeNode):
718
- def __init__(self, id, transform, appearance,settings, paths):
719
- super().__init__(id, transform, appearance,settings)
720
- self.proc_paths(paths,settings.retrieve(self.id)[0])
721
-
722
- def proc_paths(self,paths,optim_params):
723
- self.paths=paths
724
- if optim_params["paths"]["optimize_points"]:
725
- ptlist=[]
726
- for path in paths:
727
- ptlist.append(path.points.requires_grad_(True))
728
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]](ptlist,lr=optim_params["paths"]["shape_lr"]))
729
-
730
- def get_type(self):
731
- return "Path node"
732
-
733
- def build_scene(self,shapes,shape_groups,transform,appearance):
734
- applytf=self.prop_transform(transform)
735
- applyapp = self.prop_appearance(appearance)
736
- sg=self.make_shape_group(applyapp,applytf,len(shapes),len(self.paths))
737
- for path in self.paths:
738
- disp_path=pydiffvg.Path(path.num_control_points,path.points,path.is_closed,applyapp["stroke-width"],path.id)
739
- shapes.append(disp_path)
740
- shape_groups.append(sg)
741
-
742
- def path_to_string(self,path):
743
- path_string = "M {},{} ".format(path.points[0][0].item(), path.points[0][1].item())
744
- idx = 1
745
- numpoints = path.points.shape[0]
746
- for type in path.num_control_points:
747
- toproc = type + 1
748
- if type == 0:
749
- # add line
750
- path_string += "L "
751
- elif type == 1:
752
- # add quadric
753
- path_string += "Q "
754
- elif type == 2:
755
- # add cubic
756
- path_string += "C "
757
- while toproc > 0:
758
- path_string += "{},{} ".format(path.points[idx % numpoints][0].item(),
759
- path.points[idx % numpoints][1].item())
760
- idx += 1
761
- toproc -= 1
762
- if path.is_closed:
763
- path_string += "Z "
764
-
765
- return path_string
766
-
767
- def paths_string(self):
768
- pstr=""
769
- for path in self.paths:
770
- pstr+=self.path_to_string(path)
771
- return pstr
772
-
773
- def write_xml(self, parent):
774
- elm = etree.SubElement(parent, "path")
775
- self.write_xml_common_attrib(elm)
776
- elm.set("d",self.paths_string())
777
-
778
- for child in self.children:
779
- child.write_xml(elm)
780
-
781
- class RectNode(ShapeNode):
782
- def __init__(self, id, transform, appearance,settings, rect):
783
- super().__init__(id, transform, appearance,settings)
784
- self.rect=torch.tensor(rect,dtype=torch.float,device=settings.device)
785
- optim_params=settings.retrieve(self.id)[0]
786
- #borrowing path settings for this
787
- if optim_params["paths"]["optimize_points"]:
788
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.rect],lr=optim_params["paths"]["shape_lr"]))
789
-
790
- def get_type(self):
791
- return "Rect node"
792
-
793
- def build_scene(self,shapes,shape_groups,transform,appearance):
794
- applytf=self.prop_transform(transform)
795
- applyapp = self.prop_appearance(appearance)
796
- sg=self.make_shape_group(applyapp,applytf,len(shapes),1)
797
- shapes.append(pydiffvg.Rect(self.rect[0:2],self.rect[0:2]+self.rect[2:4],applyapp["stroke-width"],self.id))
798
- shape_groups.append(sg)
799
-
800
- def write_xml(self, parent):
801
- elm = etree.SubElement(parent, "rect")
802
- self.write_xml_common_attrib(elm)
803
- elm.set("x",str(self.rect[0]))
804
- elm.set("y", str(self.rect[1]))
805
- elm.set("width", str(self.rect[2]))
806
- elm.set("height", str(self.rect[3]))
807
-
808
- for child in self.children:
809
- child.write_xml(elm)
810
-
811
- class CircleNode(ShapeNode):
812
- def __init__(self, id, transform, appearance,settings, rect):
813
- super().__init__(id, transform, appearance,settings)
814
- self.circle=torch.tensor(rect,dtype=torch.float,device=settings.device)
815
- optim_params=settings.retrieve(self.id)[0]
816
- #borrowing path settings for this
817
- if optim_params["paths"]["optimize_points"]:
818
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.circle],lr=optim_params["paths"]["shape_lr"]))
819
-
820
- def get_type(self):
821
- return "Circle node"
822
-
823
- def build_scene(self,shapes,shape_groups,transform,appearance):
824
- applytf=self.prop_transform(transform)
825
- applyapp = self.prop_appearance(appearance)
826
- sg=self.make_shape_group(applyapp,applytf,len(shapes),1)
827
- shapes.append(pydiffvg.Circle(self.circle[2],self.circle[0:2],applyapp["stroke-width"],self.id))
828
- shape_groups.append(sg)
829
-
830
- def write_xml(self, parent):
831
- elm = etree.SubElement(parent, "circle")
832
- self.write_xml_common_attrib(elm)
833
- elm.set("cx",str(self.circle[0]))
834
- elm.set("cy", str(self.circle[1]))
835
- elm.set("r", str(self.circle[2]))
836
-
837
- for child in self.children:
838
- child.write_xml(elm)
839
-
840
-
841
- class EllipseNode(ShapeNode):
842
- def __init__(self, id, transform, appearance,settings, ellipse):
843
- super().__init__(id, transform, appearance,settings)
844
- self.ellipse=torch.tensor(ellipse,dtype=torch.float,device=settings.device)
845
- optim_params=settings.retrieve(self.id)[0]
846
- #borrowing path settings for this
847
- if optim_params["paths"]["optimize_points"]:
848
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.ellipse],lr=optim_params["paths"]["shape_lr"]))
849
-
850
- def get_type(self):
851
- return "Ellipse node"
852
-
853
- def build_scene(self,shapes,shape_groups,transform,appearance):
854
- applytf=self.prop_transform(transform)
855
- applyapp = self.prop_appearance(appearance)
856
- sg=self.make_shape_group(applyapp,applytf,len(shapes),1)
857
- shapes.append(pydiffvg.Ellipse(self.ellipse[2:4],self.ellipse[0:2],applyapp["stroke-width"],self.id))
858
- shape_groups.append(sg)
859
-
860
- def write_xml(self, parent):
861
- elm = etree.SubElement(parent, "ellipse")
862
- self.write_xml_common_attrib(elm)
863
- elm.set("cx", str(self.ellipse[0]))
864
- elm.set("cy", str(self.ellipse[1]))
865
- elm.set("rx", str(self.ellipse[2]))
866
- elm.set("ry", str(self.ellipse[3]))
867
-
868
- for child in self.children:
869
- child.write_xml(elm)
870
-
871
- class PolygonNode(ShapeNode):
872
- def __init__(self, id, transform, appearance,settings, points):
873
- super().__init__(id, transform, appearance,settings)
874
- self.points=points
875
- optim_params=settings.retrieve(self.id)[0]
876
- #borrowing path settings for this
877
- if optim_params["paths"]["optimize_points"]:
878
- self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.points],lr=optim_params["paths"]["shape_lr"]))
879
-
880
- def get_type(self):
881
- return "Polygon node"
882
-
883
- def build_scene(self,shapes,shape_groups,transform,appearance):
884
- applytf=self.prop_transform(transform)
885
- applyapp = self.prop_appearance(appearance)
886
- sg=self.make_shape_group(applyapp,applytf,len(shapes),1)
887
- shapes.append(pydiffvg.Polygon(self.points,True,applyapp["stroke-width"],self.id))
888
- shape_groups.append(sg)
889
-
890
- def point_string(self):
891
- ret=""
892
- for i in range(self.points.shape[0]):
893
- pt=self.points[i,:]
894
- #assert pt.shape == (1,2)
895
- ret+= str(pt[0])+","+str(pt[1])+" "
896
- return ret
897
-
898
- def write_xml(self, parent):
899
- elm = etree.SubElement(parent, "polygon")
900
- self.write_xml_common_attrib(elm)
901
- elm.set("points",self.point_string())
902
-
903
- for child in self.children:
904
- child.write_xml(elm)
905
-
906
- class GradientNode(SvgNode):
907
- def __init__(self, id, transform,settings,begin,end,offsets,stops,href):
908
- super().__init__(id, transform, {},settings)
909
- self.optim=OptimizableSvg.GradientOptimizer(begin, end, offsets, stops, settings.retrieve(id)[0])
910
- self.optimizers.append(self.optim)
911
- self.href=href
912
-
913
- def is_ref(self):
914
- return self.href is not None
915
-
916
- def get_type(self):
917
- return "Gradient node"
918
-
919
- def get_stops(self):
920
- _, _, offsets, stops=self.optim.get_vals()
921
- return offsets, stops
922
-
923
- def get_points(self):
924
- begin, end, _, _ =self.optim.get_vals()
925
- return begin, end
926
-
927
- def write_xml(self, parent):
928
- elm = etree.SubElement(parent, "linearGradient")
929
- self.write_xml_common_attrib(elm,tfname="gradientTransform")
930
-
931
- begin, end, offsets, stops = self.optim.get_vals()
932
-
933
- if self.href is None:
934
- #we have stops
935
- for idx, offset in enumerate(offsets):
936
- stop=etree.SubElement(elm,"stop")
937
- stop.set("offset",str(offset.item()))
938
- stop.set("stop-color",OptimizableSvg.rgb_to_string(stops[idx,0:3]))
939
- stop.set("stop-opacity",str(stops[idx,3].item()))
940
- else:
941
- elm.set('xlink:href', "#{}".format(self.href.id))
942
-
943
- if begin is not None and end is not None:
944
- #no stops
945
- elm.set('x1', str(begin[0].item()))
946
- elm.set('y1', str(begin[1].item()))
947
- elm.set('x2', str(end[0].item()))
948
- elm.set('y2', str(end[1].item()))
949
-
950
- # magic value to make this work
951
- elm.set("gradientUnits", "userSpaceOnUse")
952
-
953
- for child in self.children:
954
- child.write_xml(elm)
955
-
956
- def getGrad(self,combined_opacity,transform):
957
- if self.is_ref():
958
- offsets, stops=self.href.get_stops()
959
- else:
960
- offsets, stops=self.get_stops()
961
-
962
- stops=stops.clone()
963
- stops[:,3]*=combined_opacity
964
-
965
- begin,end = self.get_points()
966
-
967
- applytf=self.prop_transform(transform)
968
- begin=OptimizableSvg.TransformTools.transformPoints(begin.unsqueeze(0),applytf).squeeze()
969
- end = OptimizableSvg.TransformTools.transformPoints(end.unsqueeze(0), applytf).squeeze()
970
-
971
- return pydiffvg.LinearGradient(begin, end, offsets, stops)
972
- #endregion
973
-
974
- def __init__(self, filename, settings=SvgOptimizationSettings(),optimize_background=False, verbose=False, device=torch.device("cpu")):
975
- self.settings=settings
976
- self.verbose=verbose
977
- self.device=device
978
- self.settings.device=device
979
-
980
- tree = etree.parse(filename)
981
- root = tree.getroot()
982
-
983
- #in case we need global optimization
984
- self.optimizers=[]
985
- self.background=torch.tensor([1.,1.,1.],dtype=torch.float32,requires_grad=optimize_background,device=self.device)
986
-
987
- if optimize_background:
988
- p=settings.retrieve("default")[0]
989
- self.optimizers.append(OptimizableSvg.ColorOptimizer(self.background,SvgOptimizationSettings.optims[p["optimizer"]],p["color_lr"]))
990
-
991
- self.defs={}
992
-
993
- self.depth=0
994
-
995
- self.dirty=True
996
- self.scene=None
997
-
998
- self.parseRoot(root)
999
-
1000
- recognised_shapes=["path","circle","rect","ellipse","polygon"]
1001
-
1002
- #region core functionality
1003
- def build_scene(self):
1004
- if self.dirty:
1005
- shape_groups=[]
1006
- shapes=[]
1007
- self.root.build_scene(shapes,shape_groups,OptimizableSvg.RootNode.get_default_transform().to(self.device),OptimizableSvg.RootNode.get_default_appearance(self.device))
1008
- self.scene=(self.canvas[0],self.canvas[1],shapes,shape_groups)
1009
- self.dirty=False
1010
- return self.scene
1011
-
1012
- def zero_grad(self):
1013
- self.root.zero_grad()
1014
- for optim in self.optimizers:
1015
- optim.zero_grad()
1016
- for item in self.defs.values():
1017
- if issubclass(item.__class__,OptimizableSvg.SvgNode):
1018
- item.zero_grad()
1019
-
1020
- def render(self,scale=None,seed=0):
1021
- #render at native resolution
1022
- scene = self.build_scene()
1023
- scene_args = pydiffvg.RenderFunction.serialize_scene(*scene)
1024
- render = pydiffvg.RenderFunction.apply
1025
- out_size=(scene[0],scene[1]) if scale is None else (int(scene[0]*scale),int(scene[1]*scale))
1026
- img = render(out_size[0], # width
1027
- out_size[1], # height
1028
- 2, # num_samples_x
1029
- 2, # num_samples_y
1030
- seed, # seed
1031
- None, # background_image
1032
- *scene_args)
1033
- return img
1034
-
1035
- def step(self):
1036
- self.dirty=True
1037
- self.root.step()
1038
- for optim in self.optimizers:
1039
- optim.step()
1040
- for item in self.defs.values():
1041
- if issubclass(item.__class__, OptimizableSvg.SvgNode):
1042
- item.step()
1043
- #endregion
1044
-
1045
- #region reporting
1046
-
1047
- def offset_str(self,s):
1048
- return ("\t"*self.depth)+s
1049
-
1050
- def reportSkippedAttribs(self, node, non_skipped=[]):
1051
- skipped=set([k for k in node.attrib.keys() if not OptimizableSvg.is_namespace(k)])-set(non_skipped)
1052
- if len(skipped)>0:
1053
- tag=OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag),node.attrib["id"])
1054
- print(self.offset_str("Warning: Skipping the following attributes of node '{}': {}".format(tag,", ".join(["'{}'".format(atr) for atr in skipped]))))
1055
-
1056
- def reportSkippedChildren(self,node,skipped):
1057
- skipped_names=["{}#{}".format(elm.tag,elm.attrib["id"]) if "id" in elm.attrib else elm.tag for elm in skipped]
1058
- if len(skipped)>0:
1059
- tag = OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag),
1060
- node.attrib["id"])
1061
- print(self.offset_str("Warning: Skipping the following children of node '{}': {}".format(tag,", ".join(["'{}'".format(name) for name in skipped_names]))))
1062
-
1063
- #endregion
1064
-
1065
- #region parsing
1066
- @staticmethod
1067
- def remove_namespace(s):
1068
- """
1069
- {...} ... -> ...
1070
- """
1071
- return re.sub('{.*}', '', s)
1072
-
1073
- @staticmethod
1074
- def is_namespace(s):
1075
- return re.match('{.*}', s) is not None
1076
-
1077
- @staticmethod
1078
- def parseTransform(node):
1079
- if "transform" not in node.attrib and "gradientTransform" not in node.attrib:
1080
- return None
1081
-
1082
- tf_string=node.attrib["transform"] if "transform" in node.attrib else node.attrib["gradientTransform"]
1083
- tforms=tf_string.split(")")[:-1]
1084
- mat=np.eye(3)
1085
- for tform in tforms:
1086
- type = tform.split("(")[0]
1087
- args = [float(val) for val in re.split("[, ]+",tform.split("(")[1])]
1088
- if type == "matrix":
1089
- mat=mat @ OptimizableSvg.TransformTools.parse_matrix(args)
1090
- elif type == "translate":
1091
- mat = mat @ OptimizableSvg.TransformTools.parse_translate(args)
1092
- elif type == "rotate":
1093
- mat = mat @ OptimizableSvg.TransformTools.parse_rotate(args)
1094
- elif type == "scale":
1095
- mat = mat @ OptimizableSvg.TransformTools.parse_scale(args)
1096
- elif type == "skewX":
1097
- mat = mat @ OptimizableSvg.TransformTools.parse_skewx(args)
1098
- elif type == "skewY":
1099
- mat = mat @ OptimizableSvg.TransformTools.parse_skewy(args)
1100
- else:
1101
- raise ValueError("Unknown transform type '{}'".format(type))
1102
- return mat
1103
-
1104
- #dictionary that defines what constant do we need to multiply different units to get the value in pixels
1105
- #gleaned from the CSS definition
1106
- unit_dict = {"px":1,
1107
- "mm":4,
1108
- "cm":40,
1109
- "in":25.4*4,
1110
- "pt":25.4*4/72,
1111
- "pc":25.4*4/6
1112
- }
1113
-
1114
- @staticmethod
1115
- def parseLength(s):
1116
- #length is a number followed possibly by a unit definition
1117
- #we assume that default unit is the pixel (px) equal to 0.25mm
1118
- #last two characters might be unit
1119
- val=None
1120
- for i in range(len(s)):
1121
- try:
1122
- val=float(s[:len(s)-i])
1123
- unit=s[len(s)-i:]
1124
- break
1125
- except ValueError:
1126
- continue
1127
- if len(unit)>0 and unit not in OptimizableSvg.unit_dict:
1128
- raise ValueError("Unknown or unsupported unit '{}' encountered while parsing".format(unit))
1129
- if unit != "":
1130
- val*=OptimizableSvg.unit_dict[unit]
1131
- return val
1132
-
1133
- @staticmethod
1134
- def parseOpacity(s):
1135
- is_percent=s.endswith("%")
1136
- s=s.rstrip("%")
1137
- val=float(s)
1138
- if is_percent:
1139
- val=val/100
1140
- return np.clip(val,0.,1.)
1141
-
1142
- @staticmethod
1143
- def parse_color(s):
1144
- """
1145
- Hex to tuple
1146
- """
1147
- if s[0] != '#':
1148
- raise ValueError("Color argument `{}` not supported".format(s))
1149
- s = s.lstrip('#')
1150
- if len(s)==6:
1151
- rgb = tuple(int(s[i:i + 2], 16) for i in (0, 2, 4))
1152
- return torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0])
1153
- elif len(s)==3:
1154
- rgb = tuple((int(s[i:i + 1], 16)) for i in (0, 1, 2))
1155
- return torch.tensor([rgb[0] / 15.0, rgb[1] / 15.0, rgb[2] / 15.0])
1156
- else:
1157
- raise ValueError("Color argument `{}` not supported".format(s))
1158
- # sRGB to RGB
1159
- # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2)
1160
-
1161
-
1162
- @staticmethod
1163
- def rgb_to_string(val):
1164
- byte_rgb=(val.clone().detach()*255).type(torch.int)
1165
- byte_rgb.clamp_(min=0,max=255)
1166
- s="#{:02x}{:02x}{:02x}".format(*byte_rgb)
1167
- return s
1168
-
1169
- #parses a "paint" string for use in fill and stroke definitions
1170
- @staticmethod
1171
- def parsePaint(paintStr,defs,device):
1172
- paintStr=paintStr.strip()
1173
- if paintStr=="none":
1174
- return ("none", None)
1175
- elif paintStr[0]=="#":
1176
- return ("solid",OptimizableSvg.parse_color(paintStr).to(device))
1177
- elif paintStr.startswith("url"):
1178
- url=paintStr.lstrip("url(").rstrip(")").strip("\'\"").lstrip("#")
1179
- if url not in defs:
1180
- raise ValueError("Paint-type attribute referencing an unknown object with ID '#{}'".format(url))
1181
- return ("url",defs[url])
1182
- else:
1183
- raise ValueError("Unrecognized paint string: '{}'".format(paintStr))
1184
-
1185
- appearance_keys=["fill","fill-opacity","fill-rule","opacity","stroke","stroke-opacity","stroke-width"]
1186
-
1187
- @staticmethod
1188
- def parseAppearance(node, defs, device):
1189
- ret={}
1190
- parse_keys = OptimizableSvg.appearance_keys
1191
- local_dict={key:value for key,value in node.attrib.items() if key in parse_keys}
1192
- css_dict={}
1193
- style_dict={}
1194
- appearance_dict={}
1195
- if "class" in node.attrib:
1196
- cls=node.attrib["class"]
1197
- if "."+cls in defs:
1198
- css_string=defs["."+cls]
1199
- css_dict={item.split(":")[0]:item.split(":")[1] for item in css_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys}
1200
- if "style" in node.attrib:
1201
- style_string=node.attrib["style"]
1202
- style_dict={item.split(":")[0]:item.split(":")[1] for item in style_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys}
1203
- appearance_dict.update(css_dict)
1204
- appearance_dict.update(style_dict)
1205
- appearance_dict.update(local_dict)
1206
- for key,value in appearance_dict.items():
1207
- if key=="fill":
1208
- ret[key]=OptimizableSvg.parsePaint(value,defs,device)
1209
- elif key == "fill-opacity":
1210
- ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device)
1211
- elif key == "fill-rule":
1212
- ret[key]=value
1213
- elif key == "opacity":
1214
- ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device)
1215
- elif key == "stroke":
1216
- ret[key]=OptimizableSvg.parsePaint(value,defs,device)
1217
- elif key == "stroke-opacity":
1218
- ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device)
1219
- elif key == "stroke-width":
1220
- ret[key]=torch.tensor(OptimizableSvg.parseLength(value),device=device)
1221
- else:
1222
- raise ValueError("Error while parsing appearance attributes: key '{}' should not be here".format(key))
1223
-
1224
- return ret
1225
-
1226
- def parseRoot(self,root):
1227
- if self.verbose:
1228
- print(self.offset_str("Parsing root"))
1229
- self.depth += 1
1230
-
1231
- # get document canvas dimensions
1232
- self.parseViewport(root)
1233
- canvmax=np.max(self.canvas)
1234
- self.settings.global_override(["transforms","translation_mult"],canvmax)
1235
- id=root.attrib["id"] if "id" in root.attrib else None
1236
-
1237
- transform=OptimizableSvg.parseTransform(root)
1238
- appearance=OptimizableSvg.parseAppearance(root,self.defs,self.device)
1239
-
1240
- version=root.attrib["version"] if "version" in root.attrib else "<unknown version>"
1241
- if version != "2.0":
1242
- print(self.offset_str("Warning: Version {} is not 2.0, strange things may happen".format(version)))
1243
-
1244
- self.root=OptimizableSvg.RootNode(id,transform,appearance,self.settings)
1245
-
1246
- if self.verbose:
1247
- self.reportSkippedAttribs(root, ["width", "height", "id", "transform","version", "style"]+OptimizableSvg.appearance_keys)
1248
-
1249
- #go through the root children and parse them appropriately
1250
- skipped=[]
1251
- for child in root:
1252
- if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes:
1253
- self.parseShape(child,self.root)
1254
- elif OptimizableSvg.remove_namespace(child.tag) == "defs":
1255
- self.parseDefs(child)
1256
- elif OptimizableSvg.remove_namespace(child.tag) == "style":
1257
- self.parseStyle(child)
1258
- elif OptimizableSvg.remove_namespace(child.tag) == "g":
1259
- self.parseGroup(child,self.root)
1260
- else:
1261
- skipped.append(child)
1262
-
1263
- if self.verbose:
1264
- self.reportSkippedChildren(root,skipped)
1265
-
1266
- self.depth-=1
1267
-
1268
- def parseShape(self,shape,parent):
1269
- tag=OptimizableSvg.remove_namespace(shape.tag)
1270
- if self.verbose:
1271
- print(self.offset_str("Parsing {}#{}".format(tag,shape.attrib["id"] if "id" in shape.attrib else "<No ID>")))
1272
-
1273
- self.depth+=1
1274
- if tag == "path":
1275
- self.parsePath(shape,parent)
1276
- elif tag == "circle":
1277
- self.parseCircle(shape,parent)
1278
- elif tag == "rect":
1279
- self.parseRect(shape,parent)
1280
- elif tag == "ellipse":
1281
- self.parseEllipse(shape,parent)
1282
- elif tag == "polygon":
1283
- self.parsePolygon(shape,parent)
1284
- else:
1285
- raise ValueError("Encountered unknown shape type '{}'".format(tag))
1286
- self.depth -= 1
1287
-
1288
- def parsePath(self,shape,parent):
1289
- path_string=shape.attrib['d']
1290
- name = ''
1291
- if 'id' in shape.attrib:
1292
- name = shape.attrib['id']
1293
- paths = pydiffvg.from_svg_path(path_string)
1294
- for idx, path in enumerate(paths):
1295
- path.stroke_width = torch.tensor([0.],device=self.device)
1296
- path.num_control_points=path.num_control_points.to(self.device)
1297
- path.points=path.points.to(self.device)
1298
- path.source_id = name
1299
- path.id = "{}-{}".format(name,idx) if len(paths)>1 else name
1300
- transform = OptimizableSvg.parseTransform(shape)
1301
- appearance = OptimizableSvg.parseAppearance(shape,self.defs,self.device)
1302
- node=OptimizableSvg.PathNode(name,transform,appearance,self.settings,paths)
1303
- parent.children.append(node)
1304
-
1305
- if self.verbose:
1306
- self.reportSkippedAttribs(shape, ["id","d","transform","style"]+OptimizableSvg.appearance_keys)
1307
- self.reportSkippedChildren(shape,list(shape))
1308
-
1309
- def parseEllipse(self, shape, parent):
1310
- cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0.
1311
- cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0.
1312
- rx = float(shape.attrib["rx"])
1313
- ry = float(shape.attrib["ry"])
1314
- name = ''
1315
- if 'id' in shape.attrib:
1316
- name = shape.attrib['id']
1317
- transform = OptimizableSvg.parseTransform(shape)
1318
- appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device)
1319
- node = OptimizableSvg.EllipseNode(name, transform, appearance, self.settings, (cx, cy, rx, ry))
1320
- parent.children.append(node)
1321
-
1322
- if self.verbose:
1323
- self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform",
1324
- "style"] + OptimizableSvg.appearance_keys)
1325
- self.reportSkippedChildren(shape, list(shape))
1326
-
1327
- def parsePolygon(self, shape, parent):
1328
- points_string = shape.attrib['points']
1329
- name = ''
1330
- points=[]
1331
- for point_string in points_string.split(" "):
1332
- if len(point_string) == 0:
1333
- continue
1334
- coord_strings=point_string.split(",")
1335
- assert len(coord_strings)==2
1336
- points.append([float(coord_strings[0]),float(coord_strings[1])])
1337
- points=torch.tensor(points,dtype=torch.float,device=self.device)
1338
- if 'id' in shape.attrib:
1339
- name = shape.attrib['id']
1340
- transform = OptimizableSvg.parseTransform(shape)
1341
- appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device)
1342
- node = OptimizableSvg.PolygonNode(name, transform, appearance, self.settings, points)
1343
- parent.children.append(node)
1344
-
1345
- if self.verbose:
1346
- self.reportSkippedAttribs(shape, ["id", "points", "transform", "style"] + OptimizableSvg.appearance_keys)
1347
- self.reportSkippedChildren(shape, list(shape))
1348
-
1349
- def parseCircle(self,shape,parent):
1350
- cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0.
1351
- cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0.
1352
- r = float(shape.attrib["r"])
1353
- name = ''
1354
- if 'id' in shape.attrib:
1355
- name = shape.attrib['id']
1356
- transform = OptimizableSvg.parseTransform(shape)
1357
- appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device)
1358
- node = OptimizableSvg.CircleNode(name, transform, appearance, self.settings, (cx, cy, r))
1359
- parent.children.append(node)
1360
-
1361
- if self.verbose:
1362
- self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform",
1363
- "style"] + OptimizableSvg.appearance_keys)
1364
- self.reportSkippedChildren(shape, list(shape))
1365
-
1366
- def parseRect(self,shape,parent):
1367
- x = float(shape.attrib["x"]) if "x" in shape.attrib else 0.
1368
- y = float(shape.attrib["y"]) if "y" in shape.attrib else 0.
1369
- width = float(shape.attrib["width"])
1370
- height = float(shape.attrib["height"])
1371
- name = ''
1372
- if 'id' in shape.attrib:
1373
- name = shape.attrib['id']
1374
- transform = OptimizableSvg.parseTransform(shape)
1375
- appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device)
1376
- node = OptimizableSvg.RectNode(name, transform, appearance, self.settings, (x,y,width,height))
1377
- parent.children.append(node)
1378
-
1379
- if self.verbose:
1380
- self.reportSkippedAttribs(shape, ["id", "x", "y", "width", "height", "transform", "style"] + OptimizableSvg.appearance_keys)
1381
- self.reportSkippedChildren(shape, list(shape))
1382
-
1383
- def parseGroup(self,group,parent):
1384
- tag = OptimizableSvg.remove_namespace(group.tag)
1385
- id = group.attrib["id"] if "id" in group.attrib else "<No ID>"
1386
- if self.verbose:
1387
- print(self.offset_str("Parsing {}#{}".format(tag, id)))
1388
-
1389
- self.depth+=1
1390
-
1391
- transform=self.parseTransform(group)
1392
-
1393
- #todo process more attributes
1394
- appearance=OptimizableSvg.parseAppearance(group,self.defs,self.device)
1395
- node=OptimizableSvg.GroupNode(id,transform,appearance,self.settings)
1396
- parent.children.append(node)
1397
-
1398
- if self.verbose:
1399
- self.reportSkippedAttribs(group,["id","transform","style"]+OptimizableSvg.appearance_keys)
1400
-
1401
- skipped_children=[]
1402
- for child in group:
1403
- if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes:
1404
- self.parseShape(child,node)
1405
- elif OptimizableSvg.remove_namespace(child.tag) == "defs":
1406
- self.parseDefs(child)
1407
- elif OptimizableSvg.remove_namespace(child.tag) == "style":
1408
- self.parseStyle(child)
1409
- elif OptimizableSvg.remove_namespace(child.tag) == "g":
1410
- self.parseGroup(child,node)
1411
- else:
1412
- skipped_children.append(child)
1413
-
1414
- if self.verbose:
1415
- self.reportSkippedChildren(group,skipped_children)
1416
-
1417
- self.depth-=1
1418
-
1419
- def parseStyle(self,style_node):
1420
- tag = OptimizableSvg.remove_namespace(style_node.tag)
1421
- id = style_node.attrib["id"] if "id" in style_node.attrib else "<No ID>"
1422
- if self.verbose:
1423
- print(self.offset_str("Parsing {}#{}".format(tag, id)))
1424
-
1425
- if style_node.attrib["type"] != "text/css":
1426
- raise ValueError("Only text/css style recognized, got {}".format(style_node.attrib["type"]))
1427
-
1428
- self.depth += 1
1429
-
1430
- # creating only a dummy node
1431
- node = OptimizableSvg.SvgNode(id, None, {}, self.settings)
1432
-
1433
- if self.verbose:
1434
- self.reportSkippedAttribs(def_node, ["id"])
1435
-
1436
- if len(style_node)>0:
1437
- raise ValueError("Style node should not have children (has {})".format(len(style_node)))
1438
-
1439
- # collect CSS classes
1440
- sheet = cssutils.parseString(style_node.text)
1441
- for rule in sheet:
1442
- if hasattr(rule, 'selectorText') and hasattr(rule, 'style'):
1443
- name = rule.selectorText
1444
- if len(name) >= 2 and name[0] == '.':
1445
- self.defs[name] = rule.style.getCssText().replace("\n","")
1446
- else:
1447
- raise ValueError("Unrecognized CSS selector {}".format(name))
1448
- else:
1449
- raise ValueError("No style or selector text in CSS rule")
1450
-
1451
- if self.verbose:
1452
- self.reportSkippedChildren(def_node, skipped_children)
1453
-
1454
- self.depth -= 1
1455
-
1456
- def parseDefs(self,def_node):
1457
- #only linear gradients are currently supported
1458
- tag = OptimizableSvg.remove_namespace(def_node.tag)
1459
- id = def_node.attrib["id"] if "id" in def_node.attrib else "<No ID>"
1460
- if self.verbose:
1461
- print(self.offset_str("Parsing {}#{}".format(tag, id)))
1462
-
1463
- self.depth += 1
1464
-
1465
-
1466
- # creating only a dummy node
1467
- node = OptimizableSvg.SvgNode(id, None, {},self.settings)
1468
-
1469
- if self.verbose:
1470
- self.reportSkippedAttribs(def_node, ["id"])
1471
-
1472
- skipped_children = []
1473
- for child in def_node:
1474
- if OptimizableSvg.remove_namespace(child.tag) == "linearGradient":
1475
- self.parseGradient(child,node)
1476
- elif OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes:
1477
- raise NotImplementedError("Definition/instantiation of shapes not supported")
1478
- elif OptimizableSvg.remove_namespace(child.tag) == "defs":
1479
- raise NotImplementedError("Definition within definition not supported")
1480
- elif OptimizableSvg.remove_namespace(child.tag) == "g":
1481
- raise NotImplementedError("Groups within definition not supported")
1482
- else:
1483
- skipped_children.append(child)
1484
-
1485
- if len(node.children)>0:
1486
- #take this node out and enter it into defs
1487
- self.defs[node.children[0].id]=node.children[0]
1488
- node.children.pop()
1489
-
1490
-
1491
- if self.verbose:
1492
- self.reportSkippedChildren(def_node, skipped_children)
1493
-
1494
- self.depth -= 1
1495
-
1496
- def parseGradientStop(self,stop):
1497
- param_dict={key:value for key,value in stop.attrib.items() if key in ["id","offset","stop-color","stop-opacity"]}
1498
- style_dict={}
1499
- if "style" in stop.attrib:
1500
- style_dict={item.split(":")[0]:item.split(":")[1] for item in stop.attrib["style"].split(";") if len(item)>0}
1501
- param_dict.update(style_dict)
1502
-
1503
- offset=OptimizableSvg.parseOpacity(param_dict["offset"])
1504
- color=OptimizableSvg.parse_color(param_dict["stop-color"])
1505
- opacity=OptimizableSvg.parseOpacity(param_dict["stop-opacity"]) if "stop-opacity" in param_dict else 1.
1506
-
1507
- return offset, color, opacity
1508
-
1509
- def parseGradient(self, gradient_node, parent):
1510
- tag = OptimizableSvg.remove_namespace(gradient_node.tag)
1511
- id = gradient_node.attrib["id"] if "id" in gradient_node.attrib else "<No ID>"
1512
- if self.verbose:
1513
- print(self.offset_str("Parsing {}#{}".format(tag, id)))
1514
-
1515
- self.depth += 1
1516
- if "stop" not in [OptimizableSvg.remove_namespace(child.tag) for child in gradient_node]\
1517
- and "href" not in [OptimizableSvg.remove_namespace(key) for key in gradient_node.attrib.keys()]:
1518
- raise ValueError("Gradient {} has neither stops nor a href link to them".format(id))
1519
-
1520
- transform=self.parseTransform(gradient_node)
1521
- begin=None
1522
- end = None
1523
- offsets=[]
1524
- stops=[]
1525
- href=None
1526
-
1527
- if "x1" in gradient_node.attrib or "y1" in gradient_node.attrib:
1528
- begin=np.array([0.,0.])
1529
- if "x1" in gradient_node.attrib:
1530
- begin[0] = float(gradient_node.attrib["x1"])
1531
- if "y1" in gradient_node.attrib:
1532
- begin[1] = float(gradient_node.attrib["y1"])
1533
- begin = torch.tensor(begin.transpose(),dtype=torch.float32)
1534
-
1535
- if "x2" in gradient_node.attrib or "y2" in gradient_node.attrib:
1536
- end=np.array([0.,0.])
1537
- if "x2" in gradient_node.attrib:
1538
- end[0] = float(gradient_node.attrib["x2"])
1539
- if "y2" in gradient_node.attrib:
1540
- end[1] = float(gradient_node.attrib["y2"])
1541
- end=torch.tensor(end.transpose(),dtype=torch.float32)
1542
-
1543
- stop_nodes=[node for node in list(gradient_node) if OptimizableSvg.remove_namespace(node.tag)=="stop"]
1544
- if len(stop_nodes)>0:
1545
- stop_nodes=sorted(stop_nodes,key=lambda n: float(n.attrib["offset"]))
1546
-
1547
- for stop in stop_nodes:
1548
- offset, color, opacity = self.parseGradientStop(stop)
1549
- offsets.append(offset)
1550
- stops.append(np.concatenate((color,np.array([opacity]))))
1551
-
1552
- hkey=next((value for key,value in gradient_node.attrib.items() if OptimizableSvg.remove_namespace(key)=="href"),None)
1553
- if hkey is not None:
1554
- href=self.defs[hkey.lstrip("#")]
1555
-
1556
- parent.children.append(OptimizableSvg.GradientNode(id,transform,self.settings,begin.to(self.device) if begin is not None else begin,end.to(self.device) if end is not None else end,torch.tensor(offsets,dtype=torch.float32,device=self.device) if len(offsets)>0 else None,torch.tensor(np.array(stops),dtype=torch.float32,device=self.device) if len(stops)>0 else None,href))
1557
-
1558
- self.depth -= 1
1559
-
1560
- def parseViewport(self, root):
1561
- if "width" in root.attrib and "height" in root.attrib:
1562
- self.canvas = np.array([int(math.ceil(float(root.attrib["width"]))), int(math.ceil(float(root.attrib["height"])))])
1563
- elif "viewBox" in root.attrib:
1564
- s=root.attrib["viewBox"].split(" ")
1565
- w=s[2]
1566
- h=s[3]
1567
- self.canvas = np.array(
1568
- [int(math.ceil(float(w))), int(math.ceil(float(h)))])
1569
- else:
1570
- raise ValueError("Size information is missing from document definition")
1571
- #endregion
1572
-
1573
- #region writing
1574
- def write_xml(self):
1575
- tree=self.root.write_xml(self)
1576
-
1577
- return minidom.parseString(etree.tostring(tree, 'utf-8')).toprettyxml(indent=" ")
1578
-
1579
- def write_defs(self,root):
1580
- if len(self.defs)==0:
1581
- return
1582
-
1583
- defnode = etree.SubElement(root, 'defs')
1584
- stylenode = etree.SubElement(root,'style')
1585
- stylenode.set('type','text/css')
1586
- stylenode.text=""
1587
-
1588
- defcpy=copy.copy(self.defs)
1589
- while len(defcpy)>0:
1590
- torem=[]
1591
- for key,value in defcpy.items():
1592
- if issubclass(value.__class__,OptimizableSvg.SvgNode):
1593
- if value.href is None or value.href not in defcpy:
1594
- value.write_xml(defnode)
1595
- torem.append(key)
1596
- else:
1597
- continue
1598
- else:
1599
- #this is a string, and hence a CSS attribute
1600
- stylenode.text+=key+" {"+value+"}\n"
1601
- torem.append(key)
1602
-
1603
- for key in torem:
1604
- del defcpy[key]
1605
- #endregion
1606
-
1607
-