parquet-converter committed
Commit 101dfaf · 1 parent: b4ea192

Update parquet files (step 36 of 476)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/0x90e/ESRGAN-MANGA/ESRGAN/block.py +0 -261
  2. spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/account_creation.py +0 -3
  3. spaces/14-26AA/sovits_aishell3/app.py +0 -39
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Panic! Heres Why Your PC Makes a Crackling Noise and How to Fix It.md +0 -28
  5. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FL Studio 20 Keygen Reddit The Ultimate Guide to Unlocking All Features and Plugins.md +0 -40
  6. spaces/1acneusushi/gradio-2dmoleculeeditor/data/GSG HDRI Studio Pack 1.8 for Cinema 4D How to Achieve Realistic Reflections and Shadows.md +0 -150
  7. spaces/1gistliPinn/ChatGPT4/Examples/Designing With Type 5th Edition - The Essential Guide To Typography By James Craig.pdf !NEW!.md +0 -7
  8. spaces/1line/AutoGPT/autogpt/commands/audio_text.py +0 -36
  9. spaces/1phancelerku/anime-remove-background/60 Seconds! Reatomized - A Crazy and Funny Adventure in a Nuclear Wasteland - Play Online for Free.md +0 -129
  10. spaces/2ndelement/voicevox/test/test_kana_parser.py +0 -688
  11. spaces/3laa2/Text2img/README.md +0 -13
  12. spaces/42digital/DeepFashion_Classification/README.md +0 -12
  13. spaces/AI-ANK/PaLM-Kosmos-Vision/app.py +0 -165
  14. spaces/AIConsultant/MusicGen/audiocraft/models/unet.py +0 -214
  15. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/ema.py +0 -76
  16. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/test.py +0 -31
  17. spaces/AchyuthGamer/OpenGPT/g4f/Provider/You.py +0 -40
  18. spaces/Adapter/T2I-Adapter/train_seg.py +0 -372
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/RemoveChildMethods.js +0 -55
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/Factory.d.ts +0 -5
  21. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/Factory.js +0 -13
  22. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/space/Factory.d.ts +0 -3
  23. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py +0 -4
  24. spaces/Andy1621/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +0 -5
  25. spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/gfocal_loss.py +0 -188
  26. spaces/AngoHF/ANGO-Leaderboard/components/top.py +0 -13
  27. spaces/AnimalEquality/chatbot/app.py +0 -21
  28. spaces/Anindya/Marketing_Campaign_LLM/README.md +0 -13
  29. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/main.py +0 -382
  30. spaces/AzumaSeren100/XuanShen-Bert-VITS2/monotonic_align/core.py +0 -35
  31. spaces/Benson/text-generation/Examples/Colinas De Acero 2.md +0 -92
  32. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/faceshq.py +0 -134
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/errorfactory.py +0 -90
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/__init__.py +0 -44
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/log.py +0 -80
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py +0 -68
  37. spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/app.py +0 -280
  38. spaces/CVPR/LIVE/pybind11/tools/pybind11Tools.cmake +0 -188
  39. spaces/CVPR/LIVE/thrust/thrust/detail/numeric_traits.h +0 -130
  40. spaces/CVPR/WALT/mmdet/models/backbones/ssd_vgg.py +0 -169
  41. spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py +0 -172
  42. spaces/CVPR/lama-example/predict.py +0 -89
  43. spaces/CikeyQI/Yunzai/Yunzai/lib/bot.js +0 -231
  44. spaces/Codecooker/rvcapi/src/trainset_preprocess_pipeline_print.py +0 -146
  45. spaces/DCandE/rvc-models/config.py +0 -88
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-7648fc8d.js +0 -7
  47. spaces/Dana19/animal_classifier/app.py +0 -29
  48. spaces/DeathRoad/PornagraphyIsGreat/README.md +0 -10
  49. spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.h +0 -22
  50. spaces/DrGabrielLopez/fractal-generator/fractal_generator.py +0 -82
spaces/0x90e/ESRGAN-MANGA/ESRGAN/block.py DELETED
@@ -1,261 +0,0 @@
- from collections import OrderedDict
- import torch
- import torch.nn as nn
-
- ####################
- # Basic blocks
- ####################
-
-
- def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):
-     # helper selecting activation
-     # neg_slope: for leakyrelu and init of prelu
-     # n_prelu: for p_relu num_parameters
-     act_type = act_type.lower()
-     if act_type == 'relu':
-         layer = nn.ReLU(inplace)
-     elif act_type == 'leakyrelu':
-         layer = nn.LeakyReLU(neg_slope, inplace)
-     elif act_type == 'prelu':
-         layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
-     else:
-         raise NotImplementedError('activation layer [%s] is not found' % act_type)
-     return layer
-
-
- def norm(norm_type, nc):
-     # helper selecting normalization layer
-     norm_type = norm_type.lower()
-     if norm_type == 'batch':
-         layer = nn.BatchNorm2d(nc, affine=True)
-     elif norm_type == 'instance':
-         layer = nn.InstanceNorm2d(nc, affine=False)
-     else:
-         raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
-     return layer
-
-
- def pad(pad_type, padding):
-     # helper selecting padding layer
-     # if padding is 'zero', do by conv layers
-     pad_type = pad_type.lower()
-     if padding == 0:
-         return None
-     if pad_type == 'reflect':
-         layer = nn.ReflectionPad2d(padding)
-     elif pad_type == 'replicate':
-         layer = nn.ReplicationPad2d(padding)
-     else:
-         raise NotImplementedError('padding layer [%s] is not implemented' % pad_type)
-     return layer
-
-
- def get_valid_padding(kernel_size, dilation):
-     kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
-     padding = (kernel_size - 1) // 2
-     return padding
-
-
- class ConcatBlock(nn.Module):
-     # Concat the output of a submodule to its input
-     def __init__(self, submodule):
-         super(ConcatBlock, self).__init__()
-         self.sub = submodule
-
-     def forward(self, x):
-         output = torch.cat((x, self.sub(x)), dim=1)
-         return output
-
-     def __repr__(self):
-         tmpstr = 'Identity .. \n|'
-         modstr = self.sub.__repr__().replace('\n', '\n|')
-         tmpstr = tmpstr + modstr
-         return tmpstr
-
-
- class ShortcutBlock(nn.Module):
-     # Elementwise sum the output of a submodule to its input
-     def __init__(self, submodule):
-         super(ShortcutBlock, self).__init__()
-         self.sub = submodule
-
-     def forward(self, x):
-         output = x + self.sub(x)
-         return output
-
-     def __repr__(self):
-         tmpstr = 'Identity + \n|'
-         modstr = self.sub.__repr__().replace('\n', '\n|')
-         tmpstr = tmpstr + modstr
-         return tmpstr
-
-
- def sequential(*args):
-     # Flatten Sequential. It unwraps nn.Sequential.
-     if len(args) == 1:
-         if isinstance(args[0], OrderedDict):
-             raise NotImplementedError('sequential does not support OrderedDict input.')
-         return args[0]  # No sequential is needed.
-     modules = []
-     for module in args:
-         if isinstance(module, nn.Sequential):
-             for submodule in module.children():
-                 modules.append(submodule)
-         elif isinstance(module, nn.Module):
-             modules.append(module)
-     return nn.Sequential(*modules)
-
-
- def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True,
-                pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):
-     """
-     Conv layer with padding, normalization, activation
-     mode: CNA --> Conv -> Norm -> Act
-           NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)
-     """
-     assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [%s]' % mode
-     padding = get_valid_padding(kernel_size, dilation)
-     p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
-     padding = padding if pad_type == 'zero' else 0
-
-     c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
-                   dilation=dilation, bias=bias, groups=groups)
-     a = act(act_type) if act_type else None
-     if 'CNA' in mode:
-         n = norm(norm_type, out_nc) if norm_type else None
-         return sequential(p, c, n, a)
-     elif mode == 'NAC':
-         if norm_type is None and act_type is not None:
-             a = act(act_type, inplace=False)
-             # Important!
-             # input----ReLU(inplace)----Conv--+----output
-             #        |________________________|
-             # inplace ReLU will modify the input, therefore wrong output
-         n = norm(norm_type, in_nc) if norm_type else None
-         return sequential(n, a, p, c)
-
-
- ####################
- # Useful blocks
- ####################
-
-
- class ResNetBlock(nn.Module):
-     """
-     ResNet Block, 3-3 style
-     with extra residual scaling used in EDSR
-     (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)
-     """
-
-     def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1,
-                  bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA', res_scale=1):
-         super(ResNetBlock, self).__init__()
-         conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type,
-                            norm_type, act_type, mode)
-         if mode == 'CNA':
-             act_type = None
-         if mode == 'CNAC':  # Residual path: |-CNAC-|
-             act_type = None
-             norm_type = None
-         conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type,
-                            norm_type, act_type, mode)
-         # if in_nc != out_nc:
-         #     self.project = conv_block(in_nc, out_nc, 1, stride, dilation, 1, bias, pad_type,
-         #                               None, None)
-         #     print('Need a projecter in ResNetBlock.')
-         # else:
-         #     self.project = lambda x:x
-         self.res = sequential(conv0, conv1)
-         self.res_scale = res_scale
-
-     def forward(self, x):
-         res = self.res(x).mul(self.res_scale)
-         return x + res
-
-
- class ResidualDenseBlock_5C(nn.Module):
-     """
-     Residual Dense Block
-     style: 5 convs
-     The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
-     """
-
-     def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero',
-                  norm_type=None, act_type='leakyrelu', mode='CNA'):
-         super(ResidualDenseBlock_5C, self).__init__()
-         # gc: growth channel, i.e. intermediate channels
-         self.conv1 = conv_block(nc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
-                                 norm_type=norm_type, act_type=act_type, mode=mode)
-         self.conv2 = conv_block(nc+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
-                                 norm_type=norm_type, act_type=act_type, mode=mode)
-         self.conv3 = conv_block(nc+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
-                                 norm_type=norm_type, act_type=act_type, mode=mode)
-         self.conv4 = conv_block(nc+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
-                                 norm_type=norm_type, act_type=act_type, mode=mode)
-         if mode == 'CNA':
-             last_act = None
-         else:
-             last_act = act_type
-         self.conv5 = conv_block(nc+4*gc, nc, 3, stride, bias=bias, pad_type=pad_type,
-                                 norm_type=norm_type, act_type=last_act, mode=mode)
-
-     def forward(self, x):
-         x1 = self.conv1(x)
-         x2 = self.conv2(torch.cat((x, x1), 1))
-         x3 = self.conv3(torch.cat((x, x1, x2), 1))
-         x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
-         x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
-         return x5.mul(0.2) + x
-
-
- class RRDB(nn.Module):
-     """
-     Residual in Residual Dense Block
-     """
-
-     def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero',
-                  norm_type=None, act_type='leakyrelu', mode='CNA'):
-         super(RRDB, self).__init__()
-         self.RDB1 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
-                                           norm_type, act_type, mode)
-         self.RDB2 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
-                                           norm_type, act_type, mode)
-         self.RDB3 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
-                                           norm_type, act_type, mode)
-
-     def forward(self, x):
-         out = self.RDB1(x)
-         out = self.RDB2(out)
-         out = self.RDB3(out)
-         return out.mul(0.2) + x
-
-
- ####################
- # Upsampler
- ####################
-
-
- def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
-                        pad_type='zero', norm_type=None, act_type='relu'):
-     """
-     Pixel shuffle layer
-     (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
-     Neural Network, CVPR17)
-     """
-     conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
-                       pad_type=pad_type, norm_type=None, act_type=None)
-     pixel_shuffle = nn.PixelShuffle(upscale_factor)
-
-     n = norm(norm_type, out_nc) if norm_type else None
-     a = act(act_type) if act_type else None
-     return sequential(conv, pixel_shuffle, n, a)
-
-
- def upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
-                  pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):
-     # Up conv
-     # described in https://distill.pub/2016/deconv-checkerboard/
-     upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)
-     conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
-                       pad_type=pad_type, norm_type=norm_type, act_type=act_type)
-     return sequential(upsample, conv)
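
For context on the file removed above: these helpers are typically composed into an ESRGAN-style generator. Below is a minimal sketch of that composition, not part of this commit; the channel width, RRDB block count, and 4x scale are illustrative assumptions rather than values taken from this space.

    import torch
    import block as B  # assumes the deleted ESRGAN/block.py is on the import path

    # Shallow feature conv, an RRDB trunk wrapped in a residual shortcut,
    # two 2x pixel-shuffle stages (4x total), and a reconstruction conv.
    fea_conv = B.conv_block(3, 64, kernel_size=3, norm_type=None, act_type=None)
    trunk = B.sequential(*[B.RRDB(64, gc=32) for _ in range(4)])  # block count assumed
    lr_conv = B.conv_block(64, 64, kernel_size=3, norm_type=None, act_type=None)
    upsampler = B.sequential(
        B.pixelshuffle_block(64, 64, upscale_factor=2, act_type='leakyrelu'),
        B.pixelshuffle_block(64, 64, upscale_factor=2, act_type='leakyrelu'),
    )
    out_conv = B.conv_block(64, 3, kernel_size=3, norm_type=None, act_type=None)

    model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(trunk, lr_conv)),
                         upsampler, out_conv)
    x = torch.randn(1, 3, 32, 32)   # dummy low-resolution input
    print(model(x).shape)           # torch.Size([1, 3, 128, 128])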
spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/account_creation.py DELETED
@@ -1,3 +0,0 @@
- import usesless
-
- usesless.Account.create(logging=True)
spaces/14-26AA/sovits_aishell3/app.py DELETED
@@ -1,39 +0,0 @@
- import gradio as gr
- from inference import infer
- import numpy as np
-
- spkdict = np.arange(175).tolist()
- spkdict = ['speaker' + str(spk) for spk in spkdict]
-
- app = gr.Blocks()
- with app:
-     with gr.Tabs():
-         with gr.TabItem("Basic"):
-             gr.Markdown(value="""
-             This is an AI voice-conversion model based on soft-vc and VITS.\n
-             It was trained on a mix of the aishell3 (174 speakers, ~80+ h total) and opencpop (1 speaker, 5+ h) datasets, for roughly 350 epochs on a single A100 at batch size 60.\n
-             It converts ordinary speech well; for singing, good results require staying within the target speaker's vocal range. Because aishell3 dominates the training data and the epoch count was limited, the opencpop speaker's high notes are of poor quality.\n
-             The comfortable central ranges of the speakers are:\n
-             aishell3 (speakers 0-173): female speech A3, male speech C3\n
-             opencpop (speaker 174): female singing C4-G4 (above C5 it is largely distorted)\n
-             When converting ordinary speech, shift the key toward the target speaker's central range using the values above (e.g. male speech to female speech: key = 8; the reverse: -8; anything in the 4-16 range is worth trying).\n
-             If the source audio comes from some virtual streamers (VTubers), the pitch is usually above the normal female speech range, reaching around F4-A4; lower the key accordingly.\n
-             Detailed speaker information (gender, age, etc.) is listed in spkdic_new.json in the file directory.\n
-             \n
-             If synthesis quality is poor, first consider improving the following factors:\n
-             1. Whether the vocal range fits; adjust the key parameter as described above, or try another speaker.\n
-             2. Whether the source audio contains noise/BGM; use as clean a source as possible, and keep the room quiet when recording.\n
-             3. Whether the source audio contains reverb. Strong reverb noticeably degrades synthesis, causing the AI to mispronounce words or misdetect pitch.\n
-             4. Try synthesizing again; each run involves some randomness, and minor pitch problems may disappear on a retry.\n
-             5. Some de-instrumental/denoising processing of the source strongly affects synthesis; humans may hear no difference, but it changes the spectrum the AI reads.
-             """)
-             sid = gr.Dropdown(label="Speaker", choices=spkdict, value='speaker0')
-             vc_audio = gr.Audio(label="Upload audio (preferably under 2 minutes)", type='filepath')
-             vc_record = gr.Audio(source="microphone", label="Or record your voice", type="filepath")
-             vc_transform = gr.Number(label="Key shift (in semitones of 12-tone equal temperament; a full octave is 12)", value=0)
-             vc_submit = gr.Button("Convert", variant="primary")
-             vc_output1 = gr.Textbox(label="Output Message")
-             vc_output2 = gr.Audio(label="Output Audio")
-             vc_submit.click(infer, [vc_audio, vc_record, sid, vc_transform], [vc_output1, vc_output2])
-
- app.launch()
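
The click handler above implies a particular shape for inference.infer. Here is a hedged sketch of that signature, reconstructed only from the UI wiring; the body is a placeholder assumption, not the space's actual implementation (which lived in the deleted inference.py).

    from typing import Optional, Tuple

    def infer(audio_path: Optional[str], record_path: Optional[str],
              speaker: str, key_shift: float) -> Tuple[str, Optional[tuple]]:
        """Assumed signature: two filepath inputs (upload and microphone), a
        'speakerN' id, and a key shift in semitones; returns the status text
        and a (sample_rate, waveform) pair for the two Gradio outputs."""
        path = record_path or audio_path  # which input takes priority is an assumption
        if path is None:
            return "No audio provided.", None
        # Placeholder: load audio, extract soft-vc units, apply the key shift,
        # then decode with the VITS model for the chosen speaker.
        raise NotImplementedError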
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dont Panic! Heres Why Your PC Makes a Crackling Noise and How to Fix It.md DELETED
@@ -1,28 +0,0 @@
- <br />
- <h1>Why Does My PC Make a Crackling Noise and How to Fix It</h1>
-
- <p>If you hear a crackling or popping sound coming from your PC, you might be worried that something is wrong with your hardware. However, before you panic, you should know that there are many possible causes for this problem and some of them are easy to fix. In this article, we will explain why your PC makes a crackling noise and how to troubleshoot it.</p>
-
- <h2>What Causes Crackling or Popping Sound on a Windows PC?</h2>
-
- <p>Crackling or popping sound on a Windows PC can occur for a variety of reasons. Some of the most common ones are:</p>
- <h2>why does my pc make a crackling noise</h2><br /><p><b><b>Download File</b> ::: <a href="https://byltly.com/2uKzYR">https://byltly.com/2uKzYR</a></b></p><br /><br />
-
- <ul>
- <li><b>Audio device settings:</b> Your audio device settings, such as the audio format, sample rate, bit depth, and enhancements, might not be compatible with your speakers or headphones. This can result in distorted or noisy sound output.</li>
- <li><b>Sound driver:</b> Your sound driver might be missing, corrupted, outdated, or incompatible with your Windows version. This can affect the performance and quality of your audio device.</li>
- <li><b>Hardware interference:</b> Your audio device might be interfered by another hardware device, such as a microphone, webcam, USB device, or wireless adapter. This can cause electrical or radio frequency interference that manifests as crackling or popping sound.</li>
- <li><b>Fan bearing or cable rubbing:</b> Your PC fan might have a worn-out bearing or a loose cable that rubs against it. This can create a mechanical noise that sounds like crackling or popping.</li>
- </ul>
-
- <h2>How to Fix Crackling or Popping Sound on a Windows PC?</h2>
-
- <p>Depending on the cause of the problem, there are different ways to fix crackling or popping sound on a Windows PC. Here are some of the most effective solutions:</p>
-
- <ol>
- <li><b>Change your audio format:</b> Changing the audio quality on your output device can solve some sound problems. To do this, right-click the speaker icon in the notification area next to your clock and select <code>Playback Devices</code>. Double-click the default playback device, which has a green checkmark on its icon. Click the <code>Advanced</code> tab and use the <code>Default Format</code> box to select your sound quality level. Try setting your audio quality to <code>16 bit, 44100 Hz (CD Quality)</code>. Click <code>OK</code> afterwards and see if the crackling or other audio problems continue.</li>
- <li><b>Disable audio enhancements:</b> Some sound drivers use software enhancements in an attempt to improve your sound quality. If these aren't working properly or if your CPU is being taxed too heavily, these could result in sound problems. To disable sound enhancements, use the same Properties window. Click the <code>Enhancements</code> tab here—if you see one—and check the <code>Disable All Enhancements</code> checkbox. Click <code>OK</code> to save your changes and then test to see if the problems continue. Not all software drivers perform this function, so you won't always see the <code>Enhancements</code> tab on all systems.</li>
- <li><b>Disable exclusive mode:</b> Some sound drivers seem to have issue with the exclusive mode option that allows applications to take exclusive control of your sound card. This shouldn't normally be a problem: Blame bad sound drivers if it's causing issues on your system. You'll find this setting on the same window where the <code>Default Format</code> option is. Disable the <code>Allow applications to take exclusive control of this device</code> option under <code>Exclusive Mode</code>. Click <code>OK</code> and see if this solved your problem. This option normally isn't a problem, so you should probably re-enable it if disabling it doesn't solve the problem.</li>
- <li><b>Update your sound drivers:</b>A missing, corrupted, or outdated sound driver could be why you're experiencing crackling audio. Updating or reinstalling it can fix the issue in this regard. To update your sound driver manually, go to <code>Device Manager</code>, expand <</p> ddb901b051<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FL Studio 20 Keygen Reddit The Ultimate Guide to Unlocking All Features and Plugins.md DELETED
@@ -1,40 +0,0 @@
-
- <h1>FL Studio Keygen Reddit: How to Crack FL Studio 20 for Free</h1>
- <p>If you are looking for a way to crack FL Studio 20 for free, you might have come across some torrents or links that claim to offer a keygen or a patch for the popular music production software. But are they safe and reliable? And how do you use them?</p>
- <h2>fl studio keygen reddit</h2><br /><p><b><b>Download</b> > <a href="https://byltly.com/2uKwL4">https://byltly.com/2uKwL4</a></b></p><br /><br />
- <p>In this article, we will explain what a keygen is, how it works, and what are the risks and benefits of using one. We will also show you how to use a keygen from a reputable source, R2R, to unlock FL Studio 20 and enjoy its full features.</p>
- <h2>What is a Keygen?</h2>
- <p>A keygen, short for key generator, is a program that can generate valid serial numbers or license keys for a software application. A keygen can be used to activate a software without paying for it or going through the official registration process.</p>
- <p>A keygen usually works by exploiting a flaw or a weakness in the software's protection system, such as a weak encryption algorithm or a hardcoded key. A keygen can also emulate the server-side validation process and generate keys that match the expected format.</p>
- <h2>How to Use R2R Keygen for FL Studio 20?</h2>
- <p>R2R is a well-known group of crackers that release high-quality keygens and patches for various software applications, including FL Studio. R2R's keygen for FL Studio 20 can unlock all the features and plugins of the software, such as Edison, Gross Beat, Harmor, Sytrus, Maximus, and more.</p>
- <p>To use R2R's keygen for FL Studio 20, you need to follow these steps:</p>
- <ol>
- <li>Download the torrent file of FL Studio 20 from <a href="https://www.reddit.com/r/piratebay/comments/fmgn21/how_to_use_r2r_keygen_for_fl_studio_20/">this Reddit post</a> by u/orbital_malice42. Make sure you download the one uploaded by Deepstatus, who is a verified uploader on Piratebay.</li>
- <li>Extract the .7z file using 7-Zip or WinRAR. You will get two folders: FL Studio 20 and Shared.</li>
- <li>Install FL Studio 20 by running the setup.exe file in the FL Studio 20 folder. Choose your preferred language and location. Do not run FL Studio after installation.</li>
- <li>Copy the Keygen.exe file from the Shared folder and paste it into the installation directory of FL Studio 20. The default location is C:\Program Files (x86)\Image-Line\FL Studio 20.</li>
- <li>Run the Keygen.exe file as administrator. You will see a window with a button that says Register. Click on it and wait for a few seconds. You will see a message that says "Successfully registered!"</li>
- <li>Open FL Studio 20 by running the fl.exe file in the installation directory. You should see that it is unlocked and activated. You can now use all the features and plugins of FL Studio 20 without any limitations.</li>
- </ol>
- <h2>What are the Risks and Benefits of Using a Keygen?</h2>
- <p>Using a keygen can have some advantages and disadvantages. Here are some of them:</p>
- <h3>Benefits</h3>
- <ul>
- <li>You can save money by not paying for the software license.</li>
- <li>You can access all the features and plugins of the software without any restrictions.</li>
- <li>You can use the software offline without needing an internet connection or an account.</li>
- </ul>
- <h3>Risks</h3>
- <ul>
- <li>You may violate the terms and conditions of the software developer and face legal consequences.</li>
- <li>You may expose your computer to malware or viruses that may be hidden in the keygen or the torrent file.</li>
- <li>You may not receive any updates or support from the software developer.</li>
- <li>You may experience some bugs or errors in the software that may affect its performance or functionality.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>FL Studio 20 is a powerful and versatile music production software that can help you create amazing beats and songs. However, it is also quite expensive and requires a license key to activate it.</p>
- <p>If you want to crack FL Studio</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/GSG HDRI Studio Pack 1.8 for Cinema 4D How to Achieve Realistic Reflections and Shadows.md DELETED
@@ -1,150 +0,0 @@
-
- <h1>GSG HDRI Studio Pack 1.8 for Cinema 4D: A Review</h1>
- <p>If you are looking for a way to create stunning lighting and reflections in Cinema 4D, you might have heard of GSG HDRI Studio Pack 1.8. This is a bundle of two plugins from Greyscalegorilla that allow you to browse and apply hundreds of high-quality HDRI (High Dynamic Range Images) in seconds. But what exactly is GSG HDRI Studio Pack 1.8, and why should you use it? In this article, we will review this product and show you how it can help you improve your 3D renders.</p>
- <h2>What is GSG HDRI Studio Pack 1.8?</h2>
- <p>GSG HDRI Studio Pack 1.8 is a collection of two plugins for Cinema 4D that make it easy to use HDRI lighting and reflections in your scenes. HDRI stands for High Dynamic Range Images, which are images that capture a wide range of brightness values, from very dark to very bright. By using HDRI as your light source, you can create realistic and natural lighting effects that mimic the real world.</p>
- <h2>GSG HDRI Studio Pack 1.8 for Cinema 4D</h2><br /><p><b><b>DOWNLOAD</b> &#10038;&#10038;&#10038; <a href="https://byltly.com/2uKyNB">https://byltly.com/2uKyNB</a></b></p><br /><br />
- <h3>HDRI Studio Rig</h3>
- <p>HDRI Studio Rig is a plugin that lets you browse and apply HDRI from Greyscalegorilla's library or your own collection. You can then rotate, adjust, and place them in the perfect position for your scene. You can also create professional studio quality backdrops and seamless floors with this plugin. HDRI Studio Rig works with Cinema 4D's Standard and Physical renderers, and it is ideal for product shots, motion graphics, and animations.</p>
- <h3>HDRI Link</h3>
- <p>HDRI Link is a plugin that lets you connect any third-party render engine to Greyscalegorilla's library of HDRI or your own collection. You can instantly browse and apply HDRI with a simple drag-and-drop interface, without having to deal with complex settings or file paths. HDRI Link is compatible with popular render engines like Redshift, Octane, and Arnold, and it is ideal for photorealistic renders, architectural visualization, and VFX.</p>
- <h2>Why use GSG HDRI Studio Pack 1.8?</h2>
- <p>GSG HDRI Studio Pack 1.8 offers many benefits for Cinema 4D users who want to create better looking renders with less hassle.</p>
- <p>How to use GSG HDRI Studio Pack in Cinema 4D<br />
- GSG HDRI Studio Pack review and tutorial<br />
- Best HDRI lighting presets for Cinema 4D<br />
- GSG HDRI Studio Pack vs other HDRI plugins<br />
- Where to buy GSG HDRI Studio Pack for Cinema 4D<br />
- GSG HDRI Studio Pack features and benefits<br />
- How to create realistic renders with GSG HDRI Studio Pack<br />
- GSG HDRI Studio Pack compatibility and requirements<br />
- How to install and update GSG HDRI Studio Pack<br />
- GSG HDRI Studio Pack free download and trial<br />
- How to customize and save HDRI settings in GSG HDRI Studio Pack<br />
- GSG HDRI Studio Pack tips and tricks<br />
- How to optimize render speed with GSG HDRI Studio Pack<br />
- GSG HDRI Studio Pack customer testimonials and feedback<br />
- How to get support and help for GSG HDRI Studio Pack<br />
- How to use GSG HDRI Studio Pack with other Cinema 4D tools<br />
- GSG HDRI Studio Pack alternatives and competitors<br />
- How to add your own HDRIs to GSG HDRI Studio Pack<br />
- How to use GSG HDRI Studio Pack for animation and motion graphics<br />
- How to use GSG HDRI Studio Pack for product visualization and design<br />
- How to use GSG HDRI Studio Pack for architectural rendering and interior design<br />
- How to use GSG HDRI Studio Pack for character modeling and sculpting<br />
- How to use GSG HDRI Studio Pack for VFX and compositing<br />
- How to use GSG HDRI Studio Pack for game development and VR/AR<br />
- How to use GSG HDRI Studio Pack for photography and video editing<br />
- How to create stunning HDRIs with GSG HDRI Studio Pack<br />
- How to use GSG HDRI Studio Pack with Octane Render, Redshift, Arnold, etc.<br />
- How to use GSG HDRI Studio Pack with After Effects, Photoshop, Illustrator, etc.<br />
- How to use GSG HDRI Studio Pack with Blender, Maya, 3ds Max, etc.<br />
- How to use GSG HDRI Studio Pack with SketchUp, Revit, AutoCAD, etc.<br />
- How to use GSG HDRI Studio Pack with ZBrush, Substance Painter, Marvelous Designer, etc.<br />
- How to use GSG HDRI Studio Pack with Unity, Unreal Engine, Godot, etc.<br />
- How to use GSG HDRI Studio Pack with Premiere Pro, Final Cut Pro, DaVinci Resolve, etc.<br />
- How to use GSG HDRI Studio Pack with Lightroom, Capture One, Affinity Photo, etc.<br />
- What are the advantages of using HDRIs in Cinema 4D<br />
- What are the best practices for using HDRIs in Cinema 4D<br />
- What are the common mistakes and pitfalls when using HDRIs in Cinema 4D<br />
- What are the latest trends and developments in HDRIs and Cinema 4D<br />
- What are the best sources and resources for HDRIs and Cinema 4D<br />
- What are the best examples and inspirations of HDRIs and Cinema 4D projects</p>
- <h3>Benefits of HDRI lighting</h3>
- <p>HDRI lighting is one of the most realistic and natural ways to light your scenes in Cinema 4D. By using HDRI as your light source, you can achieve:</p>
- <ul>
- <li>Accurate color representation</li>
- <li>Soft shadows and reflections</li>
- <li>Global illumination effects</li>
- <li>Ambient occlusion effects</li>
- <li>Mood and atmosphere</li>
- </ul>
- <p>HDRI lighting can also save you time and resources by eliminating the need for multiple lights, complex setups, and long render times.</p>
- <h3>Features of GSG HDRI Studio Pack 1.8</h3>
- <p>GSG HDRI Studio Pack 1.8 offers many features that make it easy and fun to use HDRI lighting in Cinema 4D.</p>
- <ul>
- <li>Browse hundreds of HDRI in seconds with a simple interface</li>
- <li>Add beautiful reflections in seconds with a click</li>
- <li>Adjust brightness and reflections separately to find the perfect look</li>
- <li>Blur the HDRI to create better global illumination effects</li>
- <li>Use the C4D shadow catcher to adjust or remove shadows from your scene</li>
- <li>Preview your lighting before hitting render with rotation preview</li>
- <li>Create professional studio quality backdrops and seamless floors</li>
- <li>Switch between render engines without losing your settings</li>
- <li>Access exclusive Greyscalegorilla HDRI collections or use your own</li>
- </ul>
- <h2>How to use GSG HDRI Studio Pack 1.8?</h2>
- <p>GSG HDRI Studio Pack 1.8 is very easy to use in Cinema 4D.</p>
- <h3>Installation and compatibility</h3>
- <p>To install GSG HDRI Studio Pack 1.8, you need to have Cinema 4D R20 or higher installed on your computer. You also need to have a Greyscalegorilla Plus membership account, which gives you access to all of their products and training for one low price.</p>
- <p>To install the plugins, you need to download them from your Greyscalegorilla account page, unzip them, and copy them to your Cinema 4D plugins folder.</p>
- <p>To use the plugins, you need to activate them with your Greyscalegorilla Plus account credentials.</p>
- <p>GSG HDRI Studio Rig works with Cinema 4D's Standard and Physical renderers, while GSG HDRI Link works with third-party render engines like Redshift, Octane, and Arnold.</p>
- <h3>Browsing and applying HDRI</h3>
- <p>To browse and apply HDRI with GSG HDRI Studio Rig, you need to add an object to your scene (such as a sphere or a cube), then add an HDRi Studio Rig object from the plugins menu.</p>
- <p>This will open up the HDRi Browser window, where you can see all the available HDRi collections from Greyscalegorilla or your own folder.</p>
- <p>You can then drag any HDRi image onto the HDRi Preview window or double-click on it to apply it to your scene.</p>
- <p>You can also use the search bar or the filters to find the HDRi that suits your needs.</p>
- <p>You will see a small icon on the tag that indicates which render engine you are using. You can change it by clicking on it and selecting another one.</p>
- <p>This will open up the HDRI Browser window, where you can see all the available HDRI collections from Greyscalegorilla or your own folder.</p>
- <p>You can then drag any HDRI image onto the HDRI Link Tag or double-click on it to apply it to your scene.</p>
- <p>You can also use the search bar or the filters to find the HDRI that suits your needs.</p>
- <h3>Adjusting and customizing HDRI</h3>
- <p>To adjust and customize HDRI with GSG HDRI Studio Rig, you need to select the HDRi Studio Rig object and go to the attributes panel.</p>
- <p>There you will find several options to tweak your lighting and reflections, such as:</p>
- <ul>
- <li>Brightness: Adjusts the overall intensity of the HDRI</li>
- <li>Reflection: Adjusts the intensity of the reflections on your object</li>
- <li>Rotation: Rotates the HDRI around your scene</li>
- <li>Blur: Blurs the HDRI to create softer shadows and global illumination effects</li>
- <li>Fill Light: Adds a secondary light source to fill in the dark areas of your scene</li>
- <li>Color Correction: Applies hue, saturation, contrast, and gamma adjustments to the HDRI</li>
- <li>Floor: Enables or disables the seamless floor option and lets you change its color, height, and reflection</li>
- <li>Backdrop: Enables or disables the studio backdrop option and lets you change its color, height, and width</li>
- </ul>
- <p>To adjust and customize HDRI with GSG HDRI Link, you need to select the HDRi Link Tag and go to the attributes panel.</p>
- <p>There you will find a few options to tweak your lighting and reflections, such as:</p>
- <ul>
- <li>Brightness: Adjusts the overall intensity of the HDRI</li>
- <li>Reflection: Adjusts the intensity of the reflections on your object</li>
- <li>Rotation: Rotates the HDRI around your scene</li>
- <li>Blur: Blurs the HDRI to create softer shadows and global illumination effects</li>
- <li>Preview Mode: Enables or disables a low-resolution preview of the HDRI for faster feedback</li>
- </ul>
- <h2>Where to get GSG HDRI Studio Pack 1.8?</h2>
- <p>If you are interested in getting GSG HDRI Studio Pack 1.8, you have two options:</p>
- <h3>Pricing and plans</h3>
- <p>You can buy GSG HDRI Studio Pack 1.8 as a standalone product for $129. This will give you access to both plugins and 10 sample HDRI images. You can also buy additional HDRI collections from Greyscalegorilla's website, ranging from $49 to $99 each.</p>
- <p>You can also get GSG HDRI Studio Pack 1.8 as part of Greyscalegorilla Plus membership for $399 per year or $64 per month. This will give you access to all of Greyscalegorilla's products and training, including over 3,000 materials, HDRIs, and other 3D assets, all of their time-saving plugins for Cinema 4D, and 500+ hours of pro training.</p>
- <h3>Greyscalegorilla Plus membership</h3>
- <p>Greyscalegorilla Plus is a subscription service that gives you unlimited access to all of Greyscalegorilla's products and training for one low price. You can get over $13,000 worth of tools and training for only $399 per year or $64 per month.</p>
- <p>With Greyscalegorilla Plus, you can:</p>
- <ul>
- <li>Leverage over 3,000 materials, HDRIs, and other 3D assets to create stunning renders in Cinema 4D</li>
- <li>Use all of Greyscalegorilla's time-saving plugins for Cinema 4D, such as Signal, Transform, Light Kit Pro, GorillaCam, City Kit, Topcoat, Texture Kit Pro, and more</li>
- <li>Learn from over 500 hours of pro training on Cinema 4D, After Effects, Redshift, Octane, Arnold, X-Particles, Houdini, RealFlow, and more</li>
- <li>Get instant updates and new releases as soon as they are available</li>
- <li>Enjoy a 60-day money-back guarantee if you are not satisfied with your membership</li>
- </ul>
- <h2>Conclusion</h2>
- <p>GSG HDRI Studio Pack 1.8 is a bundle of two plugins for Cinema 4D that let you browse and apply hundreds of high-quality HDRI in seconds. You can use them to create realistic and natural lighting and reflections in your scenes with ease. Whether you are using Cinema 4D's Standard and Physical renderers or third-party render engines like Redshift, Octane, or Arnold, GSG HDRI Studio Pack 1.8 can help you improve your renders.</p>
- <p>If you want to get GSG HDRI Studio Pack 1.8, you can buy it as a standalone product for $129 or as part of Greyscalegorilla Plus membership for $399 per year or $64 per month. Greyscalegorilla Plus gives you unlimited access to all of Greyscalegorilla's products and training for one low price.</p>
- <p>GSG HDRI Studio Pack 1.8 is a great product for Cinema 4D users who want to create better looking renders with less hassle. If you are interested in trying it out, you can visit Greyscalegorilla's website for more information.</p>
- <h2>FAQs</h2>
- <h3>What is HDRI?</h3>
- <p>HDRI stands for High Dynamic Range Images, which are images that capture a wide range of brightness values, from very dark to very bright. By using HDRI as your light source, you can create realistic and natural lighting effects that mimic the real world.</p>
- <h3>What is GSG HDRI Studio Pack 1.8?</h3>
- <p>GSG HDRI Studio Pack 1.8 is a collection of two plugins for Cinema 4D that make it easy to use HDRI lighting and reflections in your scenes. They are:</p>
- <ul>
- <li>HDRI Studio Rig: A plugin that works with Cinema 4D's Standard and Physical renderers</li>
- <li>HDRI Link: A plugin that works with third-party render engines like Redshift, Octane, and Arnold</li>
- </ul>
- <h3>How do I use GSG HDRI Studio Pack 1.8?</h3>
- <p>To use GSG HDRI Studio Pack 1.8, you need to add an object to your scene (such as a sphere or a cube), then add an HDRi Studio Rig object or an HDRi Link Tag from the plugins menu. This will open up the HDRi Browser window, where you can browse and apply any HDRi image from Greyscalegorilla's library or your own folder. You can then adjust and customize your lighting and reflections with various options in the attributes panel.</p>
- <h3>Where do I get GSG HDRI Studio Pack 1.8?</h3>
- <p>You can get GSG HDRI Studio Pack 1.8 from Greyscalegorilla's website. You can buy it as a standalone product for $129 or as part of Greyscalegorilla Plus membership for $399 per year or $64 per month.</p>
- <h3>What is Greyscalegorilla Plus?</h3>
- <p>Greyscalegorilla Plus is a subscription service that gives you unlimited access to all of Greyscalegorilla's products and training for one low price. You can get over $13,000 worth of tools and training for only $399 per year or $64 per month.</p>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Designing With Type 5th Edition - The Essential Guide To Typography By James Craig.pdf !NEW!.md DELETED
@@ -1,7 +0,0 @@
-
- <p>Typography is often regarded as an uninteresting topic. It is no less important than any other medium or discipline, but it is often not given the attention that it deserves. Sadly, some designers today seem to think that just calling oneself a "typographer" will get one a job. It is time to dispel that myth. This book is designed for anyone in any field who wants to know how to design with type. It is true that not all designers think of type as being important or useful, but even that attitude has changed. Designing books, pamphlets, posters, mailings, logos, advertisements, and even Web pages means knowing something about type. And now that type is commonplace in both the print and the electronic media, the need for designers who know type is not less than it has ever been. This book presents the essentials of type and shows how they can be utilized as in the design of anything printed, from books to pamphlets to logos. It is to be hoped that this text will be the first of many on this crucial subject.</p>
- <p>Designing with Type continues to be a perennial best seller, as well as one of the most frequently cited textbooks ever written on the subject. Where it was once considered a dry, obscure subject, of little relevance to graphic designers, it is now accepted as an essential text for designers and design students alike. For the new generation of designers, this book still offers the most complete, current overview of the subject.</p>
- <h2>Designing with Type, 5th Edition - The Essential Guide to Typography by James Craig.pdf</h2><br /><p><b><b>Download Zip</b> &#10027; <a href="https://imgfil.com/2uxYRV">https://imgfil.com/2uxYRV</a></b></p><br /><br />
- <p>This book is about a way of looking at typography that helps us think about type as a form and about the people who designed it, as well as its use in mass-communication. The book begins by defining what typography is and isn't, with particular emphasis on the rationale behind it. Next, the book outlines types of design and typographic criteria, with sections on structure, form, function, illustration, language, visual and verbal communication, and media placement and organization. The authors show how these methods can be applied to any kind of type-based communication, from books, posters, logos, magazines, and broadsheets to websites, advertisements, and classified listings.</p> 899543212b<br />
- <br />
- <br />
spaces/1line/AutoGPT/autogpt/commands/audio_text.py DELETED
@@ -1,36 +0,0 @@
- import json
-
- import requests
-
- from autogpt.config import Config
- from autogpt.workspace import path_in_workspace
-
- cfg = Config()
-
-
- def read_audio_from_file(audio_path):
-     audio_path = path_in_workspace(audio_path)
-     with open(audio_path, "rb") as audio_file:
-         audio = audio_file.read()
-     return read_audio(audio)
-
-
- def read_audio(audio):
-     model = cfg.huggingface_audio_to_text_model
-     api_url = f"https://api-inference.huggingface.co/models/{model}"
-     api_token = cfg.huggingface_api_token
-     headers = {"Authorization": f"Bearer {api_token}"}
-
-     if api_token is None:
-         raise ValueError(
-             "You need to set your Hugging Face API token in the config file."
-         )
-
-     response = requests.post(
-         api_url,
-         headers=headers,
-         data=audio,
-     )
-
-     text = json.loads(response.content.decode("utf-8"))["text"]
-     return "The audio says: " + text
spaces/1phancelerku/anime-remove-background/60 Seconds! Reatomized - A Crazy and Funny Adventure in a Nuclear Wasteland - Play Online for Free.md DELETED
@@ -1,129 +0,0 @@
1
-
2
- <h1>How to Play 60 Seconds! Reatomized for Free Online Without Downloading Anything</h1>
3
- <p>Do you like survival games? Do you enjoy dark humor and quirky characters? Do you want to experience a nuclear apocalypse without risking your life? If you answered yes to any of these questions, then you might want to try 60 Seconds! Reatomized, a game that lets you play as a suburban dad who has to save his family and himself from a nuclear blast. And the best part is, you can play it for free online without downloading anything. Here's how.</p>
4
- <h2>What is 60 Seconds! Reatomized?</h2>
5
- <p>60 Seconds! Reatomized is a game that combines two genres: survival simulator and dark comedy adventure. It was developed by Robot Gentleman and released in 2019 as a remastered version of the original 60 Seconds! game from 2015. Here are some of the features of the game:</p>
6
- <h2>60 seconds free no download unblocked</h2><br /><p><b><b>Download File</b> &#10040;&#10040;&#10040; <a href="https://jinyurl.com/2uNODe">https://jinyurl.com/2uNODe</a></b></p><br /><br />
7
- <h3>A post-apocalyptic survival simulator</h3>
8
- <p>In this game, you have to face the consequences of a nuclear war that has destroyed most of the world. You have to scavenge for supplies, ration food and water, deal with illnesses and injuries, and make tough decisions that will affect your survival. You also have to deal with random events and visitors that can either help or harm you. The game has four different modes: Atomic Drill, Apocalypse, Scavenge, and Survival. Each mode has its own rules and challenges.</p>
9
- <h3>A dark comedy adventure game</h3>
10
- <p>While the game has a serious theme, it also has a lot of humor and absurdity. The game is narrated by a sarcastic robot named Dolores, who comments on your actions and choices. The game also has a lot of references to pop culture, such as movies, books, games, and celebrities. The game also has a lot of funny scenarios and outcomes, such as turning into mutants, becoming cannibals, or joining cults. The game does not take itself too seriously and encourages you to have fun with it.</p>
11
- <h3>A remastered version of the original 60 Seconds!</h3>
12
- <p>60 Seconds! Reatomized is an improved version of the original game, with new features and content. Some of the improvements include:</p>
13
- <ul>
14
- <li>Better graphics and sound effects</li>
15
- <li>New endings and achievements</li>
16
- <li>New characters and items</li>
17
- <li>New challenges and events</li>
18
- <li>New mini-games and quests</li>
19
- <li>New game mode: Challenge</li>
20
- </ul>
21
- <h2>How to play 60 seconds free no download unblocked?</h2>
22
- <p>If you want to play 60 Seconds! Reatomized for free online without downloading anything, you need to find a reliable website that offers the game. One such website is [Gameroze.com](^1^), which lets you play the game in your browser without any registration or installation. Another website is [60secondsreatomizedgame.com](^2^), which also offers the game for free online. Here are the steps to play the game:</p>
23
- <h3>Find a reliable website that offers the game</h3>
24
- <p>Go to one of the websites mentioned above or search for other websites that offer the game. Make sure that the website is safe and secure, and does not contain any viruses or malware. You can use an antivirus software or a browser extension to check the website's reputation and safety.</p>
25
- <h3>Choose your mode and difficulty level</h3>
26
- <p> <p>Once you have accessed the game on the website, you can choose the mode you want to play. The game has five modes: Atomic Drill, Apocalypse, Scavenge, Survival, and Challenge. Each mode has a different objective and gameplay. Here is a brief description of each mode:</p>
27
- <table>
28
- <tr>
29
- <th>Mode</th>
30
- <th>Description</th>
31
- </tr>
32
- <tr>
33
- <td>Atomic Drill</td>
34
- <td>This is the tutorial mode, where you can learn the basics of the game. You have to collect supplies and family members in 60 seconds and then survive in the bunker for a few days.</td>
35
- </tr>
36
- <tr>
37
- <td>Apocalypse</td>
38
- <td>This is the main mode, where you have to complete the full game. You have to collect supplies and family members in 60 seconds and then survive in the bunker as long as you can. You can choose from four difficulty levels: Little Boy, Fat Man, Tsar Bomba, and Scavenger.</td>
39
- </tr>
40
- <tr>
41
- <td>Scavenge</td>
42
- <td>This is a mode where you only have to collect supplies and family members in 60 seconds. You can choose from four difficulty levels: Easy, Normal, Hard, and Impossible.</td>
43
- </tr>
44
- <tr>
45
- <td>Survival</td>
46
- <td>This is a mode where you only have to survive in the bunker with the supplies and family members you have. You can choose from four difficulty levels: Easy, Normal, Hard, and Impossible.</td>
47
- </tr>
48
- <tr>
49
- <td>Challenge</td>
50
- <td>This is a mode where you have to complete a specific scenario with a set of rules and conditions. You can choose from 12 challenges, such as Cat Lady, Twins, or Soup Only.</td>
51
- </tr>
52
- </table>
53
- <h3>Collect supplies and family members in 60 seconds</h3>
54
- <p>After choosing your mode and difficulty level, you will start the game in your house. You will have 60 seconds to grab as many supplies and family members as you can and bring them to the fallout shelter in your backyard. You can use the arrow keys or the WASD keys to move around, and the spacebar or the left mouse button to pick up items or people. You can also use the E key or the right mouse button to drop items or people. You can only carry up to four items or people at a time, so you have to plan carefully what you need and what you can leave behind. Some of the items you can find are:</p>
55
- <ul>
56
- <li>Food: Canned soup and water bottles. You need these to feed yourself and your family.</li>
57
- <li>Medicine: First aid kit and medkit. You need these to heal yourself and your family from injuries or illnesses.</li>
58
- <li>Weapons: Axe, rifle, shotgun, pistol, ammo, padlock. You need these to defend yourself from raiders or other threats.</li>
59
- <li>Tools: Flashlight, radio, map, gas mask, bug spray, suitcase, Boy Scout book. You need these to communicate with others, explore outside, or fix things.</li>
60
- <li>Luxuries: Playing cards, chess board, harmonica, checkers board. You need these to entertain yourself and your family and prevent boredom or insanity.</li>
61
- <li>Pets: Cat or dog. You can choose to bring one of them with you for companionship.</li>
62
- </ul>
63
- <p>You also have to bring your family members with you. They are:</p>
64
- <p>60 seconds reatomized game online free<br />
65
- 60 seconds survival simulator unblocked no download<br />
66
- 60 seconds apocalypse game free online<br />
67
- 60 seconds bunker challenge unblocked free<br />
68
- 60 seconds dark comedy adventure free no download<br />
69
- 60 seconds nuclear fallout game online unblocked<br />
70
- 60 seconds post-apocalyptic simulator free online<br />
71
- 60 seconds atomic explosion game unblocked no download<br />
72
- 60 seconds family survival game free online<br />
73
- 60 seconds scavenging adventure free no download<br />
74
- 60 seconds reatomized unblocked game play online<br />
75
- 60 seconds atomic shelter game free no download<br />
76
- 60 seconds survival adventure game online unblocked<br />
77
- 60 seconds nuclear blast game free online<br />
78
- 60 seconds bunker survival game unblocked no download<br />
79
- 60 seconds reatomized dark comedy game free online<br />
80
- 60 seconds apocalypse simulator unblocked no download<br />
81
- 60 seconds atomic adventure game online free<br />
82
- 60 seconds fallout shelter game unblocked free<br />
83
- 60 seconds post-nuclear game free no download<br />
84
- 60 seconds reatomized survival challenge game online unblocked<br />
85
- 60 seconds nuclear war game free no download<br />
86
- 60 seconds bunker adventure game unblocked online<br />
87
- 60 seconds reatomized comedy simulator free online<br />
88
- 60 seconds atomic bomb game unblocked no download<br />
89
- 60 seconds survival comedy game online free<br />
90
- 60 seconds apocalypse adventure game unblocked free<br />
91
- 60 seconds reatomized nuclear fallout game no download<br />
92
- 60 seconds bunker simulator game online unblocked<br />
93
- 60 seconds reatomized atomic blast game free online</p>
94
- <ul>
95
- <li>Ted: The protagonist and father of the family. He is strong and brave.</li>
96
- <li>Dolores: The wife of Ted and mother of the family. She is smart and resourceful.</li>
97
- <li>Mary Jane: The daughter of Ted and Dolores. She is adventurous and curious.</li>
98
- <li>Timmy: The son of Ted and Dolores. He is clever and optimistic.</li>
99
- </ul>
100
- <p>You have to decide who and what to bring with you before the time runs out. If you don't make it to the shelter in time, you will die in the blast. If you don't bring enough supplies or family members with you, you will have a harder time surviving in the bunker.</p>
101
- <h3>Survive in the bunker as long as you can</h3>
102
- <p>After collecting supplies and family members in 60 seconds, you will enter the bunker. This is where the survival part of the game begins. You will have to manage your resources, make decisions, and deal with events that will affect your survival. Here are some of the things you have to do:</p>
103
- - Feed yourself and your family every few days with soup and water. - Use medicine to treat injuries or illnesses that may occur. - Use weapons to fend off raiders or other enemies that may attack. - Use tools to communicate with other survivors or explore outside. - Use luxuries to - Use luxuries to keep yourself and your family happy and sane. - Follow the instructions or requests of Dolores, the robot narrator, who will guide you through the game. - Make choices that will affect your survival, such as who to send outside, who to trust, or what to trade. - Face random events and visitors that will have positive or negative consequences for you. - Try to find a way to escape the bunker or get rescued by the military or other survivors. <p>The game will end when you either die, escape, or get rescued. The game will also show you your stats, such as how many days you survived, how many items you used, and how many endings you unlocked. The game has over 100 endings, some of which are funny, sad, or bizarre.</p>
104
- <h2>Why play 60 seconds free no download unblocked?</h2>
105
- <p>There are many reasons why you might want to play 60 Seconds! Reatomized for free online without downloading anything. Here are some of them:</p>
106
- <h3>It's fun and challenging</h3>
107
- <p>The game is a mix of strategy, luck, and humor. You have to think fast and smart when collecting supplies and family members in 60 seconds. You also have to adapt to different situations and scenarios that will test your survival skills. The game is not easy, but it's rewarding when you manage to survive or achieve a good ending. The game also has a lot of humor and absurdity that will make you laugh or smile.</p>
108
- <h3>It's different every time you play</h3>
109
- <p>The game is randomly generated, which means that every time you play, you will have a different experience. The items and people you find in your house, the events and visitors you encounter in the bunker, and the endings you unlock will vary each time. The game also has different modes and difficulty levels that will change the gameplay and the challenge. The game has a lot of replay value and surprises.</p>
110
- <h3>It's compatible with any device and browser</h3>
111
- <p>The game is designed to run on any device and browser that supports HTML5. You don't need to download anything or install anything to play the game. You just need an internet connection and a web browser. You can play the game on your computer, laptop, tablet, or smartphone. You can also play the game on any operating system, such as Windows, Mac, Linux, Android, or iOS. The game is accessible and convenient for anyone.</p>
112
- <h2>Conclusion</h2>
113
- <p>60 Seconds! Reatomized is a game that lets you play as a suburban dad who has to save his family and himself from a nuclear blast. You can play it for free online without downloading anything on websites like [Gameroze.com] or [60secondsreatomizedgame.com]. The game is a combination of survival simulator and dark comedy adventure. It has four different modes: Atomic Drill, Apocalypse, Scavenge, and Survival Challenge. It also has over 100 endings and new features and content beyond the original release. The game is fun and challenging, different every time you play, and compatible with any device and browser. If you are looking for a game that will test your survival skills and make you laugh at the same time, then you should try 60 Seconds! Reatomized.</p>
114
- <h2>FAQs</h2>
115
- <p>Here are some of the frequently asked questions about 60 Seconds! Reatomized:</p>
116
- <ul>
117
- <li><b>Q: How long does it take to finish the game?</b></li>
118
- <li>A: It depends on how well you play and what mode and difficulty level you choose. The game can last from a few minutes to several hours.</li>
119
- <li><b>Q: How can I save my progress in the game?</b></li>
120
- <li>A: The game has an auto-save feature that saves your progress every day in the bunker. You can also manually save your progress by clicking on the floppy disk icon in the top right corner of the screen.</li>
121
- <li><b>Q: How can I unlock more endings in the game?</b></li>
122
- <li>A: You can unlock more endings by playing different modes and difficulty levels, making different choices, using different items, interacting with different visitors, and exploring different locations.</li>
123
- <li><b>Q: How can I get more supplies in the game?</b></li>
124
- <li>A: You can get more supplies by scavenging outside the bunker with a gas mask and a map. You can also trade with other survivors or raiders who may visit your bunker.</li>
125
- <li><b>Q: How can I get rid of the cockroaches in the game?</b></li>
126
- <li>A: You can get rid of the cockroaches by using bug spray or fire. However, be careful not to burn your supplies or your family or yourself. You can also prevent the cockroaches from appearing by keeping your bunker clean and tidy.</li>
127
- </ul></p>
 
spaces/2ndelement/voicevox/test/test_kana_parser.py DELETED
@@ -1,688 +0,0 @@
1
- from typing import List
2
- from unittest import TestCase
3
-
4
- from voicevox_engine import kana_parser
5
- from voicevox_engine.kana_parser import create_kana
6
- from voicevox_engine.model import AccentPhrase, Mora, ParseKanaError, ParseKanaErrorCode
7
-
8
-
9
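- # Notation exercised by these tests: ' marks the accent nucleus, / separates
- # accent phrases, 、 adds a pause mora, _ devoices the following mora
- # (uppercase vowel), and a trailing ? marks the phrase as interrogative.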
- def parse_kana(text: str) -> List[AccentPhrase]:
10
- accent_phrases = kana_parser.parse_kana(text)
11
- return accent_phrases
12
-
13
-
14
- class TestParseKana(TestCase):
15
- def test_phrase_length(self):
16
- self.assertEqual(len(parse_kana("ア'/ア'")), 2)
17
- self.assertEqual(len(parse_kana("ア'、ア'")), 2)
18
- self.assertEqual(len(parse_kana("ア'/ア'/ア'/ア'/ア'")), 5)
19
- self.assertEqual(len(parse_kana("ス'")), 1)
20
- self.assertEqual(len(parse_kana("_ス'")), 1)
21
- self.assertEqual(len(parse_kana("ギェ'")), 1)
22
- self.assertEqual(len(parse_kana("ギェ'、ギェ'/ギェ'")), 3)
23
-
24
- def test_accent(self):
25
- self.assertEqual(parse_kana("シャ'シシュシェショ")[0].accent, 1)
26
- self.assertEqual(parse_kana("シャ'_シシュシェショ")[0].accent, 1)
27
- self.assertEqual(parse_kana("シャシ'シュシェショ")[0].accent, 2)
28
- self.assertEqual(parse_kana("シャ_シ'シュシェショ")[0].accent, 2)
29
- self.assertEqual(parse_kana("シャシシュ'シェショ")[0].accent, 3)
30
- self.assertEqual(parse_kana("シャ_シシュ'シェショ")[0].accent, 3)
31
- self.assertEqual(parse_kana("シャシシュシェショ'")[0].accent, 5)
32
- self.assertEqual(parse_kana("シャ_シシュシェショ'")[0].accent, 5)
33
-
34
- def test_mora_length(self):
35
- self.assertEqual(len(parse_kana("シャ'シシュシェショ")[0].moras), 5)
36
- self.assertEqual(len(parse_kana("シャ'_シシュシェショ")[0].moras), 5)
37
- self.assertEqual(len(parse_kana("シャシ'シュシェショ")[0].moras), 5)
38
- self.assertEqual(len(parse_kana("シャ_シ'シュシェショ")[0].moras), 5)
39
- self.assertEqual(len(parse_kana("シャシシュシェショ'")[0].moras), 5)
40
- self.assertEqual(len(parse_kana("シャ_シシュシェショ'")[0].moras), 5)
41
-
42
- def test_pause(self):
43
- self.assertIsNone(parse_kana("ア'/ア'")[0].pause_mora)
44
- self.assertIsNone(parse_kana("ア'/ア'")[1].pause_mora)
45
- self.assertIsNotNone(parse_kana("ア'、ア'")[0].pause_mora)
46
- self.assertIsNone(parse_kana("ア'、ア'")[1].pause_mora)
47
-
48
- def test_unvoice(self):
49
- self.assertEqual(parse_kana("ス'")[0].moras[0].vowel, "u")
50
- self.assertEqual(parse_kana("_ス'")[0].moras[0].vowel, "U")
51
-
52
- def test_roundtrip(self):
53
- for text in ["コンニチワ'", "ワタシワ'/シャチョオデ'_ス", "トテモ'、エラ'インデス"]:
54
- self.assertEqual(create_kana(parse_kana(text)), text)
55
-
56
- for text in ["ヲ'", "ェ'"]:
57
- self.assertEqual(create_kana(parse_kana(text)), text)
58
-
59
- def _accent_phrase_marks_base(
60
- self, text: str, expected_accent_phrases: List[AccentPhrase]
61
- ) -> None:
62
- accent_phrases = kana_parser.parse_kana(text)
63
- self.assertEqual(expected_accent_phrases, accent_phrases)
64
-
65
- def test_accent_phrase_marks(self):
66
- def a_slash_a_accent_phrases():
67
- return [
68
- AccentPhrase(
69
- moras=[
70
- Mora(
71
- text="ア",
72
- consonant=None,
73
- consonant_length=None,
74
- vowel="a",
75
- vowel_length=0.0,
76
- pitch=0.0,
77
- ),
78
- ],
79
- accent=1,
80
- pause_mora=None,
81
- ),
82
- AccentPhrase(
83
- moras=[
84
- Mora(
85
- text="ア",
86
- consonant=None,
87
- consonant_length=None,
88
- vowel="a",
89
- vowel_length=0.0,
90
- pitch=0.0,
91
- ),
92
- ],
93
- accent=1,
94
- pause_mora=None,
95
- ),
96
- ]
97
-
98
- expected_accent_phrases = a_slash_a_accent_phrases()
99
- self._accent_phrase_marks_base(
100
- text="ア'/ア'",
101
- expected_accent_phrases=expected_accent_phrases,
102
- )
103
-
104
- def a_jp_comma_a_accent_phrases():
105
- return [
106
- AccentPhrase(
107
- moras=[
108
- Mora(
109
- text="ア",
110
- consonant=None,
111
- consonant_length=None,
112
- vowel="a",
113
- vowel_length=0.0,
114
- pitch=0.0,
115
- ),
116
- ],
117
- accent=1,
118
- pause_mora=Mora(
119
- text="、",
120
- consonant=None,
121
- consonant_length=None,
122
- vowel="pau",
123
- vowel_length=0.0,
124
- pitch=0.0,
125
- ),
126
- ),
127
- AccentPhrase(
128
- moras=[
129
- Mora(
130
- text="ア",
131
- consonant=None,
132
- consonant_length=None,
133
- vowel="a",
134
- vowel_length=0.0,
135
- pitch=0.0,
136
- ),
137
- ],
138
- accent=1,
139
- pause_mora=None,
140
- ),
141
- ]
142
-
143
- expected_accent_phrases = a_jp_comma_a_accent_phrases()
144
- self._accent_phrase_marks_base(
145
- text="ア'、ア'",
146
- expected_accent_phrases=expected_accent_phrases,
147
- )
148
-
149
- def a_slash_a_slash_a_slash_a_slash_a_accent_phrases():
150
- return [
151
- AccentPhrase(
152
- moras=[
153
- Mora(
154
- text="ア",
155
- consonant=None,
156
- consonant_length=None,
157
- vowel="a",
158
- vowel_length=0.0,
159
- pitch=0.0,
160
- ),
161
- ],
162
- accent=1,
163
- pause_mora=None,
164
- ),
165
- AccentPhrase(
166
- moras=[
167
- Mora(
168
- text="ア",
169
- consonant=None,
170
- consonant_length=None,
171
- vowel="a",
172
- vowel_length=0.0,
173
- pitch=0.0,
174
- ),
175
- ],
176
- accent=1,
177
- pause_mora=None,
178
- ),
179
- AccentPhrase(
180
- moras=[
181
- Mora(
182
- text="ア",
183
- consonant=None,
184
- consonant_length=None,
185
- vowel="a",
186
- vowel_length=0.0,
187
- pitch=0.0,
188
- ),
189
- ],
190
- accent=1,
191
- pause_mora=None,
192
- ),
193
- AccentPhrase(
194
- moras=[
195
- Mora(
196
- text="ア",
197
- consonant=None,
198
- consonant_length=None,
199
- vowel="a",
200
- vowel_length=0.0,
201
- pitch=0.0,
202
- ),
203
- ],
204
- accent=1,
205
- pause_mora=None,
206
- ),
207
- AccentPhrase(
208
- moras=[
209
- Mora(
210
- text="ア",
211
- consonant=None,
212
- consonant_length=None,
213
- vowel="a",
214
- vowel_length=0.0,
215
- pitch=0.0,
216
- ),
217
- ],
218
- accent=1,
219
- pause_mora=None,
220
- ),
221
- ]
222
-
223
- expected_accent_phrases = a_slash_a_slash_a_slash_a_slash_a_accent_phrases()
224
- self._accent_phrase_marks_base(
225
- text="ア'/ア'/ア'/ア'/ア'",
226
- expected_accent_phrases=expected_accent_phrases,
227
- )
228
-
229
- def su_accent_phrases():
230
- return [
231
- AccentPhrase(
232
- moras=[
233
- Mora(
234
- text="ス",
235
- consonant="s",
236
- consonant_length=0.0,
237
- vowel="u",
238
- vowel_length=0.0,
239
- pitch=0.0,
240
- ),
241
- ],
242
- accent=1,
243
- pause_mora=None,
244
- ),
245
- ]
246
-
247
- expected_accent_phrases = su_accent_phrases()
248
- self._accent_phrase_marks_base(
249
- text="ス'",
250
- expected_accent_phrases=expected_accent_phrases,
251
- )
252
-
253
- def under_score_su_accent_phrases():
254
- return [
255
- AccentPhrase(
256
- moras=[
257
- Mora(
258
- text="ス",
259
- consonant="s",
260
- consonant_length=0.0,
261
- vowel="U",
262
- vowel_length=0.0,
263
- pitch=0.0,
264
- ),
265
- ],
266
- accent=1,
267
- pause_mora=None,
268
- ),
269
- ]
270
-
271
- expected_accent_phrases = under_score_su_accent_phrases()
272
- self._accent_phrase_marks_base(
273
- text="_ス'",
274
- expected_accent_phrases=expected_accent_phrases,
275
- )
276
-
277
- def gye_accent_phrases():
278
- return [
279
- AccentPhrase(
280
- moras=[
281
- Mora(
282
- text="ギェ",
283
- consonant="gy",
284
- consonant_length=0.0,
285
- vowel="e",
286
- vowel_length=0.0,
287
- pitch=0.0,
288
- ),
289
- ],
290
- accent=1,
291
- pause_mora=None,
292
- ),
293
- ]
294
-
295
- expected_accent_phrases = gye_accent_phrases()
296
- self._accent_phrase_marks_base(
297
- text="ギェ'",
298
- expected_accent_phrases=expected_accent_phrases,
299
- )
300
-
301
- def gye_gye_gye_accent_phrases():
302
- return [
303
- AccentPhrase(
304
- moras=[
305
- Mora(
306
- text="ギェ",
307
- consonant="gy",
308
- consonant_length=0.0,
309
- vowel="e",
310
- vowel_length=0.0,
311
- pitch=0.0,
312
- ),
313
- ],
314
- accent=1,
315
- pause_mora=Mora(
316
- text="、",
317
- consonant=None,
318
- consonant_length=None,
319
- vowel="pau",
320
- vowel_length=0.0,
321
- pitch=0.0,
322
- ),
323
- ),
324
- AccentPhrase(
325
- moras=[
326
- Mora(
327
- text="ギェ",
328
- consonant="gy",
329
- consonant_length=0.0,
330
- vowel="e",
331
- vowel_length=0.0,
332
- pitch=0.0,
333
- ),
334
- ],
335
- accent=1,
336
- pause_mora=None,
337
- ),
338
- AccentPhrase(
339
- moras=[
340
- Mora(
341
- text="ギェ",
342
- consonant="gy",
343
- consonant_length=0.0,
344
- vowel="e",
345
- vowel_length=0.0,
346
- pitch=0.0,
347
- ),
348
- ],
349
- accent=1,
350
- pause_mora=None,
351
- ),
352
- ]
353
-
354
- expected_accent_phrases = gye_gye_gye_accent_phrases()
355
- self._accent_phrase_marks_base(
356
- text="ギェ'、ギェ'/ギェ'",
357
- expected_accent_phrases=expected_accent_phrases,
358
- )
359
-
360
- def test_interrogative_accent_phrase_marks(self):
361
- def a_question_mark_accent_phrases():
362
- return [
363
- AccentPhrase(
364
- moras=[
365
- Mora(
366
- text="ア",
367
- consonant=None,
368
- consonant_length=None,
369
- vowel="a",
370
- vowel_length=0.0,
371
- pitch=0.0,
372
- ),
373
- ],
374
- accent=1,
375
- pause_mora=None,
376
- is_interrogative=True,
377
- ),
378
- ]
379
-
380
- expected_accent_phrases = a_question_mark_accent_phrases()
381
- self._accent_phrase_marks_base(
382
- text="ア'?",
383
- expected_accent_phrases=expected_accent_phrases,
384
- )
385
-
386
- def gye_gye_gye_question_mark_accent_phrases():
387
- return [
388
- AccentPhrase(
389
- moras=[
390
- Mora(
391
- text="ギェ",
392
- consonant="gy",
393
- consonant_length=0.0,
394
- vowel="e",
395
- vowel_length=0.0,
396
- pitch=0.0,
397
- ),
398
- ],
399
- accent=1,
400
- pause_mora=Mora(
401
- text="、",
402
- consonant=None,
403
- consonant_length=None,
404
- vowel="pau",
405
- vowel_length=0.0,
406
- pitch=0.0,
407
- ),
408
- ),
409
- AccentPhrase(
410
- moras=[
411
- Mora(
412
- text="ギェ",
413
- consonant="gy",
414
- consonant_length=0.0,
415
- vowel="e",
416
- vowel_length=0.0,
417
- pitch=0.0,
418
- ),
419
- ],
420
- accent=1,
421
- pause_mora=None,
422
- ),
423
- AccentPhrase(
424
- moras=[
425
- Mora(
426
- text="ギェ",
427
- consonant="gy",
428
- consonant_length=0.0,
429
- vowel="e",
430
- vowel_length=0.0,
431
- pitch=0.0,
432
- ),
433
- ],
434
- accent=1,
435
- pause_mora=None,
436
- is_interrogative=True,
437
- ),
438
- ]
439
-
440
- expected_accent_phrases = gye_gye_gye_question_mark_accent_phrases()
441
- self._accent_phrase_marks_base(
442
- text="ギェ'、ギェ'/ギェ'?",
443
- expected_accent_phrases=expected_accent_phrases,
444
- )
445
-
446
- def a_pause_a_question_pause_a_question_a_question_mark_accent_phrases():
447
- return [
448
- AccentPhrase(
449
- moras=[
450
- Mora(
451
- text="ア",
452
- consonant=None,
453
- consonant_length=None,
454
- vowel="a",
455
- vowel_length=0.0,
456
- pitch=0.0,
457
- ),
458
- ],
459
- accent=1,
460
- pause_mora=Mora(
461
- text="、",
462
- consonant=None,
463
- consonant_length=None,
464
- vowel="pau",
465
- vowel_length=0.0,
466
- pitch=0.0,
467
- ),
468
- ),
469
- AccentPhrase(
470
- moras=[
471
- Mora(
472
- text="ア",
473
- consonant=None,
474
- consonant_length=None,
475
- vowel="a",
476
- vowel_length=0.0,
477
- pitch=0.0,
478
- ),
479
- ],
480
- accent=1,
481
- pause_mora=Mora(
482
- text="、",
483
- consonant=None,
484
- consonant_length=None,
485
- vowel="pau",
486
- vowel_length=0.0,
487
- pitch=0.0,
488
- ),
489
- is_interrogative=True,
490
- ),
491
- AccentPhrase(
492
- moras=[
493
- Mora(
494
- text="ア",
495
- consonant=None,
496
- consonant_length=None,
497
- vowel="a",
498
- vowel_length=0.0,
499
- pitch=0.0,
500
- ),
501
- ],
502
- accent=1,
503
- pause_mora=None,
504
- is_interrogative=True,
505
- ),
506
- AccentPhrase(
507
- moras=[
508
- Mora(
509
- text="ア",
510
- consonant=None,
511
- consonant_length=None,
512
- vowel="a",
513
- vowel_length=0.0,
514
- pitch=0.0,
515
- ),
516
- ],
517
- accent=1,
518
- pause_mora=None,
519
- is_interrogative=True,
520
- ),
521
- ]
522
-
523
- expected_accent_phrases = (
524
- a_pause_a_question_pause_a_question_a_question_mark_accent_phrases()
525
- )
526
- self._accent_phrase_marks_base(
527
- text="ア'、ア'?、ア'?/ア'?",
528
- expected_accent_phrases=expected_accent_phrases,
529
- )
530
-
531
-
532
- class TestParseKanaException(TestCase):
533
- def _assert_error_code(self, kana: str, code: ParseKanaErrorCode):
534
- with self.assertRaises(ParseKanaError) as err:
535
- parse_kana(kana)
536
- self.assertEqual(err.exception.errcode, code)
537
-
538
- def test_exceptions(self):
539
- self._assert_error_code("アクセント", ParseKanaErrorCode.ACCENT_NOTFOUND)
540
- self._assert_error_code("'アクセント", ParseKanaErrorCode.ACCENT_TOP)
541
- self._assert_error_code("ア'ク'セント", ParseKanaErrorCode.ACCENT_TWICE)
542
- self._assert_error_code("ひ'らがな", ParseKanaErrorCode.UNKNOWN_TEXT)
543
- self._assert_error_code("__ス'", ParseKanaErrorCode.UNKNOWN_TEXT)
544
- self._assert_error_code("ア'/", ParseKanaErrorCode.EMPTY_PHRASE)
545
- self._assert_error_code("/ア'", ParseKanaErrorCode.EMPTY_PHRASE)
546
- self._assert_error_code("", ParseKanaErrorCode.EMPTY_PHRASE)
547
-
548
- with self.assertRaises(ParseKanaError) as err:
549
- parse_kana("ヒト'ツメ/フタツメ")
550
- self.assertEqual(err.exception.errcode, ParseKanaErrorCode.ACCENT_NOTFOUND)
551
- self.assertEqual(err.exception.kwargs, {"text": "フタツメ"})
552
-
553
- with self.assertRaises(ParseKanaError) as err:
554
- parse_kana("ア'/")
555
- self.assertEqual(err.exception.errcode, ParseKanaErrorCode.EMPTY_PHRASE)
556
- self.assertEqual(err.exception.kwargs, {"position": "2"})
557
-
558
- with self.assertRaises(ParseKanaError) as err:
559
- kana_parser.parse_kana("ア?ア'")
560
- self.assertEqual(
561
- err.exception.errcode, ParseKanaErrorCode.INTERROGATION_MARK_NOT_AT_END
562
- )
563
-
564
-
565
- class TestCreateKana(TestCase):
566
- def test_create_kana_interrogative(self):
567
- def koreha_arimasuka_accent_phrases():
568
- return [
569
- AccentPhrase(
570
- moras=[
571
- Mora(
572
- text="コ",
573
- consonant="k",
574
- consonant_length=2.5,
575
- vowel="o",
576
- vowel_length=2.5,
577
- pitch=2.5,
578
- ),
579
- Mora(
580
- text="レ",
581
- consonant="r",
582
- consonant_length=2.5,
583
- vowel="e",
584
- vowel_length=2.5,
585
- pitch=2.5,
586
- ),
587
- Mora(
588
- text="ワ",
589
- consonant="w",
590
- consonant_length=2.5,
591
- vowel="a",
592
- vowel_length=2.5,
593
- pitch=2.5,
594
- ),
595
- ],
596
- accent=3,
597
- pause_mora=None,
598
- is_interrogative=False,
599
- ),
600
- AccentPhrase(
601
- moras=[
602
- Mora(
603
- text="ア",
604
- consonant=None,
605
- consonant_length=None,
606
- vowel="a",
607
- vowel_length=2.5,
608
- pitch=2.5,
609
- ),
610
- Mora(
611
- text="リ",
612
- consonant="r",
613
- consonant_length=2.5,
614
- vowel="i",
615
- vowel_length=2.5,
616
- pitch=2.5,
617
- ),
618
- Mora(
619
- text="マ",
620
- consonant="m",
621
- consonant_length=2.5,
622
- vowel="a",
623
- vowel_length=2.5,
624
- pitch=2.5,
625
- ),
626
- Mora(
627
- text="ス",
628
- consonant="s",
629
- consonant_length=2.5,
630
- vowel="U",
631
- vowel_length=2.5,
632
- pitch=2.5,
633
- ),
634
- Mora(
635
- text="カ",
636
- consonant="k",
637
- consonant_length=2.5,
638
- vowel="a",
639
- vowel_length=2.5,
640
- pitch=2.5,
641
- ),
642
- ],
643
- accent=3,
644
- pause_mora=None,
645
- is_interrogative=False,
646
- ),
647
- ]
648
-
649
- accent_phrases = koreha_arimasuka_accent_phrases()
650
- self.assertEqual(create_kana(accent_phrases), "コレワ'/アリマ'_スカ")
651
-
652
- accent_phrases = koreha_arimasuka_accent_phrases()
653
- accent_phrases[-1].is_interrogative = True
654
- self.assertEqual(create_kana(accent_phrases), "コレワ'/アリマ'_スカ?")
655
-
656
- def kya_accent_phrases():
657
- return [
658
- AccentPhrase(
659
- moras=[
660
- Mora(
661
- text="キャ",
662
- consonant="ky",
663
- consonant_length=2.5,
664
- vowel="a",
665
- vowel_length=2.5,
666
- pitch=2.5,
667
- ),
668
- Mora(
669
- text="ッ",
670
- consonant=None,
671
- consonant_length=None,
672
- vowel="cl",
673
- vowel_length=0.1,
674
- pitch=0,
675
- ),
676
- ],
677
- accent=1,
678
- pause_mora=None,
679
- is_interrogative=False,
680
- ),
681
- ]
682
-
683
- accent_phrases = kya_accent_phrases()
684
- self.assertEqual(create_kana(accent_phrases), "キャ'ッ")
685
-
686
- accent_phrases = kya_accent_phrases()
687
- accent_phrases[-1].is_interrogative = True
688
- self.assertEqual(create_kana(accent_phrases), "キャ'ッ?")
 
spaces/3laa2/Text2img/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Text2img
3
- emoji: 🔥
4
- colorFrom: yellow
5
- colorTo: indigo
6
- sdk: streamlit
7
- sdk_version: 1.19.0
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/42digital/DeepFashion_Classification/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: DeepFashion Classification
3
- emoji: 🏆
4
- colorFrom: purple
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: true
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AI-ANK/PaLM-Kosmos-Vision/app.py DELETED
@@ -1,165 +0,0 @@
1
- import streamlit as st
2
- import extra_streamlit_components as stx
3
- import requests
4
- from PIL import Image
5
- from transformers import AutoProcessor, AutoModelForVision2Seq
6
- from io import BytesIO
7
- import replicate
8
- from llama_index.llms.palm import PaLM
9
- from llama_index import ServiceContext, VectorStoreIndex, Document
10
- from llama_index.memory import ChatMemoryBuffer
11
- import os
12
- import datetime
13
-
14
- # Set up the title of the application
15
- #st.title("PaLM-Kosmos-Vision")
16
- st.set_page_config(layout="wide")
17
- st.write("My version of ChatGPT vision. You can upload an image and start chatting with the LLM about the image")
18
-
19
- # Sidebar
20
- st.sidebar.markdown('## Created By')
21
- st.sidebar.markdown("""
22
- [Harshad Suryawanshi](https://www.linkedin.com/in/harshadsuryawanshi/)
23
- """)
24
-
25
- st.sidebar.markdown('## Other Projects')
26
- st.sidebar.markdown("""
27
- - [AI Equity Research Analyst](https://ai-eqty-rsrch-anlyst.streamlit.app/)
28
- - [Recasting "The Office" Scene](https://blackmirroroffice.streamlit.app/)
29
- - [Story Generator](https://appstorycombined-agaf9j4ceit.streamlit.app/)
30
- """)
31
-
32
- st.sidebar.markdown('## Disclaimer')
33
- st.sidebar.markdown("""
34
- This application is a conceptual prototype created to demonstrate the potential of Large Language Models (LLMs) combined with vision models for conversational image understanding. The contents generated by this application are purely illustrative and should not be construed as professional advice, endorsements, or recommendations. The author and the application do not provide any guarantee regarding the accuracy, completeness, or timeliness of the information provided.
35
- """)
36
-
37
- # Initialize the cookie manager
38
- cookie_manager = stx.CookieManager()
39
-
40
- # Function to get image caption via Kosmos2.
41
- @st.cache_data
42
- def get_image_caption(image_data):
43
- input_data = {
44
- "image": image_data,
45
- "description_type": "Brief"
46
- }
47
- output = replicate.run(
48
- "lucataco/kosmos-2:3e7b211c29c092f4bcc8853922cc986baa52efe255876b80cac2c2fbb4aff805",
49
- input=input_data
50
- )
51
- # Split the output string on the newline character and take the first item
52
- text_description = output.split('\n\n')[0]
53
- return text_description
54
-
55
- # Function to create the chat engine.
56
- @st.cache_resource
57
- def create_chat_engine(img_desc, api_key):
58
- llm = PaLM(api_key=api_key)
59
- service_context = ServiceContext.from_defaults(llm=llm)
60
- doc = Document(text=img_desc)
61
- index = VectorStoreIndex.from_documents([doc], service_context=service_context)
62
- chatmemory = ChatMemoryBuffer.from_defaults(token_limit=1500)
63
-
64
- chat_engine = index.as_chat_engine(
65
- chat_mode="context",
66
- system_prompt=(
67
- f"You are a chatbot, able to have normal interactions, as well as talk. "
68
- "You always answer in great detail and are polite. Your responses are always descriptive. "
69
- f"Your job is to talk about an image the user has uploaded. Image description: {img_desc}."
70
- ),
71
- verbose=True,
72
- memory=chatmemory
73
- )
74
- return chat_engine
75
-
76
- # Clear chat function
77
- def clear_chat():
78
- if "messages" in st.session_state:
79
- del st.session_state.messages
80
- if "image_file" in st.session_state:
81
- del st.session_state.image_file
82
-
83
- # Callback function to clear the chat when a new image is uploaded
84
- def on_image_upload():
85
- clear_chat()
86
-
87
- # Retrieve the message count from cookies
88
- message_count = cookie_manager.get(cookie='message_count')
89
- if message_count is None:
90
- message_count = 0
91
- else:
92
- message_count = int(message_count)
93
-
94
- # If the message limit has been reached, disable the inputs
95
- if message_count >= 20:
96
- st.error("Notice: The maximum message limit for this demo version has been reached.")
97
- # Disabling the uploader and input by not displaying them
98
- image_uploader_placeholder = st.empty() # Placeholder for the uploader
99
- chat_input_placeholder = st.empty() # Placeholder for the chat input
100
- else:
101
- # Add a clear chat button
102
- if st.button("Clear Chat"):
103
- clear_chat()
104
-
105
- # Image upload section.
106
- image_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"], key="uploaded_image", on_change=on_image_upload)
107
- if image_file:
108
- # Display the uploaded image at a standard width.
109
- st.image(image_file, caption='Uploaded Image.', width=200)
110
- # Process the uploaded image to get a caption.
111
- image_data = BytesIO(image_file.getvalue())
112
- img_desc = get_image_caption(image_data)
113
- st.write("Image Uploaded Successfully. Ask me anything about it.")
114
-
115
- # Initialize the chat engine with the image description.
116
- chat_engine = create_chat_engine(img_desc, os.environ["GOOGLE_API_KEY"])
117
-
118
- # Initialize session state for messages if it doesn't exist
119
- if "messages" not in st.session_state:
120
- st.session_state.messages = []
121
-
122
- # Display previous messages
123
- for message in st.session_state.messages:
124
- with st.chat_message(message["role"]):
125
- st.markdown(message["content"])
126
-
127
- # Handle new user input
128
- user_input = st.chat_input("Ask me about the image:", key="chat_input")
129
- if user_input:
130
- # Append user message to the session state
131
- st.session_state.messages.append({"role": "user", "content": user_input})
132
-
133
- # Display user message immediately
134
- with st.chat_message("user"):
135
- st.markdown(user_input)
136
-
137
- # Call the chat engine to get the response if an image has been uploaded
138
- if image_file and user_input:
139
- try:
140
- with st.spinner('Waiting for the chat engine to respond...'):
141
- # Get the response from your chat engine
142
- response = chat_engine.chat(user_input)
143
-
144
- # Append assistant message to the session state
145
- st.session_state.messages.append({"role": "assistant", "content": response})
146
-
147
- # Display the assistant message
148
- with st.chat_message("assistant"):
149
- st.markdown(response)
150
-
151
- except Exception as e:
152
- st.error(f'An error occurred: {e}')
153
- # Optionally, you can choose to break the flow here if a critical error happens
154
- # return
155
-
156
- # Increment the message count and update the cookie
157
- message_count += 1
158
- cookie_manager.set('message_count', str(message_count), expires_at=datetime.datetime.now() + datetime.timedelta(days=30))
159
-
160
-
161
-
162
-
163
- # Set Replicate and Google API keys
164
- os.environ['REPLICATE_API_TOKEN'] = st.secrets['REPLICATE_API_TOKEN']
165
- os.environ["GOOGLE_API_KEY"] = st.secrets['GOOGLE_API_KEY']
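- # NOTE: Streamlit reruns this script top to bottom, so the os.environ lookups above rely on these keys persisting in the process environment from an earlier pass; setting them before first use would be more robust.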
 
spaces/AIConsultant/MusicGen/audiocraft/models/unet.py DELETED
@@ -1,214 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- """
8
- Pytorch Unet Module used for diffusion.
9
- """
10
-
11
- from dataclasses import dataclass
12
- import typing as tp
13
-
14
- import torch
15
- from torch import nn
16
- from torch.nn import functional as F
17
- from audiocraft.modules.transformer import StreamingTransformer, create_sin_embedding
18
-
19
-
20
- @dataclass
21
- class Output:
22
- sample: torch.Tensor
23
-
24
-
25
- def get_model(cfg, channels: int, side: int, num_steps: int):
26
- if cfg.model == 'unet':
27
- return DiffusionUnet(
28
- chin=channels, num_steps=num_steps, **cfg.diffusion_unet)
29
- else:
30
- raise RuntimeError('Not Implemented')
31
-
32
-
33
- class ResBlock(nn.Module):
34
- def __init__(self, channels: int, kernel: int = 3, norm_groups: int = 4,
35
- dilation: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
36
- dropout: float = 0.):
37
- super().__init__()
38
- stride = 1
39
- padding = dilation * (kernel - stride) // 2
40
- Conv = nn.Conv1d
41
- Drop = nn.Dropout1d
42
- self.norm1 = nn.GroupNorm(norm_groups, channels)
43
- self.conv1 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
44
- self.activation1 = activation()
45
- self.dropout1 = Drop(dropout)
46
-
47
- self.norm2 = nn.GroupNorm(norm_groups, channels)
48
- self.conv2 = Conv(channels, channels, kernel, 1, padding, dilation=dilation)
49
- self.activation2 = activation()
50
- self.dropout2 = Drop(dropout)
51
-
52
- def forward(self, x):
53
- h = self.dropout1(self.conv1(self.activation1(self.norm1(x))))
54
- h = self.dropout2(self.conv2(self.activation2(self.norm2(h))))
55
- return x + h
56
-
57
-
58
- class DecoderLayer(nn.Module):
59
- def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
60
- norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
61
- dropout: float = 0.):
62
- super().__init__()
63
- padding = (kernel - stride) // 2
64
- self.res_blocks = nn.Sequential(
65
- *[ResBlock(chin, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
66
- for idx in range(res_blocks)])
67
- self.norm = nn.GroupNorm(norm_groups, chin)
68
- ConvTr = nn.ConvTranspose1d
69
- self.convtr = ConvTr(chin, chout, kernel, stride, padding, bias=False)
70
- self.activation = activation()
71
-
72
- def forward(self, x: torch.Tensor) -> torch.Tensor:
73
- x = self.res_blocks(x)
74
- x = self.norm(x)
75
- x = self.activation(x)
76
- x = self.convtr(x)
77
- return x
78
-
79
-
80
- class EncoderLayer(nn.Module):
81
- def __init__(self, chin: int, chout: int, kernel: int = 4, stride: int = 2,
82
- norm_groups: int = 4, res_blocks: int = 1, activation: tp.Type[nn.Module] = nn.ReLU,
83
- dropout: float = 0.):
84
- super().__init__()
85
- padding = (kernel - stride) // 2
86
- Conv = nn.Conv1d
87
- self.conv = Conv(chin, chout, kernel, stride, padding, bias=False)
88
- self.norm = nn.GroupNorm(norm_groups, chout)
89
- self.activation = activation()
90
- self.res_blocks = nn.Sequential(
91
- *[ResBlock(chout, norm_groups=norm_groups, dilation=2**idx, dropout=dropout)
92
- for idx in range(res_blocks)])
93
-
94
- def forward(self, x: torch.Tensor) -> torch.Tensor:
95
- B, C, T = x.shape
96
- stride, = self.conv.stride
97
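- # right-pad the time axis to a multiple of the stride so the strided conv sees every frame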
- pad = (stride - (T % stride)) % stride
98
- x = F.pad(x, (0, pad))
99
-
100
- x = self.conv(x)
101
- x = self.norm(x)
102
- x = self.activation(x)
103
- x = self.res_blocks(x)
104
- return x
105
-
106
-
107
- class BLSTM(nn.Module):
108
- """BiLSTM with same hidden units as input dim.
109
- """
110
- def __init__(self, dim, layers=2):
111
- super().__init__()
112
- self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
113
- self.linear = nn.Linear(2 * dim, dim)
114
-
115
- def forward(self, x):
116
- x = x.permute(2, 0, 1)
117
- x = self.lstm(x)[0]
118
- x = self.linear(x)
119
- x = x.permute(1, 2, 0)
120
- return x
121
-
122
-
123
- class DiffusionUnet(nn.Module):
124
- def __init__(self, chin: int = 3, hidden: int = 24, depth: int = 3, growth: float = 2.,
125
- max_channels: int = 10_000, num_steps: int = 1000, emb_all_layers=False, cross_attention: bool = False,
126
- bilstm: bool = False, transformer: bool = False,
127
- codec_dim: tp.Optional[int] = None, **kwargs):
128
- super().__init__()
129
- self.encoders = nn.ModuleList()
130
- self.decoders = nn.ModuleList()
131
- self.embeddings: tp.Optional[nn.ModuleList] = None
132
- self.embedding = nn.Embedding(num_steps, hidden)
133
- if emb_all_layers:
134
- self.embeddings = nn.ModuleList()
135
- self.condition_embedding: tp.Optional[nn.Module] = None
136
- for d in range(depth):
137
- encoder = EncoderLayer(chin, hidden, **kwargs)
138
- decoder = DecoderLayer(hidden, chin, **kwargs)
139
- self.encoders.append(encoder)
140
- self.decoders.insert(0, decoder)
141
- if emb_all_layers and d > 0:
142
- assert self.embeddings is not None
143
- self.embeddings.append(nn.Embedding(num_steps, hidden))
144
- chin = hidden
145
- hidden = min(int(chin * growth), max_channels)
146
- self.bilstm: tp.Optional[nn.Module]
147
- if bilstm:
148
- self.bilstm = BLSTM(chin)
149
- else:
150
- self.bilstm = None
151
- self.use_transformer = transformer
152
- self.cross_attention = False
153
- if transformer:
154
- self.cross_attention = cross_attention
155
- self.transformer = StreamingTransformer(chin, 8, 6, bias_ff=False, bias_attn=False,
156
- cross_attention=cross_attention)
157
-
158
- self.use_codec = False
159
- if codec_dim is not None:
160
- self.conv_codec = nn.Conv1d(codec_dim, chin, 1)
161
- self.use_codec = True
162
-
163
- def forward(self, x: torch.Tensor, step: tp.Union[int, torch.Tensor], condition: tp.Optional[torch.Tensor] = None):
164
- skips = []
165
- bs = x.size(0)
166
- z = x
167
- view_args = [1]
168
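- # accept either a scalar diffusion step or a per-sample tensor of steps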
- if type(step) is torch.Tensor:
169
- step_tensor = step
170
- else:
171
- step_tensor = torch.tensor([step], device=x.device, dtype=torch.long).expand(bs)
172
-
173
- for idx, encoder in enumerate(self.encoders):
174
- z = encoder(z)
175
- if idx == 0:
176
- z = z + self.embedding(step_tensor).view(bs, -1, *view_args).expand_as(z)
177
- elif self.embeddings is not None:
178
- z = z + self.embeddings[idx - 1](step_tensor).view(bs, -1, *view_args).expand_as(z)
179
-
180
- skips.append(z)
181
-
182
- if self.use_codec: # insert condition in the bottleneck
183
- assert condition is not None, "Model defined for conditional generation"
184
- condition_emb = self.conv_codec(condition) # reshape to the bottleneck dim
185
- assert condition_emb.size(-1) <= 2 * z.size(-1), \
186
- f"You are downsampling the conditioning with factor >=2 : {condition_emb.size(-1)=} and {z.size(-1)=}"
187
- if not self.cross_attention:
188
-
189
- condition_emb = torch.nn.functional.interpolate(condition_emb, z.size(-1))
190
- assert z.size() == condition_emb.size()
191
- z += condition_emb
192
- cross_attention_src = None
193
- else:
194
- cross_attention_src = condition_emb.permute(0, 2, 1) # B, T, C
195
- B, T, C = cross_attention_src.shape
196
- positions = torch.arange(T, device=x.device).view(1, -1, 1)
197
- pos_emb = create_sin_embedding(positions, C, max_period=10_000, dtype=cross_attention_src.dtype)
198
- cross_attention_src = cross_attention_src + pos_emb
199
- if self.use_transformer:
200
- z = self.transformer(z.permute(0, 2, 1), cross_attention_src=cross_attention_src).permute(0, 2, 1)
201
- else:
202
- if self.bilstm is None:
203
- z = torch.zeros_like(z)
204
- else:
205
- z = self.bilstm(z)
206
-
207
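- # decode with U-Net skip connections, cropping each scale to the matching encoder length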
- for decoder in self.decoders:
208
- s = skips.pop(-1)
209
- z = z[:, :, :s.shape[2]]
210
- z = z + s
211
- z = decoder(z)
212
-
213
- z = z[:, :, :x.shape[2]]
214
- return Output(z)
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/ema.py DELETED
@@ -1,76 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- class LitEma(nn.Module):
6
- def __init__(self, model, decay=0.9999, use_num_upates=True):
7
- super().__init__()
8
- if decay < 0.0 or decay > 1.0:
9
- raise ValueError('Decay must be between 0 and 1')
10
-
11
- self.m_name2s_name = {}
12
- self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13
- self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_updates
14
- else torch.tensor(-1,dtype=torch.int))
15
-
16
- for name, p in model.named_parameters():
17
- if p.requires_grad:
18
- #remove as '.'-character is not allowed in buffers
19
- s_name = name.replace('.','')
20
- self.m_name2s_name.update({name:s_name})
21
- self.register_buffer(s_name,p.clone().detach().data)
22
-
23
- self.collected_params = []
24
-
25
- def forward(self,model):
26
- decay = self.decay
27
-
28
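- # warm-up: ramp the effective decay up from a small value toward self.decay over the first updates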
- if self.num_updates >= 0:
29
- self.num_updates += 1
30
- decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
31
-
32
- one_minus_decay = 1.0 - decay
33
-
34
- with torch.no_grad():
35
- m_param = dict(model.named_parameters())
36
- shadow_params = dict(self.named_buffers())
37
-
38
- for key in m_param:
39
- if m_param[key].requires_grad:
40
- sname = self.m_name2s_name[key]
41
- shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
42
- shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
43
- else:
44
- assert not key in self.m_name2s_name
45
-
46
- def copy_to(self, model):
47
- m_param = dict(model.named_parameters())
48
- shadow_params = dict(self.named_buffers())
49
- for key in m_param:
50
- if m_param[key].requires_grad:
51
- m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
52
- else:
53
- assert not key in self.m_name2s_name
54
-
55
- def store(self, parameters):
56
- """
57
- Save the current parameters for restoring later.
58
- Args:
59
- parameters: Iterable of `torch.nn.Parameter`; the parameters to be
60
- temporarily stored.
61
- """
62
- self.collected_params = [param.clone() for param in parameters]
63
-
64
- def restore(self, parameters):
65
- """
66
- Restore the parameters stored with the `store` method.
67
- Useful to validate the model with EMA parameters without affecting the
68
- original optimization process. Store the parameters before the
69
- `copy_to` method. After validation (or model saving), use this to
70
- restore the former parameters.
71
- Args:
72
- parameters: Iterable of `torch.nn.Parameter`; the parameters to be
73
- updated with the stored parameters.
74
- """
75
- for c_param, param in zip(self.collected_params, parameters):
76
- param.data.copy_(c_param.data)
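A minimal usage sketch for the store/copy_to/restore pattern above (model, loader, train_step and validate are hypothetical placeholders, not part of the deleted file):

    ema = LitEma(model)                   # register one shadow buffer per trainable parameter
    for batch in loader:
        train_step(model, batch)          # hypothetical optimizer step
        ema(model)                        # update the shadow weights
    ema.store(model.parameters())         # stash the raw weights
    ema.copy_to(model)                    # swap in the EMA weights
    validate(model)                       # hypothetical evaluation
    ema.restore(model.parameters())       # swap the raw weights back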
 
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/test.py DELETED
@@ -1,31 +0,0 @@
1
- import argparse
2
- import random
3
- from tester import DiacritizationTester
4
-
5
- import numpy as np
6
- import torch
7
-
8
-
9
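- # pin every RNG (Python, NumPy, PyTorch, cuDNN) so diacritization test runs are reproducible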
- SEED = 1234
10
- random.seed(SEED)
11
- np.random.seed(SEED)
12
- torch.manual_seed(SEED)
13
- torch.cuda.manual_seed(SEED)
14
- torch.backends.cudnn.deterministic = True
15
- torch.backends.cudnn.benchmark = False
16
-
17
-
18
- def train_parser():
19
- parser = argparse.ArgumentParser()
20
- parser.add_argument("--model", dest="model_kind", type=str, required=True)
21
- parser.add_argument("--config", dest="config", type=str, required=True)
22
- parser.add_argument("--model_path", dest="model_path", type=str, required=False)
23
- parser.add_argument("--test", dest="test", type=bool)
24
- return parser
25
-
26
-
27
- parser = train_parser()
28
- args = parser.parse_args()
29
-
30
- tester = DiacritizationTester(args.config, args.model_kind)
31
- tester.run()
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/You.py DELETED
@@ -1,40 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
-
5
- from ..requests import StreamSession
6
- from ..typing import AsyncGenerator, Messages
7
- from .base_provider import AsyncGeneratorProvider, format_prompt
8
-
9
-
10
- class You(AsyncGeneratorProvider):
11
- url = "https://you.com"
12
- working = True
13
- supports_gpt_35_turbo = True
14
-
15
-
16
- @classmethod
17
- async def create_async_generator(
18
- cls,
19
- model: str,
20
- messages: Messages,
21
- proxy: str = None,
22
- timeout: int = 120,
23
- **kwargs,
24
- ) -> AsyncGenerator:
25
- async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
26
- headers = {
27
- "Accept": "text/event-stream",
28
- "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
29
- }
30
- data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
31
- async with session.get(
32
- f"{cls.url}/api/streamingSearch",
33
- params=data,
34
- headers=headers
35
- ) as response:
36
- response.raise_for_status()
37
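- # the endpoint streams server-sent events; pull the youChatToken payload out of each matching line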
- start = b'data: {"youChatToken": '
38
- async for line in response.iter_lines():
39
- if line.startswith(start):
40
- yield json.loads(line[len(start):-1])
 
spaces/Adapter/T2I-Adapter/train_seg.py DELETED
@@ -1,372 +0,0 @@
1
- import cv2
2
- import torch
3
- import os
4
- from basicsr.utils import img2tensor, tensor2img, scandir, get_time_str, get_root_logger, get_env_info
5
- from ldm.data.dataset_coco import dataset_coco_mask_color
6
- import argparse
7
- from ldm.models.diffusion.ddim import DDIMSampler
8
- from ldm.models.diffusion.plms import PLMSSampler
9
- from ldm.models.diffusion.dpm_solver import DPMSolverSampler
10
- from omegaconf import OmegaConf
11
- from ldm.util import instantiate_from_config
12
- from ldm.modules.encoders.adapter import Adapter
13
- from PIL import Image
14
- import numpy as np
15
- import torch.nn as nn
16
- import matplotlib.pyplot as plt
17
- import time
18
- import os.path as osp
19
- from basicsr.utils.options import copy_opt_file, dict2str
20
- import logging
21
- from dist_util import init_dist, master_only, get_bare_model, get_dist_info
22
-
23
- def load_model_from_config(config, ckpt, verbose=False):
24
- print(f"Loading model from {ckpt}")
25
- pl_sd = torch.load(ckpt, map_location="cpu")
26
- if "global_step" in pl_sd:
27
- print(f"Global Step: {pl_sd['global_step']}")
28
- sd = pl_sd["state_dict"]
29
- model = instantiate_from_config(config.model)
30
- m, u = model.load_state_dict(sd, strict=False)
31
- if len(m) > 0 and verbose:
32
- print("missing keys:")
33
- print(m)
34
- if len(u) > 0 and verbose:
35
- print("unexpected keys:")
36
- print(u)
37
-
38
- model.cuda()
39
- model.eval()
40
- return model
41
-
42
- @master_only
43
- def mkdir_and_rename(path):
44
- """mkdirs. If path exists, rename it with timestamp and create a new one.
45
-
46
- Args:
47
- path (str): Folder path.
48
- """
49
- if osp.exists(path):
50
- new_name = path + '_archived_' + get_time_str()
51
- print(f'Path already exists. Rename it to {new_name}', flush=True)
52
- os.rename(path, new_name)
53
- os.makedirs(path, exist_ok=True)
54
- os.makedirs(osp.join(experiments_root, 'models'))
55
- os.makedirs(osp.join(experiments_root, 'training_states'))
56
- os.makedirs(osp.join(experiments_root, 'visualization'))
57
-
58
- def load_resume_state(opt):
59
- resume_state_path = None
60
- if opt.auto_resume:
61
- state_path = osp.join('experiments', opt.name, 'training_states')
62
- if osp.isdir(state_path):
63
- states = list(scandir(state_path, suffix='state', recursive=False, full_path=False))
64
- if len(states) != 0:
65
- states = [float(v.split('.state')[0]) for v in states]
66
- resume_state_path = osp.join(state_path, f'{max(states):.0f}.state')
67
- opt.resume_state_path = resume_state_path
68
- # else:
69
- # if opt['path'].get('resume_state'):
70
- # resume_state_path = opt['path']['resume_state']
71
-
72
- if resume_state_path is None:
73
- resume_state = None
74
- else:
75
- device_id = torch.cuda.current_device()
76
- resume_state = torch.load(resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id))
77
- # check_resume(opt, resume_state['iter'])
78
- return resume_state
79
-
80
- parser = argparse.ArgumentParser()
81
- parser.add_argument(
82
- "--bsize",
83
- type=int,
84
- default=8,
85
- help="training batch size"
86
- )
87
- parser.add_argument(
88
- "--epochs",
89
- type=int,
90
- default=10000,
91
- help="number of training epochs"
92
- )
93
- parser.add_argument(
94
- "--num_workers",
95
- type=int,
96
- default=8,
97
- help="number of dataloader workers"
98
- )
99
- parser.add_argument(
100
- "--use_shuffle",
101
- type=bool,
102
- default=True,
103
- help="whether to shuffle the training data"
104
- )
105
- parser.add_argument(
106
- "--dpm_solver",
107
- action='store_true',
108
- help="use dpm_solver sampling",
109
- )
110
- parser.add_argument(
111
- "--plms",
112
- action='store_true',
113
- help="use plms sampling",
114
- )
115
- parser.add_argument(
116
- "--auto_resume",
117
- action='store_true',
118
- help="automatically resume training from the latest saved state",
119
- )
120
- parser.add_argument(
121
- "--ckpt",
122
- type=str,
123
- default="ckp/sd-v1-4.ckpt",
124
- help="path to checkpoint of model",
125
- )
126
- parser.add_argument(
127
- "--config",
128
- type=str,
129
- default="configs/stable-diffusion/train_mask.yaml",
130
- help="path to config which constructs model",
131
- )
132
- parser.add_argument(
133
- "--print_fq",
134
- type=int,
135
- default=100,
136
- help="logging frequency, in iterations",
137
- )
138
- parser.add_argument(
139
- "--H",
140
- type=int,
141
- default=512,
142
- help="image height, in pixel space",
143
- )
144
- parser.add_argument(
145
- "--W",
146
- type=int,
147
- default=512,
148
- help="image width, in pixel space",
149
- )
150
- parser.add_argument(
151
- "--C",
152
- type=int,
153
- default=4,
154
- help="latent channels",
155
- )
156
- parser.add_argument(
157
- "--f",
158
- type=int,
159
- default=8,
160
- help="downsampling factor",
161
- )
162
- parser.add_argument(
163
- "--ddim_steps",
164
- type=int,
165
- default=50,
166
- help="number of ddim sampling steps",
167
- )
168
- parser.add_argument(
169
- "--n_samples",
170
- type=int,
171
- default=1,
172
- help="how many samples to produce for each given prompt. A.k.a. batch size",
173
- )
174
- parser.add_argument(
175
- "--ddim_eta",
176
- type=float,
177
- default=0.0,
178
- help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
179
- )
180
- parser.add_argument(
181
- "--scale",
182
- type=float,
183
- default=7.5,
184
- help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
185
- )
186
- parser.add_argument(
187
- "--gpus",
188
- default=[0,1,2,3],
189
- help="gpu idx",
190
- )
191
- parser.add_argument(
192
- '--local_rank',
193
- default=0,
194
- type=int,
195
- help='node rank for distributed training'
196
- )
197
- parser.add_argument(
198
- '--launcher',
199
- default='pytorch',
200
- type=str,
201
- help='job launcher for distributed training'
202
- )
203
- opt = parser.parse_args()
204
-
205
- if __name__ == '__main__':
206
- config = OmegaConf.load(f"{opt.config}")
207
- opt.name = config['name']
208
-
209
- # distributed setting
210
- init_dist(opt.launcher)
211
- torch.backends.cudnn.benchmark = True
212
- device='cuda'
213
- torch.cuda.set_device(opt.local_rank)
214
-
215
- # dataset
216
- path_json_train = 'coco_stuff/mask/annotations/captions_train2017.json'
217
- path_json_val = 'coco_stuff/mask/annotations/captions_val2017.json'
218
- train_dataset = dataset_coco_mask_color(path_json_train,
219
- root_path_im='coco/train2017',
220
- root_path_mask='coco_stuff/mask/train2017_color',
221
- image_size=512
222
- )
223
- train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
224
- val_dataset = dataset_coco_mask_color(path_json_val,
225
- root_path_im='coco/val2017',
226
- root_path_mask='coco_stuff/mask/val2017_color',
227
- image_size=512
228
- )
229
- train_dataloader = torch.utils.data.DataLoader(
230
- train_dataset,
231
- batch_size=opt.bsize,
232
- shuffle=(train_sampler is None),
233
- num_workers=opt.num_workers,
234
- pin_memory=True,
235
- sampler=train_sampler)
236
- val_dataloader = torch.utils.data.DataLoader(
237
- val_dataset,
238
- batch_size=1,
239
- shuffle=False,
240
- num_workers=1,
241
- pin_memory=False)
242
-
243
- # stable diffusion
244
- model = load_model_from_config(config, f"{opt.ckpt}").to(device)
245
-
246
- # sketch encoder
247
- model_ad = Adapter(cin=int(3*64), channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False).to(device)
248
-
249
-
250
- # to gpus
251
- model_ad = torch.nn.parallel.DistributedDataParallel(
252
- model_ad,
253
- device_ids=[opt.local_rank],
254
- output_device=opt.local_rank)
255
- model = torch.nn.parallel.DistributedDataParallel(
256
- model,
257
- device_ids=[opt.local_rank],
258
- output_device=opt.local_rank)
259
- # device_ids=[torch.cuda.current_device()])
260
-
261
- # optimizer
262
- params = list(model_ad.parameters())
263
- optimizer = torch.optim.AdamW(params, lr=config['training']['lr'])
264
-
265
- experiments_root = osp.join('experiments', opt.name)
266
-
267
- # resume state
268
- resume_state = load_resume_state(opt)
269
- if resume_state is None:
270
- mkdir_and_rename(experiments_root)
271
- start_epoch = 0
272
- current_iter = 0
273
- # WARNING: should not use get_root_logger in the above codes, including the called functions
274
- # Otherwise the logger will not be properly initialized
275
- log_file = osp.join(experiments_root, f"train_{opt.name}_{get_time_str()}.log")
276
- logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
277
- logger.info(get_env_info())
278
- logger.info(dict2str(config))
279
- else:
280
- # WARNING: should not use get_root_logger in the above codes, including the called functions
281
- # Otherwise the logger will not be properly initialized
282
- log_file = osp.join(experiments_root, f"train_{opt.name}_{get_time_str()}.log")
283
- logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
284
- logger.info(get_env_info())
285
- logger.info(dict2str(config))
286
- resume_optimizers = resume_state['optimizers']
287
- optimizer.load_state_dict(resume_optimizers)
288
- logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.")
289
- start_epoch = resume_state['epoch']
290
- current_iter = resume_state['iter']
291
-
292
- # copy the yml file to the experiment root
293
- copy_opt_file(opt.config, experiments_root)
294
-
295
- # training
296
- logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
297
- for epoch in range(start_epoch, opt.epochs):
298
- train_dataloader.sampler.set_epoch(epoch)
299
- # train
300
- for _, data in enumerate(train_dataloader):
301
- current_iter += 1
302
- with torch.no_grad():
303
- c = model.module.get_learned_conditioning(data['sentence'])
304
- z = model.module.encode_first_stage((data['im']*2-1.).cuda(non_blocking=True))
305
- z = model.module.get_first_stage_encoding(z)
306
-
307
- mask = data['mask']
308
- optimizer.zero_grad()
309
- model.zero_grad()
310
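- # map the segmentation mask to multi-scale features that condition the frozen diffusion UNet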
- features_adapter = model_ad(mask)
311
- l_pixel, loss_dict = model(z, c=c, features_adapter = features_adapter)
312
- l_pixel.backward()
313
- optimizer.step()
314
-
315
- if (current_iter+1)%opt.print_fq == 0:
316
- logger.info(loss_dict)
317
-
318
- # save checkpoint
319
- rank, _ = get_dist_info()
320
- if (rank==0) and ((current_iter+1)%config['training']['save_freq'] == 0):
321
- save_filename = f'model_ad_{current_iter+1}.pth'
322
- save_path = os.path.join(experiments_root, 'models', save_filename)
323
- save_dict = {}
324
- model_ad_bare = get_bare_model(model_ad)
325
- state_dict = model_ad_bare.state_dict()
326
- for key, param in state_dict.items():
327
- if key.startswith('module.'): # remove unnecessary 'module.'
328
- key = key[7:]
329
- save_dict[key] = param.cpu()
330
- torch.save(save_dict, save_path)
331
- # save state
332
- state = {'epoch': epoch, 'iter': current_iter+1, 'optimizers': optimizer.state_dict()}
333
- save_filename = f'{current_iter+1}.state'
334
- save_path = os.path.join(experiments_root, 'training_states', save_filename)
335
- torch.save(state, save_path)
336
-
337
- # val
338
- rank, _ = get_dist_info()
339
- if rank==0:
340
- for data in val_dataloader:
341
- with torch.no_grad():
342
- if opt.dpm_solver:
343
- sampler = DPMSolverSampler(model.module)
344
- elif opt.plms:
345
- sampler = PLMSSampler(model.module)
346
- else:
347
- sampler = DDIMSampler(model.module)
348
- c = model.module.get_learned_conditioning(data['sentence'])
349
- mask = data['mask']
350
- im_mask = tensor2img(mask)
351
- cv2.imwrite(os.path.join(experiments_root, 'visualization', 'mask_%04d.png'%epoch), im_mask)
352
- features_adapter = model_ad(mask)
353
- shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
354
- samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
355
- conditioning=c,
356
- batch_size=opt.n_samples,
357
- shape=shape,
358
- verbose=False,
359
- unconditional_guidance_scale=opt.scale,
360
- unconditional_conditioning=model.module.get_learned_conditioning(opt.n_samples * [""]),
361
- eta=opt.ddim_eta,
362
- x_T=None,
363
- features_adapter=features_adapter)
364
- x_samples_ddim = model.module.decode_first_stage(samples_ddim)
365
- x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
366
- x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy()
367
- for id_sample, x_sample in enumerate(x_samples_ddim):
368
- x_sample = 255.*x_sample
369
- img = x_sample.astype(np.uint8)
370
- img = cv2.putText(img.copy(), data['sentence'][0], (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2)
371
- cv2.imwrite(os.path.join(experiments_root, 'visualization', 'sample_e%04d_s%04d.png'%(epoch, id_sample)), img[:,:,::-1])
372
- break
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/buttons/RemoveChildMethods.js DELETED
@@ -1,55 +0,0 @@
1
- import Sizer from '../sizer/Sizer.js';
2
- import IsArray from '../../../plugins/utils/object/IsArray.js';
3
-
4
- const SizerRemove = Sizer.prototype.remove;
5
- const SizerClear = Sizer.prototype.clear;
6
-
7
- var Remove = function (gameObject, destroyChild) {
8
- if (this.getParentSizer(gameObject) !== this) {
9
- return this;
10
- }
11
-
12
- this.buttonGroup.remove(gameObject);
13
- SizerRemove.call(this, gameObject, destroyChild);
14
- return this;
15
- };
16
-
17
- export default {
18
- remove(gameObject, destroyChild) {
19
- // Remove gameObject whether or not it is a button
20
- if (IsArray(gameObject)) {
21
- var gameObjects = gameObject;
22
- for (var i = 0, cnt = gameObjects.length; i < cnt; i++) {
23
- Remove.call(this, gameObjects[i], destroyChild);
24
- }
25
- } else {
26
- Remove.call(this, gameObject, destroyChild);
27
- }
28
- return this;
29
- },
30
-
31
- clear(destroyChild) {
32
- var buttons = this.buttonGroup.buttons;
33
- buttons.length = 0;
34
- SizerClear.call(this, destroyChild);
35
- return this;
36
- },
37
-
38
- removeButton(gameObject, destroyChild) {
39
- var gameObject = this.getButton(gameObject);
40
- // Don't remove this gameObject, it is not a button
41
- if (!gameObject) {
42
- return this;
43
- }
44
- this.remove(gameObject, destroyChild);
45
- return this;
46
- },
47
-
48
- clearButtons(destroyChild) {
49
- var buttons = this.buttonGroup.buttons;
50
- for (var i = buttons.length - 1; i >= 0; i--) {
51
- Remove.call(this, buttons[i], destroyChild);
52
- }
53
- return this;
54
- }
55
- }
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
- import CustomProgress from "./CustomProgress";
-
- export default function (
-     config?: CustomProgress.IConfig
- ): CustomProgress;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/Factory.js DELETED
@@ -1,13 +0,0 @@
- import CustomProgress from './CustomProgress.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('customProgress', function (x, y, width, height, config) {
-     var gameObject = new CustomProgress(this.scene, x, y, width, height, config);
-     this.scene.add.existing(gameObject);
-     return gameObject;
- });
-
- SetValue(window, 'RexPlugins.UI.CustomProgress', CustomProgress);
-
- export default CustomProgress;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/space/Factory.d.ts DELETED
@@ -1,3 +0,0 @@
- import Space from './Space';
-
- export default function (): Space;
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py DELETED
@@ -1,4 +0,0 @@
- _base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://detectron2/resnet101_caffe',
-     backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     backbone=dict(
-         dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
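Both config fragments rely on MMDetection's `_base_` inheritance: a child file states only the fields it overrides. A hypothetical third config in the same style (the file name and depth value are invented for illustration):

# hypothetical_r152_variant.py - same inheritance pattern, one overridden field
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(backbone=dict(depth=152))  # deepen the backbone; everything else is inherited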
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/gfocal_loss.py DELETED
@@ -1,188 +0,0 @@
- import mmcv
- import torch.nn as nn
- import torch.nn.functional as F
-
- from ..builder import LOSSES
- from .utils import weighted_loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def quality_focal_loss(pred, target, beta=2.0):
-     r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
-     Qualified and Distributed Bounding Boxes for Dense Object Detection
-     <https://arxiv.org/abs/2006.04388>`_.
-
-     Args:
-         pred (torch.Tensor): Predicted joint representation of classification
-             and quality (IoU) estimation with shape (N, C), C is the number of
-             classes.
-         target (tuple([torch.Tensor])): Target category label with shape (N,)
-             and target quality label with shape (N,).
-         beta (float): The beta parameter for calculating the modulating factor.
-             Defaults to 2.0.
-
-     Returns:
-         torch.Tensor: Loss tensor with shape (N,).
-     """
-     assert len(target) == 2, """target for QFL must be a tuple of two elements,
-         including category label and quality label, respectively"""
-     # label denotes the category id, score denotes the quality score
-     label, score = target
-
-     # negatives are supervised by 0 quality score
-     pred_sigmoid = pred.sigmoid()
-     scale_factor = pred_sigmoid
-     zerolabel = scale_factor.new_zeros(pred.shape)
-     loss = F.binary_cross_entropy_with_logits(
-         pred, zerolabel, reduction='none') * scale_factor.pow(beta)
-
-     # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
-     bg_class_ind = pred.size(1)
-     pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
-     pos_label = label[pos].long()
-     # positives are supervised by bbox quality (IoU) score
-     scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
-     loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
-         pred[pos, pos_label], score[pos],
-         reduction='none') * scale_factor.abs().pow(beta)
-
-     loss = loss.sum(dim=1, keepdim=False)
-     return loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def distribution_focal_loss(pred, label):
-     r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
-     Qualified and Distributed Bounding Boxes for Dense Object Detection
-     <https://arxiv.org/abs/2006.04388>`_.
-
-     Args:
-         pred (torch.Tensor): Predicted general distribution of bounding boxes
-             (before softmax) with shape (N, n+1), n is the max value of the
-             integral set `{0, ..., n}` in paper.
-         label (torch.Tensor): Target distance label for bounding boxes with
-             shape (N,).
-
-     Returns:
-         torch.Tensor: Loss tensor with shape (N,).
-     """
-     dis_left = label.long()
-     dis_right = dis_left + 1
-     weight_left = dis_right.float() - label
-     weight_right = label - dis_left.float()
-     loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
-         + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
-     return loss
-
-
- @LOSSES.register_module()
- class QualityFocalLoss(nn.Module):
-     r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
-     Learning Qualified and Distributed Bounding Boxes for Dense Object
-     Detection <https://arxiv.org/abs/2006.04388>`_.
-
-     Args:
-         use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
-             Defaults to True.
-         beta (float): The beta parameter for calculating the modulating factor.
-             Defaults to 2.0.
-         reduction (str): Options are "none", "mean" and "sum".
-         loss_weight (float): Loss weight of current loss.
-     """
-
-     def __init__(self,
-                  use_sigmoid=True,
-                  beta=2.0,
-                  reduction='mean',
-                  loss_weight=1.0):
-         super(QualityFocalLoss, self).__init__()
-         assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
-         self.use_sigmoid = use_sigmoid
-         self.beta = beta
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-
-     def forward(self,
-                 pred,
-                 target,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None):
-         """Forward function.
-
-         Args:
-             pred (torch.Tensor): Predicted joint representation of
-                 classification and quality (IoU) estimation with shape (N, C),
-                 C is the number of classes.
-             target (tuple([torch.Tensor])): Target category label with shape
-                 (N,) and target quality label with shape (N,).
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-         if self.use_sigmoid:
-             loss_cls = self.loss_weight * quality_focal_loss(
-                 pred,
-                 target,
-                 weight,
-                 beta=self.beta,
-                 reduction=reduction,
-                 avg_factor=avg_factor)
-         else:
-             raise NotImplementedError
-         return loss_cls
-
-
- @LOSSES.register_module()
- class DistributionFocalLoss(nn.Module):
-     r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
-     Learning Qualified and Distributed Bounding Boxes for Dense Object
-     Detection <https://arxiv.org/abs/2006.04388>`_.
-
-     Args:
-         reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
-         loss_weight (float): Loss weight of current loss.
-     """
-
-     def __init__(self, reduction='mean', loss_weight=1.0):
-         super(DistributionFocalLoss, self).__init__()
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-
-     def forward(self,
-                 pred,
-                 target,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None):
-         """Forward function.
-
-         Args:
-             pred (torch.Tensor): Predicted general distribution of bounding
-                 boxes (before softmax) with shape (N, n+1), n is the max value
-                 of the integral set `{0, ..., n}` in paper.
-             target (torch.Tensor): Target distance label for bounding boxes
-                 with shape (N,).
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-         loss_cls = self.loss_weight * distribution_focal_loss(
-             pred, target, weight, reduction=reduction, avg_factor=avg_factor)
-         return loss_cls
 
spaces/AngoHF/ANGO-Leaderboard/components/top.py DELETED
@@ -1,13 +0,0 @@
- import gradio as gr
-
- from assets.content import TITLE, INTRODUCTION_TEXT
- from assets.path import SEASON
-
-
- def create_top():
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-     with gr.Row():
-         season_dropdown = gr.Dropdown(choices=list(SEASON), value="latest", label="Season Select")
-         language_dropdown = gr.Dropdown(choices=['en', 'zh'], value='en', label='Language Select')
-     return {"season": season_dropdown, "language": language_dropdown}
 
spaces/AnimalEquality/chatbot/app.py DELETED
@@ -1,21 +0,0 @@
- from lv_recipe_chatbot.app import create_demo, ConversationBot
- from lv_recipe_chatbot.ingredient_vision import (
-     VeganIngredientFinder,
-     BlipImageCaptioning,
- )
- import os
-
-
- # for Hugging Face
-
- if __name__ == "__main__":
-     vegan_ingred_finder = VeganIngredientFinder()
-     img_cap = BlipImageCaptioning("cpu")
-     demo = create_demo(
-         ConversationBot(
-             vegan_ingred_finder=vegan_ingred_finder, img_cap=img_cap, verbose=True
-         )
-     )
-     demo.launch(
-         auth=(os.environ["GRADIO_DEMO_USERNAME"], os.environ["GRADIO_DEMO_PASSWORD"])
-     )
 
spaces/Anindya/Marketing_Campaign_LLM/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Marketing Campaign LLM
- emoji: 📚
- colorFrom: red
- colorTo: yellow
- sdk: streamlit
- sdk_version: 1.26.0
- app_file: app.py
- pinned: false
- ---
-
- # Marketing_Campaign_LLM
- Simple Marketing Campaign app using LLM
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/main.py DELETED
@@ -1,382 +0,0 @@
- import io
- import logging
- import os
- import shutil
- import sys
- import tempfile
- from collections import OrderedDict
- from contextlib import contextmanager
- from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Tuple,
-                     Union)
-
- from .parser import Binding, parse_stream
- from .variables import parse_variables
-
- # A type alias for a string path to be used for the paths in this file.
- # These paths may flow to `open()` and `shutil.move()`; `shutil.move()`
- # only accepts string paths, not byte paths or file descriptors. See
- # https://github.com/python/typeshed/pull/6832.
- StrPath = Union[str, 'os.PathLike[str]']
-
- logger = logging.getLogger(__name__)
-
-
- def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding]:
-     for mapping in mappings:
-         if mapping.error:
-             logger.warning(
-                 "Python-dotenv could not parse statement starting at line %s",
-                 mapping.original.line,
-             )
-         yield mapping
-
-
- class DotEnv:
-     def __init__(
-         self,
-         dotenv_path: Optional[StrPath],
-         stream: Optional[IO[str]] = None,
-         verbose: bool = False,
-         encoding: Optional[str] = None,
-         interpolate: bool = True,
-         override: bool = True,
-     ) -> None:
-         self.dotenv_path: Optional[StrPath] = dotenv_path
-         self.stream: Optional[IO[str]] = stream
-         self._dict: Optional[Dict[str, Optional[str]]] = None
-         self.verbose: bool = verbose
-         self.encoding: Optional[str] = encoding
-         self.interpolate: bool = interpolate
-         self.override: bool = override
-
-     @contextmanager
-     def _get_stream(self) -> Iterator[IO[str]]:
-         if self.dotenv_path and os.path.isfile(self.dotenv_path):
-             with open(self.dotenv_path, encoding=self.encoding) as stream:
-                 yield stream
-         elif self.stream is not None:
-             yield self.stream
-         else:
-             if self.verbose:
-                 logger.info(
-                     "Python-dotenv could not find configuration file %s.",
-                     self.dotenv_path or '.env',
-                 )
-             yield io.StringIO('')
-
-     def dict(self) -> Dict[str, Optional[str]]:
-         """Return dotenv as dict"""
-         if self._dict:
-             return self._dict
-
-         raw_values = self.parse()
-
-         if self.interpolate:
-             self._dict = OrderedDict(resolve_variables(raw_values, override=self.override))
-         else:
-             self._dict = OrderedDict(raw_values)
-
-         return self._dict
-
-     def parse(self) -> Iterator[Tuple[str, Optional[str]]]:
-         with self._get_stream() as stream:
-             for mapping in with_warn_for_invalid_lines(parse_stream(stream)):
-                 if mapping.key is not None:
-                     yield mapping.key, mapping.value
-
-     def set_as_environment_variables(self) -> bool:
-         """
-         Load the current dotenv as system environment variables.
-         """
-         if not self.dict():
-             return False
-
-         for k, v in self.dict().items():
-             if k in os.environ and not self.override:
-                 continue
-             if v is not None:
-                 os.environ[k] = v
-
-         return True
-
-     def get(self, key: str) -> Optional[str]:
-         """
-         Return the value for `key`, or None if the key is not set.
-         """
-         data = self.dict()
-
-         if key in data:
-             return data[key]
-
-         if self.verbose:
-             logger.warning("Key %s not found in %s.", key, self.dotenv_path)
-
-         return None
-
-
- def get_key(
-     dotenv_path: StrPath,
-     key_to_get: str,
-     encoding: Optional[str] = "utf-8",
- ) -> Optional[str]:
-     """
-     Get the value of a given key from the given .env.
-
-     Returns `None` if the key isn't found or doesn't have a value.
-     """
-     return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get)
-
-
- @contextmanager
- def rewrite(
-     path: StrPath,
-     encoding: Optional[str],
- ) -> Iterator[Tuple[IO[str], IO[str]]]:
-     if not os.path.isfile(path):
-         with open(path, mode="w", encoding=encoding) as source:
-             source.write("")
-     with tempfile.NamedTemporaryFile(mode="w", encoding=encoding, delete=False) as dest:
-         try:
-             with open(path, encoding=encoding) as source:
-                 yield (source, dest)
-         except BaseException:
-             os.unlink(dest.name)
-             raise
-     shutil.move(dest.name, path)
-
-
- def set_key(
-     dotenv_path: StrPath,
-     key_to_set: str,
-     value_to_set: str,
-     quote_mode: str = "always",
-     export: bool = False,
-     encoding: Optional[str] = "utf-8",
- ) -> Tuple[Optional[bool], str, str]:
-     """
-     Add or update a key/value pair in the given .env file.
-
-     If the given .env path doesn't exist, an empty file is created first
-     (see `rewrite` above) rather than failing.
-     """
-     if quote_mode not in ("always", "auto", "never"):
-         raise ValueError(f"Unknown quote_mode: {quote_mode}")
-
-     quote = (
-         quote_mode == "always"
-         or (quote_mode == "auto" and not value_to_set.isalnum())
-     )
-
-     if quote:
-         value_out = "'{}'".format(value_to_set.replace("'", "\\'"))
-     else:
-         value_out = value_to_set
-     if export:
-         line_out = f'export {key_to_set}={value_out}\n'
-     else:
-         line_out = f"{key_to_set}={value_out}\n"
-
-     with rewrite(dotenv_path, encoding=encoding) as (source, dest):
-         replaced = False
-         missing_newline = False
-         for mapping in with_warn_for_invalid_lines(parse_stream(source)):
-             if mapping.key == key_to_set:
-                 dest.write(line_out)
-                 replaced = True
-             else:
-                 dest.write(mapping.original.string)
-                 missing_newline = not mapping.original.string.endswith("\n")
-         if not replaced:
-             if missing_newline:
-                 dest.write("\n")
-             dest.write(line_out)
-
-     return True, key_to_set, value_to_set
-
-
- def unset_key(
-     dotenv_path: StrPath,
-     key_to_unset: str,
-     quote_mode: str = "always",
-     encoding: Optional[str] = "utf-8",
- ) -> Tuple[Optional[bool], str]:
-     """
-     Remove a given key from the given `.env` file.
-
-     If the given .env path doesn't exist, fails.
-     If the given key doesn't exist in the .env, fails.
-     """
-     if not os.path.exists(dotenv_path):
-         logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path)
-         return None, key_to_unset
-
-     removed = False
-     with rewrite(dotenv_path, encoding=encoding) as (source, dest):
-         for mapping in with_warn_for_invalid_lines(parse_stream(source)):
-             if mapping.key == key_to_unset:
-                 removed = True
-             else:
-                 dest.write(mapping.original.string)
-
-     if not removed:
-         logger.warning("Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path)
-         return None, key_to_unset
-
-     return removed, key_to_unset
-
-
- def resolve_variables(
-     values: Iterable[Tuple[str, Optional[str]]],
-     override: bool,
- ) -> Mapping[str, Optional[str]]:
-     new_values: Dict[str, Optional[str]] = {}
-
-     for (name, value) in values:
-         if value is None:
-             result = None
-         else:
-             atoms = parse_variables(value)
-             env: Dict[str, Optional[str]] = {}
-             if override:
-                 env.update(os.environ)  # type: ignore
-                 env.update(new_values)
-             else:
-                 env.update(new_values)
-                 env.update(os.environ)  # type: ignore
-             result = "".join(atom.resolve(env) for atom in atoms)
-
-         new_values[name] = result
-
-     return new_values
-
-
- def _walk_to_root(path: str) -> Iterator[str]:
-     """
-     Yield directories starting from the given directory up to the root.
-     """
-     if not os.path.exists(path):
-         raise IOError('Starting path not found')
-
-     if os.path.isfile(path):
-         path = os.path.dirname(path)
-
-     last_dir = None
-     current_dir = os.path.abspath(path)
-     while last_dir != current_dir:
-         yield current_dir
-         parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
-         last_dir, current_dir = current_dir, parent_dir
-
-
- def find_dotenv(
-     filename: str = '.env',
-     raise_error_if_not_found: bool = False,
-     usecwd: bool = False,
- ) -> str:
-     """
-     Search in increasingly higher folders for the given file.
-
-     Returns the path to the file if found, or an empty string otherwise.
-     """
-
-     def _is_interactive():
-         """ Decide whether this is running in a REPL or IPython notebook """
-         main = __import__('__main__', None, None, fromlist=['__file__'])
-         return not hasattr(main, '__file__')
-
-     if usecwd or _is_interactive() or getattr(sys, 'frozen', False):
-         # Should work without __file__, e.g. in REPL or IPython notebook.
-         path = os.getcwd()
-     else:
-         # will work for .py files
-         frame = sys._getframe()
-         current_file = __file__
-
-         while frame.f_code.co_filename == current_file:
-             assert frame.f_back is not None
-             frame = frame.f_back
-         frame_filename = frame.f_code.co_filename
-         path = os.path.dirname(os.path.abspath(frame_filename))
-
-     for dirname in _walk_to_root(path):
-         check_path = os.path.join(dirname, filename)
-         if os.path.isfile(check_path):
-             return check_path
-
-     if raise_error_if_not_found:
-         raise IOError('File not found')
-
-     return ''
-
-
- def load_dotenv(
-     dotenv_path: Optional[StrPath] = None,
-     stream: Optional[IO[str]] = None,
-     verbose: bool = False,
-     override: bool = False,
-     interpolate: bool = True,
-     encoding: Optional[str] = "utf-8",
- ) -> bool:
-     """Parse a .env file and then load all the variables found as environment variables.
-
-     Parameters:
-         dotenv_path: Absolute or relative path to .env file.
-         stream: Text stream (such as `io.StringIO`) with .env content, used if
-             `dotenv_path` is `None`.
-         verbose: Whether to output a warning if the .env file is missing.
-         override: Whether to override the system environment variables with the variables
-             from the `.env` file.
-         encoding: Encoding to be used to read the file.
-     Returns:
-         Bool: True if at least one environment variable is set else False
-
-     If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
-     .env file.
-     """
-     if dotenv_path is None and stream is None:
-         dotenv_path = find_dotenv()
-
-     dotenv = DotEnv(
-         dotenv_path=dotenv_path,
-         stream=stream,
-         verbose=verbose,
-         interpolate=interpolate,
-         override=override,
-         encoding=encoding,
-     )
-     return dotenv.set_as_environment_variables()
-
-
- def dotenv_values(
-     dotenv_path: Optional[StrPath] = None,
-     stream: Optional[IO[str]] = None,
-     verbose: bool = False,
-     interpolate: bool = True,
-     encoding: Optional[str] = "utf-8",
- ) -> Dict[str, Optional[str]]:
-     """
-     Parse a .env file and return its content as a dict.
-
-     The returned dict will have `None` values for keys without values in the .env file.
-     For example, `foo=bar` results in `{"foo": "bar"}` whereas `foo` alone results in
-     `{"foo": None}`.
-
-     Parameters:
-         dotenv_path: Absolute or relative path to the .env file.
-         stream: `StringIO` object with .env content, used if `dotenv_path` is `None`.
-         verbose: Whether to output a warning if the .env file is missing.
-         encoding: Encoding to be used to read the file.
-
-     If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
-     .env file.
-     """
-     if dotenv_path is None and stream is None:
-         dotenv_path = find_dotenv()
-
-     return DotEnv(
-         dotenv_path=dotenv_path,
-         stream=stream,
-         verbose=verbose,
-         interpolate=interpolate,
-         override=True,
-         encoding=encoding,
-     ).dict()
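The two public entry points above, `load_dotenv` and `dotenv_values`, are the usual way in. A short, self-contained usage sketch (the file contents are invented for illustration):

import os
import tempfile

from dotenv import dotenv_values, load_dotenv

# Write a tiny .env file, then read it both ways.
with tempfile.NamedTemporaryFile("w", suffix=".env", delete=False) as f:
    f.write("GREETING=hello\nEMPTY\n")

print(dotenv_values(f.name))   # {'GREETING': 'hello', 'EMPTY': None}
load_dotenv(f.name)            # exports GREETING into os.environ
print(os.environ["GREETING"])  # 'hello'
os.unlink(f.name)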
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/monotonic_align/core.py DELETED
@@ -1,35 +0,0 @@
- import numba
-
-
- @numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
- def maximum_path_jit(paths, values, t_ys, t_xs):
-     b = paths.shape[0]
-     max_neg_val = -1e9
-     for i in range(int(b)):
-         path = paths[i]
-         value = values[i]
-         t_y = t_ys[i]
-         t_x = t_xs[i]
-
-         v_prev = v_cur = 0.0
-         index = t_x - 1
-
-         for y in range(t_y):
-             for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
-                 if x == y:
-                     v_cur = max_neg_val
-                 else:
-                     v_cur = value[y - 1, x]
-                 if x == 0:
-                     if y == 0:
-                         v_prev = 0.
-                     else:
-                         v_prev = max_neg_val
-                 else:
-                     v_prev = value[y - 1, x - 1]
-                 value[y, x] += max(v_prev, v_cur)
-
-         for y in range(t_y - 1, -1, -1):
-             path[y, index] = 1
-             if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
-                 index = index - 1
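A hypothetical driver for the jitted kernel above (array names and shapes are assumptions, following how VITS-style models call it): the float map is consumed in place, and the int map receives a 0/1 monotonic alignment path.

import numpy as np

b, t_y, t_x = 1, 6, 4  # one batch item: 6 mel frames, 4 text tokens
neg_cent = np.random.randn(b, t_y, t_x).astype(np.float32)  # likelihood map, mutated in place
path = np.zeros((b, t_y, t_x), dtype=np.int32)

maximum_path_jit(path, neg_cent,
                 np.array([t_y], dtype=np.int32),
                 np.array([t_x], dtype=np.int32))
print(path[0])  # one 1 per row; the column index never decreases down the rows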
 
spaces/Benson/text-generation/Examples/Colinas De Acero 2.md DELETED
@@ -1,92 +0,0 @@
-
- <h1>Hills of Steel 2: A physics-based tank game with real-time 3vs3 team battles</h1>
- <p>If you are looking for a fun and addictive tank game that you can play with your friends or other players online, you should check out Hills of Steel 2. It is a physics-based tank game and the sequel to the popular Hills of Steel. In this game you can choose from 18 different tanks, each with its own unique abilities and items, and compete in real-time 3vs3 team battles across various hills. You can also join or create a clan, take part in eight online events, climb the leaderboards, earn free rewards, and chat with other players on the active Discord server. In this article we will tell you more about the features, gameplay, pros and cons, and FAQs of Hills of Steel 2.</p>
- <h2>Hills of Steel 2</h2><br /><p><b><b>Download File</b> &#10003; <a href="https://bltlly.com/2v6LXY">https://bltlly.com/2v6LXY</a></b></p><br /><br />
- <h2>Features of Hills of Steel 2</h2>
- <p>Hills of Steel 2 is a free-to-play game that offers many features for tank lovers. Here are some of them:</p>
- <h3>Clans</h3>
- <p>You can create your own clan or join an existing one and compete with other clans on the leaderboards. You can also chat with your clan members, invite them to play with you, and share your best moments. Clans are a great way to make new friends and have more fun in the game.</p>
- <h3>Events</h3>
- <p>You can take part in eight online events with different modes and objectives. Some of the events are:</p>
- <ul>
- <li>Team survival: The last team standing wins.</li>
- <li>Bunker bash: Destroy the enemy bunker before they destroy yours.</li>
- <li>Star capture: Collect as many stars as possible while dodging enemy fire.</li>
- <li>Boss battle: Team up with other players to defeat a powerful boss tank.</li>
- <li>Rare duel: Fight another player using a rare tank.</li>
- <li>Epic duel: Fight another player using an epic tank.</li>
- <li>Domination: Capture and hold as many flags as possible.</li>
- <li>Rampage: Destroy as many enemy tanks as possible within a time limit.</li>
- </ul>
-
- <h3>Tanks</h3>
- <p>You can unlock and customize 18 unique tanks with different abilities and items. Some of the tanks are:</p>
- <p></p>
- <ul>
- <li>Joker: A small tank that packs a big punch.</li>
- <li>Morty: A bomb-lobbing healer tank.</li>
- <li>Stinger: A tank that wreaks havoc with rocket salvos.</li>
- <li>Buck: A fierce short-range fighter.</li>
- <li>Titan: A big, sturdy tank that can call in air strikes.</li>
- <li>Wally: A tank that drills through enemy lines.</li>
- <li>Sparky: A supercharged lightning tank.</li>
- <li>Ninja: A stealth tank with a deadly sword.</li>
- <li>Gatlyn: A fast-firing tank with deployable turrets.</li>
- <li>Phoenix: A fiery tank that can rise again from its ashes.</li>
- <li>Reaper: A deadly tank that can harvest souls.</li>
- <li>Arachno: A spider-like tank that can lay mines and webs.</li>
- <li>Blaze: A flamethrower tank that can set enemies on fire.</li>
- <li>Frosty: An icy tank that can freeze enemies and raise ice walls.</li>
- <li>Thor: A thundering tank that can summon lightning and storms.</li>
- <li>Draco: A dragon-like tank that can breathe fire and fly.</li>
- <li>Escorpio: A scorpion-like tank that can sting enemies and burrow underground.</li>
- </ul>
- <p>You can upgrade your tanks with coins and gems, and equip them with different items such as shields, magnets, boosters, and more. You can also change the look of your tanks with skins and stickers. Tanks are a great way to express your personality and style in the game.</p>
- <h3>Leaderboards</h3>
- <p>You can climb the ranks and become the best in your country or in the world by winning battles and earning trophies. You can also compare your stats and achievements with other players and see how you stack up. Leaderboards are a great way to challenge yourself and show off your skills in the game.</p>
- <h3>Rewards</h3>
- <p>You can earn free rewards such as coins, gems, chests, and skins simply by playing every day and completing events. Rewards are a great way to progress faster in the game.</p>
- <h3>Community</h3>
- <p>You can chat with other players on the active Discord server, where you will find tips, guides, news, updates, memes, fan art, and more. You can also join the official Facebook page and Instagram account to see the latest posts from the developers and other players. The community is a great way to connect with other fans and stay up to date on the game.</p>
- <h2>How to play Hills of Steel 2</h2>
- <p>Hills of Steel 2 is a physics-based tank game that is easy to learn but hard to master. Here are some basic instructions on how to play:</p>
- <h3>Controls</h3>
- <p>You control your tank with two on-screen buttons: one to move forward or backward, and one to aim and fire. You can also tap your tank to activate its special ability or item. The control sensitivity can be adjusted in the settings menu.</p>
- <h3>Strategy</h3>
- <p>You can improve your chances of winning by following a few simple tips and tricks:</p>
- <ul>
- <li>Choose a tank that suits your play style and the event mode. For example, if you like being aggressive and dealing lots of damage, you may want to use Buck or Stinger. If you prefer supporting and healing your teammates, you may want to use Morty or Phoenix.</li>
- <li>Use the terrain to your advantage. For example, you can hide behind hills or obstacles to avoid enemy fire, or use ramps and slopes to build momentum or jump over enemies.</li>
- <li>Work together with your teammates. For example, you can coordinate your attacks, cover each other's backs, or share items and abilities.</li>
- <li>Be aware of your surroundings. For example, keep an eye on enemy movements, projectiles, mines, flags, stars, and other objects on the map.</li>
- <li>Have fun and experiment. For example, you can try different combinations of tanks, items, skins, stickers, and strategies to see what works best for you.</li>
- </ul>
- <h2>Pros and cons of Hills of Steel 2</h2>
-
- <h3>Pros</h3>
- <ul>
- <li>The game has colorful graphics and smooth animations that make it visually appealing.</li>
- <li>The game has realistic physics and dynamic gameplay that make it challenging and exciting.</li>
- <li>The game has a wide variety of tanks, items, skins, stickers, events, modes, and maps.</li>
- <li>The game has plenty of features, rewards, and updates that make it rewarding and engaging.</li>
- <li>The game has a friendly and active community that makes it social and fun.</li>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li>The game can be frustrating and unfair at times because of lag, glitches, hackers, or unbalanced tanks and items.</li>
- <li>The game can become repetitive and boring after a while because of the lack of variety or innovation.</li>
- <li>The game can get expensive and pay-to-win if you want to unlock or upgrade everything faster or more easily.</li>
- <li>The game can be addictive and unhealthy if you play it too much or neglect other parts of your life.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Hills of Steel 2 is a physics-based tank game and the sequel to the popular Hills of Steel. It is a free-to-play game that offers many features for tank lovers, such as clans, events, tanks, leaderboards, rewards, and a community. It is a fun and addictive game that you can play with your friends or other players online in real-time 3vs3 team battles across various hills. However, it also has some drawbacks, such as lag, glitches, hackers, unbalanced tanks or items, repetitiveness, expense, and addictiveness. We therefore recommend you try it for yourself and see whether you like it. You can download it for free from the Google Play Store or the App Store.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions and answers about Hills of Steel 2:</p>
- <ol>
- <li>Q: How can I unlock more tanks in the game? <br>A: You can unlock more tanks by reaching certain trophy levels, opening chests, taking part in events, or buying them with gems.</li>
- <li>Q: How can I upgrade my tanks in the game? <br>A: You can upgrade your tanks by spending coins and gems on them. You can also equip them with different items that can be bought with coins or gems.</li>
- <li>Q: How can I change the appearance of my tanks in the game? <br>A: You can change the appearance of your tanks by applying skins and stickers, which can be unlocked from chests, the season road, the trophy road, and events, or bought with gems.</li>
- <li>Q: How can I contact the developers or report a problem in the game? <br>A: You can contact the developers or report a problem by sending an email to [email protected] or joining their Discord server at https://discord.gg/hillsofsteel2.</li>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/data/faceshq.py DELETED
@@ -1,134 +0,0 @@
- import os
- import numpy as np
- import albumentations
- from torch.utils.data import Dataset
-
- from taming.data.base import ImagePaths, NumpyPaths, ConcatDatasetWithIndex
-
-
- class FacesBase(Dataset):
-     def __init__(self, *args, **kwargs):
-         super().__init__()
-         self.data = None
-         self.keys = None
-
-     def __len__(self):
-         return len(self.data)
-
-     def __getitem__(self, i):
-         example = self.data[i]
-         ex = {}
-         if self.keys is not None:
-             for k in self.keys:
-                 ex[k] = example[k]
-         else:
-             ex = example
-         return ex
-
-
- class CelebAHQTrain(FacesBase):
-     def __init__(self, size, keys=None):
-         super().__init__()
-         root = "data/celebahq"
-         with open("data/celebahqtrain.txt", "r") as f:
-             relpaths = f.read().splitlines()
-         paths = [os.path.join(root, relpath) for relpath in relpaths]
-         self.data = NumpyPaths(paths=paths, size=size, random_crop=False)
-         self.keys = keys
-
-
- class CelebAHQValidation(FacesBase):
-     def __init__(self, size, keys=None):
-         super().__init__()
-         root = "data/celebahq"
-         with open("data/celebahqvalidation.txt", "r") as f:
-             relpaths = f.read().splitlines()
-         paths = [os.path.join(root, relpath) for relpath in relpaths]
-         self.data = NumpyPaths(paths=paths, size=size, random_crop=False)
-         self.keys = keys
-
-
- class FFHQTrain(FacesBase):
-     def __init__(self, size, keys=None):
-         super().__init__()
-         root = "data/ffhq"
-         with open("data/ffhqtrain.txt", "r") as f:
-             relpaths = f.read().splitlines()
-         paths = [os.path.join(root, relpath) for relpath in relpaths]
-         self.data = ImagePaths(paths=paths, size=size, random_crop=False)
-         self.keys = keys
-
-
- class FFHQValidation(FacesBase):
-     def __init__(self, size, keys=None):
-         super().__init__()
-         root = "data/ffhq"
-         with open("data/ffhqvalidation.txt", "r") as f:
-             relpaths = f.read().splitlines()
-         paths = [os.path.join(root, relpath) for relpath in relpaths]
-         self.data = ImagePaths(paths=paths, size=size, random_crop=False)
-         self.keys = keys
-
-
- class FacesHQTrain(Dataset):
-     # CelebAHQ [0] + FFHQ [1]
-     def __init__(self, size, keys=None, crop_size=None, coord=False):
-         d1 = CelebAHQTrain(size=size, keys=keys)
-         d2 = FFHQTrain(size=size, keys=keys)
-         self.data = ConcatDatasetWithIndex([d1, d2])
-         self.coord = coord
-         if crop_size is not None:
-             self.cropper = albumentations.RandomCrop(height=crop_size, width=crop_size)
-             if self.coord:
-                 self.cropper = albumentations.Compose([self.cropper],
-                                                       additional_targets={"coord": "image"})
-
-     def __len__(self):
-         return len(self.data)
-
-     def __getitem__(self, i):
-         ex, y = self.data[i]
-         if hasattr(self, "cropper"):
-             if not self.coord:
-                 out = self.cropper(image=ex["image"])
-                 ex["image"] = out["image"]
-             else:
-                 h, w, _ = ex["image"].shape
-                 coord = np.arange(h * w).reshape(h, w, 1) / (h * w)
-                 out = self.cropper(image=ex["image"], coord=coord)
-                 ex["image"] = out["image"]
-                 ex["coord"] = out["coord"]
-         ex["class"] = y
-         return ex
-
-
- class FacesHQValidation(Dataset):
-     # CelebAHQ [0] + FFHQ [1]
-     def __init__(self, size, keys=None, crop_size=None, coord=False):
-         d1 = CelebAHQValidation(size=size, keys=keys)
-         d2 = FFHQValidation(size=size, keys=keys)
-         self.data = ConcatDatasetWithIndex([d1, d2])
-         self.coord = coord
-         if crop_size is not None:
-             self.cropper = albumentations.CenterCrop(height=crop_size, width=crop_size)
-             if self.coord:
-                 self.cropper = albumentations.Compose([self.cropper],
-                                                       additional_targets={"coord": "image"})
-
-     def __len__(self):
-         return len(self.data)
-
-     def __getitem__(self, i):
-         ex, y = self.data[i]
-         if hasattr(self, "cropper"):
-             if not self.coord:
-                 out = self.cropper(image=ex["image"])
-                 ex["image"] = out["image"]
-             else:
-                 h, w, _ = ex["image"].shape
-                 coord = np.arange(h * w).reshape(h, w, 1) / (h * w)
-                 out = self.cropper(image=ex["image"], coord=coord)
-                 ex["image"] = out["image"]
-                 ex["coord"] = out["coord"]
-         ex["class"] = y
-         return ex
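Illustrative only (not runnable without the CelebA-HQ/FFHQ images and filelists the constructors hard-code): a sketch of how these datasets are meant to be consumed.

train_ds = FacesHQTrain(size=256, crop_size=224, coord=True)
ex = train_ds[0]
# ex["image"]: a 224x224x3 crop; ex["coord"]: the matching coordinate map;
# ex["class"]: 0 for CelebA-HQ, 1 for FFHQ (the index from ConcatDatasetWithIndex)
print(ex["image"].shape, ex["class"])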
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/errorfactory.py DELETED
@@ -1,90 +0,0 @@
- # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # http://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- from botocore.exceptions import ClientError
- from botocore.utils import get_service_module_name
-
-
- class BaseClientExceptions:
-     ClientError = ClientError
-
-     def __init__(self, code_to_exception):
-         """Base class for exceptions object on a client
-
-         :type code_to_exception: dict
-         :param code_to_exception: Mapping of error codes (strings) to exception
-             class that should be raised when encountering a particular
-             error code.
-         """
-         self._code_to_exception = code_to_exception
-
-     def from_code(self, error_code):
-         """Retrieves the error class based on the error code
-
-         This is helpful for identifying the exception class needing to be
-         caught based on the ClientError.parsed_response['Error']['Code'] value
-
-         :type error_code: string
-         :param error_code: The error code associated to a ClientError exception
-
-         :rtype: ClientError or a subclass of ClientError
-         :returns: The appropriate modeled exception class for that error
-             code. If the error code does not match any of the known
-             modeled exceptions then return a generic ClientError.
-         """
-         return self._code_to_exception.get(error_code, self.ClientError)
-
-     def __getattr__(self, name):
-         exception_cls_names = [
-             exception_cls.__name__
-             for exception_cls in self._code_to_exception.values()
-         ]
-         raise AttributeError(
-             fr"{self} object has no attribute {name}. "
-             fr"Valid exceptions are: {', '.join(exception_cls_names)}"
-         )
-
-
- class ClientExceptionsFactory:
-     def __init__(self):
-         self._client_exceptions_cache = {}
-
-     def create_client_exceptions(self, service_model):
-         """Creates a ClientExceptions object for the particular service client
-
-         :type service_model: botocore.model.ServiceModel
-         :param service_model: The service model for the client
-
-         :rtype: object that subclasses from BaseClientExceptions
-         :returns: The exceptions object of a client that can be used
-             to grab the various different modeled exceptions.
-         """
-         service_name = service_model.service_name
-         if service_name not in self._client_exceptions_cache:
-             client_exceptions = self._create_client_exceptions(service_model)
-             self._client_exceptions_cache[service_name] = client_exceptions
-         return self._client_exceptions_cache[service_name]
-
-     def _create_client_exceptions(self, service_model):
-         cls_props = {}
-         code_to_exception = {}
-         for error_shape in service_model.error_shapes:
-             exception_name = str(error_shape.name)
-             exception_cls = type(exception_name, (ClientError,), {})
-             cls_props[exception_name] = exception_cls
-             code = str(error_shape.error_code)
-             code_to_exception[code] = exception_cls
-         cls_name = str(get_service_module_name(service_model) + 'Exceptions')
-         client_exceptions_cls = type(
-             cls_name, (BaseClientExceptions,), cls_props
-         )
-         return client_exceptions_cls(code_to_exception)
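A sketch of how this factory surfaces in everyday boto3 code (requires AWS credentials and network access; the bucket name is invented): `client.exceptions` is an instance of the generated class, and `from_code` maps a raw error code back to its modeled exception type.

import boto3

s3 = boto3.client("s3")
try:
    s3.head_bucket(Bucket="some-bucket-that-does-not-exist-12345")
except s3.exceptions.ClientError as err:
    code = err.response["Error"]["Code"]
    print("modeled class:", s3.exceptions.from_code(code).__name__)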
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/idna/__init__.py DELETED
@@ -1,44 +0,0 @@
- from .package_data import __version__
- from .core import (
-     IDNABidiError,
-     IDNAError,
-     InvalidCodepoint,
-     InvalidCodepointContext,
-     alabel,
-     check_bidi,
-     check_hyphen_ok,
-     check_initial_combiner,
-     check_label,
-     check_nfc,
-     decode,
-     encode,
-     ulabel,
-     uts46_remap,
-     valid_contextj,
-     valid_contexto,
-     valid_label_length,
-     valid_string_length,
- )
- from .intranges import intranges_contain
-
- __all__ = [
-     "IDNABidiError",
-     "IDNAError",
-     "InvalidCodepoint",
-     "InvalidCodepointContext",
-     "alabel",
-     "check_bidi",
-     "check_hyphen_ok",
-     "check_initial_combiner",
-     "check_label",
-     "check_nfc",
-     "decode",
-     "encode",
-     "intranges_contain",
-     "ulabel",
-     "uts46_remap",
-     "valid_contextj",
-     "valid_contexto",
-     "valid_label_length",
-     "valid_string_length",
- ]
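The package's core round-trip, for reference (the examples come from idna's own documentation):

import idna

print(idna.encode("ドメイン.テスト"))           # b'xn--eckwd4c7c.xn--zckzah'
print(idna.decode("xn--eckwd4c7c.xn--zckzah"))  # 'ドメイン.テスト'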
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/log.py DELETED
@@ -1,80 +0,0 @@
- """A simple log mechanism styled after PEP 282."""
-
- # The class here is styled after PEP 282 so that it could later be
- # replaced with a standard Python logging implementation.
-
- import sys
-
- DEBUG = 1
- INFO = 2
- WARN = 3
- ERROR = 4
- FATAL = 5
-
-
- class Log:
-     def __init__(self, threshold=WARN):
-         self.threshold = threshold
-
-     def _log(self, level, msg, args):
-         if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
-             raise ValueError('%s wrong log level' % str(level))
-
-         if level >= self.threshold:
-             if args:
-                 msg = msg % args
-             if level in (WARN, ERROR, FATAL):
-                 stream = sys.stderr
-             else:
-                 stream = sys.stdout
-             try:
-                 stream.write('%s\n' % msg)
-             except UnicodeEncodeError:
-                 # emulate backslashreplace error handler
-                 encoding = stream.encoding
-                 msg = msg.encode(encoding, "backslashreplace").decode(encoding)
-                 stream.write('%s\n' % msg)
-             stream.flush()
-
-     def log(self, level, msg, *args):
-         self._log(level, msg, args)
-
-     def debug(self, msg, *args):
-         self._log(DEBUG, msg, args)
-
-     def info(self, msg, *args):
-         self._log(INFO, msg, args)
-
-     def warn(self, msg, *args):
-         self._log(WARN, msg, args)
-
-     def error(self, msg, *args):
-         self._log(ERROR, msg, args)
-
-     def fatal(self, msg, *args):
-         self._log(FATAL, msg, args)
-
-
- _global_log = Log()
- log = _global_log.log
- debug = _global_log.debug
- info = _global_log.info
- warn = _global_log.warn
- error = _global_log.error
- fatal = _global_log.fatal
-
-
- def set_threshold(level):
-     # return the old threshold for use from tests
-     old = _global_log.threshold
-     _global_log.threshold = level
-     return old
-
-
- def set_verbosity(v):
-     if v <= 0:
-         set_threshold(WARN)
-     elif v == 1:
-         set_threshold(INFO)
-     elif v >= 2:
-         set_threshold(DEBUG)
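Typical use of this module inside distutils commands, as a minimal sketch (note that distutils, and this module with it, was removed in Python 3.12):

from distutils import log

log.set_verbosity(2)                       # threshold -> DEBUG
log.debug("probing compiler %s", "msvc")   # DEBUG/INFO go to stdout
log.warn("falling back to %s", "mingw32")  # WARN and above go to stderr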
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py DELETED
@@ -1,68 +0,0 @@
- import re
- import textwrap
- import email.message
-
- from ._text import FoldedCase
-
-
- class Message(email.message.Message):
-     multiple_use_keys = set(
-         map(
-             FoldedCase,
-             [
-                 'Classifier',
-                 'Obsoletes-Dist',
-                 'Platform',
-                 'Project-URL',
-                 'Provides-Dist',
-                 'Provides-Extra',
-                 'Requires-Dist',
-                 'Requires-External',
-                 'Supported-Platform',
-                 'Dynamic',
-             ],
-         )
-     )
-     """
-     Keys that may be indicated multiple times per PEP 566.
-     """
-
-     def __new__(cls, orig: email.message.Message):
-         res = super().__new__(cls)
-         vars(res).update(vars(orig))
-         return res
-
-     def __init__(self, *args, **kwargs):
-         self._headers = self._repair_headers()
-
-     # suppress spurious error from mypy
-     def __iter__(self):
-         return super().__iter__()
-
-     def _repair_headers(self):
-         def redent(value):
-             "Correct for RFC822 indentation"
-             if not value or '\n' not in value:
-                 return value
-             return textwrap.dedent(' ' * 8 + value)
-
-         headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
-         if self._payload:
-             headers.append(('Description', self.get_payload()))
-         return headers
-
-     @property
-     def json(self):
-         """
-         Convert PackageMetadata to a JSON-compatible format
-         per PEP 0566.
-         """
-
-         def transform(key):
-             value = self.get_all(key) if key in self.multiple_use_keys else self[key]
-             if key == 'Keywords':
-                 value = re.split(r'\s+', value)
-             tk = key.lower().replace('-', '_')
-             return tk, value
-
-         return dict(map(transform, map(FoldedCase, self)))
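A quick sketch of what the adapter buys you, assuming the `Message` class above is importable; the metadata text is invented for illustration. Multiple-use keys become lists, `Keywords` is split on whitespace, and key names are normalized:

import email

raw = (
    "Metadata-Version: 2.1\n"
    "Name: demo-pkg\n"
    "Keywords: parsing metadata example\n"
    "Classifier: Programming Language :: Python\n"
    "Classifier: Topic :: Utilities\n"
)
msg = Message(email.message_from_string(raw))
print(msg.json)
# {'metadata_version': '2.1', 'name': 'demo-pkg',
#  'keywords': ['parsing', 'metadata', 'example'],
#  'classifier': ['Programming Language :: Python', 'Topic :: Utilities']}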
 
spaces/Blaise-g/summarize-biomedical-papers-long-summary-or-tldr/app.py DELETED
@@ -1,280 +0,0 @@
- import logging
- import time
- from pathlib import Path
-
- import gradio as gr
- import nltk
- from cleantext import clean
- from summarize import load_model_and_tokenizer, summarize_via_tokenbatches
- from utils import load_example_filenames, truncate_word_count
-
- _here = Path(__file__).parent
-
- nltk.download("stopwords")
-
- logging.basicConfig(
-     level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
- )
-
-
- def proc_submission(
-     input_text: str,
-     model_size: str,
-     num_beams,
-     token_batch_length,
-     length_penalty,
-     max_input_length: int = 3060,
- ):
-     """
-     proc_submission - a helper function for the gradio module to process submissions
-     Args:
-         input_text (str): the input text to summarize
-         model_size (str): the size of the model to use
-         num_beams (int): the number of beams to use
-         token_batch_length (int): the length of the token batches to use
-         length_penalty (float): the length penalty to use
-         max_input_length (int, optional): the maximum input length to use. Defaults to 3060.
-     Returns:
-         str in HTML format, string of the summary, str of score
-     """
-
-     settings_det = {
-         "length_penalty": float(length_penalty),
-         "repetition_penalty": 3.5,
-         "no_repeat_ngram_size": 3,
-         "encoder_no_repeat_ngram_size": 4,
-         "num_beams": int(num_beams),
-         "min_length": 100,
-         "max_length": 512,  # int(token_batch_length // 4)
-         "early_stopping": True,
-         "do_sample": False,
-     }
-     settings_tldr = {
-         "length_penalty": float(length_penalty),
-         "repetition_penalty": 3.5,
-         "no_repeat_ngram_size": 3,
-         "encoder_no_repeat_ngram_size": 4,
-         "num_beams": int(num_beams),
-         "min_length": 11,
-         "max_length": 62,
-         "early_stopping": True,
-         "do_sample": False,
-     }
-
-     if model_size == "tldr":
-         settings = settings_tldr
-     else:
-         settings = settings_det
-
-     st = time.perf_counter()
-     history = {}
-     clean_text = clean(input_text, extra_spaces=True, lowercase=True, reg="\b(?!(?:Although|Also)\b)(?:[A-Z][A-Za-z'`-]+)(?:,? (?:(?:and |& )?(?:[A-Z][A-Za-z'`-]+)|(?:et al.?)))*(?:, *(?:19|20)[0-9][0-9](?:, p\.? [0-9]+)?| *\((?:19|20)[0-9][0-9](?:, p\.? [0-9]+)?\))", reg_replace="")
-     # max_input_length = 2048 if model_size == "tldr" else max_input_length
-     processed = truncate_word_count(clean_text, max_input_length)
-
-     if processed["was_truncated"]:
-         tr_in = processed["truncated_text"]
-         msg = f"Input text was truncated to {max_input_length} words to fit within the computational constraints of the inference API"
-         logging.warning(msg)
-         history["WARNING"] = msg
-     else:
-         tr_in = input_text
-         msg = None
-
-     _summaries = summarize_via_tokenbatches(
-         tr_in,
-         model_sm if model_size == "tldr" else model,
-         tokenizer_sm if model_size == "tldr" else tokenizer,
-         batch_length=token_batch_length,
-         **settings,
-     )
-     sum_text = [f"Section {i}: " + s["summary"][0] for i, s in enumerate(_summaries)]
-     rates = [
-         f" - Section {i}: {round(s['compression_rate'], 3)}"
-         for i, s in enumerate(_summaries)
-     ]
-
-     sum_text_out = "\n".join(sum_text)
-     history["Compression Rates"] = "<br><br>"
-     rates_out = "\n".join(rates)
-     rt = round((time.perf_counter() - st) / 60, 2)
-     print(f"Runtime: {rt} minutes")
-     html = ""
-     html += f"<p>Runtime: {rt} minutes on CPU</p>"
-     if msg is not None:
-         html += f"<h2>WARNING:</h2><hr><b>{msg}</b><br><br>"
-
-     html += ""
-
-     return html, sum_text_out, rates_out
-
-
- def load_single_example_text(
-     example_path: str or Path,
- ):
-     """
-     load_single_example - a helper function for the gradio module to load examples
-     Returns:
-         list of str, the examples
-     """
-     global name_to_path
-     full_ex_path = name_to_path[example_path]
-     full_ex_path = Path(full_ex_path)
-     # load the examples into a list
-     with open(full_ex_path, "r", encoding="utf-8", errors="ignore") as f:
-         raw_text = f.read()
-     text = clean(raw_text, extra_spaces=True, lowercase=False)
-     return text
-
-
- def load_uploaded_file(file_obj):
-     """
-     load_uploaded_file - process an uploaded file
-     Args:
-         file_obj (POTENTIALLY list): Gradio file object inside a list
-     Returns:
-         str, the uploaded file contents
-     """
-
-     # file_path = Path(file_obj[0].name)
-
-     # check if the mysterious file object is a list
-     if isinstance(file_obj, list):
-         file_obj = file_obj[0]
-     file_path = Path(file_obj.name)
-     try:
-         with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
-             raw_text = f.read()
-         text = clean(raw_text, extra_spaces=True, lowercase=True, reg="\s(?=[\,.':;!?])", reg_replace="")
-         return text
-     except Exception as e:
-         logging.info(f"Trying to load file with path {file_path}, error: {e}")
-         return "Error: Could not read file. Ensure that it is a valid text file with encoding UTF-8."
-
-
- if __name__ == "__main__":
-
-     model, tokenizer = load_model_and_tokenizer("Blaise-g/longt5_tglobal_large_sumpubmed")
-     model_sm, tokenizer_sm = load_model_and_tokenizer("Blaise-g/longt5_tglobal_large_scitldr")
-
-     name_to_path = load_example_filenames(_here / "examples")
-     logging.info(f"Loaded {len(name_to_path)} examples")
-     demo = gr.Blocks()
-
-     with demo:
-
-         gr.Markdown("# Automatic summarization of biomedical research papers with neural abstractive methods into a long and comprehensive synopsis or extreme TLDR summary version")
-         gr.Markdown(
-             "A demo developed for my Master's Thesis project using ad-hoc fine-tuned abstractive summarization models to summarize long biomedical articles into a detailed, explanatory synopsis or extreme TLDR summary."
-         )
-         with gr.Column():
-
-             gr.Markdown("### Select Summary type and text generation parameters, then load input text")
-             gr.Markdown(
-                 "Enter text in the text area below, or alternatively load an example or upload a file."
-             )
-             with gr.Row():
-                 model_size = gr.Radio(
-                     choices=["tldr", "detailed"], label="Summary type", value="detailed"
-                 )
-                 num_beams = gr.Radio(
-                     choices=[2, 3, 4],
-                     label="Beam Search: Number of Beams",
-                     value=2,
-                 )
-             gr.Markdown(
-                 "_For optimal results use a GPU: the hosted CPU inference is slow, hinders the output summary quality, and forces the input text to be divided into batches._"
-             )
-             with gr.Row():
-                 length_penalty = gr.inputs.Slider(
-                     minimum=0.5,
-                     maximum=1.0,
-                     label="length penalty",
-                     default=0.7,
-                     step=0.05,
-                 )
-                 token_batch_length = gr.Radio(
-                     choices=[1024, 2048, 3060],
-                     label="token batch length",
-                     value=2048,
-                 )
-             with gr.Row():
-                 example_name = gr.Dropdown(
-                     list(name_to_path.keys()),
-                     label="Choose an Example",
-                 )
-                 load_examples_button = gr.Button(
-                     "Load Example",
-                 )
-             input_text = gr.Textbox(
-                 lines=6,
-                 label="Input Text (for summarization)",
-                 placeholder="Enter any scientific text to be condensed into a detailed, explanatory synopsis or TLDR summary version. The input text is divided into batches of the selected token lengths to fit within the memory constraints, pre-processed and fed into the model of choice. The models were trained to handle long scientific papers but generalize reasonably well also to shorter text documents like scientific abstracts. Might take a while to produce long summaries :)",
-             )
-             gr.Markdown("Upload your own file:")
-             with gr.Row():
-                 uploaded_file = gr.File(
-                     label="Upload a text file",
-                     file_count="single",
-                     type="file",
-                 )
-                 load_file_button = gr.Button("Load Uploaded File")
-
-         gr.Markdown("---")
-
-         with gr.Column():
-             gr.Markdown("## Generate Summary")
-             gr.Markdown(
-                 "Summary generation should take approximately 2-3 minutes for most generation settings but can take significantly more time for very long documents with a high beam number."
-             )
-             summarize_button = gr.Button(
-                 "Summarize!",
-                 variant="primary",
-             )
-
-             output_text = gr.HTML("<p><em>Output will appear below:</em></p>")
-             gr.Markdown("### Summary Output")
-             summary_text = gr.Textbox(
-                 label="Summary 📝", placeholder="The generated 📝 will appear here"
-             )
-             gr.Markdown(
-                 "The compression rate 🗜 indicates the ratio between the machine-generated summary length and the input text (from 0% to 100%). The higher the 🗜 the more extreme the summary is."
-             )
-             compression_rate = gr.Textbox(
-                 label="Compression rate 🗜", placeholder="The 🗜 will appear here"
-             )
-         gr.Markdown("---")
-
-         with gr.Column():
-             gr.Markdown("## About the Models")
-             gr.Markdown(
-                 "- [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) is a fine-tuned checkpoint of [Stancld/longt5-tglobal-large-16384-pubmed-3k_steps](https://huggingface.co/Stancld/longt5-tglobal-large-16384-pubmed-3k_steps) on the [SumPubMed dataset](https://aclanthology.org/2021.acl-srw.30/). [Blaise-g/longt5_tglobal_large_scitldr](https://huggingface.co/Blaise-g/longt5_tglobal_large_scitldr) is a fine-tuned checkpoint of [Blaise-g/longt5_tglobal_large_sumpubmed](https://huggingface.co/Blaise-g/longt5_tglobal_large_sumpubmed) on the [Scitldr dataset](https://arxiv.org/abs/2004.15011). The goal was to create two models capable of handling the complex information contained in long biomedical documents and subsequently producing scientific summaries according to one of the two possible levels of conciseness: 1) A long explanatory synopsis that retains the majority of domain-specific language used in the original source text. 2) A one sentence long, TLDR style summary."
-             )
-             gr.Markdown(
-                 "- The two most important text generation parameters are the number of beams and length penalty: 1) Choosing a higher number of beams for the beam search algorithm results in generating a summary with higher probability (hence theoretically higher quality) at the cost of increasing computation times and memory usage. 2) The length penalty encourages the model to generate longer (with values closer to 1.0) or shorter (with values closer to 0.0) summary sequences by placing an exponential penalty on the beam score according to the current sequence length.
257
- )
258
- gr.Markdown("---")
259
-
260
- load_examples_button.click(
261
- fn=load_single_example_text, inputs=[example_name], outputs=[input_text]
262
- )
263
-
264
- load_file_button.click(
265
- fn=load_uploaded_file, inputs=uploaded_file, outputs=[input_text]
266
- )
267
-
268
- summarize_button.click(
269
- fn=proc_submission,
270
- inputs=[
271
- input_text,
272
- model_size,
273
- num_beams,
274
- token_batch_length,
275
- length_penalty,
276
- ],
277
- outputs=[output_text, summary_text, compression_rate],
278
- )
279
-
280
- demo.launch(enable_queue=True, share=False)
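The "About the Models" notes above explain what the number of beams and the length penalty do. As a minimal sketch of how those two UI parameters typically map onto Hugging Face's generate() API (this helper is illustrative, not the app's own proc_submission pipeline; the max_length values are assumptions):

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Blaise-g/longt5_tglobal_large_sumpubmed")
model = AutoModelForSeq2SeqLM.from_pretrained("Blaise-g/longt5_tglobal_large_sumpubmed")

def summarize_batch(text: str, num_beams: int = 2, length_penalty: float = 0.7) -> str:
    # Truncate to one token batch; the real app splits long inputs into several batches.
    inputs = tokenizer(text, truncation=True, max_length=2048, return_tensors="pt")
    # num_beams widens the beam search; a length_penalty closer to 1.0 favours longer outputs.
    ids = model.generate(
        **inputs,
        num_beams=num_beams,
        length_penalty=length_penalty,
        max_length=512,
        early_stopping=True,
    )
    return tokenizer.decode(ids[0], skip_special_tokens=True)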
 
spaces/CVPR/LIVE/pybind11/tools/pybind11Tools.cmake DELETED
@@ -1,188 +0,0 @@
- # tools/pybind11Tools.cmake -- Build system for the pybind11 modules
- #
- # Copyright (c) 2015 Wenzel Jakob <[email protected]>
- #
- # All rights reserved. Use of this source code is governed by a
- # BSD-style license that can be found in the LICENSE file.
-
- # Built-in in CMake 3.5+
- include(CMakeParseArguments)
-
- if(pybind11_FIND_QUIETLY)
-   set(_pybind11_quiet QUIET)
- endif()
-
- # If this is the first run, PYTHON_VERSION can stand in for PYBIND11_PYTHON_VERSION
- if(NOT DEFINED PYBIND11_PYTHON_VERSION AND DEFINED PYTHON_VERSION)
-   message(WARNING "Set PYBIND11_PYTHON_VERSION to search for a specific version, not "
-                   "PYTHON_VERSION (which is an output). Assuming that is what you "
-                   "meant to do and continuing anyway.")
-   set(PYBIND11_PYTHON_VERSION
-       "${PYTHON_VERSION}"
-       CACHE STRING "Python version to use for compiling modules")
-   unset(PYTHON_VERSION)
-   unset(PYTHON_VERSION CACHE)
- else()
-   # If this is set as a normal variable, promote it, otherwise, make an empty cache variable.
-   set(PYBIND11_PYTHON_VERSION
-       "${PYBIND11_PYTHON_VERSION}"
-       CACHE STRING "Python version to use for compiling modules")
- endif()
-
- # A user can set versions manually too
- set(Python_ADDITIONAL_VERSIONS
-     "3.9;3.8;3.7;3.6;3.5;3.4"
-     CACHE INTERNAL "")
-
- list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
- find_package(PythonLibsNew ${PYBIND11_PYTHON_VERSION} MODULE REQUIRED ${_pybind11_quiet})
- list(REMOVE_AT CMAKE_MODULE_PATH -1)
-
- # Cache variables so pybind11_add_module can be used in parent projects
- set(PYTHON_INCLUDE_DIRS
-     ${PYTHON_INCLUDE_DIRS}
-     CACHE INTERNAL "")
- set(PYTHON_LIBRARIES
-     ${PYTHON_LIBRARIES}
-     CACHE INTERNAL "")
- set(PYTHON_MODULE_PREFIX
-     ${PYTHON_MODULE_PREFIX}
-     CACHE INTERNAL "")
- set(PYTHON_MODULE_EXTENSION
-     ${PYTHON_MODULE_EXTENSION}
-     CACHE INTERNAL "")
- set(PYTHON_VERSION_MAJOR
-     ${PYTHON_VERSION_MAJOR}
-     CACHE INTERNAL "")
- set(PYTHON_VERSION_MINOR
-     ${PYTHON_VERSION_MINOR}
-     CACHE INTERNAL "")
- set(PYTHON_VERSION
-     ${PYTHON_VERSION}
-     CACHE INTERNAL "")
- set(PYTHON_IS_DEBUG
-     "${PYTHON_IS_DEBUG}"
-     CACHE INTERNAL "")
-
- if(PYBIND11_MASTER_PROJECT)
-   if(PYTHON_MODULE_EXTENSION MATCHES "pypy")
-     if(NOT DEFINED PYPY_VERSION)
-       execute_process(
-         COMMAND ${PYTHON_EXECUTABLE} -c
-                 [=[import sys; print(".".join(map(str, sys.pypy_version_info[:3])))]=]
-         OUTPUT_VARIABLE pypy_version)
-       set(PYPY_VERSION
-           ${pypy_version}
-           CACHE INTERNAL "")
-     endif()
-     message(STATUS "PYPY ${PYPY_VERSION} (Py ${PYTHON_VERSION})")
-   else()
-     message(STATUS "PYTHON ${PYTHON_VERSION}")
-   endif()
- endif()
-
- # Only add Python for build - must be added during the import for config since it has to be re-discovered.
- set_property(
-   TARGET pybind11::pybind11
-   APPEND
-   PROPERTY INTERFACE_INCLUDE_DIRECTORIES $<BUILD_INTERFACE:${PYTHON_INCLUDE_DIRS}>)
-
- # Python debug libraries expose slightly different objects before 3.8
- # https://docs.python.org/3.6/c-api/intro.html#debugging-builds
- # https://stackoverflow.com/questions/39161202/how-to-work-around-missing-pymodule-create2-in-amd64-win-python35-d-lib
- if(PYTHON_IS_DEBUG)
-   set_property(
-     TARGET pybind11::pybind11
-     APPEND
-     PROPERTY INTERFACE_COMPILE_DEFINITIONS Py_DEBUG)
- endif()
-
- set_property(
-   TARGET pybind11::module
-   APPEND
-   PROPERTY
-     INTERFACE_LINK_LIBRARIES pybind11::python_link_helper
-     "$<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:Cygwin>>:$<BUILD_INTERFACE:${PYTHON_LIBRARIES}>>")
-
- if(PYTHON_VERSION VERSION_LESS 3)
-   set_property(
-     TARGET pybind11::pybind11
-     APPEND
-     PROPERTY INTERFACE_LINK_LIBRARIES pybind11::python2_no_register)
- endif()
-
- set_property(
-   TARGET pybind11::embed
-   APPEND
-   PROPERTY INTERFACE_LINK_LIBRARIES pybind11::pybind11 $<BUILD_INTERFACE:${PYTHON_LIBRARIES}>)
-
- function(pybind11_extension name)
-   # The prefix and extension are provided by FindPythonLibsNew.cmake
-   set_target_properties(${name} PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
-                                            SUFFIX "${PYTHON_MODULE_EXTENSION}")
- endfunction()
-
- # Build a Python extension module:
- # pybind11_add_module(<name> [MODULE | SHARED] [EXCLUDE_FROM_ALL]
- #                     [NO_EXTRAS] [THIN_LTO] source1 [source2 ...])
- #
- function(pybind11_add_module target_name)
-   set(options MODULE SHARED EXCLUDE_FROM_ALL NO_EXTRAS SYSTEM THIN_LTO)
-   cmake_parse_arguments(ARG "${options}" "" "" ${ARGN})
-
-   if(ARG_MODULE AND ARG_SHARED)
-     message(FATAL_ERROR "Can't be both MODULE and SHARED")
-   elseif(ARG_SHARED)
-     set(lib_type SHARED)
-   else()
-     set(lib_type MODULE)
-   endif()
-
-   if(ARG_EXCLUDE_FROM_ALL)
-     set(exclude_from_all EXCLUDE_FROM_ALL)
-   else()
-     set(exclude_from_all "")
-   endif()
-
-   add_library(${target_name} ${lib_type} ${exclude_from_all} ${ARG_UNPARSED_ARGUMENTS})
-
-   target_link_libraries(${target_name} PRIVATE pybind11::module)
-
-   if(ARG_SYSTEM)
-     message(
-       STATUS
-         "Warning: this does not have an effect - use NO_SYSTEM_FROM_IMPORTED if using imported targets"
-     )
-   endif()
-
-   pybind11_extension(${target_name})
-
-   # -fvisibility=hidden is required to allow multiple modules compiled against
-   # different pybind versions to work properly, and for some features (e.g.
-   # py::module_local). We force it on everything inside the `pybind11`
-   # namespace; also turning it on for a pybind module compilation here avoids
-   # potential warnings or issues from having mixed hidden/non-hidden types.
-   set_target_properties(${target_name} PROPERTIES CXX_VISIBILITY_PRESET "hidden"
-                                                   CUDA_VISIBILITY_PRESET "hidden")
-
-   if(ARG_NO_EXTRAS)
-     return()
-   endif()
-
-   if(NOT DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
-     if(ARG_THIN_LTO)
-       target_link_libraries(${target_name} PRIVATE pybind11::thin_lto)
-     else()
-       target_link_libraries(${target_name} PRIVATE pybind11::lto)
-     endif()
-   endif()
-
-   if(NOT MSVC AND NOT ${CMAKE_BUILD_TYPE} MATCHES Debug|RelWithDebInfo)
-     pybind11_strip(${target_name})
-   endif()
-
-   if(MSVC)
-     target_link_libraries(${target_name} PRIVATE pybind11::windows_extras)
-   endif()
-
- endfunction()
 
spaces/CVPR/LIVE/thrust/thrust/detail/numeric_traits.h DELETED
@@ -1,130 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/type_traits.h>
- #include <limits>
-
- //#include <stdint.h> // for intmax_t (not provided on MSVS 2005)
-
- namespace thrust
- {
-
- namespace detail
- {
-
- // XXX good enough for the platforms we care about
- typedef long long intmax_t;
-
- template<typename Number>
-   struct is_signed
-     : integral_constant<bool, std::numeric_limits<Number>::is_signed>
- {}; // end is_signed
-
-
- template<typename T>
-   struct num_digits
-     : eval_if<
-         std::numeric_limits<T>::is_specialized,
-         integral_constant<
-           int,
-           std::numeric_limits<T>::digits
-         >,
-         integral_constant<
-           int,
-           sizeof(T) * std::numeric_limits<unsigned char>::digits - (is_signed<T>::value ? 1 : 0)
-         >
-       >::type
- {}; // end num_digits
-
-
- template<typename Integer>
-   struct integer_difference
-     //: eval_if<
-     //    sizeof(Integer) >= sizeof(intmax_t),
-     //    eval_if<
-     //      is_signed<Integer>::value,
-     //      identity_<Integer>,
-     //      identity_<intmax_t>
-     //    >,
-     //    eval_if<
-     //      sizeof(Integer) < sizeof(std::ptrdiff_t),
-     //      identity_<std::ptrdiff_t>,
-     //      identity_<intmax_t>
-     //    >
-     //  >
- {
-   private:
-     // XXX workaround a pedantic warning in old versions of g++
-     //     which complains about &&ing with a constant value
-     template<bool x, bool y>
-       struct and_
-     {
-       static const bool value = false;
-     };
-
-     template<bool y>
-       struct and_<true, y>
-     {
-       static const bool value = y;
-     };
-
-   public:
-     typedef typename
-       eval_if<
-         and_<
-           std::numeric_limits<Integer>::is_signed,
-           // digits is the number of no-sign bits
-           (!std::numeric_limits<Integer>::is_bounded || (int(std::numeric_limits<Integer>::digits) + 1 >= num_digits<intmax_t>::value))
-         >::value,
-         identity_<Integer>,
-         eval_if<
-           int(std::numeric_limits<Integer>::digits) + 1 < num_digits<signed int>::value,
-           identity_<signed int>,
-           eval_if<
-             int(std::numeric_limits<Integer>::digits) + 1 < num_digits<signed long>::value,
-             identity_<signed long>,
-             identity_<intmax_t>
-           >
-         >
-       >::type type;
- }; // end integer_difference
-
-
- template<typename Number>
-   struct numeric_difference
-     : eval_if<
-         is_integral<Number>::value,
-         integer_difference<Number>,
-         identity_<Number>
-       >
- {}; // end numeric_difference
-
-
- template<typename Number>
- __host__ __device__
- typename numeric_difference<Number>::type
- numeric_distance(Number x, Number y)
- {
-   typedef typename numeric_difference<Number>::type difference_type;
-   return difference_type(y) - difference_type(x);
- } // end numeric_distance
-
- } // end detail
-
- } // end thrust
-
 
spaces/CVPR/WALT/mmdet/models/backbones/ssd_vgg.py DELETED
@@ -1,169 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
- from mmcv.runner import load_checkpoint
-
- from mmdet.utils import get_root_logger
- from ..builder import BACKBONES
-
-
- @BACKBONES.register_module()
- class SSDVGG(VGG):
-     """VGG Backbone network for single-shot-detection.
-
-     Args:
-         input_size (int): width and height of input, from {300, 512}.
-         depth (int): Depth of vgg, from {11, 13, 16, 19}.
-         out_indices (Sequence[int]): Output from which stages.
-
-     Example:
-         >>> self = SSDVGG(input_size=300, depth=11)
-         >>> self.eval()
-         >>> inputs = torch.rand(1, 3, 300, 300)
-         >>> level_outputs = self.forward(inputs)
-         >>> for level_out in level_outputs:
-         ...     print(tuple(level_out.shape))
-         (1, 1024, 19, 19)
-         (1, 512, 10, 10)
-         (1, 256, 5, 5)
-         (1, 256, 3, 3)
-         (1, 256, 1, 1)
-     """
-     extra_setting = {
-         300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
-         512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
-     }
-
-     def __init__(self,
-                  input_size,
-                  depth,
-                  with_last_pool=False,
-                  ceil_mode=True,
-                  out_indices=(3, 4),
-                  out_feature_indices=(22, 34),
-                  l2_norm_scale=20.):
-         # TODO: in_channels for mmcv.VGG
-         super(SSDVGG, self).__init__(
-             depth,
-             with_last_pool=with_last_pool,
-             ceil_mode=ceil_mode,
-             out_indices=out_indices)
-         assert input_size in (300, 512)
-         self.input_size = input_size
-
-         self.features.add_module(
-             str(len(self.features)),
-             nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
-         self.features.add_module(
-             str(len(self.features)),
-             nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
-         self.features.add_module(
-             str(len(self.features)), nn.ReLU(inplace=True))
-         self.features.add_module(
-             str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
-         self.features.add_module(
-             str(len(self.features)), nn.ReLU(inplace=True))
-         self.out_feature_indices = out_feature_indices
-
-         self.inplanes = 1024
-         self.extra = self._make_extra_layers(self.extra_setting[input_size])
-         self.l2_norm = L2Norm(
-             self.features[out_feature_indices[0] - 1].out_channels,
-             l2_norm_scale)
-
-     def init_weights(self, pretrained=None):
-         """Initialize the weights in backbone.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         if isinstance(pretrained, str):
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, strict=False, logger=logger)
-         elif pretrained is None:
-             for m in self.features.modules():
-                 if isinstance(m, nn.Conv2d):
-                     kaiming_init(m)
-                 elif isinstance(m, nn.BatchNorm2d):
-                     constant_init(m, 1)
-                 elif isinstance(m, nn.Linear):
-                     normal_init(m, std=0.01)
-         else:
-             raise TypeError('pretrained must be a str or None')
-
-         for m in self.extra.modules():
-             if isinstance(m, nn.Conv2d):
-                 xavier_init(m, distribution='uniform')
-
-         constant_init(self.l2_norm, self.l2_norm.scale)
-
-     def forward(self, x):
-         """Forward function."""
-         outs = []
-         for i, layer in enumerate(self.features):
-             x = layer(x)
-             if i in self.out_feature_indices:
-                 outs.append(x)
-         for i, layer in enumerate(self.extra):
-             x = F.relu(layer(x), inplace=True)
-             if i % 2 == 1:
-                 outs.append(x)
-         outs[0] = self.l2_norm(outs[0])
-         if len(outs) == 1:
-             return outs[0]
-         else:
-             return tuple(outs)
-
-     def _make_extra_layers(self, outplanes):
-         layers = []
-         kernel_sizes = (1, 3)
-         num_layers = 0
-         outplane = None
-         for i in range(len(outplanes)):
-             if self.inplanes == 'S':
-                 self.inplanes = outplane
-                 continue
-             k = kernel_sizes[num_layers % 2]
-             if outplanes[i] == 'S':
-                 outplane = outplanes[i + 1]
-                 conv = nn.Conv2d(
-                     self.inplanes, outplane, k, stride=2, padding=1)
-             else:
-                 outplane = outplanes[i]
-                 conv = nn.Conv2d(
-                     self.inplanes, outplane, k, stride=1, padding=0)
-             layers.append(conv)
-             self.inplanes = outplanes[i]
-             num_layers += 1
-         if self.input_size == 512:
-             layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
-
-         return nn.Sequential(*layers)
-
-
- class L2Norm(nn.Module):
-
-     def __init__(self, n_dims, scale=20., eps=1e-10):
-         """L2 normalization layer.
-
-         Args:
-             n_dims (int): Number of dimensions to be normalized
-             scale (float, optional): Defaults to 20..
-             eps (float, optional): Used to avoid division by zero.
-                 Defaults to 1e-10.
-         """
-         super(L2Norm, self).__init__()
-         self.n_dims = n_dims
-         self.weight = nn.Parameter(torch.Tensor(self.n_dims))
-         self.eps = eps
-         self.scale = scale
-
-     def forward(self, x):
-         """Forward function."""
-         # normalization layer convert to FP32 in FP16 training
-         x_float = x.float()
-         norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
-         return (self.weight[None, :, None, None].float().expand_as(x_float) *
-                 x_float / norm).type_as(x)
 
spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py DELETED
@@ -1,172 +0,0 @@
- import torch.nn as nn
- from mmcv.cnn import ConvModule, normal_init, xavier_init
-
- from mmdet.models.backbones.resnet import Bottleneck
- from mmdet.models.builder import HEADS
- from .bbox_head import BBoxHead
-
-
- class BasicResBlock(nn.Module):
-     """Basic residual block.
-
-     This block is a little different from the block in the ResNet backbone.
-     The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
-
-     Args:
-         in_channels (int): Channels of the input feature map.
-         out_channels (int): Channels of the output feature map.
-         conv_cfg (dict): The config dict for convolution layers.
-         norm_cfg (dict): The config dict for normalization layers.
-     """
-
-     def __init__(self,
-                  in_channels,
-                  out_channels,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN')):
-         super(BasicResBlock, self).__init__()
-
-         # main path
-         self.conv1 = ConvModule(
-             in_channels,
-             in_channels,
-             kernel_size=3,
-             padding=1,
-             bias=False,
-             conv_cfg=conv_cfg,
-             norm_cfg=norm_cfg)
-         self.conv2 = ConvModule(
-             in_channels,
-             out_channels,
-             kernel_size=1,
-             bias=False,
-             conv_cfg=conv_cfg,
-             norm_cfg=norm_cfg,
-             act_cfg=None)
-
-         # identity path
-         self.conv_identity = ConvModule(
-             in_channels,
-             out_channels,
-             kernel_size=1,
-             conv_cfg=conv_cfg,
-             norm_cfg=norm_cfg,
-             act_cfg=None)
-
-         self.relu = nn.ReLU(inplace=True)
-
-     def forward(self, x):
-         identity = x
-
-         x = self.conv1(x)
-         x = self.conv2(x)
-
-         identity = self.conv_identity(identity)
-         out = x + identity
-
-         out = self.relu(out)
-         return out
-
-
- @HEADS.register_module()
- class DoubleConvFCBBoxHead(BBoxHead):
-     r"""Bbox head used in Double-Head R-CNN
-
-     .. code-block:: none
-
-                                           /-> cls
-                       /-> shared convs ->
-                                           \-> reg
-         roi features
-                                           /-> cls
-                       \-> shared fc    ->
-                                           \-> reg
-     """  # noqa: W605
-
-     def __init__(self,
-                  num_convs=0,
-                  num_fcs=0,
-                  conv_out_channels=1024,
-                  fc_out_channels=1024,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  **kwargs):
-         kwargs.setdefault('with_avg_pool', True)
-         super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
-         assert self.with_avg_pool
-         assert num_convs > 0
-         assert num_fcs > 0
-         self.num_convs = num_convs
-         self.num_fcs = num_fcs
-         self.conv_out_channels = conv_out_channels
-         self.fc_out_channels = fc_out_channels
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-
-         # increase the channel of input features
-         self.res_block = BasicResBlock(self.in_channels,
-                                        self.conv_out_channels)
-
-         # add conv heads
-         self.conv_branch = self._add_conv_branch()
-         # add fc heads
-         self.fc_branch = self._add_fc_branch()
-
-         out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
-         self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
-
-         self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
-         self.relu = nn.ReLU(inplace=True)
-
-     def _add_conv_branch(self):
-         """Add the conv branch which consists of a sequential of conv layers."""
-         branch_convs = nn.ModuleList()
-         for i in range(self.num_convs):
-             branch_convs.append(
-                 Bottleneck(
-                     inplanes=self.conv_out_channels,
-                     planes=self.conv_out_channels // 4,
-                     conv_cfg=self.conv_cfg,
-                     norm_cfg=self.norm_cfg))
-         return branch_convs
-
-     def _add_fc_branch(self):
-         """Add the fc branch which consists of a sequential of fc layers."""
-         branch_fcs = nn.ModuleList()
-         for i in range(self.num_fcs):
-             fc_in_channels = (
-                 self.in_channels *
-                 self.roi_feat_area if i == 0 else self.fc_out_channels)
-             branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
-         return branch_fcs
-
-     def init_weights(self):
-         # conv layers are already initialized by ConvModule
-         normal_init(self.fc_cls, std=0.01)
-         normal_init(self.fc_reg, std=0.001)
-
-         for m in self.fc_branch.modules():
-             if isinstance(m, nn.Linear):
-                 xavier_init(m, distribution='uniform')
-
-     def forward(self, x_cls, x_reg):
-         # conv head
-         x_conv = self.res_block(x_reg)
-
-         for conv in self.conv_branch:
-             x_conv = conv(x_conv)
-
-         if self.with_avg_pool:
-             x_conv = self.avg_pool(x_conv)
-
-         x_conv = x_conv.view(x_conv.size(0), -1)
-         bbox_pred = self.fc_reg(x_conv)
-
-         # fc head
-         x_fc = x_cls.view(x_cls.size(0), -1)
-         for fc in self.fc_branch:
-             x_fc = self.relu(fc(x_fc))
-
-         cls_score = self.fc_cls(x_fc)
-
-         return cls_score, bbox_pred
 
spaces/CVPR/lama-example/predict.py DELETED
@@ -1,89 +0,0 @@
- #!/usr/bin/env python3
-
- # Example command:
- # ./bin/predict.py \
- #     model.path=<path to checkpoint, prepared by make_checkpoint.py> \
- #     indir=<path to input data> \
- #     outdir=<where to store predicts>
-
- import logging
- import os
- import sys
- import traceback
-
- from saicinpainting.evaluation.utils import move_to_device
-
- os.environ['OMP_NUM_THREADS'] = '1'
- os.environ['OPENBLAS_NUM_THREADS'] = '1'
- os.environ['MKL_NUM_THREADS'] = '1'
- os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
- os.environ['NUMEXPR_NUM_THREADS'] = '1'
-
- import cv2
- import hydra
- import numpy as np
- import torch
- import tqdm
- import yaml
- from omegaconf import OmegaConf
- from torch.utils.data._utils.collate import default_collate
-
- from saicinpainting.training.data.datasets import make_default_val_dataset
- from saicinpainting.training.trainers import load_checkpoint
- from saicinpainting.utils import register_debug_signal_handlers
-
- LOGGER = logging.getLogger(__name__)
-
-
- @hydra.main(config_path='configs/prediction', config_name='default.yaml')
- def main(predict_config: OmegaConf):
-     try:
-         register_debug_signal_handlers()  # kill -10 <pid> will result in traceback dumped into log
-
-         device = torch.device(predict_config.device)
-
-         train_config_path = os.path.join(predict_config.model.path, 'config.yaml')
-         with open(train_config_path, 'r') as f:
-             train_config = OmegaConf.create(yaml.safe_load(f))
-
-         train_config.training_model.predict_only = True
-
-         out_ext = predict_config.get('out_ext', '.png')
-
-         checkpoint_path = os.path.join(predict_config.model.path,
-                                        'models',
-                                        predict_config.model.checkpoint)
-         model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu')
-         model.freeze()
-         model.to(device)
-
-         if not predict_config.indir.endswith('/'):
-             predict_config.indir += '/'
-
-         dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset)
-         with torch.no_grad():
-             for img_i in tqdm.trange(len(dataset)):
-                 mask_fname = dataset.mask_filenames[img_i]
-                 cur_out_fname = os.path.join(
-                     predict_config.outdir,
-                     os.path.splitext(mask_fname[len(predict_config.indir):])[0] + out_ext
-                 )
-                 os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True)
-
-                 batch = move_to_device(default_collate([dataset[img_i]]), device)
-                 batch['mask'] = (batch['mask'] > 0) * 1
-                 batch = model(batch)
-                 cur_res = batch[predict_config.out_key][0].permute(1, 2, 0).detach().cpu().numpy()
-
-                 cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8')
-                 cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR)
-                 cv2.imwrite(cur_out_fname, cur_res)
-     except KeyboardInterrupt:
-         LOGGER.warning('Interrupted by user')
-     except Exception as ex:
-         LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
-         sys.exit(1)
-
-
- if __name__ == '__main__':
-     main()
 
spaces/CikeyQI/Yunzai/Yunzai/lib/bot.js DELETED
@@ -1,231 +0,0 @@
- import "./config/init.js"
- import cfg from "./config/config.js"
- import PluginsLoader from "./plugins/loader.js"
- import ListenerLoader from "./listener/loader.js"
- import { EventEmitter } from "events"
- import express from "express"
- import http from "http"
- import { WebSocketServer } from "ws"
- import _ from "lodash"
-
- export default class Yunzai extends EventEmitter {
-   constructor() {
-     super()
-     this.uin = []
-     this.adapter = []
-     this.express = express()
-     this.server = http.createServer(this.express)
-     this.server.on("upgrade", (req, socket, head) => {
-       this.wss.handleUpgrade(req, socket, head, conn => {
-         conn.id = `${req.connection.remoteAddress}-${req.headers["sec-websocket-key"]}`
-         this.makeLog("mark", `${logger.blue(`[${conn.id} <=> ${req.url}]`)} 建立连接:${JSON.stringify(req.headers)}`)
-         conn.on("error", logger.error)
-         conn.on("close", () => this.makeLog("mark", `${logger.blue(`[${conn.id} <≠> ${req.url}]`)} 断开连接`))
-         conn.on("message", msg => this.makeLog("debug", `${logger.blue(`[${conn.id} => ${req.url}]`)} 消息:${String(msg).trim()}`))
-         conn.sendMsg = msg => {
-           if (typeof msg == "object")
-             msg = JSON.stringify(msg)
-           this.makeLog("debug", `${logger.blue(`[${conn.id} <= ${req.url}]`)} 消息:${msg}`)
-           return conn.send(msg)
-         }
-         for (const i of this.wsf[req.url.split("/")[1]] || [])
-           i(conn, req, socket, head)
-       })
-     })
-     this.wss = new WebSocketServer({ noServer: true })
-     this.wsf = {}
-   }
-
-   makeLog(level, msg) {
-     logger[level](_.truncate(msg, { length: cfg.bot.logLength }))
-   }
-
-   em(name = "", data = {}) {
-     if (data.self_id)
-       Object.defineProperty(data, "bot", { value: Bot[data.self_id] })
-     while (true) {
-       this.emit(name, data)
-       const i = name.lastIndexOf(".")
-       if (i == -1) break
-       name = name.slice(0, i)
-     }
-   }
-
-   async run() {
-     await import("./plugins/stdin.js")
-     await PluginsLoader.load()
-     await ListenerLoader.load()
-     this.serverLoad()
-     this.emit("online", this)
-   }
-
-   serverLoad() {
-     this.express.use(req => {
-       logger.mark(`${logger.blue(`[${req.ip} => ${req.url}]`)} HTTP ${req.method} 请求:${JSON.stringify(req.headers)}`)
-       req.res.redirect("https://github.com/TimeRainStarSky/Yunzai")
-     })
-
-     this.server.listen(cfg.bot.port, () => {
-       const host = this.server.address().address
-       const port = this.server.address().port
-       logger.mark(`启动 HTTP 服务器:${logger.green(`http://[${host}]:${port}`)}`)
-       for (const i of Object.keys(this.wsf))
-         logger.info(`本机 ${i} 连接地址:${logger.blue(`ws://localhost:${port}/${i}`)}`)
-     })
-   }
-
-   getFriendArray() {
-     const array = []
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].fl || [])
-         array.push({ ...i, bot_id })
-     return array
-   }
-
-   getFriendList() {
-     const array = []
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].fl || [])
-         array.push(id)
-     return array
-   }
-
-   getFriendMap() {
-     const map = new Map
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].fl || [])
-         map.set(id, { ...i, bot_id })
-     return map
-   }
-   get fl() { return this.getFriendMap() }
-
-   getGroupArray() {
-     const array = []
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].gl || [])
-         array.push({ ...i, bot_id })
-     return array
-   }
-
-   getGroupList() {
-     const array = []
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].gl || [])
-         array.push(id)
-     return array
-   }
-
-   getGroupMap() {
-     const map = new Map
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].gl || [])
-         map.set(id, { ...i, bot_id })
-     return map
-   }
-   get gl() { return this.getGroupMap() }
-   get gml() {
-     const map = new Map
-     for (const bot_id of this.uin)
-       for (const [id, i] of this[bot_id].gml || [])
-         map.set(id, i)
-     return map
-   }
-
-   pickFriend(user_id) {
-     user_id = Number(user_id) || String(user_id)
-     const user = this.fl.get(user_id)
-     if (user) return this[user.bot_id].pickFriend(user_id)
-     logger.error(`获取用户对象失败:找不到用户 ${logger.red(user_id)}`)
-   }
-   get pickUser() { return this.pickFriend }
-
-   pickGroup(group_id) {
-     group_id = Number(group_id) || String(group_id)
-     const group = this.gl.get(group_id)
-     if (group) return this[group.bot_id].pickGroup(group_id)
-     logger.error(`获取群对象失败:找不到群 ${logger.red(group_id)}`)
-   }
-
-   pickMember(group_id, user_id) {
-     const group = this.pickGroup(group_id)
-     if (group) return group.pickMember(user_id)
-   }
-
-   sendFriendMsg(bot_id, user_id, msg) {
-     try {
-       if (!bot_id)
-         return this.pickFriend(user_id).sendMsg(msg)
-
-       if (this[bot_id])
-         return this[bot_id].pickFriend(user_id).sendMsg(msg)
-
-       return new Promise(resolve =>
-         this.once(`connect.${bot_id}`, data =>
-           resolve(data.bot.pickFriend(user_id).sendMsg(msg))))
-     } catch (err) {
-       logger.error(`${logger.blue(`[${bot_id}]`)} 发送好友消息失败:[${user_id}] ${err}`)
-     }
-     return false
-   }
-
-   sendGroupMsg(bot_id, group_id, msg) {
-     try {
-       if (!bot_id)
-         return this.pickGroup(group_id).sendMsg(msg)
-
-       if (this[bot_id])
-         return this[bot_id].pickGroup(group_id).sendMsg(msg)
-
-       return new Promise(resolve =>
-         this.once(`connect.${bot_id}`, data =>
-           resolve(data.bot.pickGroup(group_id).sendMsg(msg))))
-     } catch (err) {
-       logger.error(`${logger.blue(`[${bot_id}]`)} 发送群消息失败:[${group_id}] ${err}`)
-     }
-     return false
-   }
-
-   async getFriendMsg(fnc = () => true) {
-     if (typeof fnc != "function") {
-       const { self_id, user_id } = fnc
-       fnc = data => data.self_id == self_id && data.user_id == user_id
-     }
-
-     while (true) {
-       const msg = await new Promise(resolve => {
-         this.once("message", data => {
-           if (data.message && fnc(data)) {
-             let msg = ""
-             for (const i of data.message)
-               if (i.type == "text")
-                 msg += i.text.trim()
-             resolve(msg)
-           } else {
-             resolve(false)
-           }
-         })
-       })
-       if (msg) return msg
-     }
-   }
-
-   getMasterMsg() {
-     return this.getFriendMsg(data =>
-       cfg.master[data.self_id]?.includes(String(data.user_id)))
-   }
-
-   sendMasterMsg(msg) {
-     for (const bot_id in cfg.master)
-       for (const user_id of cfg.master[bot_id])
-         this.sendFriendMsg(bot_id, user_id, msg)
-   }
-
-   makeForwardMsg(msg) { return { type: "node", data: msg } }
-
-   async sendForwardMsg(send, msg) {
-     const messages = []
-     for (const { message } of msg)
-       messages.push(await send(message))
-     return messages
-   }
- }
 
spaces/Codecooker/rvcapi/src/trainset_preprocess_pipeline_print.py DELETED
@@ -1,146 +0,0 @@
- import sys, os, multiprocessing
- from scipy import signal
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
-
- inp_root = sys.argv[1]
- sr = int(sys.argv[2])
- n_p = int(sys.argv[3])
- exp_dir = sys.argv[4]
- noparallel = sys.argv[5] == "True"
- import numpy as np, os, traceback
- from slicer2 import Slicer
- import librosa, traceback
- from scipy.io import wavfile
- import multiprocessing
- from my_utils import load_audio
- import tqdm
-
- DoFormant = False
- Quefrency = 1.0
- Timbre = 1.0
-
- mutex = multiprocessing.Lock()
- f = open("%s/preprocess.log" % exp_dir, "a+")
-
-
- def println(strr):
-     mutex.acquire()
-     print(strr)
-     f.write("%s\n" % strr)
-     f.flush()
-     mutex.release()
-
-
- class PreProcess:
-     def __init__(self, sr, exp_dir):
-         self.slicer = Slicer(
-             sr=sr,
-             threshold=-42,
-             min_length=1500,
-             min_interval=400,
-             hop_size=15,
-             max_sil_kept=500,
-         )
-         self.sr = sr
-         self.bh, self.ah = signal.butter(N=5, Wn=48, btype="high", fs=self.sr)
-         self.per = 3.0
-         self.overlap = 0.3
-         self.tail = self.per + self.overlap
-         self.max = 0.9
-         self.alpha = 0.75
-         self.exp_dir = exp_dir
-         self.gt_wavs_dir = "%s/0_gt_wavs" % exp_dir
-         self.wavs16k_dir = "%s/1_16k_wavs" % exp_dir
-         os.makedirs(self.exp_dir, exist_ok=True)
-         os.makedirs(self.gt_wavs_dir, exist_ok=True)
-         os.makedirs(self.wavs16k_dir, exist_ok=True)
-
-     def norm_write(self, tmp_audio, idx0, idx1):
-         tmp_max = np.abs(tmp_audio).max()
-         if tmp_max > 2.5:
-             print("%s-%s-%s-filtered" % (idx0, idx1, tmp_max))
-             return
-         tmp_audio = (tmp_audio / tmp_max * (self.max * self.alpha)) + (
-             1 - self.alpha
-         ) * tmp_audio
-         wavfile.write(
-             "%s/%s_%s.wav" % (self.gt_wavs_dir, idx0, idx1),
-             self.sr,
-             tmp_audio.astype(np.float32),
-         )
-         tmp_audio = librosa.resample(
-             tmp_audio, orig_sr=self.sr, target_sr=16000
-         )  # , res_type="soxr_vhq"
-         wavfile.write(
-             "%s/%s_%s.wav" % (self.wavs16k_dir, idx0, idx1),
-             16000,
-             tmp_audio.astype(np.float32),
-         )
-
-     def pipeline(self, path, idx0):
-         try:
-             audio = load_audio(path, self.sr, DoFormant, Quefrency, Timbre)
-             # zero phased digital filter cause pre-ringing noise...
-             # audio = signal.filtfilt(self.bh, self.ah, audio)
-             audio = signal.lfilter(self.bh, self.ah, audio)
-
-             idx1 = 0
-             for audio in self.slicer.slice(audio):
-                 i = 0
-                 while 1:
-                     start = int(self.sr * (self.per - self.overlap) * i)
-                     i += 1
-                     if len(audio[start:]) > self.tail * self.sr:
-                         tmp_audio = audio[start : start + int(self.per * self.sr)]
-                         self.norm_write(tmp_audio, idx0, idx1)
-                         idx1 += 1
-                     else:
-                         tmp_audio = audio[start:]
-                         idx1 += 1
-                         break
-                 self.norm_write(tmp_audio, idx0, idx1)
-             # println("%s->Suc." % path)
-         except:
-             println("%s->%s" % (path, traceback.format_exc()))
-
-     def pipeline_mp(self, infos, thread_n):
-         for path, idx0 in tqdm.tqdm(
-             infos, position=thread_n, leave=True, desc="thread:%s" % thread_n
-         ):
-             self.pipeline(path, idx0)
-
-     def pipeline_mp_inp_dir(self, inp_root, n_p):
-         try:
-             infos = [
-                 ("%s/%s" % (inp_root, name), idx)
-                 for idx, name in enumerate(sorted(list(os.listdir(inp_root))))
-             ]
-             if noparallel:
-                 for i in range(n_p):
-                     # pipeline_mp also needs the thread index for the tqdm position
-                     self.pipeline_mp(infos[i::n_p], i)
-             else:
-                 ps = []
-                 for i in range(n_p):
-                     p = multiprocessing.Process(
-                         target=self.pipeline_mp, args=(infos[i::n_p], i)
-                     )
-                     ps.append(p)
-                     p.start()
-                 for i in range(n_p):
-                     ps[i].join()
-         except:
-             println("Fail. %s" % traceback.format_exc())
-
-
- def preprocess_trainset(inp_root, sr, n_p, exp_dir):
-     pp = PreProcess(sr, exp_dir)
-     println("start preprocess")
-     println(sys.argv)
-     pp.pipeline_mp_inp_dir(inp_root, n_p)
-     println("end preprocess")
-
-
- if __name__ == "__main__":
-     preprocess_trainset(inp_root, sr, n_p, exp_dir)
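The script above reads its five positional arguments straight from sys.argv at import time. A hedged invocation sketch, with the argument order inferred from those reads (the paths are placeholders, not values from this repository):

import subprocess

subprocess.run([
    "python", "trainset_preprocess_pipeline_print.py",
    "/path/to/raw_wavs",   # inp_root: directory of input audio files
    "40000",               # sr: target sample rate for the ground-truth wavs
    "4",                   # n_p: number of worker processes
    "/path/to/exp_dir",    # exp_dir: output dir (gets 0_gt_wavs/ and 1_16k_wavs/)
    "False",               # noparallel: the string "True" disables multiprocessing
], check=True)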
 
spaces/DCandE/rvc-models/config.py DELETED
@@ -1,88 +0,0 @@
- ######################## Hardware parameters ########################
-
- # Set to cuda:x, cpu or mps, where x is the GPU index; only NVIDIA GPUs / Apple Silicon acceleration are supported
- device = "cuda:0"
-
- # Safe to set True on 9/10/20/30/40-series GPUs without affecting quality; gives a speedup on 20-series and newer
- is_half = True
-
- # The default 0 uses all threads; set a number to limit CPU usage
- n_cpu = 0
-
- ######################## Hardware parameters ########################
-
-
- ################## Parameter-handling logic below, do not modify ##################
-
- ######################## Command-line arguments ########################
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("--port", type=int, default=7865, help="Listen port")
- parser.add_argument("--pycmd", type=str, default="python", help="Python command")
- parser.add_argument("--colab", action="store_true", help="Launch in colab")
- parser.add_argument(
-     "--noparallel", action="store_true", help="Disable parallel processing"
- )
- parser.add_argument(
-     "--noautoopen", action="store_true", help="Do not open in browser automatically"
- )
- cmd_opts, unknown = parser.parse_known_args()
-
- python_cmd = cmd_opts.pycmd
- listen_port = cmd_opts.port
- iscolab = cmd_opts.colab
- noparallel = cmd_opts.noparallel
- noautoopen = cmd_opts.noautoopen
- ######################## Command-line arguments ########################
-
- import sys
- import torch
-
-
- # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
- # check `getattr` and try it for compatibility
- def has_mps() -> bool:
-     if sys.platform != "darwin":
-         return False
-     else:
-         if not getattr(torch, "has_mps", False):
-             return False
-         try:
-             torch.zeros(1).to(torch.device("mps"))
-             return True
-         except Exception:
-             return False
-
-
- if not torch.cuda.is_available():
-     if has_mps():
-         print("没有发现支持的N卡, 使用MPS进行推理")
-         device = "mps"
-     else:
-         print("没有发现支持的N卡, 使用CPU进行推理")
-         device = "cpu"
-     is_half = False
-
- if device not in ["cpu", "mps"]:
-     gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1]))
-     if "16" in gpu_name or "MX" in gpu_name:
-         print("16系显卡/MX系显卡强制单精度")
-         is_half = False
-
- from multiprocessing import cpu_count
-
- if n_cpu == 0:
-     n_cpu = cpu_count()
- if is_half:
-     # settings for 6 GB of VRAM
-     x_pad = 3
-     x_query = 10
-     x_center = 60
-     x_max = 65
- else:
-     # settings for 5 GB of VRAM
-     x_pad = 1
-     x_query = 6
-     x_center = 38
-     x_max = 41
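Since config.py resolves the device, precision, and padding values at import time, downstream code typically just imports the resulting module-level names. A hypothetical consumer sketch (the Linear layer is a stand-in for a real model, not code from this repository):

import torch
from config import device, is_half, x_pad, x_query, x_center, x_max

model = torch.nn.Linear(4, 4)  # placeholder for an actual inference model
# Match the precision chosen above, then move to the selected device.
model = (model.half() if is_half else model.float()).to(device)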
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-7648fc8d.js DELETED
@@ -1,7 +0,0 @@
- import{c as F,e as I,s as ce,N as me,t as c,P as _e,g as Ue,T as E,p as Qe,h as J,E as v,b as se,j as Ze,k as Ge,l as Ve,m as Ke,f as Je,i as Ye,n as We,o as et,q as ne,r as tt}from"./index-3ba00a4a.js";import{html as rt}from"./index-c48bd2e8.js";import"./index-1d65707a.js";import"./Blocks-c9e1499d.js";import"./Button-f155035a.js";import"./BlockLabel-66866176.js";import"./Empty-eec13822.js";import"./Copy-9f1657c4.js";import"./Download-daff1959.js";import"./index-f8ff95a1.js";import"./index-7f39cecc.js";import"./index-b6ab4199.js";class X{constructor(e,r,s,n,i,o,a){this.type=e,this.value=r,this.from=s,this.hash=n,this.end=i,this.children=o,this.positions=a,this.hashProp=[[I.contextHash,n]]}static create(e,r,s,n,i){let o=n+(n<<8)+e+(r<<4)|0;return new X(e,r,s,o,i,[],[])}addChild(e,r){e.prop(I.contextHash)!=this.hash&&(e=new E(e.type,e.children,e.positions,e.length,this.hashProp)),this.children.push(e),this.positions.push(r)}toTree(e,r=this.end){let s=this.children.length-1;return s>=0&&(r=Math.max(r,this.positions[s]+this.children[s].length+this.from)),new E(e.types[this.type],this.children,this.positions,r-this.from).balance({makeTree:(i,o,a)=>new E(F.none,i,o,a,this.hashProp)})}}var f;(function(t){t[t.Document=1]="Document",t[t.CodeBlock=2]="CodeBlock",t[t.FencedCode=3]="FencedCode",t[t.Blockquote=4]="Blockquote",t[t.HorizontalRule=5]="HorizontalRule",t[t.BulletList=6]="BulletList",t[t.OrderedList=7]="OrderedList",t[t.ListItem=8]="ListItem",t[t.ATXHeading1=9]="ATXHeading1",t[t.ATXHeading2=10]="ATXHeading2",t[t.ATXHeading3=11]="ATXHeading3",t[t.ATXHeading4=12]="ATXHeading4",t[t.ATXHeading5=13]="ATXHeading5",t[t.ATXHeading6=14]="ATXHeading6",t[t.SetextHeading1=15]="SetextHeading1",t[t.SetextHeading2=16]="SetextHeading2",t[t.HTMLBlock=17]="HTMLBlock",t[t.LinkReference=18]="LinkReference",t[t.Paragraph=19]="Paragraph",t[t.CommentBlock=20]="CommentBlock",t[t.ProcessingInstructionBlock=21]="ProcessingInstructionBlock",t[t.Escape=22]="Escape",t[t.Entity=23]="Entity",t[t.HardBreak=24]="HardBreak",t[t.Emphasis=25]="Emphasis",t[t.StrongEmphasis=26]="StrongEmphasis",t[t.Link=27]="Link",t[t.Image=28]="Image",t[t.InlineCode=29]="InlineCode",t[t.HTMLTag=30]="HTMLTag",t[t.Comment=31]="Comment",t[t.ProcessingInstruction=32]="ProcessingInstruction",t[t.URL=33]="URL",t[t.HeaderMark=34]="HeaderMark",t[t.QuoteMark=35]="QuoteMark",t[t.ListMark=36]="ListMark",t[t.LinkMark=37]="LinkMark",t[t.EmphasisMark=38]="EmphasisMark",t[t.CodeMark=39]="CodeMark",t[t.CodeText=40]="CodeText",t[t.CodeInfo=41]="CodeInfo",t[t.LinkTitle=42]="LinkTitle",t[t.LinkLabel=43]="LinkLabel"})(f||(f={}));class st{constructor(e,r){this.start=e,this.content=r,this.marks=[],this.parsers=[]}}class nt{constructor(){this.text="",this.baseIndent=0,this.basePos=0,this.depth=0,this.markers=[],this.pos=0,this.indent=0,this.next=-1}forward(){this.basePos>this.pos&&this.forwardInner()}forwardInner(){let e=this.skipSpace(this.basePos);this.indent=this.countIndent(e,this.pos,this.indent),this.pos=e,this.next=e==this.text.length?-1:this.text.charCodeAt(e)}skipSpace(e){return N(this.text,e)}reset(e){for(this.text=e,this.baseIndent=this.basePos=this.pos=this.indent=0,this.forwardInner(),this.depth=1;this.markers.length;)this.markers.pop()}moveBase(e){this.basePos=e,this.baseIndent=this.countIndent(e,this.pos,this.indent)}moveBaseColumn(e){this.baseIndent=e,this.basePos=this.findColumn(e)}addMarker(e){this.markers.push(e)}countIndent(e,r=0,s=0){for(let n=r;n<e;n++)s+=this.text.charCodeAt(n)==9?4-s%4:1;return s}findColumn(e){let r=0;for(let 
s=0;r<this.text.length&&s<e;r++)s+=this.text.charCodeAt(r)==9?4-s%4:1;return r}scrub(){if(!this.baseIndent)return this.text;let e="";for(let r=0;r<this.basePos;r++)e+=" ";return e+this.text.slice(this.basePos)}}function ie(t,e,r){if(r.pos==r.text.length||t!=e.block&&r.indent>=e.stack[r.depth+1].value+r.baseIndent)return!0;if(r.indent>=r.baseIndent+4)return!1;let s=(t.type==f.OrderedList?ee:W)(r,e,!1);return s>0&&(t.type!=f.BulletList||Y(r,e,!1)<0)&&r.text.charCodeAt(r.pos+s-1)==t.value}const ge={[f.Blockquote](t,e,r){return r.next!=62?!1:(r.markers.push(m(f.QuoteMark,e.lineStart+r.pos,e.lineStart+r.pos+1)),r.moveBase(r.pos+(C(r.text.charCodeAt(r.pos+1))?2:1)),t.end=e.lineStart+r.text.length,!0)},[f.ListItem](t,e,r){return r.indent<r.baseIndent+t.value&&r.next>-1?!1:(r.moveBaseColumn(r.baseIndent+t.value),!0)},[f.OrderedList]:ie,[f.BulletList]:ie,[f.Document](){return!0}};function C(t){return t==32||t==9||t==10||t==13}function N(t,e=0){for(;e<t.length&&C(t.charCodeAt(e));)e++;return e}function oe(t,e,r){for(;e>r&&C(t.charCodeAt(e-1));)e--;return e}function ke(t){if(t.next!=96&&t.next!=126)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==t.next;)e++;if(e<t.pos+3)return-1;if(t.next==96){for(let r=e;r<t.text.length;r++)if(t.text.charCodeAt(r)==96)return-1}return e}function Le(t){return t.next!=62?-1:t.text.charCodeAt(t.pos+1)==32?2:1}function Y(t,e,r){if(t.next!=42&&t.next!=45&&t.next!=95)return-1;let s=1;for(let n=t.pos+1;n<t.text.length;n++){let i=t.text.charCodeAt(n);if(i==t.next)s++;else if(!C(i))return-1}return r&&t.next==45&&we(t)>-1&&t.depth==e.stack.length||s<3?-1:1}function be(t,e){for(let r=t.stack.length-1;r>=0;r--)if(t.stack[r].type==e)return!0;return!1}function W(t,e,r){return(t.next==45||t.next==43||t.next==42)&&(t.pos==t.text.length-1||C(t.text.charCodeAt(t.pos+1)))&&(!r||be(e,f.BulletList)||t.skipSpace(t.pos+2)<t.text.length)?1:-1}function ee(t,e,r){let s=t.pos,n=t.next;for(;n>=48&&n<=57;){s++;if(s==t.text.length)return-1;n=t.text.charCodeAt(s)}return s==t.pos||s>t.pos+9||n!=46&&n!=41||s<t.text.length-1&&!C(t.text.charCodeAt(s+1))||r&&!be(e,f.OrderedList)&&(t.skipSpace(s+1)==t.text.length||s>t.pos+1||t.next!=49)?-1:s+1-t.pos}function Se(t){if(t.next!=35)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==35;)e++;if(e<t.text.length&&t.text.charCodeAt(e)!=32)return-1;let r=e-t.pos;return r>6?-1:r}function we(t){if(t.next!=45&&t.next!=61||t.indent>=t.baseIndent+4)return-1;let e=t.pos+1;for(;e<t.text.length&&t.text.charCodeAt(e)==t.next;)e++;let r=e;for(;e<t.text.length&&C(t.text.charCodeAt(e));)e++;return e==t.text.length?r:-1}const Q=/^[ \t]*$/,Ce=/-->/,Ae=/\?>/,Z=[[/^<(?:script|pre|style)(?:\s|>|$)/i,/<\/(?:script|pre|style)>/i],[/^\s*<!--/,Ce],[/^\s*<\?/,Ae],[/^\s*<![A-Z]/,/>/],[/^\s*<!\[CDATA\[/,/\]\]>/],[/^\s*<\/?(?:address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h1|h2|h3|h4|h5|h6|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul)(?:\s|\/?>|$)/i,Q],[/^\s*(?:<\/[a-z][\w-]*\s*>|<[a-z][\w-]*(\s+[a-z:_][\w-.]*(?:\s*=\s*(?:[^\s"'=<>`]+|'[^']*'|"[^"]*"))?)*\s*>)\s*$/i,Q]];function xe(t,e,r){if(t.next!=60)return-1;let s=t.text.slice(t.pos);for(let n=0,i=Z.length-(r?1:0);n<i;n++)if(Z[n][0].test(s))return n;return-1}function ae(t,e){let r=t.countIndent(e,t.pos,t.indent),s=t.countIndent(t.skipSpace(e),e,r);return 
s>=r+5?r+1:s}function B(t,e,r){let s=t.length-1;s>=0&&t[s].to==e&&t[s].type==f.CodeText?t[s].to=r:t.push(m(f.CodeText,e,r))}const z={LinkReference:void 0,IndentedCode(t,e){let r=e.baseIndent+4;if(e.indent<r)return!1;let s=e.findColumn(r),n=t.lineStart+s,i=t.lineStart+e.text.length,o=[],a=[];for(B(o,n,i);t.nextLine()&&e.depth>=t.stack.length;)if(e.pos==e.text.length){B(a,t.lineStart-1,t.lineStart);for(let l of e.markers)a.push(l)}else{if(e.indent<r)break;{if(a.length){for(let h of a)h.type==f.CodeText?B(o,h.from,h.to):o.push(h);a=[]}B(o,t.lineStart-1,t.lineStart);for(let h of e.markers)o.push(h);i=t.lineStart+e.text.length;let l=t.lineStart+e.findColumn(e.baseIndent+4);l<i&&B(o,l,i)}}return a.length&&(a=a.filter(l=>l.type!=f.CodeText),a.length&&(e.markers=a.concat(e.markers))),t.addNode(t.buffer.writeElements(o,-n).finish(f.CodeBlock,i-n),n),!0},FencedCode(t,e){let r=ke(e);if(r<0)return!1;let s=t.lineStart+e.pos,n=e.next,i=r-e.pos,o=e.skipSpace(r),a=oe(e.text,e.text.length,o),l=[m(f.CodeMark,s,s+i)];o<a&&l.push(m(f.CodeInfo,t.lineStart+o,t.lineStart+a));for(let h=!0;t.nextLine()&&e.depth>=t.stack.length;h=!1){let u=e.pos;if(e.indent-e.baseIndent<4)for(;u<e.text.length&&e.text.charCodeAt(u)==n;)u++;if(u-e.pos>=i&&e.skipSpace(u)==e.text.length){for(let p of e.markers)l.push(p);l.push(m(f.CodeMark,t.lineStart+e.pos,t.lineStart+u)),t.nextLine();break}else{h||B(l,t.lineStart-1,t.lineStart);for(let L of e.markers)l.push(L);let p=t.lineStart+e.basePos,d=t.lineStart+e.text.length;p<d&&B(l,p,d)}}return t.addNode(t.buffer.writeElements(l,-s).finish(f.FencedCode,t.prevLineEnd()-s),s),!0},Blockquote(t,e){let r=Le(e);return r<0?!1:(t.startContext(f.Blockquote,e.pos),t.addNode(f.QuoteMark,t.lineStart+e.pos,t.lineStart+e.pos+1),e.moveBase(e.pos+r),null)},HorizontalRule(t,e){if(Y(e,t,!1)<0)return!1;let r=t.lineStart+e.pos;return t.nextLine(),t.addNode(f.HorizontalRule,r),!0},BulletList(t,e){let r=W(e,t,!1);if(r<0)return!1;t.block.type!=f.BulletList&&t.startContext(f.BulletList,e.basePos,e.next);let s=ae(e,e.pos+1);return t.startContext(f.ListItem,e.basePos,s-e.baseIndent),t.addNode(f.ListMark,t.lineStart+e.pos,t.lineStart+e.pos+r),e.moveBaseColumn(s),null},OrderedList(t,e){let r=ee(e,t,!1);if(r<0)return!1;t.block.type!=f.OrderedList&&t.startContext(f.OrderedList,e.basePos,e.text.charCodeAt(e.pos+r-1));let s=ae(e,e.pos+r);return t.startContext(f.ListItem,e.basePos,s-e.baseIndent),t.addNode(f.ListMark,t.lineStart+e.pos,t.lineStart+e.pos+r),e.moveBaseColumn(s),null},ATXHeading(t,e){let r=Se(e);if(r<0)return!1;let s=e.pos,n=t.lineStart+s,i=oe(e.text,e.text.length,s),o=i;for(;o>s&&e.text.charCodeAt(o-1)==e.next;)o--;(o==i||o==s||!C(e.text.charCodeAt(o-1)))&&(o=e.text.length);let a=t.buffer.write(f.HeaderMark,0,r).writeElements(t.parser.parseInline(e.text.slice(s+r+1,o),n+r+1),-n);o<e.text.length&&a.write(f.HeaderMark,o-s,i-s);let l=a.finish(f.ATXHeading1-1+r,e.text.length-s);return t.nextLine(),t.addNode(l,n),!0},HTMLBlock(t,e){let r=xe(e,t,!1);if(r<0)return!1;let s=t.lineStart+e.pos,n=Z[r][1],i=[],o=n!=Q;for(;!n.test(e.text)&&t.nextLine();){if(e.depth<t.stack.length){o=!1;break}for(let h of e.markers)i.push(h)}o&&t.nextLine();let a=n==Ce?f.CommentBlock:n==Ae?f.ProcessingInstructionBlock:f.HTMLBlock,l=t.prevLineEnd();return t.addNode(t.buffer.writeElements(i,-s).finish(a,l-s),s),!0},SetextHeading:void 0};class it{constructor(e){this.stage=0,this.elts=[],this.pos=0,this.start=e.start,this.advance(e.content)}nextLine(e,r,s){if(this.stage==-1)return!1;let n=s.content+`
- `+r.scrub(),i=this.advance(n);return i>-1&&i<n.length?this.complete(e,s,i):!1}finish(e,r){return(this.stage==2||this.stage==3)&&N(r.content,this.pos)==r.content.length?this.complete(e,r,r.content.length):!1}complete(e,r,s){return e.addLeafElement(r,m(f.LinkReference,this.start,this.start+s,this.elts)),!0}nextStage(e){return e?(this.pos=e.to-this.start,this.elts.push(e),this.stage++,!0):(e===!1&&(this.stage=-1),!1)}advance(e){for(;;){if(this.stage==-1)return-1;if(this.stage==0){if(!this.nextStage(ye(e,this.pos,this.start,!0)))return-1;if(e.charCodeAt(this.pos)!=58)return this.stage=-1;this.elts.push(m(f.LinkMark,this.pos+this.start,this.pos+this.start+1)),this.pos++}else if(this.stage==1){if(!this.nextStage(ve(e,N(e,this.pos),this.start)))return-1}else if(this.stage==2){let r=N(e,this.pos),s=0;if(r>this.pos){let n=Ne(e,r,this.start);if(n){let i=q(e,n.to-this.start);i>0&&(this.nextStage(n),s=i)}}return s||(s=q(e,this.pos)),s>0&&s<e.length?s:-1}else return q(e,this.pos)}}}function q(t,e){for(;e<t.length;e++){let r=t.charCodeAt(e);if(r==10)break;if(!C(r))return-1}return e}class ot{nextLine(e,r,s){let n=r.depth<e.stack.length?-1:we(r),i=r.next;if(n<0)return!1;let o=m(f.HeaderMark,e.lineStart+r.pos,e.lineStart+n);return e.nextLine(),e.addLeafElement(s,m(i==61?f.SetextHeading1:f.SetextHeading2,s.start,e.prevLineEnd(),[...e.parser.parseInline(s.content,s.start),o])),!0}finish(){return!1}}const at={LinkReference(t,e){return e.content.charCodeAt(0)==91?new it(e):null},SetextHeading(){return new ot}},lt=[(t,e)=>Se(e)>=0,(t,e)=>ke(e)>=0,(t,e)=>Le(e)>=0,(t,e)=>W(e,t,!0)>=0,(t,e)=>ee(e,t,!0)>=0,(t,e)=>Y(e,t,!0)>=0,(t,e)=>xe(e,t,!0)>=0],ht={text:"",end:0};class ft{constructor(e,r,s,n){this.parser=e,this.input=r,this.ranges=n,this.line=new nt,this.atEnd=!1,this.dontInject=new Set,this.stoppedAt=null,this.rangeI=0,this.to=n[n.length-1].to,this.lineStart=this.absoluteLineStart=this.absoluteLineEnd=n[0].from,this.block=X.create(f.Document,0,this.lineStart,0,0),this.stack=[this.block],this.fragments=s.length?new ct(s,r):null,this.readLine()}get parsedPos(){return this.absoluteLineStart}advance(){if(this.stoppedAt!=null&&this.absoluteLineStart>this.stoppedAt)return this.finish();let{line:e}=this;for(;;){for(;e.depth<this.stack.length;)this.finishContext();for(let s of e.markers)this.addNode(s.type,s.from,s.to);if(e.pos<e.text.length)break;if(!this.nextLine())return this.finish()}if(this.fragments&&this.reuseFragment(e.basePos))return null;e:for(;;){for(let s of this.parser.blockParsers)if(s){let n=s(this,e);if(n!=!1){if(n==!0)return null;e.forward();continue e}}break}let r=new st(this.lineStart+e.pos,e.text.slice(e.pos));for(let s of this.parser.leafBlockParsers)if(s){let n=s(this,r);n&&r.parsers.push(n)}e:for(;this.nextLine()&&e.pos!=e.text.length;){if(e.indent<e.baseIndent+4){for(let s of this.parser.endLeafBlock)if(s(this,e,r))break e}for(let s of r.parsers)if(s.nextLine(this,e,r))return null;r.content+=`
3
- `+e.scrub();for(let s of e.markers)r.marks.push(s)}return this.finishLeaf(r),null}stopAt(e){if(this.stoppedAt!=null&&this.stoppedAt<e)throw new RangeError("Can't move stoppedAt forward");this.stoppedAt=e}reuseFragment(e){if(!this.fragments.moveTo(this.absoluteLineStart+e,this.absoluteLineStart)||!this.fragments.matches(this.block.hash))return!1;let r=this.fragments.takeNodes(this);if(!r)return!1;let s=r,n=this.absoluteLineStart+r;for(let i=1;i<this.ranges.length;i++){let o=this.ranges[i-1].to,a=this.ranges[i].from;o>=this.lineStart&&a<n&&(s-=a-o)}return this.lineStart+=s,this.absoluteLineStart+=r,this.moveRangeI(),this.absoluteLineStart<this.to?(this.lineStart++,this.absoluteLineStart++,this.readLine()):(this.atEnd=!0,this.readLine()),!0}get depth(){return this.stack.length}parentType(e=this.depth-1){return this.parser.nodeSet.types[this.stack[e].type]}nextLine(){return this.lineStart+=this.line.text.length,this.absoluteLineEnd>=this.to?(this.absoluteLineStart=this.absoluteLineEnd,this.atEnd=!0,this.readLine(),!1):(this.lineStart++,this.absoluteLineStart=this.absoluteLineEnd+1,this.moveRangeI(),this.readLine(),!0)}moveRangeI(){for(;this.rangeI<this.ranges.length-1&&this.absoluteLineStart>=this.ranges[this.rangeI].to;)this.rangeI++,this.absoluteLineStart=Math.max(this.absoluteLineStart,this.ranges[this.rangeI].from)}scanLine(e){let r=ht;if(r.end=e,e>=this.to)r.text="";else if(r.text=this.lineChunkAt(e),r.end+=r.text.length,this.ranges.length>1){let s=this.absoluteLineStart,n=this.rangeI;for(;this.ranges[n].to<r.end;){n++;let i=this.ranges[n].from,o=this.lineChunkAt(i);r.end=i+o.length,r.text=r.text.slice(0,this.ranges[n-1].to-s)+o,s=r.end-r.text.length}}return r}readLine(){let{line:e}=this,{text:r,end:s}=this.scanLine(this.absoluteLineStart);for(this.absoluteLineEnd=s,e.reset(r);e.depth<this.stack.length;e.depth++){let n=this.stack[e.depth],i=this.parser.skipContextMarkup[n.type];if(!i)throw new Error("Unhandled block context "+f[n.type]);if(!i(n,this,e))break;e.forward()}}lineChunkAt(e){let r=this.input.chunk(e),s;if(this.input.lineChunks)s=r==`
4
- `?"":r;else{let n=r.indexOf(`
5
- `);s=n<0?r:r.slice(0,n)}return e+s.length>this.to?s.slice(0,this.to-e):s}prevLineEnd(){return this.atEnd?this.lineStart:this.lineStart-1}startContext(e,r,s=0){this.block=X.create(e,s,this.lineStart+r,this.block.hash,this.lineStart+this.line.text.length),this.stack.push(this.block)}startComposite(e,r,s=0){this.startContext(this.parser.getNodeType(e),r,s)}addNode(e,r,s){typeof e=="number"&&(e=new E(this.parser.nodeSet.types[e],M,M,(s??this.prevLineEnd())-r)),this.block.addChild(e,r-this.block.from)}addElement(e){this.block.addChild(e.toTree(this.parser.nodeSet),e.from-this.block.from)}addLeafElement(e,r){this.addNode(this.buffer.writeElements(V(r.children,e.marks),-r.from).finish(r.type,r.to-r.from),r.from)}finishContext(){let e=this.stack.pop(),r=this.stack[this.stack.length-1];r.addChild(e.toTree(this.parser.nodeSet),e.from-r.from),this.block=r}finish(){for(;this.stack.length>1;)this.finishContext();return this.addGaps(this.block.toTree(this.parser.nodeSet,this.lineStart))}addGaps(e){return this.ranges.length>1?Be(this.ranges,0,e.topNode,this.ranges[0].from,this.dontInject):e}finishLeaf(e){for(let s of e.parsers)if(s.finish(this,e))return;let r=V(this.parser.parseInline(e.content,e.start),e.marks);this.addNode(this.buffer.writeElements(r,-e.start).finish(f.Paragraph,e.content.length),e.start)}elt(e,r,s,n){return typeof e=="string"?m(this.parser.getNodeType(e),r,s,n):new Me(e,r)}get buffer(){return new Ie(this.parser.nodeSet)}}function Be(t,e,r,s,n){if(n.has(r.tree))return r.tree;let i=t[e].to,o=[],a=[],l=r.from+s;function h(u,p){for(;p?u>=i:u>i;){let d=t[e+1].from-i;s+=d,u+=d,e++,i=t[e].to}}for(let u=r.firstChild;u;u=u.nextSibling){h(u.from+s,!0);let p=u.from+s,d;u.to+s>i?(d=Be(t,e,u,s,n),h(u.to+s,!1)):d=u.toTree(),o.push(d),a.push(p-l)}return h(r.to+s,!1),new E(r.type,o,a,r.to+s-l,r.tree?r.tree.propValues:void 0)}class j extends _e{constructor(e,r,s,n,i,o,a,l,h){super(),this.nodeSet=e,this.blockParsers=r,this.leafBlockParsers=s,this.blockNames=n,this.endLeafBlock=i,this.skipContextMarkup=o,this.inlineParsers=a,this.inlineNames=l,this.wrappers=h,this.nodeTypes=Object.create(null);for(let u of e.types)this.nodeTypes[u.name]=u.id}createParse(e,r,s){let n=new ft(this,e,r,s);for(let i of this.wrappers)n=i(n,e,r,s);return n}configure(e){let r=G(e);if(!r)return this;let{nodeSet:s,skipContextMarkup:n}=this,i=this.blockParsers.slice(),o=this.leafBlockParsers.slice(),a=this.blockNames.slice(),l=this.inlineParsers.slice(),h=this.inlineNames.slice(),u=this.endLeafBlock.slice(),p=this.wrappers;if(H(r.defineNodes)){n=Object.assign({},n);let d=s.types.slice(),L;for(let S of r.defineNodes){let{name:g,block:k,composite:b,style:w}=typeof S=="string"?{name:S}:S;if(d.some($=>$.name==g))continue;b&&(n[d.length]=($,$e,qe)=>b($e,qe,$.value));let x=d.length,re=b?["Block","BlockContext"]:k?x>=f.ATXHeading1&&x<=f.SetextHeading2?["Block","LeafBlock","Heading"]:["Block","LeafBlock"]:void 0;d.push(F.define({id:x,name:g,props:re&&[[I.group,re]]})),w&&(L||(L={}),Array.isArray(w)||w instanceof Ue?L[g]=w:Object.assign(L,w))}s=new me(d),L&&(s=s.extend(ce(L)))}if(H(r.props)&&(s=s.extend(...r.props)),H(r.remove))for(let d of r.remove){let L=this.blockNames.indexOf(d),S=this.inlineNames.indexOf(d);L>-1&&(i[L]=o[L]=void 0),S>-1&&(l[S]=void 0)}if(H(r.parseBlock))for(let d of r.parseBlock){let L=a.indexOf(d.name);if(L>-1)i[L]=d.parse,o[L]=d.leaf;else{let 
S=d.before?T(a,d.before):d.after?T(a,d.after)+1:a.length-1;i.splice(S,0,d.parse),o.splice(S,0,d.leaf),a.splice(S,0,d.name)}d.endLeaf&&u.push(d.endLeaf)}if(H(r.parseInline))for(let d of r.parseInline){let L=h.indexOf(d.name);if(L>-1)l[L]=d.parse;else{let S=d.before?T(h,d.before):d.after?T(h,d.after)+1:h.length-1;l.splice(S,0,d.parse),h.splice(S,0,d.name)}}return r.wrap&&(p=p.concat(r.wrap)),new j(s,i,o,a,u,n,l,h,p)}getNodeType(e){let r=this.nodeTypes[e];if(r==null)throw new RangeError(`Unknown node type '${e}'`);return r}parseInline(e,r){let s=new dt(this,e,r);e:for(let n=r;n<s.end;){let i=s.char(n);for(let o of this.inlineParsers)if(o){let a=o(s,i,n);if(a>=0){n=a;continue e}}n++}return s.resolveMarkers(0)}}function H(t){return t!=null&&t.length>0}function G(t){if(!Array.isArray(t))return t;if(t.length==0)return null;let e=G(t[0]);if(t.length==1)return e;let r=G(t.slice(1));if(!r||!e)return e||r;let s=(o,a)=>(o||M).concat(a||M),n=e.wrap,i=r.wrap;return{props:s(e.props,r.props),defineNodes:s(e.defineNodes,r.defineNodes),parseBlock:s(e.parseBlock,r.parseBlock),parseInline:s(e.parseInline,r.parseInline),remove:s(e.remove,r.remove),wrap:n?i?(o,a,l,h)=>n(i(o,a,l,h),a,l,h):n:i}}function T(t,e){let r=t.indexOf(e);if(r<0)throw new RangeError(`Position specified relative to unknown parser ${e}`);return r}let Ee=[F.none];for(let t=1,e;e=f[t];t++)Ee[t]=F.define({id:t,name:e,props:t>=f.Escape?[]:[[I.group,t in ge?["Block","BlockContext"]:["Block","LeafBlock"]]]});const M=[];class Ie{constructor(e){this.nodeSet=e,this.content=[],this.nodes=[]}write(e,r,s,n=0){return this.content.push(e,r,s,4+n*4),this}writeElements(e,r=0){for(let s of e)s.writeTo(this,r);return this}finish(e,r){return E.build({buffer:this.content,nodeSet:this.nodeSet,reused:this.nodes,topID:e,length:r})}}class O{constructor(e,r,s,n=M){this.type=e,this.from=r,this.to=s,this.children=n}writeTo(e,r){let s=e.content.length;e.writeElements(this.children,r),e.content.push(this.type,this.from+r,this.to+r,e.content.length+4-s)}toTree(e){return new Ie(e).writeElements(this.children,-this.from).finish(this.type,this.to-this.from)}}class Me{constructor(e,r){this.tree=e,this.from=r}get to(){return this.from+this.tree.length}get type(){return this.tree.type.id}get children(){return M}writeTo(e,r){e.nodes.push(this.tree),e.content.push(e.nodes.length-1,this.from+r,this.to+r,-1)}toTree(){return this.tree}}function m(t,e,r,s){return new O(t,e,r,s)}const He={resolve:"Emphasis",mark:"EmphasisMark"},Pe={resolve:"Emphasis",mark:"EmphasisMark"},P={},le={};class A{constructor(e,r,s,n){this.type=e,this.from=r,this.to=s,this.side=n}}const he="!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~";let R=/[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~\xA1\u2010-\u2027]/;try{R=new RegExp("[\\p{Pc}|\\p{Pd}|\\p{Pe}|\\p{Pf}|\\p{Pi}|\\p{Po}|\\p{Ps}]","u")}catch{}const _={Escape(t,e,r){if(e!=92||r==t.end-1)return-1;let s=t.char(r+1);for(let n=0;n<he.length;n++)if(he.charCodeAt(n)==s)return t.append(m(f.Escape,r,r+2));return-1},Entity(t,e,r){if(e!=38)return-1;let s=/^(?:#\d+|#x[a-f\d]+|\w+);/i.exec(t.slice(r+1,r+31));return s?t.append(m(f.Entity,r,r+1+s[0].length)):-1},InlineCode(t,e,r){if(e!=96||r&&t.char(r-1)==96)return-1;let s=r+1;for(;s<t.end&&t.char(s)==96;)s++;let n=s-r,i=0;for(;s<t.end;s++)if(t.char(s)==96){if(i++,i==n&&t.char(s+1)!=96)return t.append(m(f.InlineCode,r,s+1,[m(f.CodeMark,r,r+n),m(f.CodeMark,s+1-n,s+1)]))}else i=0;return-1},HTMLTag(t,e,r){if(e!=60||r==t.end-1)return-1;let 
s=t.slice(r+1,t.end),n=/^(?:[a-z][-\w+.]+:[^\s>]+|[a-z\d.!#$%&'*+/=?^_`{|}~-]+@[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?(?:\.[a-z\d](?:[a-z\d-]{0,61}[a-z\d])?)*)>/i.exec(s);if(n)return t.append(m(f.URL,r,r+1+n[0].length));let i=/^!--[^>](?:-[^-]|[^-])*?-->/i.exec(s);if(i)return t.append(m(f.Comment,r,r+1+i[0].length));let o=/^\?[^]*?\?>/.exec(s);if(o)return t.append(m(f.ProcessingInstruction,r,r+1+o[0].length));let a=/^(?:![A-Z][^]*?>|!\[CDATA\[[^]*?\]\]>|\/\s*[a-zA-Z][\w-]*\s*>|\s*[a-zA-Z][\w-]*(\s+[a-zA-Z:_][\w-.:]*(?:\s*=\s*(?:[^\s"'=<>`]+|'[^']*'|"[^"]*"))?)*\s*(\/\s*)?>)/.exec(s);return a?t.append(m(f.HTMLTag,r,r+1+a[0].length)):-1},Emphasis(t,e,r){if(e!=95&&e!=42)return-1;let s=r+1;for(;t.char(s)==e;)s++;let n=t.slice(r-1,r),i=t.slice(s,s+1),o=R.test(n),a=R.test(i),l=/\s|^$/.test(n),h=/\s|^$/.test(i),u=!h&&(!a||l||o),p=!l&&(!o||h||a),d=u&&(e==42||!p||o),L=p&&(e==42||!u||a);return t.append(new A(e==95?He:Pe,r,s,(d?1:0)|(L?2:0)))},HardBreak(t,e,r){if(e==92&&t.char(r+1)==10)return t.append(m(f.HardBreak,r,r+2));if(e==32){let s=r+1;for(;t.char(s)==32;)s++;if(t.char(s)==10&&s>=r+2)return t.append(m(f.HardBreak,r,s+1))}return-1},Link(t,e,r){return e==91?t.append(new A(P,r,r+1,1)):-1},Image(t,e,r){return e==33&&t.char(r+1)==91?t.append(new A(le,r,r+2,1)):-1},LinkEnd(t,e,r){if(e!=93)return-1;for(let s=t.parts.length-1;s>=0;s--){let n=t.parts[s];if(n instanceof A&&(n.type==P||n.type==le)){if(!n.side||t.skipSpace(n.to)==r&&!/[(\[]/.test(t.slice(r+1,r+2)))return t.parts[s]=null,-1;let i=t.takeContent(s),o=t.parts[s]=ut(t,i,n.type==P?f.Link:f.Image,n.from,r+1);if(n.type==P)for(let a=0;a<s;a++){let l=t.parts[a];l instanceof A&&l.type==P&&(l.side=0)}return o.to}}return-1}};function ut(t,e,r,s,n){let{text:i}=t,o=t.char(n),a=n;if(e.unshift(m(f.LinkMark,s,s+(r==f.Image?2:1))),e.push(m(f.LinkMark,n-1,n)),o==40){let l=t.skipSpace(n+1),h=ve(i,l-t.offset,t.offset),u;h&&(l=t.skipSpace(h.to),u=Ne(i,l-t.offset,t.offset),u&&(l=t.skipSpace(u.to))),t.char(l)==41&&(e.push(m(f.LinkMark,n,n+1)),a=l+1,h&&e.push(h),u&&e.push(u),e.push(m(f.LinkMark,l,a)))}else if(o==91){let l=ye(i,n-t.offset,t.offset,!1);l&&(e.push(l),a=l.to)}return m(r,s,a,e)}function ve(t,e,r){if(t.charCodeAt(e)==60){for(let n=e+1;n<t.length;n++){let i=t.charCodeAt(n);if(i==62)return m(f.URL,e+r,n+1+r);if(i==60||i==10)return!1}return null}else{let n=0,i=e;for(let o=!1;i<t.length;i++){let a=t.charCodeAt(i);if(C(a))break;if(o)o=!1;else if(a==40)n++;else if(a==41){if(!n)break;n--}else a==92&&(o=!0)}return i>e?m(f.URL,e+r,i+r):i==t.length?null:!1}}function Ne(t,e,r){let s=t.charCodeAt(e);if(s!=39&&s!=34&&s!=40)return!1;let n=s==40?41:s;for(let i=e+1,o=!1;i<t.length;i++){let a=t.charCodeAt(i);if(o)o=!1;else{if(a==n)return m(f.LinkTitle,e+r,i+1+r);a==92&&(o=!0)}}return null}function ye(t,e,r,s){for(let n=!1,i=e+1,o=Math.min(t.length,i+999);i<o;i++){let a=t.charCodeAt(i);if(n)n=!1;else{if(a==93)return s?!1:m(f.LinkLabel,e+r,i+1+r);if(s&&!C(a)&&(s=!1),a==91)return!1;a==92&&(n=!0)}}return null}class dt{constructor(e,r,s){this.parser=e,this.text=r,this.offset=s,this.parts=[]}char(e){return e>=this.end?-1:this.text.charCodeAt(e-this.offset)}get end(){return this.offset+this.text.length}slice(e,r){return this.text.slice(e-this.offset,r-this.offset)}append(e){return this.parts.push(e),e.to}addDelimiter(e,r,s,n,i){return this.append(new A(e,r,s,(n?1:0)|(i?2:0)))}addElement(e){return this.append(e)}resolveMarkers(e){for(let s=e;s<this.parts.length;s++){let n=this.parts[s];if(!(n instanceof A&&n.type.resolve&&n.side&2))continue;let 
i=n.type==He||n.type==Pe,o=n.to-n.from,a,l=s-1;for(;l>=e;l--){let g=this.parts[l];if(g instanceof A&&g.side&1&&g.type==n.type&&!(i&&(n.side&1||g.side&2)&&(g.to-g.from+o)%3==0&&((g.to-g.from)%3||o%3))){a=g;break}}if(!a)continue;let h=n.type.resolve,u=[],p=a.from,d=n.to;if(i){let g=Math.min(2,a.to-a.from,o);p=a.to-g,d=n.from+g,h=g==1?"Emphasis":"StrongEmphasis"}a.type.mark&&u.push(this.elt(a.type.mark,p,a.to));for(let g=l+1;g<s;g++)this.parts[g]instanceof O&&u.push(this.parts[g]),this.parts[g]=null;n.type.mark&&u.push(this.elt(n.type.mark,n.from,d));let L=this.elt(h,p,d,u);this.parts[l]=i&&a.from!=p?new A(a.type,a.from,p,a.side):null,(this.parts[s]=i&&n.to!=d?new A(n.type,d,n.to,n.side):null)?this.parts.splice(s,0,L):this.parts[s]=L}let r=[];for(let s=e;s<this.parts.length;s++){let n=this.parts[s];n instanceof O&&r.push(n)}return r}findOpeningDelimiter(e){for(let r=this.parts.length-1;r>=0;r--){let s=this.parts[r];if(s instanceof A&&s.type==e)return r}return null}takeContent(e){let r=this.resolveMarkers(e);return this.parts.length=e,r}skipSpace(e){return N(this.text,e-this.offset)+this.offset}elt(e,r,s,n){return typeof e=="string"?m(this.parser.getNodeType(e),r,s,n):new Me(e,r)}}function V(t,e){if(!e.length)return t;if(!t.length)return e;let r=t.slice(),s=0;for(let n of e){for(;s<r.length&&r[s].to<n.to;)s++;if(s<r.length&&r[s].from<n.from){let i=r[s];i instanceof O&&(r[s]=new O(i.type,i.from,i.to,V(i.children,[n])))}else r.splice(s++,0,n)}return r}const pt=[f.CodeBlock,f.ListItem,f.OrderedList,f.BulletList];class ct{constructor(e,r){this.fragments=e,this.input=r,this.i=0,this.fragment=null,this.fragmentEnd=-1,this.cursor=null,e.length&&(this.fragment=e[this.i++])}nextFragment(){this.fragment=this.i<this.fragments.length?this.fragments[this.i++]:null,this.cursor=null,this.fragmentEnd=-1}moveTo(e,r){for(;this.fragment&&this.fragment.to<=e;)this.nextFragment();if(!this.fragment||this.fragment.from>(e?e-1:0))return!1;if(this.fragmentEnd<0){let i=this.fragment.to;for(;i>0&&this.input.read(i-1,i)!=`
6
- `;)i--;this.fragmentEnd=i?i-1:0}let s=this.cursor;s||(s=this.cursor=this.fragment.tree.cursor(),s.firstChild());let n=e+this.fragment.offset;for(;s.to<=n;)if(!s.parent())return!1;for(;;){if(s.from>=n)return this.fragment.from<=r;if(!s.childAfter(n))return!1}}matches(e){let r=this.cursor.tree;return r&&r.prop(I.contextHash)==e}takeNodes(e){let r=this.cursor,s=this.fragment.offset,n=this.fragmentEnd-(this.fragment.openEnd?1:0),i=e.absoluteLineStart,o=i,a=e.block.children.length,l=o,h=a;for(;;){if(r.to-s>n){if(r.type.isAnonymous&&r.firstChild())continue;break}if(e.dontInject.add(r.tree),e.addNode(r.tree,r.from-s),r.type.is("Block")&&(pt.indexOf(r.type.id)<0?(o=r.to-s,a=e.block.children.length):(o=l,a=h,l=r.to-s,h=e.block.children.length)),!r.nextSibling())break}for(;e.block.children.length>a;)e.block.children.pop(),e.block.positions.pop();return o-i}}const mt=ce({"Blockquote/...":c.quote,HorizontalRule:c.contentSeparator,"ATXHeading1/... SetextHeading1/...":c.heading1,"ATXHeading2/... SetextHeading2/...":c.heading2,"ATXHeading3/...":c.heading3,"ATXHeading4/...":c.heading4,"ATXHeading5/...":c.heading5,"ATXHeading6/...":c.heading6,"Comment CommentBlock":c.comment,Escape:c.escape,Entity:c.character,"Emphasis/...":c.emphasis,"StrongEmphasis/...":c.strong,"Link/... Image/...":c.link,"OrderedList/... BulletList/...":c.list,"BlockQuote/...":c.quote,"InlineCode CodeText":c.monospace,URL:c.url,"HeaderMark HardBreak QuoteMark ListMark LinkMark EmphasisMark CodeMark":c.processingInstruction,"CodeInfo LinkLabel":c.labelName,LinkTitle:c.string,Paragraph:c.content}),gt=new j(new me(Ee).extend(mt),Object.keys(z).map(t=>z[t]),Object.keys(z).map(t=>at[t]),Object.keys(z),lt,ge,Object.keys(_).map(t=>_[t]),Object.keys(_),[]);function kt(t,e,r){let s=[];for(let n=t.firstChild,i=e;;n=n.nextSibling){let o=n?n.from:r;if(o>i&&s.push({from:i,to:o}),!n)break;i=n.to}return s}function Lt(t){let{codeParser:e,htmlParser:r}=t;return{wrap:Qe((n,i)=>{let o=n.type.id;if(e&&(o==f.CodeBlock||o==f.FencedCode)){let a="";if(o==f.FencedCode){let h=n.node.getChild(f.CodeInfo);h&&(a=i.read(h.from,h.to))}let l=e(a);if(l)return{parser:l,overlay:h=>h.type.id==f.CodeText}}else if(r&&(o==f.HTMLBlock||o==f.HTMLTag))return{parser:r,overlay:kt(n.node,n.from,n.to)};return null})}}const bt={resolve:"Strikethrough",mark:"StrikethroughMark"},St={defineNodes:[{name:"Strikethrough",style:{"Strikethrough/...":c.strikethrough}},{name:"StrikethroughMark",style:c.processingInstruction}],parseInline:[{name:"Strikethrough",parse(t,e,r){if(e!=126||t.char(r+1)!=126||t.char(r+2)==126)return-1;let s=t.slice(r-1,r),n=t.slice(r+2,r+3),i=/\s|^$/.test(s),o=/\s|^$/.test(n),a=R.test(s),l=R.test(n);return t.addDelimiter(bt,r,r+2,!o&&(!l||i||a),!i&&(!a||o||l))},after:"Emphasis"}]};function y(t,e,r=0,s,n=0){let i=0,o=!0,a=-1,l=-1,h=!1,u=()=>{s.push(t.elt("TableCell",n+a,n+l,t.parser.parseInline(e.slice(a,l),n+a)))};for(let p=r;p<e.length;p++){let d=e.charCodeAt(p);d==124&&!h?((!o||a>-1)&&i++,o=!1,s&&(a>-1&&u(),s.push(t.elt("TableDelimiter",p+n,p+n+1))),a=l=-1):(h||d!=32&&d!=9)&&(a<0&&(a=p),l=p+1),h=!h&&d==92}return a>-1&&(i++,s&&u()),i}function fe(t,e){for(let r=e;r<t.length;r++){let s=t.charCodeAt(r);if(s==124)return!0;s==92&&r++}return!1}const Oe=/^\|?(\s*:?-+:?\s*\|)+(\s*:?-+:?\s*)?$/;class ue{constructor(){this.rows=null}nextLine(e,r,s){if(this.rows==null){this.rows=!1;let n;if((r.next==45||r.next==58||r.next==124)&&Oe.test(n=r.text.slice(r.pos))){let 
i=[];y(e,s.content,0,i,s.start)==y(e,n,r.pos)&&(this.rows=[e.elt("TableHeader",s.start,s.start+s.content.length,i),e.elt("TableDelimiter",e.lineStart+r.pos,e.lineStart+r.text.length)])}}else if(this.rows){let n=[];y(e,r.text,r.pos,n,e.lineStart),this.rows.push(e.elt("TableRow",e.lineStart+r.pos,e.lineStart+r.text.length,n))}return!1}finish(e,r){return this.rows?(e.addLeafElement(r,e.elt("Table",r.start,r.start+r.content.length,this.rows)),!0):!1}}const wt={defineNodes:[{name:"Table",block:!0},{name:"TableHeader",style:{"TableHeader/...":c.heading}},"TableRow",{name:"TableCell",style:c.content},{name:"TableDelimiter",style:c.processingInstruction}],parseBlock:[{name:"Table",leaf(t,e){return fe(e.content,0)?new ue:null},endLeaf(t,e,r){if(r.parsers.some(n=>n instanceof ue)||!fe(e.text,e.basePos))return!1;let s=t.scanLine(t.absoluteLineEnd+1).text;return Oe.test(s)&&y(t,e.text,e.basePos)==y(t,s,e.basePos)},before:"SetextHeading"}]};class Ct{nextLine(){return!1}finish(e,r){return e.addLeafElement(r,e.elt("Task",r.start,r.start+r.content.length,[e.elt("TaskMarker",r.start,r.start+3),...e.parser.parseInline(r.content.slice(3),r.start+3)])),!0}}const At={defineNodes:[{name:"Task",block:!0,style:c.list},{name:"TaskMarker",style:c.atom}],parseBlock:[{name:"TaskList",leaf(t,e){return/^\[[ xX]\]/.test(e.content)&&t.parentType().name=="ListItem"?new Ct:null},after:"SetextHeading"}]},xt=[wt,At,St];function Re(t,e,r){return(s,n,i)=>{if(n!=t||s.char(i+1)==t)return-1;let o=[s.elt(r,i,i+1)];for(let a=i+1;a<s.end;a++){let l=s.char(a);if(l==t)return s.addElement(s.elt(e,i,a+1,o.concat(s.elt(r,a,a+1))));if(l==92&&o.push(s.elt("Escape",a,a+++2)),C(l))break}return-1}}const Bt={defineNodes:[{name:"Superscript",style:c.special(c.content)},{name:"SuperscriptMark",style:c.processingInstruction}],parseInline:[{name:"Superscript",parse:Re(94,"Superscript","SuperscriptMark")}]},Et={defineNodes:[{name:"Subscript",style:c.special(c.content)},{name:"SubscriptMark",style:c.processingInstruction}],parseInline:[{name:"Subscript",parse:Re(126,"Subscript","SubscriptMark")}]},It={defineNodes:[{name:"Emoji",style:c.character}],parseInline:[{name:"Emoji",parse(t,e,r){let s;return e!=58||!(s=/^[a-zA-Z_0-9]+:/.exec(t.slice(r+1,t.end)))?-1:t.addElement(t.elt("Emoji",r,r+1+s[0].length))}}]},ze=Ke({block:{open:"<!--",close:"-->"}}),Te=new I,De=gt.configure({props:[Je.add(t=>!t.is("Block")||t.is("Document")||K(t)!=null?void 0:(e,r)=>({from:r.doc.lineAt(e.from).to,to:e.to})),Te.add(K),Ye.add({Document:()=>null}),We.add({Document:ze})]});function K(t){let e=/^(?:ATX|Setext)Heading(\d)$/.exec(t.name);return e?+e[1]:void 0}function Mt(t,e){let r=t;for(;;){let s=r.nextSibling,n;if(!s||(n=K(s.type))!=null&&n<=e)break;r=s}return r.to}const Ht=et.of((t,e,r)=>{for(let s=J(t).resolveInner(r,-1);s&&!(s.from<e);s=s.parent){let n=s.type.prop(Te);if(n==null)continue;let i=Mt(s,n);if(i>r)return{from:r,to:i}}return null});function te(t){return new Ve(ze,t,[Ht],"markdown")}const Pt=te(De),vt=De.configure([xt,Et,Bt,It]),Xe=te(vt);function Nt(t,e){return r=>{if(r&&t){let s=null;if(r=/\S*/.exec(r)[0],typeof t=="function"?s=t(r):s=ne.matchLanguageName(t,r,!0),s instanceof ne)return s.support?s.support.language.parser:tt.getSkippingParser(s.load());if(s)return s.parser}return e?e.parser:null}}class D{constructor(e,r,s,n,i,o,a){this.node=e,this.from=r,this.to=s,this.spaceBefore=n,this.spaceAfter=i,this.type=o,this.item=a}blank(e,r=!0){let s=this.spaceBefore+(this.node.name=="Blockquote"?">":"");if(e!=null){for(;s.length<e;)s+=" ";return s}else{for(let 
n=this.to-this.from-s.length-this.spaceAfter.length;n>0;n--)s+=" ";return s+(r?this.spaceAfter:"")}}marker(e,r){let s=this.node.name=="OrderedList"?String(+je(this.item,e)[2]+r):"";return this.spaceBefore+s+this.type+this.spaceAfter}}function Fe(t,e){let r=[];for(let n=t;n&&n.name!="Document";n=n.parent)(n.name=="ListItem"||n.name=="Blockquote"||n.name=="FencedCode")&&r.push(n);let s=[];for(let n=r.length-1;n>=0;n--){let i=r[n],o,a=e.lineAt(i.from),l=i.from-a.from;if(i.name=="FencedCode")s.push(new D(i,l,l,"","","",null));else if(i.name=="Blockquote"&&(o=/^[ \t]*>( ?)/.exec(a.text.slice(l))))s.push(new D(i,l,l+o[0].length,"",o[1],">",null));else if(i.name=="ListItem"&&i.parent.name=="OrderedList"&&(o=/^([ \t]*)\d+([.)])([ \t]*)/.exec(a.text.slice(l)))){let h=o[3],u=o[0].length;h.length>=4&&(h=h.slice(0,h.length-4),u-=4),s.push(new D(i.parent,l,l+u,o[1],h,o[2],i))}else if(i.name=="ListItem"&&i.parent.name=="BulletList"&&(o=/^([ \t]*)([-+*])([ \t]{1,4}\[[ xX]\])?([ \t]+)/.exec(a.text.slice(l)))){let h=o[4],u=o[0].length;h.length>4&&(h=h.slice(0,h.length-4),u-=4);let p=o[2];o[3]&&(p+=o[3].replace(/[xX]/," ")),s.push(new D(i.parent,l,l+u,o[1],h,p,i))}}return s}function je(t,e){return/^(\s*)(\d+)(?=[.)])/.exec(e.sliceString(t.from,t.from+10))}function U(t,e,r,s=0){for(let n=-1,i=t;;){if(i.name=="ListItem"){let a=je(i,e),l=+a[2];if(n>=0){if(l!=n+1)return;r.push({from:i.from+a[1].length,to:i.from+a[0].length,insert:String(n+2+s)})}n=l}let o=i.nextSibling;if(!o)break;i=o}}const yt=({state:t,dispatch:e})=>{let r=J(t),{doc:s}=t,n=null,i=t.changeByRange(o=>{if(!o.empty||!Xe.isActiveAt(t,o.from))return n={range:o};let a=o.from,l=s.lineAt(a),h=Fe(r.resolveInner(a,-1),s);for(;h.length&&h[h.length-1].from>a-l.from;)h.pop();if(!h.length)return n={range:o};let u=h[h.length-1];if(u.to-u.spaceAfter.length>a-l.from)return n={range:o};let p=a>=u.to-u.spaceAfter.length&&!/\S/.test(l.text.slice(u.to));if(u.item&&p)if(u.node.firstChild.to>=a||l.from>0&&!/[^\s>]/.test(s.lineAt(l.from-1).text)){let k=h.length>1?h[h.length-2]:null,b,w="";k&&k.item?(b=l.from+k.from,w=k.marker(s,1)):b=l.from+(k?k.to:0);let x=[{from:b,to:a,insert:w}];return u.node.name=="OrderedList"&&U(u.item,s,x,-2),k&&k.node.name=="OrderedList"&&U(k.item,s,x),{range:v.cursor(b+w.length),changes:x}}else{let k="";for(let b=0,w=h.length-2;b<=w;b++)k+=h[b].blank(b<w?h[b+1].from-k.length:null,b<w);return k+=t.lineBreak,{range:v.cursor(a+k.length),changes:{from:l.from,insert:k}}}if(u.node.name=="Blockquote"&&p&&l.from){let k=s.lineAt(l.from-1),b=/>\s*$/.exec(k.text);if(b&&b.index==u.from){let w=t.changes([{from:k.from+b.index,to:k.to},{from:l.from+u.from,to:l.to}]);return{range:o.map(w),changes:w}}}let d=[];u.node.name=="OrderedList"&&U(u.item,s,d);let L=u.item&&u.item.from<l.from,S="";if(!L||/^[\s\d.)\-+*>]*/.exec(l.text)[0].length>=u.to)for(let k=0,b=h.length-1;k<=b;k++)S+=k==b&&!L?h[k].marker(s,1):h[k].blank(k<b?h[k+1].from-S.length:null);let g=a;for(;g>l.from&&/\s/.test(l.text.charAt(g-l.from-1));)g--;return S=t.lineBreak+S,d.push({from:g,to:a,insert:S}),{range:v.cursor(g+S.length),changes:d}});return n?!1:(e(t.update(i,{scrollIntoView:!0,userEvent:"input"})),!0)};function de(t){return t.name=="QuoteMark"||t.name=="ListMark"}function Ot(t,e){let r=t.resolveInner(e,-1),s=e;de(r)&&(s=r.from,r=r.parent);for(let n;n=r.childBefore(s);)if(de(n))s=n.from;else if(n.name=="OrderedList"||n.name=="BulletList")r=n.lastChild,s=r.to;else break;return r}const Rt=({state:t,dispatch:e})=>{let r=J(t),s=null,n=t.changeByRange(i=>{let 
o=i.from,{doc:a}=t;if(i.empty&&Xe.isActiveAt(t,i.from)){let l=a.lineAt(o),h=Fe(Ot(r,o),a);if(h.length){let u=h[h.length-1],p=u.to-u.spaceAfter.length+(u.spaceAfter?1:0);if(o-l.from>p&&!/\S/.test(l.text.slice(p,o-l.from)))return{range:v.cursor(l.from+p),changes:{from:l.from+p,to:o}};if(o-l.from==p){let d=l.from+u.from;if(u.item&&u.node.from<u.item.from&&/\S/.test(l.text.slice(u.from,u.to)))return{range:i,changes:{from:d,to:l.from+u.to,insert:u.blank(u.to-u.from)}};if(d<o)return{range:v.cursor(d),changes:{from:d,to:o}}}}}return s={range:i}});return s?!1:(e(t.update(n,{scrollIntoView:!0,userEvent:"delete"})),!0)},zt=[{key:"Enter",run:yt},{key:"Backspace",run:Rt}],pe=rt({matchClosingTags:!1});function Vt(t={}){let{codeLanguages:e,defaultCodeLanguage:r,addKeymap:s=!0,base:{parser:n}=Pt}=t;if(!(n instanceof j))throw new RangeError("Base parser provided to `markdown` should be a Markdown parser");let i=t.extensions?[t.extensions]:[],o=[pe.support],a;r instanceof se?(o.push(r.support),a=r.language):r&&(a=r);let l=e||a?Nt(e,a):void 0;return i.push(Lt({codeParser:l,htmlParser:pe.language.parser})),s&&o.push(Ze.high(Ge.of(zt))),new se(te(n.configure(i)),o)}export{Pt as commonmarkLanguage,Rt as deleteMarkupBackward,yt as insertNewlineContinueMarkup,Vt as markdown,zt as markdownKeymap,Xe as markdownLanguage};
7
- //# sourceMappingURL=index-7648fc8d.js.map
 
spaces/Dana19/animal_classifier/app.py DELETED
@@ -1,29 +0,0 @@
1
- import gradio as gr
2
- import fastai
3
- from fastai.vision.all import *
4
-
5
- learn = load_learner("export.pkl")
6
-
7
- labels = learn.dls.vocab
8
-
9
- def predict(img):
10
- img = PILImage.create(img)
11
- pred, pred_idx, probs = learn.predict(img)
12
- return {labels[i]: float(probs[i]) for i in range(len(labels))}
13
-
14
- title = 'Animal Classifier'
15
- description = 'Animal classifier trained on data downloaded from the internet. Created as a demo for Gradio and HuggingFace Spaces.'
16
-
17
- examples = ['c.jpeg', 'e.jpeg', 'e2.jpeg', 'e3.jpeg', 'g.jpeg', 'ec.jpeg']
18
- interpretation = 'default'
19
- enable_queue = True
20
-
21
- gr.Interface(fn = predict,
22
- inputs = gr.inputs.Image(shape=(224,224)),
23
- outputs = gr.outputs.Label(num_top_classes = 4),
24
- title = title,
25
- description = description,
26
- examples = examples,
27
- interpretation = interpretation,
28
- enable_queue = enable_queue
29
- ).launch(share = False)
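
Note: the deleted app targets the legacy Gradio component API (gr.inputs.Image, gr.outputs.Label, the enable_queue flag), which later Gradio releases removed. A minimal sketch of the same interface against the current top-level components, assuming Gradio 4.x and the same exported learner:

import gradio as gr
from fastai.vision.all import PILImage, load_learner

learn = load_learner("export.pkl")  # same fastai export as the deleted app
labels = learn.dls.vocab

def predict(img):
    # fastai returns (decoded label, class index, per-class probabilities)
    pred, pred_idx, probs = learn.predict(PILImage.create(img))
    return {labels[i]: float(probs[i]) for i in range(len(labels))}

gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),          # replaces gr.inputs.Image(shape=...)
    outputs=gr.Label(num_top_classes=4),  # replaces gr.outputs.Label(...)
    title="Animal Classifier",
).launch()  # request queuing is enabled by default in recent Gradio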
 
spaces/DeathRoad/PornagraphyIsGreat/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: PornagraphyIsGreat
3
- emoji: 🐠
4
- colorFrom: green
5
- colorTo: indigo
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/prroi_pool/src/prroi_pooling_gpu.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * File : prroi_pooling_gpu.h
3
- * Author : Jiayuan Mao, Tete Xiao
4
5
- * Date : 07/13/2018
6
- *
7
- * Distributed under terms of the MIT license.
8
- * Copyright (c) 2017 Megvii Technology Limited.
9
- */
10
-
11
- int prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);
12
-
13
- int prroi_pooling_backward_cuda(
14
- THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
15
- int pooled_height, int pooled_width, float spatial_scale
16
- );
17
-
18
- int prroi_pooling_coor_backward_cuda(
19
- THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,
20
- int pooled_height, int pooled_width, float spatial_scale
21
- );
22
-
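
For context, these declarations are the CUDA entry points of Precise RoI Pooling, which averages the bilinearly interpolated feature map over each output bin instead of max-pooling a few sampled points. A rough NumPy sketch of what the forward pass computes, approximating the kernel's closed-form integral with a dense midpoint grid (the function and argument names below are illustrative, not taken from this repository):

import numpy as np

def bilinear(feat, y, x):
    # Bilinear interpolation with coordinates clamped to the feature map.
    H, W = feat.shape
    y = min(max(y, 0.0), H - 1.0)
    x = min(max(x, 0.0), W - 1.0)
    y0, x0 = int(y), int(x)
    y1, x1 = min(y0 + 1, H - 1), min(x0 + 1, W - 1)
    dy, dx = y - y0, x - x0
    return ((1 - dy) * (1 - dx) * feat[y0, x0] + (1 - dy) * dx * feat[y0, x1]
            + dy * (1 - dx) * feat[y1, x0] + dy * dx * feat[y1, x1])

def prroi_pool_forward(feat, roi, pooled_height, pooled_width,
                       spatial_scale, samples=8):
    # feat is a single channel; roi = (x1, y1, x2, y2) in image coordinates.
    x1, y1, x2, y2 = (c * spatial_scale for c in roi)
    out = np.zeros((pooled_height, pooled_width))
    for i in range(pooled_height):
        for j in range(pooled_width):
            # Midpoint samples inside bin (i, j); the CUDA kernel instead
            # evaluates this integral exactly in closed form.
            ys = y1 + (y2 - y1) * (i + (np.arange(samples) + 0.5) / samples) / pooled_height
            xs = x1 + (x2 - x1) * (j + (np.arange(samples) + 0.5) / samples) / pooled_width
            out[i, j] = np.mean([bilinear(feat, y, x) for y in ys for x in xs])
    return out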
 
spaces/DrGabrielLopez/fractal-generator/fractal_generator.py DELETED
@@ -1,82 +0,0 @@
1
- from enum import Enum
2
-
3
- import numpy as np
4
- import plotly.express as px
5
-
6
-
7
- class FractalType(Enum):
8
- Julia = 1
9
- Mandelbrot = 2
10
-
11
-
12
- class FractalGenerator:
13
- """Creates a single fractal object and either returns it as as a numpy array, plot it or persists it as an pgn
14
- image. The output of this class is used by FractalTrainingValidationSet to generate training/val sets
15
- Args:
16
- complex_function -- complex function to make a Julia fractal
17
- n -- fractal size will be n*n
18
- xlim, ylim -- tuples with the plotting region on the complex plane
19
- thr -- once a function grows larger than this number it is considered divergent to infinity
20
- max_iter -- number of compositions of the complex function with itself
21
- type_ -- fractal type
22
- fractal -- numpy array with the fractal
23
- """
24
-
25
- def __init__(self, n=256, xlim=(-2, 2), ylim=(-2, 2), thr=2, max_iter=10):
26
- self.type_ = None
27
- self.fractal = None
28
- self.n = n
29
- self.xlim = xlim
30
- self.ylim = ylim
31
- self.thr = thr
32
- self.max_iter = max_iter
33
-
34
- def create_julia(self, complex_function=lambda z: np.sin(z ** 4 + 1.41)):
35
- """Creates a fractal of the Julia family, the fractal is stored inside self.fractal"""
36
- fractal = np.zeros((self.n, self.n), dtype="complex")
37
- x_space = np.linspace(self.xlim[0], self.xlim[1], self.n)
38
- y_space = np.linspace(self.ylim[0], self.ylim[1], self.n)
39
- for ix, x in enumerate(x_space):
40
- for iy, y in enumerate(y_space):
41
- for i in range(self.max_iter):
42
- if i == 0:
43
- z = complex(x, y)
44
- z = complex_function(z)
45
- if np.abs(z) >= self.thr:
46
- z = self.thr
47
- break
48
- fractal[ix, iy] = z
49
- self.fractal = np.abs(fractal)
50
- self.type_ = FractalType.Julia
51
- return self
52
-
53
- def create_mandelbrot(self):
54
- """Creates a fractal of the Mandelbrot family, the fractal is stored inside self.fractal"""
55
- fractal = np.zeros((self.n, self.n), dtype="complex")
56
- x_space = np.linspace(self.xlim[0], self.xlim[1], self.n)
57
- y_space = np.linspace(self.ylim[0], self.ylim[1], self.n)
58
- for ix, x in enumerate(x_space):
59
- for iy, y in enumerate(y_space):
60
- for i in range(self.max_iter):
61
- if i == 0:
62
- z = 0
63
- z = z ** 2 + complex(x, y)
64
- if np.abs(z) >= self.thr:
65
- z = self.thr
66
- break
67
- fractal[ix, iy] = z
68
- self.fractal = np.abs(fractal.transpose())
69
- self.type_ = FractalType.Mandelbrot
70
- return self
71
-
72
- def plot(self, **kwargs):
73
- if self.fractal is None:
74
- print("Nothing to plot. Generate a fractal first.")
75
- return None
76
- random_colormap = np.random.choice(
77
- ["orrd", "inferno_r", "hot_r", "jet_r", "purples", "agsunset_r"]
78
- )
79
- fig = px.imshow(
80
- img=self.fractal, color_continuous_scale=random_colormap, **kwargs
81
- )
82
- return fig
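
A minimal usage sketch for the class above (both create_* methods return self, so calls chain; the Julia constant below is an arbitrary classic choice, not from this repository):

import numpy as np

gen = FractalGenerator(n=512, xlim=(-1.5, 1.5), ylim=(-1.5, 1.5), max_iter=20)
fig = gen.create_julia(complex_function=lambda z: z ** 2 - 0.4 + 0.6j).plot()
fig.show()

# The Mandelbrot variant needs no complex_function: each grid point (x, y)
# supplies its own constant c = x + iy in the iteration z -> z**2 + c.
FractalGenerator(xlim=(-2.5, 1.0), ylim=(-1.5, 1.5)).create_mandelbrot().plot().show()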