parquet-converter committed
Commit 12904ea · 1 Parent(s): 8f8e3d3

Update parquet files (step 12 of 296)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/pretrained_networks.py +0 -181
  2. spaces/52Hz/SRMNet_thesis/model_arch/__init__.py +0 -2
  3. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_norm.py +0 -797
  4. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py +0 -17
  5. spaces/Abhilashvj/planogram-compliance/utils/aws/resume.py +0 -42
  6. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.d.ts +0 -5
  7. spaces/Aki004/herta-so-vits/inference/__init__.py +0 -0
  8. spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py +0 -509
  9. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/llff_dataset.py +0 -292
  10. spaces/Anandbheesetti/MNIST_digit_predictor/app.py +0 -105
  11. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py +0 -59
  12. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +0 -129
  13. spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py +0 -28
  14. spaces/Andy1621/uniformer_image_detection/tools/model_converters/regnet2mmdet.py +0 -89
  15. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_hf.py +0 -213
  16. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +0 -9
  17. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/builder.py +0 -8
  18. spaces/ArkanDash/rvc-models-new/lib/infer_pack/models.py +0 -1142
  19. spaces/Arnx/MusicGenXvAKN/audiocraft/utils/utils.py +0 -234
  20. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py +0 -170
  21. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_py.py +0 -407
  22. spaces/Aveygo/AstroSleuth/file_queue.py +0 -109
  23. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/format_control.py +0 -80
  24. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py +0 -57
  25. spaces/Binguii/Ballen/Dockerfile +0 -20
  26. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/train_engine.py +0 -311
  27. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/par.h +0 -62
  28. spaces/CVPR/WALT/mmdet/core/bbox/iou_calculators/__init__.py +0 -4
  29. spaces/CVPR/WALT/mmdet/models/dense_heads/pisa_ssd_head.py +0 -139
  30. spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/functional_pil.py +0 -352
  31. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h +0 -64
  32. spaces/ChevyWithAI/rvc-aicover/infer_pack/commons.py +0 -166
  33. spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/core.py +0 -35
  34. spaces/CofAI/chat.b4/g4f/Provider/Providers/Easychat.py +0 -55
  35. spaces/CofAI/chat/g4f/Provider/Providers/Yqcloud.py +0 -39
  36. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/dec.py +0 -78
  37. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/backbone.py +0 -119
  38. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/WmfImagePlugin.py +0 -178
  39. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Info-5611e10f.js +0 -2
  40. spaces/Daniton/MagicPrompt-Stable-Diffusion/style.css +0 -84
  41. spaces/DataScienceEngineering/1-SimPhysics-HTML5/style.css +0 -28
  42. spaces/DragGan/DragGan/gui_utils/__init__.py +0 -9
  43. spaces/DragGan/DragGan/stylegan_human/dnnlib/util.py +0 -479
  44. spaces/ECCV2022/bytetrack/tutorials/centertrack/tracker.py +0 -198
  45. spaces/EPFL-VILAB/MultiMAE/utils/taskonomy/task_configs.py +0 -105
  46. spaces/Felix123456/bingo/src/components/header.tsx +0 -12
  47. spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Ezcht.py +0 -35
  48. spaces/FinanceInc/Financial_Analyst_AI/app.py +0 -52
  49. spaces/Goodsea/deprem-ocr-paddleocr/app.py +0 -161
  50. spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/discriminator.py +0 -20
spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/pretrained_networks.py DELETED
@@ -1,181 +0,0 @@
- from collections import namedtuple
- import torch
- from torchvision import models as tv
- from IPython import embed
-
- class squeezenet(torch.nn.Module):
-     def __init__(self, requires_grad=False, pretrained=True):
-         super(squeezenet, self).__init__()
-         pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
-         self.slice1 = torch.nn.Sequential()
-         self.slice2 = torch.nn.Sequential()
-         self.slice3 = torch.nn.Sequential()
-         self.slice4 = torch.nn.Sequential()
-         self.slice5 = torch.nn.Sequential()
-         self.slice6 = torch.nn.Sequential()
-         self.slice7 = torch.nn.Sequential()
-         self.N_slices = 7
-         for x in range(2):
-             self.slice1.add_module(str(x), pretrained_features[x])
-         for x in range(2,5):
-             self.slice2.add_module(str(x), pretrained_features[x])
-         for x in range(5, 8):
-             self.slice3.add_module(str(x), pretrained_features[x])
-         for x in range(8, 10):
-             self.slice4.add_module(str(x), pretrained_features[x])
-         for x in range(10, 11):
-             self.slice5.add_module(str(x), pretrained_features[x])
-         for x in range(11, 12):
-             self.slice6.add_module(str(x), pretrained_features[x])
-         for x in range(12, 13):
-             self.slice7.add_module(str(x), pretrained_features[x])
-         if not requires_grad:
-             for param in self.parameters():
-                 param.requires_grad = False
-
-     def forward(self, X):
-         h = self.slice1(X)
-         h_relu1 = h
-         h = self.slice2(h)
-         h_relu2 = h
-         h = self.slice3(h)
-         h_relu3 = h
-         h = self.slice4(h)
-         h_relu4 = h
-         h = self.slice5(h)
-         h_relu5 = h
-         h = self.slice6(h)
-         h_relu6 = h
-         h = self.slice7(h)
-         h_relu7 = h
-         vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])
-         out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7)
-
-         return out
-
-
- class alexnet(torch.nn.Module):
-     def __init__(self, requires_grad=False, pretrained=True):
-         super(alexnet, self).__init__()
-         alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
-         self.slice1 = torch.nn.Sequential()
-         self.slice2 = torch.nn.Sequential()
-         self.slice3 = torch.nn.Sequential()
-         self.slice4 = torch.nn.Sequential()
-         self.slice5 = torch.nn.Sequential()
-         self.N_slices = 5
-         for x in range(2):
-             self.slice1.add_module(str(x), alexnet_pretrained_features[x])
-         for x in range(2, 5):
-             self.slice2.add_module(str(x), alexnet_pretrained_features[x])
-         for x in range(5, 8):
-             self.slice3.add_module(str(x), alexnet_pretrained_features[x])
-         for x in range(8, 10):
-             self.slice4.add_module(str(x), alexnet_pretrained_features[x])
-         for x in range(10, 12):
-             self.slice5.add_module(str(x), alexnet_pretrained_features[x])
-         if not requires_grad:
-             for param in self.parameters():
-                 param.requires_grad = False
-
-     def forward(self, X):
-         h = self.slice1(X)
-         h_relu1 = h
-         h = self.slice2(h)
-         h_relu2 = h
-         h = self.slice3(h)
-         h_relu3 = h
-         h = self.slice4(h)
-         h_relu4 = h
-         h = self.slice5(h)
-         h_relu5 = h
-         alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
-         out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
-
-         return out
-
- class vgg16(torch.nn.Module):
-     def __init__(self, requires_grad=False, pretrained=True):
-         super(vgg16, self).__init__()
-         vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
-         self.slice1 = torch.nn.Sequential()
-         self.slice2 = torch.nn.Sequential()
-         self.slice3 = torch.nn.Sequential()
-         self.slice4 = torch.nn.Sequential()
-         self.slice5 = torch.nn.Sequential()
-         self.N_slices = 5
-         for x in range(4):
-             self.slice1.add_module(str(x), vgg_pretrained_features[x])
-         for x in range(4, 9):
-             self.slice2.add_module(str(x), vgg_pretrained_features[x])
-         for x in range(9, 16):
-             self.slice3.add_module(str(x), vgg_pretrained_features[x])
-         for x in range(16, 23):
-             self.slice4.add_module(str(x), vgg_pretrained_features[x])
-         for x in range(23, 30):
-             self.slice5.add_module(str(x), vgg_pretrained_features[x])
-         if not requires_grad:
-             for param in self.parameters():
-                 param.requires_grad = False
-
-     def forward(self, X):
-         h = self.slice1(X)
-         h_relu1_2 = h
-         h = self.slice2(h)
-         h_relu2_2 = h
-         h = self.slice3(h)
-         h_relu3_3 = h
-         h = self.slice4(h)
-         h_relu4_3 = h
-         h = self.slice5(h)
-         h_relu5_3 = h
-         vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
-         out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
-
-         return out
-
-
-
- class resnet(torch.nn.Module):
-     def __init__(self, requires_grad=False, pretrained=True, num=18):
-         super(resnet, self).__init__()
-         if(num==18):
-             self.net = tv.resnet18(pretrained=pretrained)
-         elif(num==34):
-             self.net = tv.resnet34(pretrained=pretrained)
-         elif(num==50):
-             self.net = tv.resnet50(pretrained=pretrained)
-         elif(num==101):
-             self.net = tv.resnet101(pretrained=pretrained)
-         elif(num==152):
-             self.net = tv.resnet152(pretrained=pretrained)
-         self.N_slices = 5
-
-         self.conv1 = self.net.conv1
-         self.bn1 = self.net.bn1
-         self.relu = self.net.relu
-         self.maxpool = self.net.maxpool
-         self.layer1 = self.net.layer1
-         self.layer2 = self.net.layer2
-         self.layer3 = self.net.layer3
-         self.layer4 = self.net.layer4
-
-     def forward(self, X):
-         h = self.conv1(X)
-         h = self.bn1(h)
-         h = self.relu(h)
-         h_relu1 = h
-         h = self.maxpool(h)
-         h = self.layer1(h)
-         h_conv2 = h
-         h = self.layer2(h)
-         h_conv3 = h
-         h = self.layer3(h)
-         h_conv4 = h
-         h = self.layer4(h)
-         h_conv5 = h
-
-         outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5'])
-         out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
-
-         return out
spaces/52Hz/SRMNet_thesis/model_arch/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .SRMNet import *
- from .SRMNet_SWFF import *
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_norm.py DELETED
@@ -1,797 +0,0 @@
- # coding=utf-8
- # Authors:
- #   2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git)
- #   2019.9 Jiayu DU
- #
- # requirements:
- #   - python 3.X
- # notes: python 2.X WILL fail or produce misleading results
-
- import sys, os, argparse, codecs, string, re
-
- # ================================================================================ #
- #                                   basic constant
- # ================================================================================ #
- CHINESE_DIGIS = u'零一二三四五六七八九'
- BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
- BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
- SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
- SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
- LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
- LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
- SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
- SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
-
- ZERO_ALT = u'〇'
- ONE_ALT = u'幺'
- TWO_ALTS = [u'两', u'兩']
-
- POSITIVE = [u'正', u'正']
- NEGATIVE = [u'负', u'負']
- POINT = [u'点', u'點']
- # PLUS = [u'加', u'加']
- # SIL = [u'杠', u'槓']
-
- # types of Chinese numbering systems
- NUMBERING_TYPES = ['low', 'mid', 'high']
-
- CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \
-                  '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)'
- CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)'
- COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \
-                   '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \
-                   '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \
-                   '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \
-                   '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \
-                   '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)'
-
- # punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git)
- CHINESE_PUNC_STOP = '!?。。'
- CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏'
- CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP
-
-
- # ================================================================================ #
- #                                     basic class
- # ================================================================================ #
- class ChineseChar(object):
-     """
-     Chinese character.
-     Each character has a simplified and a traditional form,
-     e.g. simplified = '负', traditional = '負'.
-     It can be converted to either form.
-     """
-
-     def __init__(self, simplified, traditional):
-         self.simplified = simplified
-         self.traditional = traditional
-         # self.__repr__ = self.__str__
-
-     def __str__(self):
-         return self.simplified or self.traditional or None
-
-     def __repr__(self):
-         return self.__str__()
-
-
- class ChineseNumberUnit(ChineseChar):
-     """
-     Chinese number/unit character.
-     Besides the simplified/traditional forms, each character has an extra formal (banker's) form,
-     e.g. '陆' and '陸'.
-     """
-
-     def __init__(self, power, simplified, traditional, big_s, big_t):
-         super(ChineseNumberUnit, self).__init__(simplified, traditional)
-         self.power = power
-         self.big_s = big_s
-         self.big_t = big_t
-
-     def __str__(self):
-         return '10^{}'.format(self.power)
-
-     @classmethod
-     def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
-
-         if small_unit:
-             return ChineseNumberUnit(power=index + 1,
-                                      simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
-         elif numbering_type == NUMBERING_TYPES[0]:
-             return ChineseNumberUnit(power=index + 8,
-                                      simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
-         elif numbering_type == NUMBERING_TYPES[1]:
-             return ChineseNumberUnit(power=(index + 2) * 4,
-                                      simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
-         elif numbering_type == NUMBERING_TYPES[2]:
-             return ChineseNumberUnit(power=pow(2, index + 3),
-                                      simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
-         else:
-             raise ValueError(
-                 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
-
-
- class ChineseNumberDigit(ChineseChar):
-     """
-     Chinese digit character.
-     """
-
-     def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
-         super(ChineseNumberDigit, self).__init__(simplified, traditional)
-         self.value = value
-         self.big_s = big_s
-         self.big_t = big_t
-         self.alt_s = alt_s
-         self.alt_t = alt_t
-
-     def __str__(self):
-         return str(self.value)
-
-     @classmethod
-     def create(cls, i, v):
-         return ChineseNumberDigit(i, v[0], v[1], v[2], v[3])
-
-
- class ChineseMath(ChineseChar):
-     """
-     Chinese math symbol character.
-     """
-
-     def __init__(self, simplified, traditional, symbol, expression=None):
-         super(ChineseMath, self).__init__(simplified, traditional)
-         self.symbol = symbol
-         self.expression = expression
-         self.big_s = simplified
-         self.big_t = traditional
-
-
- CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath
-
-
- class NumberSystem(object):
-     """
-     Chinese numbering system.
-     """
-     pass
-
-
- class MathSymbol(object):
-     """
-     Math symbols (traditional/simplified) used in the Chinese numbering system, e.g.
-     positive = ['正', '正']
-     negative = ['负', '負']
-     point = ['点', '點']
-     """
-
-     def __init__(self, positive, negative, point):
-         self.positive = positive
-         self.negative = negative
-         self.point = point
-
-     def __iter__(self):
-         for v in self.__dict__.values():
-             yield v
-
-
- # class OtherSymbol(object):
- #     """
- #     Other symbols
- #     """
- #
- #     def __init__(self, sil):
- #         self.sil = sil
- #
- #     def __iter__(self):
- #         for v in self.__dict__.values():
- #             yield v
-
-
- # ================================================================================ #
- #                                     basic utils
- # ================================================================================ #
- def create_system(numbering_type=NUMBERING_TYPES[1]):
-     """
-     Create the numbering system for the given type; default is 'mid'.
-     NUMBERING_TYPES = ['low', 'mid', 'high']: types of Chinese numbering systems.
-     low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc.
-     mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
-     high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
-     Returns the corresponding numbering system.
-     """
-
-     # chinese number units of '亿' and larger
-     all_larger_units = zip(
-         LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
-     larger_units = [CNU.create(i, v, numbering_type, False)
-                     for i, v in enumerate(all_larger_units)]
-     # chinese number units of '十, 百, 千, 万'
-     all_smaller_units = zip(
-         SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
-     smaller_units = [CNU.create(i, v, small_unit=True)
-                      for i, v in enumerate(all_smaller_units)]
-     # digits
-     chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
-                         BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
-     digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
-     digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
-     digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT
-     digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
-
-     # symbols
-     positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
-     negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
-     point_cn = CM(POINT[0], POINT[1], '.', lambda x,
-                   y: float(str(x) + '.' + str(y)))
-     # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y)))
-     system = NumberSystem()
-     system.units = smaller_units + larger_units
-     system.digits = digits
-     system.math = MathSymbol(positive_cn, negative_cn, point_cn)
-     # system.symbols = OtherSymbol(sil_cn)
-     return system
-
-
- def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
-     def get_symbol(char, system):
-         for u in system.units:
-             if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
-                 return u
-         for d in system.digits:
-             if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
-                 return d
-         for m in system.math:
-             if char in [m.traditional, m.simplified]:
-                 return m
-
-     def string2symbols(chinese_string, system):
-         int_string, dec_string = chinese_string, ''
-         for p in [system.math.point.simplified, system.math.point.traditional]:
-             if p in chinese_string:
-                 int_string, dec_string = chinese_string.split(p)
-                 break
-         return [get_symbol(c, system) for c in int_string], \
-                [get_symbol(c, system) for c in dec_string]
-
-     def correct_symbols(integer_symbols, system):
-         """
-         一百八 to 一百八十
-         一亿一千三百万 to 一亿 一千万 三百万
-         """
-
-         if integer_symbols and isinstance(integer_symbols[0], CNU):
-             if integer_symbols[0].power == 1:
-                 integer_symbols = [system.digits[1]] + integer_symbols
-
-         if len(integer_symbols) > 1:
-             if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
-                 integer_symbols.append(
-                     CNU(integer_symbols[-2].power - 1, None, None, None, None))
-
-         result = []
-         unit_count = 0
-         for s in integer_symbols:
-             if isinstance(s, CND):
-                 result.append(s)
-                 unit_count = 0
-             elif isinstance(s, CNU):
-                 current_unit = CNU(s.power, None, None, None, None)
-                 unit_count += 1
-
-                 if unit_count == 1:
-                     result.append(current_unit)
-                 elif unit_count > 1:
-                     for i in range(len(result)):
-                         if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
-                             result[-i - 1] = CNU(result[-i - 1].power +
-                                                  current_unit.power, None, None, None, None)
-         return result
-
-     def compute_value(integer_symbols):
-         """
-         Compute the value.
-         When current unit is larger than previous unit, current unit * all previous units will be used as all previous units.
-         e.g. '两千万' = 2000 * 10000 not 2000 + 10000
-         """
-         value = [0]
-         last_power = 0
-         for s in integer_symbols:
-             if isinstance(s, CND):
-                 value[-1] = s.value
-             elif isinstance(s, CNU):
-                 value[-1] *= pow(10, s.power)
-                 if s.power > last_power:
-                     value[:-1] = list(map(lambda v: v *
-                                           pow(10, s.power), value[:-1]))
-                 last_power = s.power
-                 value.append(0)
-         return sum(value)
-
-     system = create_system(numbering_type)
-     int_part, dec_part = string2symbols(chinese_string, system)
-     int_part = correct_symbols(int_part, system)
-     int_str = str(compute_value(int_part))
-     dec_str = ''.join([str(d.value) for d in dec_part])
-     if dec_part:
-         return '{0}.{1}'.format(int_str, dec_str)
-     else:
-         return int_str
-
-
- def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False,
-             traditional=False, alt_zero=False, alt_one=False, alt_two=True,
-             use_zeros=True, use_units=True):
-     def get_value(value_string, use_zeros=True):
-
-         striped_string = value_string.lstrip('0')
-
-         # record nothing if all zeros
-         if not striped_string:
-             return []
-
-         # record one digits
-         elif len(striped_string) == 1:
-             if use_zeros and len(value_string) != len(striped_string):
-                 return [system.digits[0], system.digits[int(striped_string)]]
-             else:
-                 return [system.digits[int(striped_string)]]
-
-         # recursively record multiple digits
-         else:
-             result_unit = next(u for u in reversed(
-                 system.units) if u.power < len(striped_string))
-             result_string = value_string[:-result_unit.power]
-             return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])
-
-     system = create_system(numbering_type)
-
-     int_dec = number_string.split('.')
-     if len(int_dec) == 1:
-         int_string = int_dec[0]
-         dec_string = ""
-     elif len(int_dec) == 2:
-         int_string = int_dec[0]
-         dec_string = int_dec[1]
-     else:
-         raise ValueError(
-             "invalid input num string with more than one dot: {}".format(number_string))
-
-     if use_units and len(int_string) > 1:
-         result_symbols = get_value(int_string)
-     else:
-         result_symbols = [system.digits[int(c)] for c in int_string]
-     dec_symbols = [system.digits[int(c)] for c in dec_string]
-     if dec_string:
-         result_symbols += [system.math.point] + dec_symbols
-
-     if alt_two:
-         liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
-                     system.digits[2].big_s, system.digits[2].big_t)
-         for i, v in enumerate(result_symbols):
-             if isinstance(v, CND) and v.value == 2:
-                 next_symbol = result_symbols[i +
-                                              1] if i < len(result_symbols) - 1 else None
-                 previous_symbol = result_symbols[i - 1] if i > 0 else None
-                 if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
-                     if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
-                         result_symbols[i] = liang
-
-     # if big is True, '两' will not be used and `alt_two` has no impact on output
-     if big:
-         attr_name = 'big_'
-         if traditional:
-             attr_name += 't'
-         else:
-             attr_name += 's'
-     else:
-         if traditional:
-             attr_name = 'traditional'
-         else:
-             attr_name = 'simplified'
-
-     result = ''.join([getattr(s, attr_name) for s in result_symbols])
-
-     # if not use_zeros:
-     #     result = result.strip(getattr(system.digits[0], attr_name))
-
-     if alt_zero:
-         result = result.replace(
-             getattr(system.digits[0], attr_name), system.digits[0].alt_s)
-
-     if alt_one:
-         result = result.replace(
-             getattr(system.digits[1], attr_name), system.digits[1].alt_s)
-
-     for i, p in enumerate(POINT):
-         if result.startswith(p):
-             return CHINESE_DIGIS[0] + result
-
-     # ^10, 11, .., 19
-     if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0],
-                                           SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \
-             result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]:
-         result = result[1:]
-
-     return result
-
-
- # ================================================================================ #
- #                            different types of rewriters
- # ================================================================================ #
- class Cardinal:
-     """
-     CARDINAL class
-     """
-
-     def __init__(self, cardinal=None, chntext=None):
-         self.cardinal = cardinal
-         self.chntext = chntext
-
-     def chntext2cardinal(self):
-         return chn2num(self.chntext)
-
-     def cardinal2chntext(self):
-         return num2chn(self.cardinal)
-
-
- class Digit:
-     """
-     DIGIT class
-     """
-
-     def __init__(self, digit=None, chntext=None):
-         self.digit = digit
-         self.chntext = chntext
-
-     # def chntext2digit(self):
-     #     return chn2num(self.chntext)
-
-     def digit2chntext(self):
-         return num2chn(self.digit, alt_two=False, use_units=False)
-
-
- class TelePhone:
-     """
-     TELEPHONE class
-     """
-
-     def __init__(self, telephone=None, raw_chntext=None, chntext=None):
-         self.telephone = telephone
-         self.raw_chntext = raw_chntext
-         self.chntext = chntext
-
-     # def chntext2telephone(self):
-     #     sil_parts = self.raw_chntext.split('<SIL>')
-     #     self.telephone = '-'.join([
-     #         str(chn2num(p)) for p in sil_parts
-     #     ])
-     #     return self.telephone
-
-     def telephone2chntext(self, fixed=False):
-
-         if fixed:
-             sil_parts = self.telephone.split('-')
-             self.raw_chntext = '<SIL>'.join([
-                 num2chn(part, alt_two=False, use_units=False) for part in sil_parts
-             ])
-             self.chntext = self.raw_chntext.replace('<SIL>', '')
-         else:
-             sp_parts = self.telephone.strip('+').split()
-             self.raw_chntext = '<SP>'.join([
-                 num2chn(part, alt_two=False, use_units=False) for part in sp_parts
-             ])
-             self.chntext = self.raw_chntext.replace('<SP>', '')
-         return self.chntext
-
-
- class Fraction:
-     """
-     FRACTION class
-     """
-
-     def __init__(self, fraction=None, chntext=None):
-         self.fraction = fraction
-         self.chntext = chntext
-
-     def chntext2fraction(self):
-         denominator, numerator = self.chntext.split('分之')
-         return chn2num(numerator) + '/' + chn2num(denominator)
-
-     def fraction2chntext(self):
-         numerator, denominator = self.fraction.split('/')
-         return num2chn(denominator) + '分之' + num2chn(numerator)
-
-
- class Date:
-     """
-     DATE class
-     """
-
-     def __init__(self, date=None, chntext=None):
-         self.date = date
-         self.chntext = chntext
-
-     # def chntext2date(self):
-     #     chntext = self.chntext
-     #     try:
-     #         year, other = chntext.strip().split('年', maxsplit=1)
-     #         year = Digit(chntext=year).digit2chntext() + '年'
-     #     except ValueError:
-     #         other = chntext
-     #         year = ''
-     #     if other:
-     #         try:
-     #             month, day = other.strip().split('月', maxsplit=1)
-     #             month = Cardinal(chntext=month).chntext2cardinal() + '月'
-     #         except ValueError:
-     #             day = chntext
-     #             month = ''
-     #         if day:
-     #             day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1]
-     #     else:
-     #         month = ''
-     #         day = ''
-     #     date = year + month + day
-     #     self.date = date
-     #     return self.date
-
-     def date2chntext(self):
-         date = self.date
-         try:
-             year, other = date.strip().split('年', 1)
-             year = Digit(digit=year).digit2chntext() + '年'
-         except ValueError:
-             other = date
-             year = ''
-         if other:
-             try:
-                 month, day = other.strip().split('月', 1)
-                 month = Cardinal(cardinal=month).cardinal2chntext() + '月'
-             except ValueError:
-                 day = date
-                 month = ''
-             if day:
-                 day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1]
-         else:
-             month = ''
-             day = ''
-         chntext = year + month + day
-         self.chntext = chntext
-         return self.chntext
-
-
- class Money:
-     """
-     MONEY class
-     """
-
-     def __init__(self, money=None, chntext=None):
-         self.money = money
-         self.chntext = chntext
-
-     # def chntext2money(self):
-     #     return self.money
-
-     def money2chntext(self):
-         money = self.money
-         pattern = re.compile(r'(\d+(\.\d+)?)')
-         matchers = pattern.findall(money)
-         if matchers:
-             for matcher in matchers:
-                 money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext())
-         self.chntext = money
-         return self.chntext
-
-
- class Percentage:
-     """
-     PERCENTAGE class
-     """
-
-     def __init__(self, percentage=None, chntext=None):
-         self.percentage = percentage
-         self.chntext = chntext
-
-     def chntext2percentage(self):
-         return chn2num(self.chntext.strip().strip('百分之')) + '%'
-
-     def percentage2chntext(self):
-         return '百分之' + num2chn(self.percentage.strip().strip('%'))
-
-
- # ================================================================================ #
- #                                  NSW Normalizer
- # ================================================================================ #
- class NSWNormalizer:
-     def __init__(self, raw_text):
-         self.raw_text = '^' + raw_text + '$'
-         self.norm_text = ''
-
-     def _particular(self):
-         text = self.norm_text
-         pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('particular')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1)
-         self.norm_text = text
-         return self.norm_text
-
-     def normalize(self, remove_punc=True):
-         text = self.raw_text
-
-         # normalize dates
-         pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('date')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1)
-
-         # normalize money amounts
-         pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('money')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
-
-         # normalize landline/mobile phone numbers
-         # mobile
-         # http://www.jihaoba.com/news/show/13680
-         # China Mobile: 139, 138, 137, 136, 135, 134, 159, 158, 157, 150, 151, 152, 188, 187, 182, 183, 184, 178, 198
-         # China Unicom: 130, 131, 132, 156, 155, 186, 185, 176
-         # China Telecom: 133, 153, 189, 180, 181, 177
-         pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('telephone')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
-         # landline
-         pattern = re.compile(r"\D((0(10|2[0-9]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('fixed telephone')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
-
-         # normalize fractions
-         pattern = re.compile(r"(\d+/\d+)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('fraction')
-             for matcher in matchers:
-                 text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
-
-         # normalize percentages
-         text = text.replace('％', '%')
-         pattern = re.compile(r"(\d+(\.\d+)?%)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('percentage')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
-
-         # normalize plain numbers followed by quantifiers
-         pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS)
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('cardinal+quantifier')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
-
-         # normalize decimals
-         pattern = re.compile(r"(\d+\.\d+)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('cardinal')
-             for matcher in matchers:
-                 text = text.replace(matcher, Cardinal(cardinal=matcher).cardinal2chntext(), 1)
-
-         # normalize long digit sequences (IDs/serial numbers)
-         pattern = re.compile(r"(\d{4,32})")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('digit')
-             for matcher in matchers:
-                 text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
-
-         # normalize any remaining numbers
-         pattern = re.compile(r"(\d+(\.\d+)?)")
-         matchers = pattern.findall(text)
-         if matchers:
-             # print('cardinal')
-             for matcher in matchers:
-                 text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
-
-         self.norm_text = text
-         self._particular()
-
-         text = self.norm_text.lstrip('^').rstrip('$')
-         if remove_punc:
-             # Punctuations removal
-             old_chars = CHINESE_PUNC_LIST + string.punctuation  # includes all CN and EN punctuations
-             new_chars = ' ' * len(old_chars)
-             del_chars = ''
-             text = text.translate(str.maketrans(old_chars, new_chars, del_chars))
-         return text
-
-
- def nsw_test_case(raw_text):
-     print('I:' + raw_text)
-     print('O:' + NSWNormalizer(raw_text).normalize())
-     print('')
-
-
- def nsw_test():
-     nsw_test_case('固话:0595-23865596或者23880880。')
-     nsw_test_case('手机:+86 19859213959或者15659451527。')
-     nsw_test_case('分数:32477/76391。')
-     nsw_test_case('百分数:80.03%。')
-     nsw_test_case('编号:31520181154418。')
-     nsw_test_case('纯数:2983.07克或12345.60米。')
-     nsw_test_case('日期:1999年2月20日或09年3月15号。')
-     nsw_test_case('金钱:12块5,34.5元,20.1万, 40多块钱')
-     nsw_test_case('特殊:O2O或B2C。')
-     nsw_test_case('3456万吨')
-     nsw_test_case('2938478321947个')
-     nsw_test_case('938')
-     nsw_test_case('今天吃了115个小笼包231个馒头')
-     nsw_test_case('有62%的概率')
-
-
- if __name__ == '__main__':
-     # nsw_test()
-
-     p = argparse.ArgumentParser()
-     p.add_argument('ifile', help='input filename, assume utf-8 encoding')
-     p.add_argument('ofile', help='output filename')
-     p.add_argument('--to_upper', action='store_true', help='convert to upper case')
-     p.add_argument('--to_lower', action='store_true', help='convert to lower case')
-     p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.")
-     p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines')
-     args = p.parse_args()
-
-     ifile = codecs.open(args.ifile, 'r', 'utf8')
-     ofile = codecs.open(args.ofile, 'w+', 'utf8')
-
-     n = 0
-     for l in ifile:
-         key = ''
-         text = ''
-         if args.has_key:
-             cols = l.split(maxsplit=1)
-             key = cols[0]
-             if len(cols) == 2:
-                 text = cols[1]
-             else:
-                 text = ''
-         else:
-             text = l
-
-         # cases
-         if args.to_upper and args.to_lower:
-             sys.stderr.write('text norm: to_upper OR to_lower?')
-             exit(1)
-         if args.to_upper:
-             text = text.upper()
-         if args.to_lower:
-             text = text.lower()
-
-         # NSW(Non-Standard-Word) normalization
-         text = NSWNormalizer(text).normalize()
-
-         #
-         if args.has_key:
-             ofile.write(key + '\t' + text)
-         else:
-             ofile.write(text)
-
-         n += 1
-         if n % args.log_interval == 0:
-             sys.stderr.write("text norm: {} lines done.\n".format(n))
-
-     sys.stderr.write("text norm: {} lines done in total.\n".format(n))
-
-     ifile.close()
-     ofile.close()
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py DELETED
@@ -1,17 +0,0 @@
- _base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py'
-
- # ======================= Possible modified parameters =======================
- # -----model related-----
- # The scaling factor that controls the depth of the network structure
- deepen_factor = 0.33
- # The scaling factor that controls the width of the network structure
- widen_factor = 0.375
-
- # ============================== Unmodified in most cases ===================
- model = dict(
-     backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
-     neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
-     bbox_head=dict(
-         type='YOLOv6Head',
-         head_module=dict(widen_factor=widen_factor),
-         loss_bbox=dict(iou_mode='siou')))
spaces/Abhilashvj/planogram-compliance/utils/aws/resume.py DELETED
@@ -1,42 +0,0 @@
- # Resume all interrupted trainings in yolov5/ dir including DDP trainings
- # Usage: $ python utils/aws/resume.py
-
- import os
- import sys
- from pathlib import Path
-
- import torch
- import yaml
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[2]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- port = 0  # --master_port
- path = Path("").resolve()
- for last in path.rglob("*/**/last.pt"):
-     ckpt = torch.load(last)
-     if ckpt["optimizer"] is None:
-         continue
-
-     # Load opt.yaml
-     with open(last.parent.parent / "opt.yaml", errors="ignore") as f:
-         opt = yaml.safe_load(f)
-
-     # Get device count
-     d = opt["device"].split(",")  # devices
-     nd = len(d)  # number of devices
-     ddp = nd > 1 or (
-         nd == 0 and torch.cuda.device_count() > 1
-     )  # distributed data parallel
-
-     if ddp:  # multi-GPU
-         port += 1
-         cmd = f"python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}"
-     else:  # single-GPU
-         cmd = f"python train.py --resume {last}"
-
-     cmd += " > /dev/null 2>&1 &"  # redirect output to dev/null and run in daemon thread
-     print(cmd)
-     os.system(cmd)
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
- import Pages from './Pages';
-
- export default function (
-     config?: Pages.IConfig
- ): Pages;
spaces/Aki004/herta-so-vits/inference/__init__.py DELETED
File without changes
spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py DELETED
@@ -1,509 +0,0 @@
1
- import copy
2
- import math
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
-
7
- import commons
8
- import ONNXVITS_modules as modules
9
- import attentions
10
- import monotonic_align
11
-
12
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
13
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
14
- from commons import init_weights, get_padding
15
-
16
-
17
- class StochasticDurationPredictor(nn.Module):
18
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
19
- super().__init__()
20
- filter_channels = in_channels # it needs to be removed from future version.
21
- self.in_channels = in_channels
22
- self.filter_channels = filter_channels
23
- self.kernel_size = kernel_size
24
- self.p_dropout = p_dropout
25
- self.n_flows = n_flows
26
- self.gin_channels = gin_channels
27
-
28
- self.log_flow = modules.Log()
29
- self.flows = nn.ModuleList()
30
- self.flows.append(modules.ElementwiseAffine(2))
31
- for i in range(n_flows):
32
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
33
- self.flows.append(modules.Flip())
34
-
35
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
36
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
37
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
38
- self.post_flows = nn.ModuleList()
39
- self.post_flows.append(modules.ElementwiseAffine(2))
40
- for i in range(4):
41
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
42
- self.post_flows.append(modules.Flip())
43
-
44
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
45
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
46
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
47
- if gin_channels != 0:
48
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
49
-
50
- self.w = None
51
- self.reverse = None
52
- self.noise_scale = None
53
- def forward(self, x, x_mask, g=None):
54
- w = self.w
55
- reverse = self.reverse
56
- noise_scale = self.noise_scale
57
-
58
- x = torch.detach(x)
59
- x = self.pre(x)
60
- if g is not None:
61
- g = torch.detach(g)
62
- x = x + self.cond(g)
63
- x = self.convs(x, x_mask)
64
- x = self.proj(x) * x_mask
65
-
66
- if not reverse:
67
- flows = self.flows
68
- assert w is not None
69
-
70
- logdet_tot_q = 0
71
- h_w = self.post_pre(w)
72
- h_w = self.post_convs(h_w, x_mask)
73
- h_w = self.post_proj(h_w) * x_mask
74
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
75
- z_q = e_q
76
- for flow in self.post_flows:
77
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
78
- logdet_tot_q += logdet_q
79
- z_u, z1 = torch.split(z_q, [1, 1], 1)
80
- u = torch.sigmoid(z_u) * x_mask
81
- z0 = (w - u) * x_mask
82
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
83
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
84
-
85
- logdet_tot = 0
86
- z0, logdet = self.log_flow(z0, x_mask)
87
- logdet_tot += logdet
88
- z = torch.cat([z0, z1], 1)
89
- for flow in flows:
90
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
91
- logdet_tot = logdet_tot + logdet
92
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
93
- return nll + logq # [b]
94
- else:
95
- flows = list(reversed(self.flows))
96
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
97
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
98
- for flow in flows:
99
- z = flow(z, x_mask, g=x, reverse=reverse)
100
- z0, z1 = torch.split(z, [1, 1], 1)
101
- logw = z0
102
- return logw
103
-
104
-
105
- class TextEncoder(nn.Module):
106
- def __init__(self,
107
- n_vocab,
108
- out_channels,
109
- hidden_channels,
110
- filter_channels,
111
- n_heads,
112
- n_layers,
113
- kernel_size,
114
- p_dropout):
115
- super().__init__()
116
- self.n_vocab = n_vocab
117
- self.out_channels = out_channels
118
- self.hidden_channels = hidden_channels
119
- self.filter_channels = filter_channels
120
- self.n_heads = n_heads
121
- self.n_layers = n_layers
122
- self.kernel_size = kernel_size
123
- self.p_dropout = p_dropout
124
-
125
- self.emb = nn.Embedding(n_vocab, hidden_channels)
126
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
127
-
128
- self.encoder = attentions.Encoder(
129
- hidden_channels,
130
- filter_channels,
131
- n_heads,
132
- n_layers,
133
- kernel_size,
134
- p_dropout)
135
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
136
-
137
- def forward(self, x, x_lengths):
138
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
139
- x = torch.transpose(x, 1, -1) # [b, h, t]
140
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
141
-
142
- x = self.encoder(x * x_mask, x_mask)
143
- stats = self.proj(x) * x_mask
144
-
145
- m, logs = torch.split(stats, self.out_channels, dim=1)
146
- return x, m, logs, x_mask
147
-
148
-
149
- class ResidualCouplingBlock(nn.Module):
150
- def __init__(self,
151
- channels,
152
- hidden_channels,
153
- kernel_size,
154
- dilation_rate,
155
- n_layers,
156
- n_flows=4,
157
- gin_channels=0):
158
- super().__init__()
159
- self.channels = channels
160
- self.hidden_channels = hidden_channels
161
- self.kernel_size = kernel_size
162
- self.dilation_rate = dilation_rate
163
- self.n_layers = n_layers
164
- self.n_flows = n_flows
165
- self.gin_channels = gin_channels
166
-
167
- self.flows = nn.ModuleList()
168
- for i in range(n_flows):
169
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
170
- self.flows.append(modules.Flip())
171
-
172
- self.reverse = None
173
- def forward(self, x, x_mask, g=None):
174
- reverse = self.reverse
175
- if not reverse:
176
- for flow in self.flows:
177
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
178
- else:
179
- for flow in reversed(self.flows):
180
- x = flow(x, x_mask, g=g, reverse=reverse)
181
- return x
182
-
183
-
184
- class PosteriorEncoder(nn.Module):
185
- def __init__(self,
186
- in_channels,
187
- out_channels,
188
- hidden_channels,
189
- kernel_size,
190
- dilation_rate,
191
- n_layers,
192
- gin_channels=0):
193
- super().__init__()
194
- self.in_channels = in_channels
195
- self.out_channels = out_channels
196
- self.hidden_channels = hidden_channels
197
- self.kernel_size = kernel_size
198
- self.dilation_rate = dilation_rate
199
- self.n_layers = n_layers
200
- self.gin_channels = gin_channels
201
-
202
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
203
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
204
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
205
-
206
- def forward(self, x, x_lengths, g=None):
207
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
208
- x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t]
209
- x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g
210
- stats = self.proj(x) * x_mask
211
- m, logs = torch.split(stats, self.out_channels, dim=1)
212
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
213
- return z, m, logs, x_mask # z, m, logs : [b, h, t]
214
-
215
-
216
- class Generator(torch.nn.Module):
217
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
218
- super(Generator, self).__init__()
219
- self.num_kernels = len(resblock_kernel_sizes)
220
- self.num_upsamples = len(upsample_rates)
221
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
222
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
223
-
224
- self.ups = nn.ModuleList()
225
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
226
- self.ups.append(weight_norm(
227
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
228
- k, u, padding=(k-u)//2)))
229
-
230
- self.resblocks = nn.ModuleList()
231
- for i in range(len(self.ups)):
232
- ch = upsample_initial_channel//(2**(i+1))
233
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
234
- self.resblocks.append(resblock(ch, k, d))
235
-
236
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
237
- self.ups.apply(init_weights)
238
-
239
- if gin_channels != 0:
240
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
241
-
242
- def forward(self, x, g=None):
243
- x = self.conv_pre(x)
244
- if g is not None:
245
- x = x + self.cond(g)
246
-
247
- for i in range(self.num_upsamples):
248
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
249
- x = self.ups[i](x)
250
- xs = None
251
- for j in range(self.num_kernels):
252
- if xs is None:
253
- xs = self.resblocks[i*self.num_kernels+j](x)
254
- else:
255
- xs += self.resblocks[i*self.num_kernels+j](x)
256
- x = xs / self.num_kernels
257
- x = F.leaky_relu(x)
258
- x = self.conv_post(x)
259
- x = torch.tanh(x)
260
-
261
- return x
262
-
263
- def remove_weight_norm(self):
264
- print('Removing weight norm...')
265
- for l in self.ups:
266
- remove_weight_norm(l)
267
- for l in self.resblocks:
268
- l.remove_weight_norm()
269
-
270
-
271
- class DiscriminatorP(torch.nn.Module):
272
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
273
- super(DiscriminatorP, self).__init__()
274
- self.period = period
275
- self.use_spectral_norm = use_spectral_norm
276
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
277
- self.convs = nn.ModuleList([
278
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
279
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
280
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
281
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
282
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
283
- ])
284
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
285
-
286
- def forward(self, x):
287
- fmap = []
288
-
289
- # 1d to 2d
290
- b, c, t = x.shape
291
- if t % self.period != 0: # pad first
292
- n_pad = self.period - (t % self.period)
293
- x = F.pad(x, (0, n_pad), "reflect")
294
- t = t + n_pad
295
- x = x.view(b, c, t // self.period, self.period)
296
-
297
- for l in self.convs:
298
- x = l(x)
299
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
300
- fmap.append(x)
301
- x = self.conv_post(x)
302
- fmap.append(x)
303
- x = torch.flatten(x, 1, -1)
304
-
305
- return x, fmap
306
-
307
-
308
- class DiscriminatorS(torch.nn.Module):
309
- def __init__(self, use_spectral_norm=False):
310
- super(DiscriminatorS, self).__init__()
311
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
312
- self.convs = nn.ModuleList([
313
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
314
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
315
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
316
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
317
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
318
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
319
- ])
320
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
321
-
322
- def forward(self, x):
323
- fmap = []
324
-
325
- for l in self.convs:
326
- x = l(x)
327
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
328
- fmap.append(x)
329
- x = self.conv_post(x)
330
- fmap.append(x)
331
- x = torch.flatten(x, 1, -1)
332
-
333
- return x, fmap
334
-
335
-
336
- class MultiPeriodDiscriminator(torch.nn.Module):
337
- def __init__(self, use_spectral_norm=False):
338
- super(MultiPeriodDiscriminator, self).__init__()
339
- periods = [2,3,5,7,11]
340
-
341
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
342
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
343
- self.discriminators = nn.ModuleList(discs)
344
-
345
- def forward(self, y, y_hat):
346
- y_d_rs = []
347
- y_d_gs = []
348
- fmap_rs = []
349
- fmap_gs = []
350
- for i, d in enumerate(self.discriminators):
351
- y_d_r, fmap_r = d(y)
352
- y_d_g, fmap_g = d(y_hat)
353
- y_d_rs.append(y_d_r)
354
- y_d_gs.append(y_d_g)
355
- fmap_rs.append(fmap_r)
356
- fmap_gs.append(fmap_g)
357
-
358
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
359
-
360
-
361
-
362
- class SynthesizerTrn(nn.Module):
363
- """
364
- Synthesizer for Training
365
- """
366
-
367
- def __init__(self,
368
- n_vocab,
369
- spec_channels,
370
- segment_size,
371
- inter_channels,
372
- hidden_channels,
373
- filter_channels,
374
- n_heads,
375
- n_layers,
376
- kernel_size,
377
- p_dropout,
378
- resblock,
379
- resblock_kernel_sizes,
380
- resblock_dilation_sizes,
381
- upsample_rates,
382
- upsample_initial_channel,
383
- upsample_kernel_sizes,
384
- n_speakers=0,
385
- gin_channels=0,
386
- use_sdp=True,
387
- **kwargs):
388
-
389
- super().__init__()
390
- self.n_vocab = n_vocab
391
- self.spec_channels = spec_channels
392
- self.inter_channels = inter_channels
393
- self.hidden_channels = hidden_channels
394
- self.filter_channels = filter_channels
395
- self.n_heads = n_heads
396
- self.n_layers = n_layers
397
- self.kernel_size = kernel_size
398
- self.p_dropout = p_dropout
399
- self.resblock = resblock
400
- self.resblock_kernel_sizes = resblock_kernel_sizes
401
- self.resblock_dilation_sizes = resblock_dilation_sizes
402
- self.upsample_rates = upsample_rates
403
- self.upsample_initial_channel = upsample_initial_channel
404
- self.upsample_kernel_sizes = upsample_kernel_sizes
405
- self.segment_size = segment_size
406
- self.n_speakers = n_speakers
407
- self.gin_channels = gin_channels
408
-
409
- self.use_sdp = use_sdp
410
-
411
- self.enc_p = TextEncoder(n_vocab,
412
- inter_channels,
413
- hidden_channels,
414
- filter_channels,
415
- n_heads,
416
- n_layers,
417
- kernel_size,
418
- p_dropout)
419
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
420
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
421
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
422
-
423
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
424
-
425
- if n_speakers > 0:
426
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
427
-
428
- def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None):
429
- torch.onnx.export(
430
- self.enc_p,
431
- (x, x_lengths),
432
- "ONNX_net/enc_p.onnx",
433
- input_names=["x", "x_lengths"],
434
- output_names=["xout", "m_p", "logs_p", "x_mask"],
435
- dynamic_axes={
436
- "x" : [1],
437
- "xout" : [2],
438
- "m_p" : [2],
439
- "logs_p" : [2],
440
- "x_mask" : [2]
441
- },
442
- verbose=True,
443
- )
444
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
445
-
446
- if self.n_speakers > 0:
447
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
448
- else:
449
- g = None
450
-
451
- self.dp.reverse = True
452
- self.dp.noise_scale = noise_scale_w
453
- torch.onnx.export(
454
- self.dp,
455
- (x, x_mask, g),
456
- "ONNX_net/dp.onnx",
457
- input_names=["x", "x_mask", "g"],
458
- output_names=["logw"],
459
- dynamic_axes={
460
- "x" : [2],
461
- "x_mask" : [2],
462
- "logw" : [2]
463
- },
464
- verbose=True,
465
- )
466
- logw = self.dp(x, x_mask, g=g)
467
- w = torch.exp(logw) * x_mask * length_scale
468
- w_ceil = torch.ceil(w)
469
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
470
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
471
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
472
- attn = commons.generate_path(w_ceil, attn_mask)
473
-
474
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
475
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
476
-
477
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
478
-
479
- self.flow.reverse = True
480
- torch.onnx.export(
481
- self.flow,
482
- (z_p, y_mask, g),
483
- "ONNX_net/flow.onnx",
484
- input_names=["z_p", "y_mask", "g"],
485
- output_names=["z"],
486
- dynamic_axes={
487
- "z_p" : [2],
488
- "y_mask" : [2],
489
- "z" : [2]
490
- },
491
- verbose=True,
492
- )
493
- z = self.flow(z_p, y_mask, g=g)
494
- z_in = (z * y_mask)[:,:,:max_len]
495
-
496
- torch.onnx.export(
497
- self.dec,
498
- (z_in, g),
499
- "ONNX_net/dec.onnx",
500
- input_names=["z_in", "g"],
501
- output_names=["o"],
502
- dynamic_axes={
503
- "z_in" : [2],
504
- "o" : [2]
505
- },
506
- verbose=True,
507
- )
508
- o = self.dec(z_in, g=g)
509
- return o
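
Note that this forward pass doubles as an export routine: each call writes four ONNX graphs (enc_p, dp, flow, dec) into ONNX_net/ as a side effect and then runs the normal inference path. A minimal sketch of consuming the exported decoder with onnxruntime; the channel sizes (192 inter channels, 256 gin channels) are assumptions taken from common VITS configs, not read from this file:

import numpy as np
import onnxruntime as ort

# Load the decoder graph written by the torch.onnx.export call above.
sess = ort.InferenceSession("ONNX_net/dec.onnx")

# Dummy latent (batch, inter_channels=192, frames) and speaker embedding (assumed sizes).
z_in = np.random.randn(1, 192, 100).astype(np.float32)
g = np.random.randn(1, 256, 1).astype(np.float32)

# Input/output names match the export arguments above.
(o,) = sess.run(["o"], {"z_in": z_in, "g": g})
print(o.shape)  # (1, 1, n_samples)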
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/llff_dataset.py DELETED
@@ -1,292 +0,0 @@
1
- import torch
2
- from torch.utils.data import Dataset
3
- import glob
4
- import numpy as np
5
- import os
6
- from PIL import Image
7
- from torchvision import transforms as T
8
-
9
- from .ray_utils import *
10
-
11
-
12
- def normalize(v):
13
- """Normalize a vector."""
14
- return v / np.linalg.norm(v)
15
-
16
-
17
- def average_poses(poses):
18
- """
19
- Calculate the average pose, which is then used to center all poses
20
- using @center_poses. Its computation is as follows:
21
- 1. Compute the center: the average of pose centers.
22
- 2. Compute the z axis: the normalized average z axis.
23
- 3. Compute axis y': the average y axis.
24
- 4. Compute x' = y' cross product z, then normalize it as the x axis.
25
- 5. Compute the y axis: z cross product x.
26
-
27
- Note that at step 3, we cannot directly use y' as y axis since it's
28
- not necessarily orthogonal to z axis. We need to pass from x to y.
29
- Inputs:
30
- poses: (N_images, 3, 4)
31
- Outputs:
32
- pose_avg: (3, 4) the average pose
33
- """
34
- # 1. Compute the center
35
- center = poses[..., 3].mean(0) # (3)
36
-
37
- # 2. Compute the z axis
38
- z = normalize(poses[..., 2].mean(0)) # (3)
39
-
40
- # 3. Compute axis y' (no need to normalize as it's not the final output)
41
- y_ = poses[..., 1].mean(0) # (3)
42
-
43
- # 4. Compute the x axis
44
- x = normalize(np.cross(z, y_)) # (3)
45
-
46
- # 5. Compute the y axis (as z and x are normalized, y is already of norm 1)
47
- y = np.cross(x, z) # (3)
48
-
49
- pose_avg = np.stack([x, y, z, center], 1) # (3, 4)
50
-
51
- return pose_avg
52
-
53
-
54
- def center_poses(poses, blender2opencv):
55
- """
56
- Center the poses so that we can use NDC.
57
- See https://github.com/bmild/nerf/issues/34
58
- Inputs:
59
- poses: (N_images, 3, 4)
60
- Outputs:
61
- poses_centered: (N_images, 3, 4) the centered poses
62
- pose_avg: (3, 4) the average pose
63
- """
64
- poses = poses @ blender2opencv
65
- pose_avg = average_poses(poses) # (3, 4)
66
- pose_avg_homo = np.eye(4)
67
- pose_avg_homo[:3] = pose_avg # convert to homogeneous coordinate for faster computation
69
- # by simply adding 0, 0, 0, 1 as the last row
70
- last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1)) # (N_images, 1, 4)
71
- poses_homo = \
72
- np.concatenate([poses, last_row], 1) # (N_images, 4, 4) homogeneous coordinate
73
-
74
- poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo # (N_images, 4, 4)
75
- # poses_centered = poses_centered @ blender2opencv
76
- poses_centered = poses_centered[:, :3] # (N_images, 3, 4)
77
-
78
- return poses_centered, pose_avg_homo
79
-
80
-
81
- def viewmatrix(z, up, pos):
82
- vec2 = normalize(z)
83
- vec1_avg = up
84
- vec0 = normalize(np.cross(vec1_avg, vec2))
85
- vec1 = normalize(np.cross(vec2, vec0))
86
- m = np.eye(4)
87
- m[:3] = np.stack([-vec0, vec1, vec2, pos], 1)
88
- return m
89
-
90
-
91
- def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120):
92
- render_poses = []
93
- rads = np.array(list(rads) + [1.])
94
-
95
- for theta in np.linspace(0., 2. * np.pi * N_rots, N + 1)[:-1]:
96
- c = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads)
97
- z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.])))
98
- render_poses.append(viewmatrix(z, up, c))
99
- return render_poses
100
-
101
-
102
- def get_spiral(c2ws_all, near_fars, rads_scale=1.0, N_views=120):
103
- # center pose
104
- c2w = average_poses(c2ws_all)
105
-
106
- # Get average pose
107
- up = normalize(c2ws_all[:, :3, 1].sum(0))
108
-
109
- # Find a reasonable "focus depth" for this dataset
110
- dt = 0.75
111
- close_depth, inf_depth = near_fars.min() * 0.9, near_fars.max() * 5.0
112
- focal = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
113
-
114
- # Get radii for spiral path
115
- zdelta = near_fars.min() * .2
116
- tt = c2ws_all[:, :3, 3]
117
- rads = np.percentile(np.abs(tt), 90, 0) * rads_scale
118
- render_poses = render_path_spiral(c2w, up, rads, focal, zdelta, zrate=.5, N=N_views)
119
- return np.stack(render_poses)
120
-
121
-
122
- def get_interpolation_path(c2ws_all, steps=30):
123
- # flower
124
- # idx0 = 1
125
- # idx1 = 10
126
-
127
- # trex
128
- # idx0 = 8
129
- # idx1 = 53
130
-
131
- # horns
132
- idx0 = 18
133
- idx1 = 47
134
-
135
- v = np.linspace(0, 1, num=steps)
136
-
137
- c2w0 = c2ws_all[idx0]
138
- c2w1 = c2ws_all[idx1]
139
-
140
- c2w_ = []
141
- for i in range(steps):
142
- c2w_.append(c2w0 * v[i] + c2w1 * (1 - v[i]))
143
-
144
- return np.stack(c2w_)
145
-
146
-
147
- class LLFFDataset(Dataset):
148
- def __init__(self, datadir, split='train', downsample=4, is_stack=False, hold_every=8, N_vis=-1):
149
-
150
- self.root_dir = datadir
151
- self.split = split
152
- self.hold_every = hold_every
153
- self.is_stack = is_stack
154
- self.downsample = downsample
155
- self.define_transforms()
156
-
157
- self.blender2opencv = np.eye(4) # np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
158
- self.read_meta()
159
- self.white_bg = False
160
-
161
- # self.near_far = [np.min(self.near_fars[:,0]),np.max(self.near_fars[:,1])]
162
- self.near_far = [0.0, 1.0]
163
- self.scene_bbox = torch.tensor([[-1.5, -1.67, -1.0], [1.5, 1.67, 1.0]])
164
- # self.scene_bbox = torch.tensor([[-1.67, -1.5, -1.0], [1.67, 1.5, 1.0]])
165
- self.center = torch.mean(self.scene_bbox, dim=0).float().view(1, 1, 3)
166
- self.invradius = 1.0 / (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
167
-
168
- def read_meta(self):
169
-
170
- poses_bounds = np.load(os.path.join(self.root_dir, 'poses_bounds.npy')) # (N_images, 17)
171
- self.image_paths = sorted(glob.glob(os.path.join(self.root_dir, 'images_4/*')))
172
- # load full resolution image then resize
173
- if self.split in ['train', 'test']:
174
- assert len(poses_bounds) == len(self.image_paths), \
175
- 'Mismatch between number of images and number of poses! Please rerun COLMAP!'
176
-
177
- poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5)
178
- self.near_fars = poses_bounds[:, -2:] # (N_images, 2)
179
- hwf = poses[:, :, -1]
180
-
181
- # Step 1: rescale focal length according to training resolution
182
- H, W, self.focal = poses[0, :, -1] # original intrinsics, same for all images
183
- self.img_wh = np.array([int(W / self.downsample), int(H / self.downsample)])
184
- self.focal = [self.focal * self.img_wh[0] / W, self.focal * self.img_wh[1] / H]
185
-
186
- # Step 2: correct poses
187
- # Original poses has rotation in form "down right back", change to "right up back"
188
- # See https://github.com/bmild/nerf/issues/34
189
- poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
190
- # (N_images, 3, 4) exclude H, W, focal
191
- self.poses, self.pose_avg = center_poses(poses, self.blender2opencv)
192
-
193
- # Step 3: correct scale so that the nearest depth is at a little more than 1.0
194
- # See https://github.com/bmild/nerf/issues/34
195
- near_original = self.near_fars.min()
196
- scale_factor = near_original * 0.75 # 0.75 is the default parameter
197
- # the nearest depth is at 1/0.75=1.33
198
- self.near_fars /= scale_factor
199
- self.poses[..., 3] /= scale_factor
200
-
201
- # build rendering path
202
- N_views, N_rots = 120, 2
203
- tt = self.poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T
204
- up = normalize(self.poses[:, :3, 1].sum(0))
205
- rads = np.percentile(np.abs(tt), 90, 0)
206
-
207
- self.render_path = get_spiral(self.poses, self.near_fars, N_views=N_views)
208
- # self.render_path = get_interpolation_path(self.poses)
209
-
210
- # distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1)
211
- # val_idx = np.argmin(distances_from_center) # choose val image as the closest to
212
- # center image
213
-
214
- # ray directions for all pixels, same for all images (same H, W, focal)
215
- W, H = self.img_wh
216
- self.directions = get_ray_directions_blender(H, W, self.focal) # (H, W, 3)
217
-
218
- average_pose = average_poses(self.poses)
219
- dists = np.sum(np.square(average_pose[:3, 3] - self.poses[:, :3, 3]), -1)
220
- i_test = np.arange(0, self.poses.shape[0], self.hold_every) # [np.argmin(dists)]
221
- img_list = i_test if self.split != 'train' else list(set(np.arange(len(self.poses))) - set(i_test))
222
-
223
- # use first N_images-1 to train, the LAST is val
224
- self.all_rays = []
225
- self.all_rgbs = []
226
- for i in img_list:
227
- image_path = self.image_paths[i]
228
- c2w = torch.FloatTensor(self.poses[i])
229
-
230
- img = Image.open(image_path).convert('RGB')
231
- if self.downsample != 1.0:
232
- img = img.resize(self.img_wh, Image.LANCZOS)
233
- img = self.transform(img) # (3, h, w)
234
-
235
- img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB
236
- self.all_rgbs += [img]
237
- rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
238
- rays_o, rays_d = ndc_rays_blender(H, W, self.focal[0], 1.0, rays_o, rays_d)
239
- # viewdir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
240
-
241
- self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6)
242
-
243
- all_rays = self.all_rays
244
- all_rgbs = self.all_rgbs
245
-
246
- self.all_rays = torch.cat(self.all_rays, 0) # (len(self.meta['frames])*h*w,6)
247
- self.all_rgbs = torch.cat(self.all_rgbs, 0) # (len(self.meta['frames])*h*w,3)
248
-
249
- if self.is_stack:
250
- self.all_rays_stack = torch.stack(all_rays, 0).reshape(-1, *self.img_wh[::-1],
251
- 6) # (len(self.meta['frames]),h,w,6)
252
- avg_pool = torch.nn.AvgPool2d(4, ceil_mode=True)
253
- self.ds_all_rays_stack = avg_pool(self.all_rays_stack.permute(0, 3, 1, 2)).permute(0, 2, 3,
254
- 1) # (len(self.meta['frames]),h/4,w/4,6)
255
- self.all_rgbs_stack = torch.stack(all_rgbs, 0).reshape(-1, *self.img_wh[::-1],
256
- 3) # (len(self.meta['frames]),h,w,3)
257
-
258
- @torch.no_grad()
259
- def prepare_feature_data(self, encoder, chunk=8):
260
- '''
261
- Prepare feature maps as training data.
262
- '''
263
-         assert self.is_stack, 'Dataset should contain original stacked training data!'
264
- print('====> prepare_feature_data ...')
265
-
266
- frames_num, h, w, _ = self.all_rgbs_stack.size()
267
- features = []
268
-
269
- for chunk_idx in range(frames_num // chunk + int(frames_num % chunk > 0)):
270
- rgbs_chunk = self.all_rgbs_stack[chunk_idx * chunk: (chunk_idx + 1) * chunk].cuda()
271
- features_chunk = encoder(normalize_vgg(rgbs_chunk.permute(0, 3, 1, 2))).relu3_1
272
- # resize to the size of rgb map so that rays can match
273
- features_chunk = T.functional.resize(features_chunk, size=(h, w),
274
- interpolation=T.InterpolationMode.BILINEAR)
275
- features.append(features_chunk.detach().cpu().requires_grad_(False))
276
-
277
- self.all_features_stack = torch.cat(features).permute(0, 2, 3, 1) # (len(self.meta['frames]),h,w,256)
278
- self.all_features = self.all_features_stack.reshape(-1, 256)
279
- print('prepare_feature_data Done!')
280
-
281
- def define_transforms(self):
282
- self.transform = T.ToTensor()
283
-
284
- def __len__(self):
285
- return len(self.all_rgbs)
286
-
287
- def __getitem__(self, idx):
288
-
289
- sample = {'rays': self.all_rays[idx],
290
- 'rgbs': self.all_rgbs[idx]}
291
-
292
- return sample
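
For reference, a minimal sketch of how this dataset is typically consumed; the scene path is a placeholder and assumes the layout read_meta() expects (a poses_bounds.npy file plus an images_4/ folder):

from torch.utils.data import DataLoader

dataset = LLFFDataset('/data/nerf_llff_data/fern', split='train', downsample=4)
loader = DataLoader(dataset, batch_size=4096, shuffle=True)

for batch in loader:
    rays = batch['rays']  # (B, 6): ray origin (3) + direction (3), already in NDC
    rgbs = batch['rgbs']  # (B, 3): ground-truth colours in [0, 1]
    break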
 
spaces/Anandbheesetti/MNIST_digit_predictor/app.py DELETED
@@ -1,105 +0,0 @@
1
-
2
-
3
- ## importing the necessary libraries
4
- import pandas as pd
5
- import numpy as np
6
- import matplotlib.pyplot as plt
7
- import seaborn as sns
8
- import tensorflow as tf
9
-
10
- ##loading the MNIST dataset
11
- mnist=tf.keras.datasets.mnist
12
-
13
- #splitting the data into training and testing datasets
14
- (x_train_full,y_train_full),(x_test,y_test)=mnist.load_data()
15
-
16
- ## let us check the shapes of the training and testing datasets
17
- x_train_full.shape
18
-
19
- x_test.shape
20
-
21
-   ## As we know, the digits are stored in the form of pixels.
22
-   ## Each digit is a 28x28 grid of pixels
23
-   ## All pixel values range from 0 to 255 grey levels
24
-
25
- # Creating a validation data set from the training data set
26
- x_valid,x_train=x_train_full[:5000],x_train_full[5000:]
27
-
28
- # Now we will scale the data between 0 to 1 by dividing it by 255
29
- x_valid=x_valid/255
30
-
31
- x_train=x_train/255
32
-
33
- x_test=x_test/255
34
-
35
- y_valid,y_train=y_train_full[:5000],y_train_full[5000:]
36
-
37
-   # Now let us visualize what the MNIST data looks like
38
- plt.imshow(x_train[0],cmap="binary")
39
- plt.show()
40
-
41
-   # To visualize it at grey levels
42
- plt.figure(figsize=(15,15))
43
- sns.heatmap(x_train[0],annot=True,cmap="binary")
44
-
45
-   # Now we will create an artificial neural network with some hidden layers to build a model that predicts the written digit
46
- Layers=[tf.keras.layers.Flatten(input_shape=[28,28],name="inputlayer"),
47
- tf.keras.layers.Dense(300,activation="relu",name="hiddenlayer1"),
48
- tf.keras.layers.Dense(100,activation="relu",name="hiddenlayer2"),
49
- tf.keras.layers.Dense(10,activation="softmax",name="outputlayer")]
50
-
51
-   # Now we will build a Sequential model
52
- model_clf=tf.keras.models.Sequential(Layers)
53
-
54
- model_clf.layers
55
-
56
- # Let us see the summary of the model
57
- model_clf.summary()
58
-
59
- weights,biases=model_clf.layers[1].get_weights()
60
-
61
- # Defining the parameters to train the model
62
- LOSS_FUNCTION="sparse_categorical_crossentropy"
63
- OPTIMIZER="SGD"
64
- METRICS=["accuracy"]
65
-
66
- model_clf.compile(loss=LOSS_FUNCTION,
67
- optimizer=OPTIMIZER,
68
- metrics=METRICS)
69
-
70
- EPOCHS=30
71
- VALIDATION_SET=(x_valid,y_valid)
72
-
73
- history=model_clf.fit(x_train,y_train,epochs=EPOCHS,
74
- validation_data=VALIDATION_SET,
75
- batch_size=32)
76
-
77
- history.params
78
-
79
- pd.DataFrame(history.history).plot()
80
-
81
- model_clf.evaluate(x_test,y_test)
82
-
83
- x_new=x_test[:3]
84
- actual=y_test[:3]
85
- y_prob=model_clf.predict(x_new)
86
- y_pred=np.argmax(y_prob,axis=-1)
87
-
88
- for i,j,k in zip(x_new,y_pred,actual):
89
- plt.imshow(i,cmap="binary")
90
- plt.title(f"predicted {j} and actual is {k}")
91
- plt.axis("off")
92
- plt.show()
93
- print('##########################')
94
-
95
- import gradio as gd
96
- def makePred(img):
97
- img_3d=img.reshape(-1,28,28)
98
- im_resize=img_3d/255.0
99
- predict=model_clf.predict(im_resize)
100
- pred=np.argmax(predict)
101
- return str(pred)
102
-
103
- demo = gd.Interface(makePred, inputs='sketchpad', outputs='label')
104
-   demo.launch(debug=True)
105
-
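
The script retrains the classifier on every launch before serving it. A common refinement is to persist the fitted weights and reload them at startup; a sketch, with mnist_clf.h5 as a hypothetical save path:

import os
import tensorflow as tf

MODEL_PATH = 'mnist_clf.h5'  # hypothetical location

if os.path.exists(MODEL_PATH):
    model_clf = tf.keras.models.load_model(MODEL_PATH)
else:
    # ... build, compile and fit model_clf as above ...
    model_clf.save(MODEL_PATH)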
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py DELETED
@@ -1,59 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
5
-
6
- from ...utils import logging
7
-
8
-
9
- logger = logging.get_logger(__name__)
10
-
11
-
12
- class IFSafetyChecker(PreTrainedModel):
13
- config_class = CLIPConfig
14
-
15
- _no_split_modules = ["CLIPEncoderLayer"]
16
-
17
- def __init__(self, config: CLIPConfig):
18
- super().__init__(config)
19
-
20
- self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
21
-
22
- self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
23
- self.w_head = nn.Linear(config.vision_config.projection_dim, 1)
24
-
25
- @torch.no_grad()
26
- def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
27
- image_embeds = self.vision_model(clip_input)[0]
28
-
29
- nsfw_detected = self.p_head(image_embeds)
30
- nsfw_detected = nsfw_detected.flatten()
31
- nsfw_detected = nsfw_detected > p_threshold
32
- nsfw_detected = nsfw_detected.tolist()
33
-
34
- if any(nsfw_detected):
35
- logger.warning(
36
- "Potential NSFW content was detected in one or more images. A black image will be returned instead."
37
- " Try again with a different prompt and/or seed."
38
- )
39
-
40
- for idx, nsfw_detected_ in enumerate(nsfw_detected):
41
- if nsfw_detected_:
42
- images[idx] = np.zeros(images[idx].shape)
43
-
44
- watermark_detected = self.w_head(image_embeds)
45
- watermark_detected = watermark_detected.flatten()
46
- watermark_detected = watermark_detected > w_threshold
47
- watermark_detected = watermark_detected.tolist()
48
-
49
- if any(watermark_detected):
50
- logger.warning(
51
- "Potential watermarked content was detected in one or more images. A black image will be returned instead."
52
- " Try again with a different prompt and/or seed."
53
- )
54
-
55
- for idx, watermark_detected_ in enumerate(watermark_detected):
56
- if watermark_detected_:
57
- images[idx] = np.zeros(images[idx].shape)
58
-
59
- return images, nsfw_detected, watermark_detected
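
Both heads are plain linear probes on the pooled CLIP image embedding, thresholded at 0.5 by default; flagged images are zeroed rather than dropped so batch shapes stay stable. A sketch of a standalone call, assuming the checker weights load from the IF repo's safety_checker subfolder (repo name and layout are assumptions):

import numpy as np
import torch

checker = IFSafetyChecker.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker"  # assumed checkpoint layout
)

clip_input = torch.randn(2, 3, 224, 224)  # CLIP-preprocessed pixel values
images = np.random.rand(2, 64, 64, 3).astype(np.float32)

images, nsfw, watermark = checker(clip_input, images)
# nsfw / watermark are per-image booleans; flagged entries come back as black images.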
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py DELETED
@@ -1,129 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import tempfile
18
- import unittest
19
-
20
- import numpy as np
21
- import torch
22
-
23
- from diffusers import VersatileDiffusionPipeline
24
- from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
25
-
26
-
27
- torch.backends.cuda.matmul.allow_tf32 = False
28
-
29
-
30
- class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
31
- pass
32
-
33
-
34
- @nightly
35
- @require_torch_gpu
36
- class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
37
- def tearDown(self):
38
- # clean up the VRAM after each test
39
- super().tearDown()
40
- gc.collect()
41
- torch.cuda.empty_cache()
42
-
43
- def test_from_save_pretrained(self):
44
- pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
45
- pipe.to(torch_device)
46
- pipe.set_progress_bar_config(disable=None)
47
-
48
- prompt_image = load_image(
49
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
50
- )
51
-
52
- generator = torch.manual_seed(0)
53
- image = pipe.dual_guided(
54
- prompt="first prompt",
55
- image=prompt_image,
56
- text_to_image_strength=0.75,
57
- generator=generator,
58
- guidance_scale=7.5,
59
- num_inference_steps=2,
60
- output_type="numpy",
61
- ).images
62
-
63
- with tempfile.TemporaryDirectory() as tmpdirname:
64
- pipe.save_pretrained(tmpdirname)
65
- pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
66
- pipe.to(torch_device)
67
- pipe.set_progress_bar_config(disable=None)
68
-
69
- generator = generator.manual_seed(0)
70
- new_image = pipe.dual_guided(
71
- prompt="first prompt",
72
- image=prompt_image,
73
- text_to_image_strength=0.75,
74
- generator=generator,
75
- guidance_scale=7.5,
76
- num_inference_steps=2,
77
- output_type="numpy",
78
- ).images
79
-
80
- assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
81
-
82
- def test_inference_dual_guided_then_text_to_image(self):
83
- pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
84
- pipe.to(torch_device)
85
- pipe.set_progress_bar_config(disable=None)
86
-
87
- prompt = "cyberpunk 2077"
88
- init_image = load_image(
89
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
90
- )
91
- generator = torch.manual_seed(0)
92
- image = pipe.dual_guided(
93
- prompt=prompt,
94
- image=init_image,
95
- text_to_image_strength=0.75,
96
- generator=generator,
97
- guidance_scale=7.5,
98
- num_inference_steps=50,
99
- output_type="numpy",
100
- ).images
101
-
102
- image_slice = image[0, 253:256, 253:256, -1]
103
-
104
- assert image.shape == (1, 512, 512, 3)
105
- expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
106
-
107
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
108
-
109
- prompt = "A painting of a squirrel eating a burger "
110
- generator = torch.manual_seed(0)
111
- image = pipe.text_to_image(
112
- prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
113
- ).images
114
-
115
- image_slice = image[0, 253:256, 253:256, -1]
116
-
117
- assert image.shape == (1, 512, 512, 3)
118
- expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
119
-
120
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
121
-
122
- image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
123
-
124
- image_slice = image[0, 253:256, 253:256, -1]
125
-
126
- assert image.shape == (1, 512, 512, 3)
127
- expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
128
-
129
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
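
These integration tests follow the diffusers slice-comparison convention: a fixed 3x3 corner patch of the output is compared against hard-coded reference values with a loose tolerance, so harmless numerical drift passes while behavioural regressions fail. The pattern in isolation:

import numpy as np

def assert_slice_close(image, expected_slice, tol=1e-1):
    # Compare a fixed patch of the last channel against reference values.
    image_slice = image[0, 253:256, 253:256, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol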
 
spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py DELETED
@@ -1,28 +0,0 @@
1
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- roi_head=dict(
4
- type='DynamicRoIHead',
5
- bbox_head=dict(
6
- type='Shared2FCBBoxHead',
7
- in_channels=256,
8
- fc_out_channels=1024,
9
- roi_feat_size=7,
10
- num_classes=80,
11
- bbox_coder=dict(
12
- type='DeltaXYWHBBoxCoder',
13
- target_means=[0., 0., 0., 0.],
14
- target_stds=[0.1, 0.1, 0.2, 0.2]),
15
- reg_class_agnostic=False,
16
- loss_cls=dict(
17
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
18
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
19
- train_cfg=dict(
20
- rpn_proposal=dict(nms=dict(iou_threshold=0.85)),
21
- rcnn=dict(
22
- dynamic_rcnn=dict(
23
- iou_topk=75,
24
- beta_topk=10,
25
- update_iter_interval=100,
26
- initial_iou=0.4,
27
- initial_beta=1.0))),
28
- test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85))))
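
This config resolves against its _base_ file at load time, so the overrides above are merged into the full Faster R-CNN definition. A sketch of inspecting the merged result with mmcv (assuming an mmdetection checkout):

from mmcv import Config

cfg = Config.fromfile('configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py')
print(cfg.model.roi_head.type)                         # DynamicRoIHead
print(cfg.model.train_cfg.rcnn.dynamic_rcnn.iou_topk)  # 75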
 
spaces/Andy1621/uniformer_image_detection/tools/model_converters/regnet2mmdet.py DELETED
@@ -1,89 +0,0 @@
1
- import argparse
2
- from collections import OrderedDict
3
-
4
- import torch
5
-
6
-
7
- def convert_stem(model_key, model_weight, state_dict, converted_names):
8
- new_key = model_key.replace('stem.conv', 'conv1')
9
- new_key = new_key.replace('stem.bn', 'bn1')
10
- state_dict[new_key] = model_weight
11
- converted_names.add(model_key)
12
- print(f'Convert {model_key} to {new_key}')
13
-
14
-
15
- def convert_head(model_key, model_weight, state_dict, converted_names):
16
- new_key = model_key.replace('head.fc', 'fc')
17
- state_dict[new_key] = model_weight
18
- converted_names.add(model_key)
19
- print(f'Convert {model_key} to {new_key}')
20
-
21
-
22
- def convert_reslayer(model_key, model_weight, state_dict, converted_names):
23
- split_keys = model_key.split('.')
24
- layer, block, module = split_keys[:3]
25
- block_id = int(block[1:])
26
- layer_name = f'layer{int(layer[1:])}'
27
- block_name = f'{block_id - 1}'
28
-
29
- if block_id == 1 and module == 'bn':
30
- new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
31
- elif block_id == 1 and module == 'proj':
32
- new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
33
- elif module == 'f':
34
- if split_keys[3] == 'a_bn':
35
- module_name = 'bn1'
36
- elif split_keys[3] == 'b_bn':
37
- module_name = 'bn2'
38
- elif split_keys[3] == 'c_bn':
39
- module_name = 'bn3'
40
- elif split_keys[3] == 'a':
41
- module_name = 'conv1'
42
- elif split_keys[3] == 'b':
43
- module_name = 'conv2'
44
- elif split_keys[3] == 'c':
45
- module_name = 'conv3'
46
- new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
47
- else:
48
- raise ValueError(f'Unsupported conversion of key {model_key}')
49
- print(f'Convert {model_key} to {new_key}')
50
- state_dict[new_key] = model_weight
51
- converted_names.add(model_key)
52
-
53
-
54
- def convert(src, dst):
55
- """Convert keys in pycls pretrained RegNet models to mmdet style."""
56
-     # load the pycls checkpoint
57
- regnet_model = torch.load(src)
58
- blobs = regnet_model['model_state']
59
- # convert to pytorch style
60
- state_dict = OrderedDict()
61
- converted_names = set()
62
- for key, weight in blobs.items():
63
- if 'stem' in key:
64
- convert_stem(key, weight, state_dict, converted_names)
65
- elif 'head' in key:
66
- convert_head(key, weight, state_dict, converted_names)
67
- elif key.startswith('s'):
68
- convert_reslayer(key, weight, state_dict, converted_names)
69
-
70
- # check if all layers are converted
71
- for key in blobs:
72
- if key not in converted_names:
73
- print(f'not converted: {key}')
74
- # save checkpoint
75
- checkpoint = dict()
76
- checkpoint['state_dict'] = state_dict
77
- torch.save(checkpoint, dst)
78
-
79
-
80
- def main():
81
- parser = argparse.ArgumentParser(description='Convert model keys')
82
-     parser.add_argument('src', help='src pycls model path')
83
- parser.add_argument('dst', help='save path')
84
- args = parser.parse_args()
85
- convert(args.src, args.dst)
86
-
87
-
88
- if __name__ == '__main__':
89
- main()
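
Typical usage converts a pycls RegNet checkpoint into mmdet's key layout; the input path below is a placeholder:

# python tools/model_converters/regnet2mmdet.py RegNetX-4.0GF_dds_8gpu.pyth regnetx_mmdet.pth
convert('RegNetX-4.0GF_dds_8gpu.pyth', 'regnetx_mmdet.pth')
# The converted weights land under checkpoint['state_dict'] in the output file.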
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_hf.py DELETED
@@ -1,213 +0,0 @@
1
- import os
2
- from pathlib import Path
3
- from typing import Any, Dict, Optional, Union
4
-
5
- import torch
6
- from torch.nn import CrossEntropyLoss
7
- from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel
8
- from transformers.modeling_outputs import CausalLMOutputWithPast
9
-
10
- from modules import RoPE, shared
11
- from modules.logging_colors import logger
12
-
13
- try:
14
- import llama_cpp
15
- except Exception:
16
- llama_cpp = None
17
-
18
- try:
19
- import llama_cpp_cuda
20
- except Exception:
21
- llama_cpp_cuda = None
22
-
23
-
24
- def llama_cpp_lib():
25
- if (shared.args.cpu and llama_cpp is not None) or llama_cpp_cuda is None:
26
- return llama_cpp
27
- else:
28
- return llama_cpp_cuda
29
-
30
-
31
- class LlamacppHF(PreTrainedModel):
32
- def __init__(self, model, path):
33
- super().__init__(PretrainedConfig())
34
- self.model = model
35
- self.generation_config = GenerationConfig()
36
-
37
- self.past_seq = None
38
- self.llamacpp_cache = {
39
- 'n_tokens': self.model.n_tokens,
40
- 'input_ids': self.model.input_ids,
41
- 'scores': self.model.scores,
42
- 'ctx': self.model.ctx
43
- }
44
-
45
- if shared.args.cfg_cache:
46
- self.past_seq_negative = None
47
- self.llamacpp_cache_negative = {
48
- 'n_tokens': self.model.n_tokens,
49
- 'input_ids': self.model.input_ids.copy(),
50
- 'scores': self.model.scores.copy(),
51
- 'ctx': llama_cpp_lib().llama_new_context_with_model(model.model, model.context_params)
52
- }
53
-
54
- def _validate_model_class(self):
55
- pass
56
-
57
- def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
58
- pass
59
-
60
- def prepare_inputs_for_generation(self, input_ids, **kwargs):
61
- return {'input_ids': input_ids, **kwargs}
62
-
63
- def save_cache(self):
64
- self.llamacpp_cache.update({
65
- 'n_tokens': self.model.n_tokens,
66
- 'input_ids': self.model.input_ids,
67
- 'scores': self.model.scores,
68
- 'ctx': self.model.ctx
69
- })
70
-
71
- def save_negative_cache(self):
72
- self.llamacpp_cache_negative.update({
73
- 'n_tokens': self.model.n_tokens,
74
- 'input_ids': self.model.input_ids,
75
- 'scores': self.model.scores,
76
- 'ctx': self.model.ctx
77
- })
78
-
79
- def load_cache(self):
80
- self.model.n_tokens = self.llamacpp_cache['n_tokens']
81
- self.model.input_ids = self.llamacpp_cache['input_ids']
82
- self.model.scores = self.llamacpp_cache['scores']
83
- self.model.ctx = self.llamacpp_cache['ctx']
84
-
85
- def load_negative_cache(self):
86
- self.model.n_tokens = self.llamacpp_cache_negative['n_tokens']
87
- self.model.input_ids = self.llamacpp_cache_negative['input_ids']
88
- self.model.scores = self.llamacpp_cache_negative['scores']
89
- self.model.ctx = self.llamacpp_cache_negative['ctx']
90
-
91
- @property
92
- def device(self) -> torch.device:
93
- return torch.device(0)
94
-
95
- def __call__(self, *args, **kwargs):
96
- use_cache = kwargs.get('use_cache', True)
97
- labels = kwargs.get('labels', None)
98
- past_key_values = kwargs.get('past_key_values', None)
99
-
100
- if len(args) > 0:
101
- if not shared.args.cfg_cache:
102
- logger.error("Please enable the cfg-cache option to use CFG with llamacpp_HF.")
103
- return
104
-
105
- input_ids = args[0]
106
- is_negative = True
107
- past_seq = self.past_seq_negative
108
- self.load_negative_cache()
109
- else:
110
- input_ids = kwargs['input_ids']
111
- is_negative = False
112
- past_seq = self.past_seq
113
- self.load_cache()
114
-
115
- seq = input_ids[0].tolist()
116
- if is_negative and past_key_values is not None:
117
- seq = past_key_values + seq
118
-
119
- seq_tensor = torch.tensor(seq)
120
- reset = True
121
-
122
- # Make the forward call. The prefix-match code has been adapted from
123
- # https://github.com/abetlen/llama-cpp-python/commit/f4090a0bb2a2a25acfe28d31c82cc1aa273bedee
124
- if labels is None:
125
- if past_seq is not None:
126
- min_length = min(past_seq.shape[0], seq_tensor.shape[0])
127
- indices = torch.nonzero(~torch.eq(past_seq[:min_length], seq_tensor[:min_length]))
128
- if len(indices) > 0:
129
- longest_prefix = indices[0].item()
130
- else:
131
- longest_prefix = min_length
132
-
133
- if longest_prefix > 0:
134
- reset = False
135
- self.model.n_tokens = longest_prefix
136
- if len(seq_tensor) - longest_prefix > 0:
137
- self.model.eval(seq[longest_prefix:])
138
-
139
- if reset:
140
- self.model.reset()
141
- self.model.eval(seq)
142
-
143
- logits = torch.tensor(self.model.scores[self.model.n_tokens - 1, :]).view(1, 1, -1).to(input_ids.device)
144
- else:
145
- self.model.reset()
146
- self.model.eval(seq)
147
- logits = torch.tensor(self.model.eval_logits)
148
- logits = logits.view(1, logits.shape[0], logits.shape[1]).to(input_ids.device)
149
-
150
- if is_negative:
151
- self.save_negative_cache()
152
- self.past_seq_negative = seq_tensor
153
- else:
154
- self.save_cache()
155
- self.past_seq = seq_tensor
156
-
157
- loss = None
158
- if labels is not None:
159
- # Shift so that tokens < n predict n
160
- shift_logits = logits[..., :-1, :].contiguous()
161
- shift_labels = labels[..., 1:].contiguous()
162
- # Flatten the tokens
163
- loss_fct = CrossEntropyLoss()
164
- shift_logits = shift_logits.view(-1, logits.shape[-1])
165
- shift_labels = shift_labels.view(-1)
166
- # Enable model parallelism
167
- shift_labels = shift_labels.to(shift_logits.device)
168
- loss = loss_fct(shift_logits, shift_labels)
169
-
170
- return CausalLMOutputWithPast(logits=logits, past_key_values=seq if use_cache else None, loss=loss)
171
-
172
- @classmethod
173
- def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
174
-         assert len(model_args) == 0 and len(kwargs) == 0, "extra args are currently not supported"
175
-
176
- if isinstance(pretrained_model_name_or_path, str):
177
- pretrained_model_name_or_path = Path(pretrained_model_name_or_path)
178
-
179
- path = Path(f'{shared.args.model_dir}') / Path(pretrained_model_name_or_path)
180
- if path.is_file():
181
- model_file = path
182
- else:
183
- model_file = list(path.glob('*.gguf'))[0]
184
-
185
- logger.info(f"llama.cpp weights detected: {model_file}\n")
186
-
187
- if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
188
- tensor_split_list = None
189
- else:
190
- tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")]
191
-
192
- params = {
193
- 'model_path': str(model_file),
194
- 'n_ctx': shared.args.n_ctx,
195
- 'seed': int(shared.args.llama_cpp_seed),
196
- 'n_threads': shared.args.threads or None,
197
- 'n_threads_batch': shared.args.threads_batch or None,
198
- 'n_batch': shared.args.n_batch,
199
- 'use_mmap': not shared.args.no_mmap,
200
- 'use_mlock': shared.args.mlock,
201
- 'mul_mat_q': shared.args.mul_mat_q,
202
- 'numa': shared.args.numa,
203
- 'n_gpu_layers': shared.args.n_gpu_layers,
204
- 'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
205
- 'tensor_split': tensor_split_list,
206
- 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
207
- 'logits_all': True,
208
- }
209
-
210
- Llama = llama_cpp_lib().Llama
211
- model = Llama(**params)
212
-
213
- return LlamacppHF(model, model_file)
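
The wrapper exists so that transformers' sampling loop can drive a llama.cpp model; once constructed, generation goes through the usual generate() API. A sketch, assuming shared.args has already been populated by the web UI's model loader and that a matching tokenizer is available (both assumptions, with hypothetical names):

from transformers import AutoTokenizer

model = LlamacppHF.from_pretrained('my-model-7b.Q4_K_M.gguf')  # hypothetical GGUF file
tokenizer = AutoTokenizer.from_pretrained('my-tokenizer')      # hypothetical tokenizer

input_ids = tokenizer('Hello', return_tensors='pt').input_ids
output_ids = model.generate(input_ids, max_new_tokens=32, do_sample=True)
print(tokenizer.decode(output_ids[0]))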
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py DELETED
@@ -1,9 +0,0 @@
1
- # optimizer
2
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
3
- optimizer_config = dict()
4
- # learning policy
5
- lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
6
- # runtime settings
7
- runner = dict(type='IterBasedRunner', max_iters=20000)
8
- checkpoint_config = dict(by_epoch=False, interval=2000)
9
- evaluation = dict(interval=2000, metric='mIoU')
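
The 'poly' policy decays the learning rate polynomially over the 20k iterations and floors it at min_lr; written out:

def poly_lr(it, base_lr=0.01, power=0.9, min_lr=1e-4, max_iters=20000):
    # mmseg's 'poly' schedule: lr = base_lr * (1 - it / max_iters) ** power, floored.
    return max(base_lr * (1 - it / max_iters) ** power, min_lr)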
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/builder.py DELETED
@@ -1,8 +0,0 @@
1
- from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
2
-
3
- PIXEL_SAMPLERS = Registry('pixel sampler')
4
-
5
-
6
- def build_pixel_sampler(cfg, **default_args):
7
- """Build pixel sampler for segmentation map."""
8
- return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
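
Samplers hook in through the registry decorator and are then instantiated from config dicts by build_pixel_sampler. A sketch with a hypothetical sampler class:

@PIXEL_SAMPLERS.register_module()
class MyPixelSampler:  # hypothetical example, not part of mmseg
    def __init__(self, thresh=0.7):
        self.thresh = thresh

sampler = build_pixel_sampler(dict(type='MyPixelSampler', thresh=0.5))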
 
spaces/ArkanDash/rvc-models-new/lib/infer_pack/models.py DELETED
@@ -1,1142 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from lib.infer_pack import modules
7
- from lib.infer_pack import attentions
8
- from lib.infer_pack import commons
9
- from lib.infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from lib.infer_pack.commons import init_weights
13
- import numpy as np
14
- from lib.infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
-         if pitch is None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder768(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(768, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
-         if pitch is None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- stats = self.proj(x) * x_mask
106
-
107
- m, logs = torch.split(stats, self.out_channels, dim=1)
108
- return m, logs, x_mask
109
-
110
-
111
- class ResidualCouplingBlock(nn.Module):
112
- def __init__(
113
- self,
114
- channels,
115
- hidden_channels,
116
- kernel_size,
117
- dilation_rate,
118
- n_layers,
119
- n_flows=4,
120
- gin_channels=0,
121
- ):
122
- super().__init__()
123
- self.channels = channels
124
- self.hidden_channels = hidden_channels
125
- self.kernel_size = kernel_size
126
- self.dilation_rate = dilation_rate
127
- self.n_layers = n_layers
128
- self.n_flows = n_flows
129
- self.gin_channels = gin_channels
130
-
131
- self.flows = nn.ModuleList()
132
- for i in range(n_flows):
133
- self.flows.append(
134
- modules.ResidualCouplingLayer(
135
- channels,
136
- hidden_channels,
137
- kernel_size,
138
- dilation_rate,
139
- n_layers,
140
- gin_channels=gin_channels,
141
- mean_only=True,
142
- )
143
- )
144
- self.flows.append(modules.Flip())
145
-
146
- def forward(self, x, x_mask, g=None, reverse=False):
147
- if not reverse:
148
- for flow in self.flows:
149
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
- else:
151
- for flow in reversed(self.flows):
152
- x = flow(x, x_mask, g=g, reverse=reverse)
153
- return x
154
-
155
- def remove_weight_norm(self):
156
- for i in range(self.n_flows):
157
- self.flows[i * 2].remove_weight_norm()
158
-
159
-
160
- class PosteriorEncoder(nn.Module):
161
- def __init__(
162
- self,
163
- in_channels,
164
- out_channels,
165
- hidden_channels,
166
- kernel_size,
167
- dilation_rate,
168
- n_layers,
169
- gin_channels=0,
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.hidden_channels = hidden_channels
175
- self.kernel_size = kernel_size
176
- self.dilation_rate = dilation_rate
177
- self.n_layers = n_layers
178
- self.gin_channels = gin_channels
179
-
180
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
- self.enc = modules.WN(
182
- hidden_channels,
183
- kernel_size,
184
- dilation_rate,
185
- n_layers,
186
- gin_channels=gin_channels,
187
- )
188
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
-
190
- def forward(self, x, x_lengths, g=None):
191
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
- x.dtype
193
- )
194
- x = self.pre(x) * x_mask
195
- x = self.enc(x, x_mask, g=g)
196
- stats = self.proj(x) * x_mask
197
- m, logs = torch.split(stats, self.out_channels, dim=1)
198
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
- return z, m, logs, x_mask
200
-
201
- def remove_weight_norm(self):
202
- self.enc.remove_weight_norm()
203
-
204
-
205
- class Generator(torch.nn.Module):
206
- def __init__(
207
- self,
208
- initial_channel,
209
- resblock,
210
- resblock_kernel_sizes,
211
- resblock_dilation_sizes,
212
- upsample_rates,
213
- upsample_initial_channel,
214
- upsample_kernel_sizes,
215
- gin_channels=0,
216
- ):
217
- super(Generator, self).__init__()
218
- self.num_kernels = len(resblock_kernel_sizes)
219
- self.num_upsamples = len(upsample_rates)
220
- self.conv_pre = Conv1d(
221
- initial_channel, upsample_initial_channel, 7, 1, padding=3
222
- )
223
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
-
225
- self.ups = nn.ModuleList()
226
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
- self.ups.append(
228
- weight_norm(
229
- ConvTranspose1d(
230
- upsample_initial_channel // (2**i),
231
- upsample_initial_channel // (2 ** (i + 1)),
232
- k,
233
- u,
234
- padding=(k - u) // 2,
235
- )
236
- )
237
- )
238
-
239
- self.resblocks = nn.ModuleList()
240
- for i in range(len(self.ups)):
241
- ch = upsample_initial_channel // (2 ** (i + 1))
242
- for j, (k, d) in enumerate(
243
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
- ):
245
- self.resblocks.append(resblock(ch, k, d))
246
-
247
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
- self.ups.apply(init_weights)
249
-
250
- if gin_channels != 0:
251
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
-
253
- def forward(self, x, g=None):
254
- x = self.conv_pre(x)
255
- if g is not None:
256
- x = x + self.cond(g)
257
-
258
- for i in range(self.num_upsamples):
259
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
- x = self.ups[i](x)
261
- xs = None
262
- for j in range(self.num_kernels):
263
- if xs is None:
264
- xs = self.resblocks[i * self.num_kernels + j](x)
265
- else:
266
- xs += self.resblocks[i * self.num_kernels + j](x)
267
- x = xs / self.num_kernels
268
- x = F.leaky_relu(x)
269
- x = self.conv_post(x)
270
- x = torch.tanh(x)
271
-
272
- return x
273
-
274
- def remove_weight_norm(self):
275
- for l in self.ups:
276
- remove_weight_norm(l)
277
- for l in self.resblocks:
278
- l.remove_weight_norm()
279
-
280
-
281
- class SineGen(torch.nn.Module):
282
- """Definition of sine generator
283
- SineGen(samp_rate, harmonic_num = 0,
284
- sine_amp = 0.1, noise_std = 0.003,
285
- voiced_threshold = 0,
286
- flag_for_pulse=False)
287
- samp_rate: sampling rate in Hz
288
- harmonic_num: number of harmonic overtones (default 0)
289
-     sine_amp: amplitude of sine-waveform (default 0.1)
290
- noise_std: std of Gaussian noise (default 0.003)
291
-     voiced_threshold: F0 threshold for U/V classification (default 0)
292
-     flag_for_pulse: this SineGen is used inside PulseGen (default False)
293
- Note: when flag_for_pulse is True, the first time step of a voiced
294
- segment is always sin(np.pi) or cos(0)
295
- """
296
-
297
- def __init__(
298
- self,
299
- samp_rate,
300
- harmonic_num=0,
301
- sine_amp=0.1,
302
- noise_std=0.003,
303
- voiced_threshold=0,
304
- flag_for_pulse=False,
305
- ):
306
- super(SineGen, self).__init__()
307
- self.sine_amp = sine_amp
308
- self.noise_std = noise_std
309
- self.harmonic_num = harmonic_num
310
- self.dim = self.harmonic_num + 1
311
- self.sampling_rate = samp_rate
312
- self.voiced_threshold = voiced_threshold
313
-
314
- def _f02uv(self, f0):
315
- # generate uv signal
316
- uv = torch.ones_like(f0)
317
- uv = uv * (f0 > self.voiced_threshold)
318
- return uv
319
-
320
- def forward(self, f0, upp):
321
- """sine_tensor, uv = forward(f0)
322
- input F0: tensor(batchsize=1, length, dim=1)
323
- f0 for unvoiced steps should be 0
324
- output sine_tensor: tensor(batchsize=1, length, dim)
325
- output uv: tensor(batchsize=1, length, 1)
326
- """
327
- with torch.no_grad():
328
- f0 = f0[:, None].transpose(1, 2)
329
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
330
- # fundamental component
331
- f0_buf[:, :, 0] = f0[:, :, 0]
332
- for idx in np.arange(self.harmonic_num):
333
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
334
- idx + 2
335
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
336
-             rad_values = (f0_buf / self.sampling_rate) % 1  # the % 1 means the products over n_har cannot be optimized away in post-processing
337
- rand_ini = torch.rand(
338
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
339
- )
340
- rand_ini[:, 0] = 0
341
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
342
-             tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # taking % 1 here would keep the cumsum below from being optimized further
343
- tmp_over_one *= upp
344
- tmp_over_one = F.interpolate(
345
- tmp_over_one.transpose(2, 1),
346
- scale_factor=upp,
347
- mode="linear",
348
- align_corners=True,
349
- ).transpose(2, 1)
350
- rad_values = F.interpolate(
351
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
352
- ).transpose(
353
- 2, 1
354
- ) #######
355
- tmp_over_one %= 1
356
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
357
- cumsum_shift = torch.zeros_like(rad_values)
358
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
359
- sine_waves = torch.sin(
360
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
361
- )
362
- sine_waves = sine_waves * self.sine_amp
363
- uv = self._f02uv(f0)
364
- uv = F.interpolate(
365
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
366
- ).transpose(2, 1)
367
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
368
- noise = noise_amp * torch.randn_like(sine_waves)
369
- sine_waves = sine_waves * uv + noise
370
- return sine_waves, uv, noise
371
-
372
-
373
- class SourceModuleHnNSF(torch.nn.Module):
374
- """SourceModule for hn-nsf
375
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
376
- add_noise_std=0.003, voiced_threshod=0)
377
- sampling_rate: sampling_rate in Hz
378
- harmonic_num: number of harmonic above F0 (default: 0)
379
- sine_amp: amplitude of sine source signal (default: 0.1)
380
- add_noise_std: std of additive Gaussian noise (default: 0.003)
381
- note that amplitude of noise in unvoiced is decided
382
- by sine_amp
383
-     voiced_threshold: threshold to set U/V given F0 (default: 0)
384
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
385
- F0_sampled (batchsize, length, 1)
386
- Sine_source (batchsize, length, 1)
387
- noise_source (batchsize, length 1)
388
- uv (batchsize, length, 1)
389
- """
390
-
391
- def __init__(
392
- self,
393
- sampling_rate,
394
- harmonic_num=0,
395
- sine_amp=0.1,
396
- add_noise_std=0.003,
397
- voiced_threshod=0,
398
- is_half=True,
399
- ):
400
- super(SourceModuleHnNSF, self).__init__()
401
-
402
- self.sine_amp = sine_amp
403
- self.noise_std = add_noise_std
404
- self.is_half = is_half
405
- # to produce sine waveforms
406
- self.l_sin_gen = SineGen(
407
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
408
- )
409
-
410
- # to merge source harmonics into a single excitation
411
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
412
- self.l_tanh = torch.nn.Tanh()
413
-
414
- def forward(self, x, upp=None):
415
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
416
- if self.is_half:
417
- sine_wavs = sine_wavs.half()
418
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
419
- return sine_merge, None, None # noise, uv
420
-
421
-
422
- class GeneratorNSF(torch.nn.Module):
423
- def __init__(
424
- self,
425
- initial_channel,
426
- resblock,
427
- resblock_kernel_sizes,
428
- resblock_dilation_sizes,
429
- upsample_rates,
430
- upsample_initial_channel,
431
- upsample_kernel_sizes,
432
- gin_channels,
433
- sr,
434
- is_half=False,
435
- ):
436
- super(GeneratorNSF, self).__init__()
437
- self.num_kernels = len(resblock_kernel_sizes)
438
- self.num_upsamples = len(upsample_rates)
439
-
440
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
441
- self.m_source = SourceModuleHnNSF(
442
- sampling_rate=sr, harmonic_num=0, is_half=is_half
443
- )
444
- self.noise_convs = nn.ModuleList()
445
- self.conv_pre = Conv1d(
446
- initial_channel, upsample_initial_channel, 7, 1, padding=3
447
- )
448
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
449
-
450
- self.ups = nn.ModuleList()
451
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
452
- c_cur = upsample_initial_channel // (2 ** (i + 1))
453
- self.ups.append(
454
- weight_norm(
455
- ConvTranspose1d(
456
- upsample_initial_channel // (2**i),
457
- upsample_initial_channel // (2 ** (i + 1)),
458
- k,
459
- u,
460
- padding=(k - u) // 2,
461
- )
462
- )
463
- )
464
- if i + 1 < len(upsample_rates):
465
- stride_f0 = np.prod(upsample_rates[i + 1 :])
466
- self.noise_convs.append(
467
- Conv1d(
468
- 1,
469
- c_cur,
470
- kernel_size=stride_f0 * 2,
471
- stride=stride_f0,
472
- padding=stride_f0 // 2,
473
- )
474
- )
475
- else:
476
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
477
-
478
- self.resblocks = nn.ModuleList()
479
- for i in range(len(self.ups)):
480
- ch = upsample_initial_channel // (2 ** (i + 1))
481
- for j, (k, d) in enumerate(
482
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
483
- ):
484
- self.resblocks.append(resblock(ch, k, d))
485
-
486
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
487
- self.ups.apply(init_weights)
488
-
489
- if gin_channels != 0:
490
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
491
-
492
- self.upp = np.prod(upsample_rates)
493
-
494
- def forward(self, x, f0, g=None):
495
- har_source, noi_source, uv = self.m_source(f0, self.upp)
496
- har_source = har_source.transpose(1, 2)
497
- x = self.conv_pre(x)
498
- if g is not None:
499
- x = x + self.cond(g)
500
-
501
- for i in range(self.num_upsamples):
502
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
503
- x = self.ups[i](x)
504
- x_source = self.noise_convs[i](har_source)
505
- x = x + x_source
506
- xs = None
507
- for j in range(self.num_kernels):
508
- if xs is None:
509
- xs = self.resblocks[i * self.num_kernels + j](x)
510
- else:
511
- xs += self.resblocks[i * self.num_kernels + j](x)
512
- x = xs / self.num_kernels
513
- x = F.leaky_relu(x)
514
- x = self.conv_post(x)
515
- x = torch.tanh(x)
516
- return x
517
-
518
- def remove_weight_norm(self):
519
- for l in self.ups:
520
- remove_weight_norm(l)
521
- for l in self.resblocks:
522
- l.remove_weight_norm()
523
-
524
-
525
- sr2sr = {
526
- "32k": 32000,
527
- "40k": 40000,
528
- "48k": 48000,
529
- }
530
-
531
-
532
- class SynthesizerTrnMs256NSFsid(nn.Module):
533
- def __init__(
534
- self,
535
- spec_channels,
536
- segment_size,
537
- inter_channels,
538
- hidden_channels,
539
- filter_channels,
540
- n_heads,
541
- n_layers,
542
- kernel_size,
543
- p_dropout,
544
- resblock,
545
- resblock_kernel_sizes,
546
- resblock_dilation_sizes,
547
- upsample_rates,
548
- upsample_initial_channel,
549
- upsample_kernel_sizes,
550
- spk_embed_dim,
551
- gin_channels,
552
- sr,
553
- **kwargs
554
- ):
555
- super().__init__()
556
-         if isinstance(sr, str):
557
- sr = sr2sr[sr]
558
- self.spec_channels = spec_channels
559
- self.inter_channels = inter_channels
560
- self.hidden_channels = hidden_channels
561
- self.filter_channels = filter_channels
562
- self.n_heads = n_heads
563
- self.n_layers = n_layers
564
- self.kernel_size = kernel_size
565
- self.p_dropout = p_dropout
566
- self.resblock = resblock
567
- self.resblock_kernel_sizes = resblock_kernel_sizes
568
- self.resblock_dilation_sizes = resblock_dilation_sizes
569
- self.upsample_rates = upsample_rates
570
- self.upsample_initial_channel = upsample_initial_channel
571
- self.upsample_kernel_sizes = upsample_kernel_sizes
572
- self.segment_size = segment_size
573
- self.gin_channels = gin_channels
574
- # self.hop_length = hop_length#
575
- self.spk_embed_dim = spk_embed_dim
576
- self.enc_p = TextEncoder256(
577
- inter_channels,
578
- hidden_channels,
579
- filter_channels,
580
- n_heads,
581
- n_layers,
582
- kernel_size,
583
- p_dropout,
584
- )
585
- self.dec = GeneratorNSF(
586
- inter_channels,
587
- resblock,
588
- resblock_kernel_sizes,
589
- resblock_dilation_sizes,
590
- upsample_rates,
591
- upsample_initial_channel,
592
- upsample_kernel_sizes,
593
- gin_channels=gin_channels,
594
- sr=sr,
595
- is_half=kwargs["is_half"],
596
- )
597
- self.enc_q = PosteriorEncoder(
598
- spec_channels,
599
- inter_channels,
600
- hidden_channels,
601
- 5,
602
- 1,
603
- 16,
604
- gin_channels=gin_channels,
605
- )
606
- self.flow = ResidualCouplingBlock(
607
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
608
- )
609
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
610
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
611
-
612
- def remove_weight_norm(self):
613
- self.dec.remove_weight_norm()
614
- self.flow.remove_weight_norm()
615
- self.enc_q.remove_weight_norm()
616
-
617
- def forward(
618
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
619
- ): # 这里ds是id,[bs,1]
620
- # print(1,pitch.shape)#[bs,t]
621
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
622
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
623
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
624
- z_p = self.flow(z, y_mask, g=g)
625
- z_slice, ids_slice = commons.rand_slice_segments(
626
- z, y_lengths, self.segment_size
627
- )
628
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
629
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
630
- # print(-2,pitchf.shape,z_slice.shape)
631
- o = self.dec(z_slice, pitchf, g=g)
632
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
633
-
634
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
635
- g = self.emb_g(sid).unsqueeze(-1)
636
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
637
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
638
- if rate:
639
- head = int(z_p.shape[2] * rate)
640
- z_p = z_p[:, :, -head:]
641
- x_mask = x_mask[:, :, -head:]
642
- nsff0 = nsff0[:, -head:]
643
- z = self.flow(z_p, x_mask, g=g, reverse=True)
644
- o = self.dec(z * x_mask, nsff0, g=g)
645
- return o, x_mask, (z, z_p, m_p, logs_p)
646
-
647
-
648
- class SynthesizerTrnMs768NSFsid(nn.Module):
649
- def __init__(
650
- self,
651
- spec_channels,
652
- segment_size,
653
- inter_channels,
654
- hidden_channels,
655
- filter_channels,
656
- n_heads,
657
- n_layers,
658
- kernel_size,
659
- p_dropout,
660
- resblock,
661
- resblock_kernel_sizes,
662
- resblock_dilation_sizes,
663
- upsample_rates,
664
- upsample_initial_channel,
665
- upsample_kernel_sizes,
666
- spk_embed_dim,
667
- gin_channels,
668
- sr,
669
- **kwargs
670
- ):
671
- super().__init__()
672
- if type(sr) == type("strr"):
673
- sr = sr2sr[sr]
674
- self.spec_channels = spec_channels
675
- self.inter_channels = inter_channels
676
- self.hidden_channels = hidden_channels
677
- self.filter_channels = filter_channels
678
- self.n_heads = n_heads
679
- self.n_layers = n_layers
680
- self.kernel_size = kernel_size
681
- self.p_dropout = p_dropout
682
- self.resblock = resblock
683
- self.resblock_kernel_sizes = resblock_kernel_sizes
684
- self.resblock_dilation_sizes = resblock_dilation_sizes
685
- self.upsample_rates = upsample_rates
686
- self.upsample_initial_channel = upsample_initial_channel
687
- self.upsample_kernel_sizes = upsample_kernel_sizes
688
- self.segment_size = segment_size
689
- self.gin_channels = gin_channels
690
- # self.hop_length = hop_length#
691
- self.spk_embed_dim = spk_embed_dim
692
- self.enc_p = TextEncoder768(
693
- inter_channels,
694
- hidden_channels,
695
- filter_channels,
696
- n_heads,
697
- n_layers,
698
- kernel_size,
699
- p_dropout,
700
- )
701
- self.dec = GeneratorNSF(
702
- inter_channels,
703
- resblock,
704
- resblock_kernel_sizes,
705
- resblock_dilation_sizes,
706
- upsample_rates,
707
- upsample_initial_channel,
708
- upsample_kernel_sizes,
709
- gin_channels=gin_channels,
710
- sr=sr,
711
- is_half=kwargs["is_half"],
712
- )
713
- self.enc_q = PosteriorEncoder(
714
- spec_channels,
715
- inter_channels,
716
- hidden_channels,
717
- 5,
718
- 1,
719
- 16,
720
- gin_channels=gin_channels,
721
- )
722
- self.flow = ResidualCouplingBlock(
723
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
724
- )
725
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
726
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
727
-
728
- def remove_weight_norm(self):
729
- self.dec.remove_weight_norm()
730
- self.flow.remove_weight_norm()
731
- self.enc_q.remove_weight_norm()
732
-
733
- def forward(
734
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
735
- ): # 这里ds是id,[bs,1]
736
- # print(1,pitch.shape)#[bs,t]
737
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
738
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
739
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
740
- z_p = self.flow(z, y_mask, g=g)
741
- z_slice, ids_slice = commons.rand_slice_segments(
742
- z, y_lengths, self.segment_size
743
- )
744
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
745
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
746
- # print(-2,pitchf.shape,z_slice.shape)
747
- o = self.dec(z_slice, pitchf, g=g)
748
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
749
-
750
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
751
- g = self.emb_g(sid).unsqueeze(-1)
752
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
753
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
754
- if rate:
755
- head = int(z_p.shape[2] * rate)
756
- z_p = z_p[:, :, -head:]
757
- x_mask = x_mask[:, :, -head:]
758
- nsff0 = nsff0[:, -head:]
759
- z = self.flow(z_p, x_mask, g=g, reverse=True)
760
- o = self.dec(z * x_mask, nsff0, g=g)
761
- return o, x_mask, (z, z_p, m_p, logs_p)
762
-
763
-
764
- class SynthesizerTrnMs256NSFsid_nono(nn.Module):
765
- def __init__(
766
- self,
767
- spec_channels,
768
- segment_size,
769
- inter_channels,
770
- hidden_channels,
771
- filter_channels,
772
- n_heads,
773
- n_layers,
774
- kernel_size,
775
- p_dropout,
776
- resblock,
777
- resblock_kernel_sizes,
778
- resblock_dilation_sizes,
779
- upsample_rates,
780
- upsample_initial_channel,
781
- upsample_kernel_sizes,
782
- spk_embed_dim,
783
- gin_channels,
784
- sr=None,
785
- **kwargs
786
- ):
787
- super().__init__()
788
- self.spec_channels = spec_channels
789
- self.inter_channels = inter_channels
790
- self.hidden_channels = hidden_channels
791
- self.filter_channels = filter_channels
792
- self.n_heads = n_heads
793
- self.n_layers = n_layers
794
- self.kernel_size = kernel_size
795
- self.p_dropout = p_dropout
796
- self.resblock = resblock
797
- self.resblock_kernel_sizes = resblock_kernel_sizes
798
- self.resblock_dilation_sizes = resblock_dilation_sizes
799
- self.upsample_rates = upsample_rates
800
- self.upsample_initial_channel = upsample_initial_channel
801
- self.upsample_kernel_sizes = upsample_kernel_sizes
802
- self.segment_size = segment_size
803
- self.gin_channels = gin_channels
804
- # self.hop_length = hop_length#
805
- self.spk_embed_dim = spk_embed_dim
806
- self.enc_p = TextEncoder256(
807
- inter_channels,
808
- hidden_channels,
809
- filter_channels,
810
- n_heads,
811
- n_layers,
812
- kernel_size,
813
- p_dropout,
814
- f0=False,
815
- )
816
- self.dec = Generator(
817
- inter_channels,
818
- resblock,
819
- resblock_kernel_sizes,
820
- resblock_dilation_sizes,
821
- upsample_rates,
822
- upsample_initial_channel,
823
- upsample_kernel_sizes,
824
- gin_channels=gin_channels,
825
- )
826
- self.enc_q = PosteriorEncoder(
827
- spec_channels,
828
- inter_channels,
829
- hidden_channels,
830
- 5,
831
- 1,
832
- 16,
833
- gin_channels=gin_channels,
834
- )
835
- self.flow = ResidualCouplingBlock(
836
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
837
- )
838
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
839
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
840
-
841
- def remove_weight_norm(self):
842
- self.dec.remove_weight_norm()
843
- self.flow.remove_weight_norm()
844
- self.enc_q.remove_weight_norm()
845
-
846
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
847
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
848
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
849
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
850
- z_p = self.flow(z, y_mask, g=g)
851
- z_slice, ids_slice = commons.rand_slice_segments(
852
- z, y_lengths, self.segment_size
853
- )
854
- o = self.dec(z_slice, g=g)
855
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
856
-
857
- def infer(self, phone, phone_lengths, sid, rate=None):
858
- g = self.emb_g(sid).unsqueeze(-1)
859
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
860
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
861
- if rate:
862
- head = int(z_p.shape[2] * rate)
863
- z_p = z_p[:, :, -head:]
864
- x_mask = x_mask[:, :, -head:]
865
- z = self.flow(z_p, x_mask, g=g, reverse=True)
866
- o = self.dec(z * x_mask, g=g)
867
- return o, x_mask, (z, z_p, m_p, logs_p)
868
-
869
-
870
- class SynthesizerTrnMs768NSFsid_nono(nn.Module):
871
- def __init__(
872
- self,
873
- spec_channels,
874
- segment_size,
875
- inter_channels,
876
- hidden_channels,
877
- filter_channels,
878
- n_heads,
879
- n_layers,
880
- kernel_size,
881
- p_dropout,
882
- resblock,
883
- resblock_kernel_sizes,
884
- resblock_dilation_sizes,
885
- upsample_rates,
886
- upsample_initial_channel,
887
- upsample_kernel_sizes,
888
- spk_embed_dim,
889
- gin_channels,
890
- sr=None,
891
- **kwargs
892
- ):
893
- super().__init__()
894
- self.spec_channels = spec_channels
895
- self.inter_channels = inter_channels
896
- self.hidden_channels = hidden_channels
897
- self.filter_channels = filter_channels
898
- self.n_heads = n_heads
899
- self.n_layers = n_layers
900
- self.kernel_size = kernel_size
901
- self.p_dropout = p_dropout
902
- self.resblock = resblock
903
- self.resblock_kernel_sizes = resblock_kernel_sizes
904
- self.resblock_dilation_sizes = resblock_dilation_sizes
905
- self.upsample_rates = upsample_rates
906
- self.upsample_initial_channel = upsample_initial_channel
907
- self.upsample_kernel_sizes = upsample_kernel_sizes
908
- self.segment_size = segment_size
909
- self.gin_channels = gin_channels
910
- # self.hop_length = hop_length#
911
- self.spk_embed_dim = spk_embed_dim
912
- self.enc_p = TextEncoder768(
913
- inter_channels,
914
- hidden_channels,
915
- filter_channels,
916
- n_heads,
917
- n_layers,
918
- kernel_size,
919
- p_dropout,
920
- f0=False,
921
- )
922
- self.dec = Generator(
923
- inter_channels,
924
- resblock,
925
- resblock_kernel_sizes,
926
- resblock_dilation_sizes,
927
- upsample_rates,
928
- upsample_initial_channel,
929
- upsample_kernel_sizes,
930
- gin_channels=gin_channels,
931
- )
932
- self.enc_q = PosteriorEncoder(
933
- spec_channels,
934
- inter_channels,
935
- hidden_channels,
936
- 5,
937
- 1,
938
- 16,
939
- gin_channels=gin_channels,
940
- )
941
- self.flow = ResidualCouplingBlock(
942
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
943
- )
944
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
945
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
946
-
947
- def remove_weight_norm(self):
948
- self.dec.remove_weight_norm()
949
- self.flow.remove_weight_norm()
950
- self.enc_q.remove_weight_norm()
951
-
952
- def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]
953
- g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的
954
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
955
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
956
- z_p = self.flow(z, y_mask, g=g)
957
- z_slice, ids_slice = commons.rand_slice_segments(
958
- z, y_lengths, self.segment_size
959
- )
960
- o = self.dec(z_slice, g=g)
961
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
962
-
963
- def infer(self, phone, phone_lengths, sid, rate=None):
964
- g = self.emb_g(sid).unsqueeze(-1)
965
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
966
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
967
- if rate:
968
- head = int(z_p.shape[2] * rate)
969
- z_p = z_p[:, :, -head:]
970
- x_mask = x_mask[:, :, -head:]
971
- z = self.flow(z_p, x_mask, g=g, reverse=True)
972
- o = self.dec(z * x_mask, g=g)
973
- return o, x_mask, (z, z_p, m_p, logs_p)
974
-
975
-
976
- class MultiPeriodDiscriminator(torch.nn.Module):
977
- def __init__(self, use_spectral_norm=False):
978
- super(MultiPeriodDiscriminator, self).__init__()
979
- periods = [2, 3, 5, 7, 11, 17]
980
- # periods = [3, 5, 7, 11, 17, 23, 37]
981
-
982
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
983
- discs = discs + [
984
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
985
- ]
986
- self.discriminators = nn.ModuleList(discs)
987
-
988
- def forward(self, y, y_hat):
989
- y_d_rs = [] #
990
- y_d_gs = []
991
- fmap_rs = []
992
- fmap_gs = []
993
- for i, d in enumerate(self.discriminators):
994
- y_d_r, fmap_r = d(y)
995
- y_d_g, fmap_g = d(y_hat)
996
- # for j in range(len(fmap_r)):
997
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
998
- y_d_rs.append(y_d_r)
999
- y_d_gs.append(y_d_g)
1000
- fmap_rs.append(fmap_r)
1001
- fmap_gs.append(fmap_g)
1002
-
1003
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1004
-
1005
-
1006
- class MultiPeriodDiscriminatorV2(torch.nn.Module):
1007
- def __init__(self, use_spectral_norm=False):
1008
- super(MultiPeriodDiscriminatorV2, self).__init__()
1009
- # periods = [2, 3, 5, 7, 11, 17]
1010
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
1011
-
1012
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1013
- discs = discs + [
1014
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1015
- ]
1016
- self.discriminators = nn.ModuleList(discs)
1017
-
1018
- def forward(self, y, y_hat):
1019
- y_d_rs = [] #
1020
- y_d_gs = []
1021
- fmap_rs = []
1022
- fmap_gs = []
1023
- for i, d in enumerate(self.discriminators):
1024
- y_d_r, fmap_r = d(y)
1025
- y_d_g, fmap_g = d(y_hat)
1026
- # for j in range(len(fmap_r)):
1027
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1028
- y_d_rs.append(y_d_r)
1029
- y_d_gs.append(y_d_g)
1030
- fmap_rs.append(fmap_r)
1031
- fmap_gs.append(fmap_g)
1032
-
1033
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1034
-
1035
-
1036
- class DiscriminatorS(torch.nn.Module):
1037
- def __init__(self, use_spectral_norm=False):
1038
- super(DiscriminatorS, self).__init__()
1039
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1040
- self.convs = nn.ModuleList(
1041
- [
1042
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1043
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1044
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1045
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1046
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1047
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1048
- ]
1049
- )
1050
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1051
-
1052
- def forward(self, x):
1053
- fmap = []
1054
-
1055
- for l in self.convs:
1056
- x = l(x)
1057
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1058
- fmap.append(x)
1059
- x = self.conv_post(x)
1060
- fmap.append(x)
1061
- x = torch.flatten(x, 1, -1)
1062
-
1063
- return x, fmap
1064
-
1065
-
1066
- class DiscriminatorP(torch.nn.Module):
1067
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1068
- super(DiscriminatorP, self).__init__()
1069
- self.period = period
1070
- self.use_spectral_norm = use_spectral_norm
1071
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
1072
- self.convs = nn.ModuleList(
1073
- [
1074
- norm_f(
1075
- Conv2d(
1076
- 1,
1077
- 32,
1078
- (kernel_size, 1),
1079
- (stride, 1),
1080
- padding=(get_padding(kernel_size, 1), 0),
1081
- )
1082
- ),
1083
- norm_f(
1084
- Conv2d(
1085
- 32,
1086
- 128,
1087
- (kernel_size, 1),
1088
- (stride, 1),
1089
- padding=(get_padding(kernel_size, 1), 0),
1090
- )
1091
- ),
1092
- norm_f(
1093
- Conv2d(
1094
- 128,
1095
- 512,
1096
- (kernel_size, 1),
1097
- (stride, 1),
1098
- padding=(get_padding(kernel_size, 1), 0),
1099
- )
1100
- ),
1101
- norm_f(
1102
- Conv2d(
1103
- 512,
1104
- 1024,
1105
- (kernel_size, 1),
1106
- (stride, 1),
1107
- padding=(get_padding(kernel_size, 1), 0),
1108
- )
1109
- ),
1110
- norm_f(
1111
- Conv2d(
1112
- 1024,
1113
- 1024,
1114
- (kernel_size, 1),
1115
- 1,
1116
- padding=(get_padding(kernel_size, 1), 0),
1117
- )
1118
- ),
1119
- ]
1120
- )
1121
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1122
-
1123
- def forward(self, x):
1124
- fmap = []
1125
-
1126
- # 1d to 2d
1127
- b, c, t = x.shape
1128
- if t % self.period != 0: # pad first
1129
- n_pad = self.period - (t % self.period)
1130
- x = F.pad(x, (0, n_pad), "reflect")
1131
- t = t + n_pad
1132
- x = x.view(b, c, t // self.period, self.period)
1133
-
1134
- for l in self.convs:
1135
- x = l(x)
1136
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1137
- fmap.append(x)
1138
- x = self.conv_post(x)
1139
- fmap.append(x)
1140
- x = torch.flatten(x, 1, -1)
1141
-
1142
- return x, fmap
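The 1d-to-2d step in DiscriminatorP.forward above pads the waveform out to a multiple of the period and folds it into (batch, channels, frames, period). A minimal sketch of just that step, with made-up shapes:

import torch
import torch.nn.functional as F

period = 5
x = torch.randn(1, 1, 13)             # (batch, channels, time)
b, c, t = x.shape
n_pad = period - (t % period)         # pad time up to a multiple of the period
x = F.pad(x, (0, n_pad), "reflect")
x = x.view(b, c, (t + n_pad) // period, period)   # torch.Size([1, 1, 3, 5])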
spaces/Arnx/MusicGenXvAKN/audiocraft/utils/utils.py DELETED
@@ -1,234 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from concurrent.futures import ProcessPoolExecutor
- from functools import wraps
- import hashlib
- import logging
- import typing as tp
-
- import flashy
- import flashy.distrib
- import omegaconf
- import torch
- from torch.nn.utils.rnn import pad_sequence
-
-
- logger = logging.getLogger(__name__)
-
-
- def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
-     """Convenience function to map an omegaconf configuration to a dictionary.
-
-     Args:
-         cfg (omegaconf.DictConfig): Original configuration to map to dict.
-     Returns:
-         dict: Config as dictionary object.
-     """
-     dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
-     assert isinstance(dct, dict)
-     return dct
-
-
- def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
-     if max_samples >= len(dataset):
-         return dataset
-
-     generator = torch.Generator().manual_seed(seed)
-     perm = torch.randperm(len(dataset), generator=generator)
-     return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
-
-
- def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
-                num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
-     """Convenience function to load dataset into a dataloader with optional subset sampling.
-
-     Args:
-         dataset: Dataset to load.
-         num_samples (Optional[int]): Number of samples to limit subset size.
-         batch_size (int): Batch size.
-         num_workers (int): Number of workers for data loading.
-         seed (int): Random seed.
-     """
-     if num_samples is not None:
-         dataset = random_subset(dataset, num_samples, seed)
-
-     dataloader = flashy.distrib.loader(
-         dataset,
-         batch_size=batch_size,
-         num_workers=num_workers,
-         **kwargs
-     )
-     return dataloader
-
-
- def get_dataset_from_loader(dataloader):
-     dataset = dataloader.dataset
-     if isinstance(dataset, torch.utils.data.Subset):
-         return dataset.dataset
-     else:
-         return dataset
-
-
- def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
-     """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
-
-     Args:
-         input (torch.Tensor): The input tensor containing probabilities.
-         num_samples (int): Number of samples to draw.
-         replacement (bool): Whether to draw with replacement or not.
-     Keyword args:
-         generator (torch.Generator): A pseudorandom number generator for sampling.
-     Returns:
-         torch.Tensor: Last dimension contains num_samples indices
-             sampled from the multinomial probability distribution
-             located in the last dimension of tensor input.
-     """
-     input_ = input.reshape(-1, input.shape[-1])
-     output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
-     output = output_.reshape(*list(input.shape[:-1]), -1)
-     return output
-
-
- def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
-     """Sample next token from top K values along the last dimension of the input probs tensor.
-
-     Args:
-         probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
-         k (int): The k in "top-k".
-     Returns:
-         torch.Tensor: Sampled tokens.
-     """
-     top_k_value, _ = torch.topk(probs, k, dim=-1)
-     min_value_top_k = top_k_value[..., [-1]]
-     probs *= (probs >= min_value_top_k).float()
-     probs.div_(probs.sum(dim=-1, keepdim=True))
-     next_token = multinomial(probs, num_samples=1)
-     return next_token
-
-
- def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
-     """Sample next token from top P probabilities along the last dimension of the input probs tensor.
-
-     Args:
-         probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
-         p (float): The p in "top-p".
-     Returns:
-         torch.Tensor: Sampled tokens.
-     """
-     probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
-     probs_sum = torch.cumsum(probs_sort, dim=-1)
-     mask = probs_sum - probs_sort > p
-     probs_sort *= (~mask).float()
-     probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
-     next_token = multinomial(probs_sort, num_samples=1)
-     next_token = torch.gather(probs_idx, -1, next_token)
-     return next_token
-
-
- class DummyPoolExecutor:
-     """Dummy pool executor to use when we actually have only 1 worker.
-     (e.g. instead of ProcessPoolExecutor).
-     """
-     class DummyResult:
-         def __init__(self, func, *args, **kwargs):
-             self.func = func
-             self.args = args
-             self.kwargs = kwargs
-
-         def result(self):
-             return self.func(*self.args, **self.kwargs)
-
-     def __init__(self, workers, mp_context=None):
-         pass
-
-     def submit(self, func, *args, **kwargs):
-         return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
-
-     def __enter__(self):
-         return self
-
-     def __exit__(self, exc_type, exc_value, exc_tb):
-         return
-
-
- def get_pool_executor(num_workers: int, mp_context=None):
-     return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
-
-
- def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
-     """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
-     For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
-
-     Args:
-         lengths (torch.Tensor): tensor with lengths
-         max_len (int): can set the max length manually. Defaults to None.
-     Returns:
-         torch.Tensor: mask with 0s where there are pad tokens else 1s
-     """
-     assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
-     final_length = lengths.max().item() if not max_len else max_len
-     final_length = max(final_length, 1)  # if all seqs are of len zero we don't want a zero-size tensor
-     return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
-
-
- def hash_trick(word: str, vocab_size: int) -> int:
-     """Hash trick to pair each word with an index
-
-     Args:
-         word (str): word we wish to convert to an index
-         vocab_size (int): size of the vocabulary
-     Returns:
-         int: index of the word in the embedding LUT
-     """
-     hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
-     return hash % vocab_size
-
-
- def with_rank_rng(base_seed: int = 1234):
-     """Decorator for a function so that the function will use a Random Number Generator
-     whose state depends on the GPU rank. The original RNG state is restored upon returning.
-
-     Args:
-         base_seed (int): Random seed.
-     """
-     def _decorator(fun: tp.Callable):
-         @wraps(fun)
-         def _decorated(*args, **kwargs):
-             state = torch.get_rng_state()
-             seed = base_seed ^ flashy.distrib.rank()
-             torch.manual_seed(seed)
-             logger.debug('Rank dependent seed set to %d', seed)
-             try:
-                 return fun(*args, **kwargs)
-             finally:
-                 torch.set_rng_state(state)
-                 logger.debug('RNG state restored.')
-         return _decorated
-     return _decorator
-
-
- def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
-     """Get a list of tensors and collate them to a single tensor, according to the following logic:
-     - `dim` specifies the time dimension which will be stacked and padded.
-     - The output will contain 1 new dimension (dimension index 0) which will be the size of
-       the original list.
-
-     Args:
-         tensors (tp.List[torch.Tensor]): List of tensors to collate.
-         dim (int): Dimension which will be stacked and padded.
-     Returns:
-         tp.Tuple[torch.Tensor, torch.Tensor]:
-             torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
-                 (dimension index 0) which will be the size of the original list.
-             torch.Tensor: Tensor containing length of original tensor sizes (without padding).
-     """
-     tensors = [x.transpose(0, dim) for x in tensors]
-     lens = torch.LongTensor([len(x) for x in tensors])
-     padded_tensors = pad_sequence(tensors)
-     padded_tensors = padded_tensors.transpose(0, 1)
-     padded_tensors = padded_tensors.transpose(1, dim + 1)
-     return padded_tensors, lens
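As a quick illustration of the nucleus sampling helper above (a sketch; the probabilities are made up, and sample_top_p is assumed importable from this module):

import torch

probs = torch.tensor([[0.5, 0.3, 0.15, 0.05]])
# cumulative mass before each sorted candidate: 0.0, 0.5, 0.8, 0.95
# with p = 0.7 only the first two candidates stay in the nucleus,
# so the sampled index is always 0 or 1
token = sample_top_p(probs, p=0.7)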
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py DELETED
@@ -1,170 +0,0 @@
- """
-     pygments.formatters.groff
-     ~~~~~~~~~~~~~~~~~~~~~~~~~
-
-     Formatter for groff output.
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
- import math
- from pip._vendor.pygments.formatter import Formatter
- from pip._vendor.pygments.util import get_bool_opt, get_int_opt
-
- __all__ = ['GroffFormatter']
-
-
- class GroffFormatter(Formatter):
-     """
-     Format tokens with groff escapes to change their color and font style.
-
-     .. versionadded:: 2.11
-
-     Additional options accepted:
-
-     `style`
-         The style to use, can be a string or a Style subclass (default:
-         ``'default'``).
-
-     `monospaced`
-         If set to true, monospace font will be used (default: ``true``).
-
-     `linenos`
-         If set to true, print the line numbers (default: ``false``).
-
-     `wrap`
-         Wrap lines to the specified number of characters. Disabled if set to 0
-         (default: ``0``).
-     """
-
-     name = 'groff'
-     aliases = ['groff', 'troff', 'roff']
-     filenames = []
-
-     def __init__(self, **options):
-         Formatter.__init__(self, **options)
-
-         self.monospaced = get_bool_opt(options, 'monospaced', True)
-         self.linenos = get_bool_opt(options, 'linenos', False)
-         self._lineno = 0
-         self.wrap = get_int_opt(options, 'wrap', 0)
-         self._linelen = 0
-
-         self.styles = {}
-         self._make_styles()
-
-     def _make_styles(self):
-         regular = '\\f[CR]' if self.monospaced else '\\f[R]'
-         bold = '\\f[CB]' if self.monospaced else '\\f[B]'
-         italic = '\\f[CI]' if self.monospaced else '\\f[I]'
-
-         for ttype, ndef in self.style:
-             start = end = ''
-             if ndef['color']:
-                 start += '\\m[%s]' % ndef['color']
-                 end = '\\m[]' + end
-             if ndef['bold']:
-                 start += bold
-                 end = regular + end
-             if ndef['italic']:
-                 start += italic
-                 end = regular + end
-             if ndef['bgcolor']:
-                 start += '\\M[%s]' % ndef['bgcolor']
-                 end = '\\M[]' + end
-
-             self.styles[ttype] = start, end
-
-     def _define_colors(self, outfile):
-         colors = set()
-         for _, ndef in self.style:
-             if ndef['color'] is not None:
-                 colors.add(ndef['color'])
-
-         for color in colors:
-             outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')
-
-     def _write_lineno(self, outfile):
-         self._lineno += 1
-         outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))
-
-     def _wrap_line(self, line):
-         length = len(line.rstrip('\n'))
-         space = '     ' if self.linenos else ''
-         newline = ''
-
-         if length > self.wrap:
-             for i in range(0, math.floor(length / self.wrap)):
-                 chunk = line[i*self.wrap:i*self.wrap+self.wrap]
-                 newline += (chunk + '\n' + space)
-             remainder = length % self.wrap
-             if remainder > 0:
-                 newline += line[-remainder-1:]
-                 self._linelen = remainder
-         elif self._linelen + length > self.wrap:
-             newline = ('\n' + space) + line
-             self._linelen = length
-         else:
-             newline = line
-             self._linelen += length
-
-         return newline
-
-     def _escape_chars(self, text):
-         text = text.replace('\\', '\\[u005C]'). \
-             replace('.', '\\[char46]'). \
-             replace('\'', '\\[u0027]'). \
-             replace('`', '\\[u0060]'). \
-             replace('~', '\\[u007E]')
-         copy = text
-
-         for char in copy:
-             if len(char) != len(char.encode()):
-                 uni = char.encode('unicode_escape') \
-                     .decode()[1:] \
-                     .replace('x', 'u00') \
-                     .upper()
-                 text = text.replace(char, '\\[u' + uni[1:] + ']')
-
-         return text
-
-     def format_unencoded(self, tokensource, outfile):
-         self._define_colors(outfile)
-
-         outfile.write('.nf\n\\f[CR]\n')
-
-         if self.linenos:
-             self._write_lineno(outfile)
-
-         for ttype, value in tokensource:
-             while ttype not in self.styles:
-                 ttype = ttype.parent
-             start, end = self.styles[ttype]
-
-             for line in value.splitlines(True):
-                 if self.wrap > 0:
-                     line = self._wrap_line(line)
-
-                 if start and end:
-                     text = self._escape_chars(line.rstrip('\n'))
-                     if text != '':
-                         outfile.write(''.join((start, text, end)))
-                 else:
-                     outfile.write(self._escape_chars(line.rstrip('\n')))
-
-                 if line.endswith('\n'):
-                     if self.linenos:
-                         self._write_lineno(outfile)
-                         self._linelen = 0
-                     else:
-                         outfile.write('\n')
-                         self._linelen = 0
-
-         outfile.write('\n.fi')
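A minimal way to exercise the formatter, assuming the vendored pygments entry points are importable as shown (a sketch, not part of the deleted file):

from pip._vendor.pygments import highlight
from pip._vendor.pygments.lexers import PythonLexer

# emit groff markup with line numbers, wrapped at 72 characters
print(highlight("print('hello')\n", PythonLexer(), GroffFormatter(linenos=True, wrap=72)))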
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_py.py DELETED
@@ -1,407 +0,0 @@
- """distutils.command.build_py
-
- Implements the Distutils 'build_py' command."""
-
- import os
- import importlib.util
- import sys
- import glob
-
- from distutils.core import Command
- from distutils.errors import DistutilsOptionError, DistutilsFileError
- from distutils.util import convert_path
- from distutils import log
-
-
- class build_py(Command):
-
-     description = "\"build\" pure Python modules (copy to build directory)"
-
-     user_options = [
-         ('build-lib=', 'd', "directory to \"build\" (copy) to"),
-         ('compile', 'c', "compile .py to .pyc"),
-         ('no-compile', None, "don't compile .py files [default]"),
-         (
-             'optimize=',
-             'O',
-             "also compile with optimization: -O1 for \"python -O\", "
-             "-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
-         ),
-         ('force', 'f', "forcibly build everything (ignore file timestamps)"),
-     ]
-
-     boolean_options = ['compile', 'force']
-     negative_opt = {'no-compile': 'compile'}
-
-     def initialize_options(self):
-         self.build_lib = None
-         self.py_modules = None
-         self.package = None
-         self.package_data = None
-         self.package_dir = None
-         self.compile = 0
-         self.optimize = 0
-         self.force = None
-
-     def finalize_options(self):
-         self.set_undefined_options(
-             'build', ('build_lib', 'build_lib'), ('force', 'force')
-         )
-
-         # Get the distribution options that are aliases for build_py
-         # options -- list of packages and list of modules.
-         self.packages = self.distribution.packages
-         self.py_modules = self.distribution.py_modules
-         self.package_data = self.distribution.package_data
-         self.package_dir = {}
-         if self.distribution.package_dir:
-             for name, path in self.distribution.package_dir.items():
-                 self.package_dir[name] = convert_path(path)
-         self.data_files = self.get_data_files()
-
-         # Ick, copied straight from install_lib.py (fancy_getopt needs a
-         # type system!  Hell, *everything* needs a type system!!!)
-         if not isinstance(self.optimize, int):
-             try:
-                 self.optimize = int(self.optimize)
-                 assert 0 <= self.optimize <= 2
-             except (ValueError, AssertionError):
-                 raise DistutilsOptionError("optimize must be 0, 1, or 2")
-
-     def run(self):
-         # XXX copy_file by default preserves atime and mtime.  IMHO this is
-         # the right thing to do, but perhaps it should be an option -- in
-         # particular, a site administrator might want installed files to
-         # reflect the time of installation rather than the last
-         # modification time before the installed release.
-
-         # XXX copy_file by default preserves mode, which appears to be the
-         # wrong thing to do: if a file is read-only in the working
-         # directory, we want it to be installed read/write so that the next
-         # installation of the same module distribution can overwrite it
-         # without problems.  (This might be a Unix-specific issue.)  Thus
-         # we turn off 'preserve_mode' when copying to the build directory,
-         # since the build directory is supposed to be exactly what the
-         # installation will look like (ie. we preserve mode when
-         # installing).
-
-         # Two options control which modules will be installed: 'packages'
-         # and 'py_modules'.  The former lets us work with whole packages, not
-         # specifying individual modules at all; the latter is for
-         # specifying modules one-at-a-time.
-
-         if self.py_modules:
-             self.build_modules()
-         if self.packages:
-             self.build_packages()
-             self.build_package_data()
-
-         self.byte_compile(self.get_outputs(include_bytecode=0))
-
-     def get_data_files(self):
-         """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
-         data = []
-         if not self.packages:
-             return data
-         for package in self.packages:
-             # Locate package source directory
-             src_dir = self.get_package_dir(package)
-
-             # Compute package build directory
-             build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
-             # Length of path to strip from found files
-             plen = 0
-             if src_dir:
-                 plen = len(src_dir) + 1
-
-             # Strip directory from globbed filenames
-             filenames = [file[plen:] for file in self.find_data_files(package, src_dir)]
-             data.append((package, src_dir, build_dir, filenames))
-         return data
-
-     def find_data_files(self, package, src_dir):
-         """Return filenames for package's data files in 'src_dir'"""
-         globs = self.package_data.get('', []) + self.package_data.get(package, [])
-         files = []
-         for pattern in globs:
-             # Each pattern has to be converted to a platform-specific path
-             filelist = glob.glob(
-                 os.path.join(glob.escape(src_dir), convert_path(pattern))
-             )
-             # Files that match more than one pattern are only added once
-             files.extend(
-                 [fn for fn in filelist if fn not in files and os.path.isfile(fn)]
-             )
-         return files
-
-     def build_package_data(self):
-         """Copy data files into build directory"""
-         for package, src_dir, build_dir, filenames in self.data_files:
-             for filename in filenames:
-                 target = os.path.join(build_dir, filename)
-                 self.mkpath(os.path.dirname(target))
-                 self.copy_file(
-                     os.path.join(src_dir, filename), target, preserve_mode=False
-                 )
-
-     def get_package_dir(self, package):
-         """Return the directory, relative to the top of the source
-         distribution, where package 'package' should be found
-         (at least according to the 'package_dir' option, if any)."""
-         path = package.split('.')
-
-         if not self.package_dir:
-             if path:
-                 return os.path.join(*path)
-             else:
-                 return ''
-         else:
-             tail = []
-             while path:
-                 try:
-                     pdir = self.package_dir['.'.join(path)]
-                 except KeyError:
-                     tail.insert(0, path[-1])
-                     del path[-1]
-                 else:
-                     tail.insert(0, pdir)
-                     return os.path.join(*tail)
-             else:
-                 # Oops, got all the way through 'path' without finding a
-                 # match in package_dir.  If package_dir defines a directory
-                 # for the root (nameless) package, then fallback on it;
-                 # otherwise, we might as well have not consulted
-                 # package_dir at all, as we just use the directory implied
-                 # by 'tail' (which should be the same as the original value
-                 # of 'path' at this point).
-                 pdir = self.package_dir.get('')
-                 if pdir is not None:
-                     tail.insert(0, pdir)
-
-                 if tail:
-                     return os.path.join(*tail)
-                 else:
-                     return ''
-
-     def check_package(self, package, package_dir):
-         # Empty dir name means current directory, which we can probably
-         # assume exists.  Also, os.path.exists and isdir don't know about
-         # my "empty string means current dir" convention, so we have to
-         # circumvent them.
-         if package_dir != "":
-             if not os.path.exists(package_dir):
-                 raise DistutilsFileError(
-                     "package directory '%s' does not exist" % package_dir
-                 )
-             if not os.path.isdir(package_dir):
-                 raise DistutilsFileError(
-                     "supposed package directory '%s' exists, "
-                     "but is not a directory" % package_dir
-                 )
-
-         # Directories without __init__.py are namespace packages (PEP 420).
-         if package:
-             init_py = os.path.join(package_dir, "__init__.py")
-             if os.path.isfile(init_py):
-                 return init_py
-
-         # Either not in a package at all (__init__.py not expected), or
-         # __init__.py doesn't exist -- so don't return the filename.
-         return None
-
-     def check_module(self, module, module_file):
-         if not os.path.isfile(module_file):
-             log.warn("file %s (for module %s) not found", module_file, module)
-             return False
-         else:
-             return True
-
-     def find_package_modules(self, package, package_dir):
-         self.check_package(package, package_dir)
-         module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py"))
-         modules = []
-         setup_script = os.path.abspath(self.distribution.script_name)
-
-         for f in module_files:
-             abs_f = os.path.abspath(f)
-             if abs_f != setup_script:
-                 module = os.path.splitext(os.path.basename(f))[0]
-                 modules.append((package, module, f))
-             else:
-                 self.debug_print("excluding %s" % setup_script)
-         return modules
-
-     def find_modules(self):
-         """Finds individually-specified Python modules, ie. those listed by
-         module name in 'self.py_modules'.  Returns a list of tuples (package,
-         module_base, filename): 'package' is a tuple of the path through
-         package-space to the module; 'module_base' is the bare (no
-         packages, no dots) module name, and 'filename' is the path to the
-         ".py" file (relative to the distribution root) that implements the
-         module.
-         """
-         # Map package names to tuples of useful info about the package:
-         #    (package_dir, checked)
-         # package_dir - the directory where we'll find source files for
-         #   this package
-         # checked - true if we have checked that the package directory
-         #   is valid (exists, contains __init__.py, ... ?)
-         packages = {}
-
-         # List of (package, module, filename) tuples to return
-         modules = []
-
-         # We treat modules-in-packages almost the same as toplevel modules,
-         # just the "package" for a toplevel is empty (either an empty
-         # string or empty list, depending on context).  Differences:
-         #   - don't check for __init__.py in directory for empty package
-         for module in self.py_modules:
-             path = module.split('.')
-             package = '.'.join(path[0:-1])
-             module_base = path[-1]
-
-             try:
-                 (package_dir, checked) = packages[package]
-             except KeyError:
-                 package_dir = self.get_package_dir(package)
-                 checked = 0
-
-             if not checked:
-                 init_py = self.check_package(package, package_dir)
-                 packages[package] = (package_dir, 1)
-                 if init_py:
-                     modules.append((package, "__init__", init_py))
-
-             # XXX perhaps we should also check for just .pyc files
-             # (so greedy closed-source bastards can distribute Python
-             # modules too)
-             module_file = os.path.join(package_dir, module_base + ".py")
-             if not self.check_module(module, module_file):
-                 continue
-
-             modules.append((package, module_base, module_file))
-
-         return modules
-
-     def find_all_modules(self):
-         """Compute the list of all modules that will be built, whether
-         they are specified one-module-at-a-time ('self.py_modules') or
-         by whole packages ('self.packages').  Return a list of tuples
-         (package, module, module_file), just like 'find_modules()' and
-         'find_package_modules()' do."""
-         modules = []
-         if self.py_modules:
-             modules.extend(self.find_modules())
-         if self.packages:
-             for package in self.packages:
-                 package_dir = self.get_package_dir(package)
-                 m = self.find_package_modules(package, package_dir)
-                 modules.extend(m)
-         return modules
-
-     def get_source_files(self):
-         return [module[-1] for module in self.find_all_modules()]
-
-     def get_module_outfile(self, build_dir, package, module):
-         outfile_path = [build_dir] + list(package) + [module + ".py"]
-         return os.path.join(*outfile_path)
-
-     def get_outputs(self, include_bytecode=1):
-         modules = self.find_all_modules()
-         outputs = []
-         for (package, module, module_file) in modules:
-             package = package.split('.')
-             filename = self.get_module_outfile(self.build_lib, package, module)
-             outputs.append(filename)
-             if include_bytecode:
-                 if self.compile:
-                     outputs.append(
-                         importlib.util.cache_from_source(filename, optimization='')
-                     )
-                 if self.optimize > 0:
-                     outputs.append(
-                         importlib.util.cache_from_source(
-                             filename, optimization=self.optimize
-                         )
-                     )
-
-         outputs += [
-             os.path.join(build_dir, filename)
-             for package, src_dir, build_dir, filenames in self.data_files
-             for filename in filenames
-         ]
-
-         return outputs
-
-     def build_module(self, module, module_file, package):
-         if isinstance(package, str):
-             package = package.split('.')
-         elif not isinstance(package, (list, tuple)):
-             raise TypeError(
-                 "'package' must be a string (dot-separated), list, or tuple"
-             )
-
-         # Now put the module source file into the "build" area -- this is
-         # easy, we just copy it somewhere under self.build_lib (the build
-         # directory for Python source).
-         outfile = self.get_module_outfile(self.build_lib, package, module)
-         dir = os.path.dirname(outfile)
-         self.mkpath(dir)
-         return self.copy_file(module_file, outfile, preserve_mode=0)
-
-     def build_modules(self):
-         modules = self.find_modules()
-         for (package, module, module_file) in modules:
-             # Now "build" the module -- ie. copy the source file to
-             # self.build_lib (the build directory for Python source).
-             # (Actually, it gets copied to the directory for this package
-             # under self.build_lib.)
-             self.build_module(module, module_file, package)
-
-     def build_packages(self):
-         for package in self.packages:
-             # Get list of (package, module, module_file) tuples based on
-             # scanning the package directory.  'package' is only included
-             # in the tuple so that 'find_modules()' and
-             # 'find_package_tuples()' have a consistent interface; it's
-             # ignored here (apart from a sanity check).  Also, 'module' is
-             # the *unqualified* module name (ie. no dots, no package -- we
-             # already know its package!), and 'module_file' is the path to
-             # the .py file, relative to the current directory
-             # (ie. including 'package_dir').
-             package_dir = self.get_package_dir(package)
-             modules = self.find_package_modules(package, package_dir)
-
-             # Now loop over the modules we found, "building" each one (just
-             # copy it to self.build_lib).
-             for (package_, module, module_file) in modules:
-                 assert package == package_
-                 self.build_module(module, module_file, package)
-
-     def byte_compile(self, files):
-         if sys.dont_write_bytecode:
-             self.warn('byte-compiling is disabled, skipping.')
-             return
-
-         from distutils.util import byte_compile
-
-         prefix = self.build_lib
-         if prefix[-1] != os.sep:
-             prefix = prefix + os.sep
-
-         # XXX this code is essentially the same as the 'byte_compile()
-         # method of the "install_lib" command, except for the determination
-         # of the 'prefix' string.  Hmmm.
-         if self.compile:
-             byte_compile(
-                 files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run
-             )
-         if self.optimize > 0:
-             byte_compile(
-                 files,
-                 optimize=self.optimize,
-                 force=self.force,
-                 prefix=prefix,
-                 dry_run=self.dry_run,
-             )
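To see how get_package_dir resolves the package_dir mapping, here is a sketch with a stub that bypasses Command.__init__ (illustration only; this method only consults the package_dir attribute):

import os

class _Stub(build_py):
    def __init__(self):
        # skip Command.__init__ purely for illustration
        self.package_dir = {'': 'src', 'pkg.sub': 'libs/sub'}

b = _Stub()
assert b.get_package_dir('pkg.sub') == 'libs/sub'
# no entry for 'pkg.other', so it falls back on the root ('') entry
assert b.get_package_dir('pkg.other') == os.path.join('src', 'pkg', 'other')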
spaces/Aveygo/AstroSleuth/file_queue.py DELETED
@@ -1,109 +0,0 @@
- import time, random, marshal, os
-
- MAX_AGE = 5
-
- class FileQueue:
-     def __init__(self, est_time=60, id=None):
-         queue: list = self.load()
-         self.id = random.randint(0, 2**16) if id is None else id
-         self.est_time = est_time
-         self.start = time.time()
-         queue.append((self.id, self.est_time, self.start, self.start))
-         self.save(queue)
-
-     def load(self) -> list:
-         if not os.path.exists("queue"):
-             self.save([])
-
-         try:
-             with open("queue", "rb") as f:
-                 return marshal.load(f)
-         except EOFError:
-             time.sleep(random.random())
-             return self.load()
-
-     def save(self, queue: list):
-         try:
-             with open("queue", "wb") as f:
-                 marshal.dump(queue, f)
-         except OSError:
-             time.sleep(random.random())
-             self.save(queue)
-
-     def heartbeat(self):
-         queue = self.load()
-         for i, q in enumerate(queue):
-             if q[0] == self.id:
-                 queue[i] = (self.id, self.est_time, self.start, time.time())
-                 break
-         self.save(queue)
-
-     def should_run(self) -> bool:
-         queue = self.load()
-         queue = [q for q in queue if q[3] > time.time() - MAX_AGE and q[2] < self.start]
-         queue.sort(key=lambda x: x[2])
-         if len(queue) == 0:
-             return True
-         return queue[0][0] == self.id  # First in queue
-
-     def update_est_time(self, est_time: float):
-         queue = self.load()
-         for i, q in enumerate(queue):
-             if q[0] == self.id:
-                 queue[i] = (self.id, est_time, self.start, time.time())
-                 break
-         self.save(queue)
-
-     def get_queue_len(self) -> int:
-         queue = self.load()
-         count = 0
-         for q in queue:
-             if q[3] > time.time() - MAX_AGE and q[2] < self.start:
-                 count += 1
-         return count
-
-     def get_queue_est_time(self) -> float:
-         queue = self.load()
-         count = 0
-         for q in queue:
-             if q[3] > time.time() - MAX_AGE and q[2] < self.start:
-                 count += q[1]
-         return count
-
-     def quit(self):
-         queue = self.load()
-         for i, q in enumerate(queue):
-             if q[0] == self.id:
-                 del queue[i]
-                 break
-         self.save(queue)
-
-     def __del__(self):
-         self.quit()
-
- if __name__ == '__main__':
-     import threading
-
-     def test(worker_id):
-         q = FileQueue()
-
-         # Wait to be first in queue
-         while not q.should_run():
-             time.sleep(1)
-             q.heartbeat()
-
-         # Do stuff
-         print(f"Worker {worker_id} started")
-         for i in range(10):
-             time.sleep(1)
-             q.heartbeat()
-             print(f"Worker {worker_id} progress: {i + 1}/10")
-
-         # Leave queue
-         print(f"Worker {worker_id} finished")
-         q.quit()
-
-     for i in range(5):
-         threading.Thread(target=test, args=(i,)).start()
-         time.sleep(0.123)
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/format_control.py DELETED
@@ -1,80 +0,0 @@
- from typing import FrozenSet, Optional, Set
-
- from pip._vendor.packaging.utils import canonicalize_name
-
- from pip._internal.exceptions import CommandError
-
-
- class FormatControl:
-     """Helper for managing formats from which a package can be installed."""
-
-     __slots__ = ["no_binary", "only_binary"]
-
-     def __init__(
-         self,
-         no_binary: Optional[Set[str]] = None,
-         only_binary: Optional[Set[str]] = None,
-     ) -> None:
-         if no_binary is None:
-             no_binary = set()
-         if only_binary is None:
-             only_binary = set()
-
-         self.no_binary = no_binary
-         self.only_binary = only_binary
-
-     def __eq__(self, other: object) -> bool:
-         if not isinstance(other, self.__class__):
-             return NotImplemented
-
-         if self.__slots__ != other.__slots__:
-             return False
-
-         return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
-
-     def __repr__(self) -> str:
-         return "{}({}, {})".format(
-             self.__class__.__name__, self.no_binary, self.only_binary
-         )
-
-     @staticmethod
-     def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None:
-         if value.startswith("-"):
-             raise CommandError(
-                 "--no-binary / --only-binary option requires 1 argument."
-             )
-         new = value.split(",")
-         while ":all:" in new:
-             other.clear()
-             target.clear()
-             target.add(":all:")
-             del new[: new.index(":all:") + 1]
-             # Without a none, we want to discard everything as :all: covers it
-             if ":none:" not in new:
-                 return
-         for name in new:
-             if name == ":none:":
-                 target.clear()
-                 continue
-             name = canonicalize_name(name)
-             other.discard(name)
-             target.add(name)
-
-     def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]:
-         result = {"binary", "source"}
-         if canonical_name in self.only_binary:
-             result.discard("source")
-         elif canonical_name in self.no_binary:
-             result.discard("binary")
-         elif ":all:" in self.only_binary:
-             result.discard("source")
-         elif ":all:" in self.no_binary:
-             result.discard("binary")
-         return frozenset(result)
-
-     def disallow_binaries(self) -> None:
-         self.handle_mutual_excludes(
-             ":all:",
-             self.no_binary,
-             self.only_binary,
-         )
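For reference, the option handling above behaves like this (a sketch equivalent to passing --no-binary numpy on the command line):

fc = FormatControl()
FormatControl.handle_mutual_excludes('numpy', fc.no_binary, fc.only_binary)
assert fc.get_allowed_formats('numpy') == frozenset({'source'})
assert fc.get_allowed_formats('lxml') == frozenset({'binary', 'source'})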
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py DELETED
@@ -1,57 +0,0 @@
-# coding: utf-8
-from .exceptions import *
-from .ext import ExtType, Timestamp
-
-import os
-import sys
-
-
-version = (1, 0, 5)
-__version__ = "1.0.5"
-
-
-if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2:
-    from .fallback import Packer, unpackb, Unpacker
-else:
-    try:
-        from ._cmsgpack import Packer, unpackb, Unpacker
-    except ImportError:
-        from .fallback import Packer, unpackb, Unpacker
-
-
-def pack(o, stream, **kwargs):
-    """
-    Pack object `o` and write it to `stream`
-
-    See :class:`Packer` for options.
-    """
-    packer = Packer(**kwargs)
-    stream.write(packer.pack(o))
-
-
-def packb(o, **kwargs):
-    """
-    Pack object `o` and return packed bytes
-
-    See :class:`Packer` for options.
-    """
-    return Packer(**kwargs).pack(o)
-
-
-def unpack(stream, **kwargs):
-    """
-    Unpack an object from `stream`.
-
-    Raises `ExtraData` when `stream` contains extra bytes.
-    See :class:`Unpacker` for options.
-    """
-    data = stream.read()
-    return unpackb(data, **kwargs)
-
-
-# alias for compatibility to simplejson/marshal/pickle.
-load = unpack
-loads = unpackb
-
-dump = pack
-dumps = packb
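A round-trip sketch using the public names the deleted module re-exports; it assumes the upstream msgpack package is installed and is not part of the diff:

```python
# packb serializes to bytes via Packer; unpackb restores the object.
import msgpack

payload = {"step": 12, "total": 296, "ok": True}
packed = msgpack.packb(payload)     # bytes
restored = msgpack.unpackb(packed)  # back to a dict
assert restored == payload
```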
spaces/Binguii/Ballen/Dockerfile DELETED
@@ -1,20 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && \
-    apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/train_engine.py DELETED
@@ -1,311 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-import os, torch, datetime, shutil, time
-import numpy as np
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.data as Data
-from openvqa.models.model_loader import ModelLoader
-from openvqa.utils.optim import get_optim, adjust_lr
-from utils.test_engine import test_engine, ckpt_proc
-from utils.extract_engine import extract_engine
-
-
-def train_engine(__C, dataset, dataset_eval=None):
-
-    data_size = dataset.data_size
-    token_size = dataset.token_size
-    ans_size = dataset.ans_size
-    pretrained_emb = dataset.pretrained_emb
-
-    net = ModelLoader(__C).Net(
-        __C,
-        pretrained_emb,
-        token_size,
-        ans_size
-    )
-    net.cuda()
-    net.train()
-
-    if __C.N_GPU > 1:
-        net = nn.DataParallel(net, device_ids=__C.DEVICES)
-
-    # Define Loss Function
-    loss_fn = eval('torch.nn.' + __C.LOSS_FUNC_NAME_DICT[__C.LOSS_FUNC] + "(reduction='" + __C.LOSS_REDUCTION + "').cuda()")
-
-    # Load checkpoint if resume training
-    if __C.RESUME:
-        print(' ========== Resume training')
-
-        if __C.CKPT_PATH is not None:
-            print('Warning: Now using CKPT_PATH args, '
-                  'CKPT_VERSION and CKPT_EPOCH will not work')
-
-            path = __C.CKPT_PATH
-        else:
-            path = __C.CKPTS_PATH + \
-                   '/ckpt_' + __C.CKPT_VERSION + \
-                   '/epoch' + str(__C.CKPT_EPOCH) + '.pkl'
-
-        # Load the network parameters
-        print('Loading ckpt from {}'.format(path))
-        ckpt = torch.load(path)
-        print('Finish!')
-
-        if __C.N_GPU > 1:
-            net.load_state_dict(ckpt_proc(ckpt['state_dict']))
-        else:
-            net.load_state_dict(ckpt['state_dict'])
-        start_epoch = ckpt['epoch']
-
-        # Load the optimizer paramters
-        optim = get_optim(__C, net, data_size, ckpt['lr_base'])
-        optim._step = int(data_size / __C.BATCH_SIZE * start_epoch)
-        optim.optimizer.load_state_dict(ckpt['optimizer'])
-
-        if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH):
-            os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)
-
-    else:
-        if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH):
-            #shutil.rmtree(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)
-            os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)
-
-        optim = get_optim(__C, net, data_size)
-        start_epoch = 0
-
-    loss_sum = 0
-    named_params = list(net.named_parameters())
-    grad_norm = np.zeros(len(named_params))
-
-    # Define multi-thread dataloader
-    # if __C.SHUFFLE_MODE in ['external']:
-    #     dataloader = Data.DataLoader(
-    #         dataset,
-    #         batch_size=__C.BATCH_SIZE,
-    #         shuffle=False,
-    #         num_workers=__C.NUM_WORKERS,
-    #         pin_memory=__C.PIN_MEM,
-    #         drop_last=True
-    #     )
-    # else:
-    dataloader = Data.DataLoader(
-        dataset,
-        batch_size=__C.BATCH_SIZE,
-        shuffle=True,
-        num_workers=__C.NUM_WORKERS,
-        pin_memory=__C.PIN_MEM,
-        drop_last=True
-    )
-
-    logfile = open(
-        __C.LOG_PATH +
-        '/log_run_' + __C.VERSION + '.txt',
-        'a+'
-    )
-    logfile.write(str(__C))
-    logfile.close()
-
-    # Training script
-    for epoch in range(start_epoch, __C.MAX_EPOCH):
-
-        # Save log to file
-        logfile = open(
-            __C.LOG_PATH +
-            '/log_run_' + __C.VERSION + '.txt',
-            'a+'
-        )
-        logfile.write(
-            '=====================================\nnowTime: ' +
-            datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
-            '\n'
-        )
-        logfile.close()
-
-        # Learning Rate Decay
-        if epoch in __C.LR_DECAY_LIST:
-            adjust_lr(optim, __C.LR_DECAY_R)
-
-        # Externally shuffle data list
-        # if __C.SHUFFLE_MODE == 'external':
-        #     dataset.shuffle_list(dataset.ans_list)
-
-        time_start = time.time()
-        # Iteration
-        for step, (
-                frcn_feat_iter,
-                grid_feat_iter,
-                bbox_feat_iter,
-                ques_ix_iter,
-                ans_iter
-        ) in enumerate(dataloader):
-
-            optim.zero_grad()
-
-            frcn_feat_iter = frcn_feat_iter.cuda()
-            grid_feat_iter = grid_feat_iter.cuda()
-            bbox_feat_iter = bbox_feat_iter.cuda()
-            ques_ix_iter = ques_ix_iter.cuda()
-            ans_iter = ans_iter.cuda()
-
-            loss_tmp = 0
-            for accu_step in range(__C.GRAD_ACCU_STEPS):
-                loss_tmp = 0
-
-                sub_frcn_feat_iter = \
-                    frcn_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
-                                   (accu_step + 1) * __C.SUB_BATCH_SIZE]
-                sub_grid_feat_iter = \
-                    grid_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
-                                   (accu_step + 1) * __C.SUB_BATCH_SIZE]
-                sub_bbox_feat_iter = \
-                    bbox_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
-                                   (accu_step + 1) * __C.SUB_BATCH_SIZE]
-                sub_ques_ix_iter = \
-                    ques_ix_iter[accu_step * __C.SUB_BATCH_SIZE:
-                                 (accu_step + 1) * __C.SUB_BATCH_SIZE]
-                sub_ans_iter = \
-                    ans_iter[accu_step * __C.SUB_BATCH_SIZE:
-                             (accu_step + 1) * __C.SUB_BATCH_SIZE]
-
-                pred = net(
-                    sub_frcn_feat_iter,
-                    sub_grid_feat_iter,
-                    sub_bbox_feat_iter,
-                    sub_ques_ix_iter
-                )
-
-                loss_item = [pred, sub_ans_iter]
-                loss_nonlinear_list = __C.LOSS_FUNC_NONLINEAR[__C.LOSS_FUNC]
-                for item_ix, loss_nonlinear in enumerate(loss_nonlinear_list):
-                    if loss_nonlinear in ['flat']:
-                        loss_item[item_ix] = loss_item[item_ix].view(-1)
-                    elif loss_nonlinear:
-                        loss_item[item_ix] = eval('F.' + loss_nonlinear + '(loss_item[item_ix], dim=1)')
-
-                loss = loss_fn(loss_item[0], loss_item[1])
-                if __C.LOSS_REDUCTION == 'mean':
-                    # only mean-reduction needs be divided by grad_accu_steps
-                    loss /= __C.GRAD_ACCU_STEPS
-                loss.backward()
-
-                loss_tmp += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS
-                loss_sum += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS
-
-            if __C.VERBOSE:
-                if dataset_eval is not None:
-                    mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['val']
-                else:
-                    mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['test']
-
-                print("\r[Version %s][Model %s][Dataset %s][Epoch %2d][Step %4d/%4d][%s] Loss: %.4f, Lr: %.2e" % (
-                    __C.VERSION,
-                    __C.MODEL_USE,
-                    __C.DATASET,
-                    epoch + 1,
-                    step,
-                    int(data_size / __C.BATCH_SIZE),
-                    mode_str,
-                    loss_tmp / __C.SUB_BATCH_SIZE,
-                    optim._rate
-                ), end=' ')
-
-            # Gradient norm clipping
-            if __C.GRAD_NORM_CLIP > 0:
-                nn.utils.clip_grad_norm_(
-                    net.parameters(),
-                    __C.GRAD_NORM_CLIP
-                )
-
-            # Save the gradient information
-            for name in range(len(named_params)):
-                norm_v = torch.norm(named_params[name][1].grad).cpu().data.numpy() \
-                    if named_params[name][1].grad is not None else 0
-                grad_norm[name] += norm_v * __C.GRAD_ACCU_STEPS
-                # print('Param %-3s Name %-80s Grad_Norm %-20s'%
-                #       (str(grad_wt),
-                #        params[grad_wt][0],
-                #        str(norm_v)))
-
-            optim.step()
-
-        time_end = time.time()
-        elapse_time = time_end-time_start
-        print('Finished in {}s'.format(int(elapse_time)))
-        epoch_finish = epoch + 1
-
-        # Save checkpoint
-        if not __C.SAVE_LAST or epoch_finish == __C.MAX_EPOCH:
-            if __C.N_GPU > 1:
-                state = {
-                    'state_dict': net.module.state_dict(),
-                    'optimizer': optim.optimizer.state_dict(),
-                    'lr_base': optim.lr_base,
-                    'epoch': epoch_finish
-                }
-            else:
-                state = {
-                    'state_dict': net.state_dict(),
-                    'optimizer': optim.optimizer.state_dict(),
-                    'lr_base': optim.lr_base,
-                    'epoch': epoch_finish
-                }
-            torch.save(
-                state,
-                __C.CKPTS_PATH +
-                '/ckpt_' + __C.VERSION +
-                '/epoch' + str(epoch_finish) +
-                '.pkl'
-            )
-
-        # Logging
-        logfile = open(
-            __C.LOG_PATH +
-            '/log_run_' + __C.VERSION + '.txt',
-            'a+'
-        )
-        logfile.write(
-            'Epoch: ' + str(epoch_finish) +
-            ', Loss: ' + str(loss_sum / data_size) +
-            ', Lr: ' + str(optim._rate) + '\n' +
-            'Elapsed time: ' + str(int(elapse_time)) +
-            ', Speed(s/batch): ' + str(elapse_time / step) +
-            '\n\n'
-        )
-        logfile.close()
-
-        # Eval after every epoch
-        if dataset_eval is not None:
-            test_engine(
-                __C,
-                dataset_eval,
-                state_dict=net.state_dict(),
-                validation=True
-            )
-
-        # if self.__C.VERBOSE:
-        #     logfile = open(
-        #         self.__C.LOG_PATH +
-        #         '/log_run_' + self.__C.VERSION + '.txt',
-        #         'a+'
-        #     )
-        #     for name in range(len(named_params)):
-        #         logfile.write(
-        #             'Param %-3s Name %-80s Grad_Norm %-25s\n' % (
-        #                 str(name),
-        #                 named_params[name][0],
-        #                 str(grad_norm[name] / data_size * self.__C.BATCH_SIZE)
-        #             )
-        #         )
-        #     logfile.write('\n')
-        #     logfile.close()
-
-        loss_sum = 0
-        grad_norm = np.zeros(len(named_params))
-
-    # Modification - optionally run full result extract after training ends
-    if __C.EXTRACT_AFTER:
-        extract_engine(__C, state_dict=net.state_dict())
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/par.h DELETED
@@ -1,62 +0,0 @@
-/*
- *  Copyright 2008-2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/allocator_aware_execution_policy.h>
-#include <thrust/system/cpp/detail/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace cpp
-{
-namespace detail
-{
-
-
-struct par_t : thrust::system::cpp::detail::execution_policy<par_t>,
-  thrust::detail::allocator_aware_execution_policy<
-    thrust::system::cpp::detail::execution_policy>
-{
-  __host__ __device__
-  THRUST_CONSTEXPR par_t() : thrust::system::cpp::detail::execution_policy<par_t>() {}
-};
-
-
-} // end detail
-
-
-THRUST_INLINE_CONSTANT detail::par_t par;
-
-
-} // end cpp
-} // end system
-
-
-// alias par here
-namespace cpp
-{
-
-
-using thrust::system::cpp::par;
-
-
-} // end cpp
-} // end thrust
-
spaces/CVPR/WALT/mmdet/core/bbox/iou_calculators/__init__.py DELETED
@@ -1,4 +0,0 @@
-from .builder import build_iou_calculator
-from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
-
-__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
spaces/CVPR/WALT/mmdet/models/dense_heads/pisa_ssd_head.py DELETED
@@ -1,139 +0,0 @@
-import torch
-
-from mmdet.core import multi_apply
-from ..builder import HEADS
-from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
-from .ssd_head import SSDHead
-
-
-# TODO: add loss evaluator for SSD
-@HEADS.register_module()
-class PISASSDHead(SSDHead):
-
-    def loss(self,
-             cls_scores,
-             bbox_preds,
-             gt_bboxes,
-             gt_labels,
-             img_metas,
-             gt_bboxes_ignore=None):
-        """Compute losses of the head.
-
-        Args:
-            cls_scores (list[Tensor]): Box scores for each scale level
-                Has shape (N, num_anchors * num_classes, H, W)
-            bbox_preds (list[Tensor]): Box energies / deltas for each scale
-                level with shape (N, num_anchors * 4, H, W)
-            gt_bboxes (list[Tensor]): Ground truth bboxes of each image
-                with shape (num_obj, 4).
-            gt_labels (list[Tensor]): Ground truth labels of each image
-                with shape (num_obj, 4).
-            img_metas (list[dict]): Meta information of each image, e.g.,
-                image size, scaling factor, etc.
-            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
-                Default: None.
-
-        Returns:
-            dict: Loss dict, comprise classification loss regression loss and
-                carl loss.
-        """
-        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
-        assert len(featmap_sizes) == self.anchor_generator.num_levels
-
-        device = cls_scores[0].device
-
-        anchor_list, valid_flag_list = self.get_anchors(
-            featmap_sizes, img_metas, device=device)
-        cls_reg_targets = self.get_targets(
-            anchor_list,
-            valid_flag_list,
-            gt_bboxes,
-            img_metas,
-            gt_bboxes_ignore_list=gt_bboxes_ignore,
-            gt_labels_list=gt_labels,
-            label_channels=1,
-            unmap_outputs=False,
-            return_sampling_results=True)
-        if cls_reg_targets is None:
-            return None
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
-         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
-
-        num_images = len(img_metas)
-        all_cls_scores = torch.cat([
-            s.permute(0, 2, 3, 1).reshape(
-                num_images, -1, self.cls_out_channels) for s in cls_scores
-        ], 1)
-        all_labels = torch.cat(labels_list, -1).view(num_images, -1)
-        all_label_weights = torch.cat(label_weights_list,
-                                      -1).view(num_images, -1)
-        all_bbox_preds = torch.cat([
-            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
-            for b in bbox_preds
-        ], -2)
-        all_bbox_targets = torch.cat(bbox_targets_list,
-                                     -2).view(num_images, -1, 4)
-        all_bbox_weights = torch.cat(bbox_weights_list,
-                                     -2).view(num_images, -1, 4)
-
-        # concat all level anchors to a single tensor
-        all_anchors = []
-        for i in range(num_images):
-            all_anchors.append(torch.cat(anchor_list[i]))
-
-        isr_cfg = self.train_cfg.get('isr', None)
-        all_targets = (all_labels.view(-1), all_label_weights.view(-1),
-                       all_bbox_targets.view(-1,
-                                             4), all_bbox_weights.view(-1, 4))
-        # apply ISR-P
-        if isr_cfg is not None:
-            all_targets = isr_p(
-                all_cls_scores.view(-1, all_cls_scores.size(-1)),
-                all_bbox_preds.view(-1, 4),
-                all_targets,
-                torch.cat(all_anchors),
-                sampling_results_list,
-                loss_cls=CrossEntropyLoss(),
-                bbox_coder=self.bbox_coder,
-                **self.train_cfg.isr,
-                num_class=self.num_classes)
-            (new_labels, new_label_weights, new_bbox_targets,
-             new_bbox_weights) = all_targets
-            all_labels = new_labels.view(all_labels.shape)
-            all_label_weights = new_label_weights.view(all_label_weights.shape)
-            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
-            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
-
-        # add CARL loss
-        carl_loss_cfg = self.train_cfg.get('carl', None)
-        if carl_loss_cfg is not None:
-            loss_carl = carl_loss(
-                all_cls_scores.view(-1, all_cls_scores.size(-1)),
-                all_targets[0],
-                all_bbox_preds.view(-1, 4),
-                all_targets[2],
-                SmoothL1Loss(beta=1.),
-                **self.train_cfg.carl,
-                avg_factor=num_total_pos,
-                num_class=self.num_classes)
-
-        # check NaN and Inf
-        assert torch.isfinite(all_cls_scores).all().item(), \
-            'classification scores become infinite or NaN!'
-        assert torch.isfinite(all_bbox_preds).all().item(), \
-            'bbox predications become infinite or NaN!'
-
-        losses_cls, losses_bbox = multi_apply(
-            self.loss_single,
-            all_cls_scores,
-            all_bbox_preds,
-            all_anchors,
-            all_labels,
-            all_label_weights,
-            all_bbox_targets,
-            all_bbox_weights,
-            num_total_samples=num_total_pos)
-        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
-        if carl_loss_cfg is not None:
-            loss_dict.update(loss_carl)
-        return loss_dict
spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/functional_pil.py DELETED
@@ -1,352 +0,0 @@
-import numbers
-from typing import Any, List, Sequence
-
-import numpy as np
-import torch
-from PIL import Image, ImageOps, ImageEnhance
-
-try:
-    import accimage
-except ImportError:
-    accimage = None
-
-
-@torch.jit.unused
-def _is_pil_image(img: Any) -> bool:
-    if accimage is not None:
-        return isinstance(img, (Image.Image, accimage.Image))
-    else:
-        return isinstance(img, Image.Image)
-
-
-@torch.jit.unused
-def _get_image_size(img: Any) -> List[int]:
-    if _is_pil_image(img):
-        return img.size
-    raise TypeError("Unexpected type {}".format(type(img)))
-
-
-@torch.jit.unused
-def _get_image_num_channels(img: Any) -> int:
-    if _is_pil_image(img):
-        return 1 if img.mode == 'L' else 3
-    raise TypeError("Unexpected type {}".format(type(img)))
-
-
-@torch.jit.unused
-def hflip(img):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    return img.transpose(Image.FLIP_LEFT_RIGHT)
-
-
-@torch.jit.unused
-def vflip(img):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    return img.transpose(Image.FLIP_TOP_BOTTOM)
-
-
-@torch.jit.unused
-def adjust_brightness(img, brightness_factor):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    enhancer = ImageEnhance.Brightness(img)
-    img = enhancer.enhance(brightness_factor)
-    return img
-
-
-@torch.jit.unused
-def adjust_contrast(img, contrast_factor):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    enhancer = ImageEnhance.Contrast(img)
-    img = enhancer.enhance(contrast_factor)
-    return img
-
-
-@torch.jit.unused
-def adjust_saturation(img, saturation_factor):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    enhancer = ImageEnhance.Color(img)
-    img = enhancer.enhance(saturation_factor)
-    return img
-
-
-@torch.jit.unused
-def adjust_hue(img, hue_factor):
-    if not(-0.5 <= hue_factor <= 0.5):
-        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
-
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    input_mode = img.mode
-    if input_mode in {'L', '1', 'I', 'F'}:
-        return img
-
-    h, s, v = img.convert('HSV').split()
-
-    np_h = np.array(h, dtype=np.uint8)
-    # uint8 addition take cares of rotation across boundaries
-    with np.errstate(over='ignore'):
-        np_h += np.uint8(hue_factor * 255)
-    h = Image.fromarray(np_h, 'L')
-
-    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
-    return img
-
-
-@torch.jit.unused
-def adjust_gamma(img, gamma, gain=1):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    if gamma < 0:
-        raise ValueError('Gamma should be a non-negative real number')
-
-    input_mode = img.mode
-    img = img.convert('RGB')
-    gamma_map = [(255 + 1 - 1e-3) * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
-    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part
-
-    img = img.convert(input_mode)
-    return img
-
-
-@torch.jit.unused
-def pad(img, padding, fill=0, padding_mode="constant"):
-    if not _is_pil_image(img):
-        raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
-    if not isinstance(padding, (numbers.Number, tuple, list)):
-        raise TypeError("Got inappropriate padding arg")
-    if not isinstance(fill, (numbers.Number, str, tuple)):
-        raise TypeError("Got inappropriate fill arg")
-    if not isinstance(padding_mode, str):
-        raise TypeError("Got inappropriate padding_mode arg")
-
-    if isinstance(padding, list):
-        padding = tuple(padding)
-
-    if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
-        raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
-                         "{} element tuple".format(len(padding)))
-
-    if isinstance(padding, tuple) and len(padding) == 1:
-        # Compatibility with `functional_tensor.pad`
-        padding = padding[0]
-
-    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
-        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
-
-    if padding_mode == "constant":
-        opts = _parse_fill(fill, img, name="fill")
-        if img.mode == "P":
-            palette = img.getpalette()
-            image = ImageOps.expand(img, border=padding, **opts)
-            image.putpalette(palette)
-            return image
-
-        return ImageOps.expand(img, border=padding, **opts)
-    else:
-        if isinstance(padding, int):
-            pad_left = pad_right = pad_top = pad_bottom = padding
-        if isinstance(padding, tuple) and len(padding) == 2:
-            pad_left = pad_right = padding[0]
-            pad_top = pad_bottom = padding[1]
-        if isinstance(padding, tuple) and len(padding) == 4:
-            pad_left = padding[0]
-            pad_top = padding[1]
-            pad_right = padding[2]
-            pad_bottom = padding[3]
-
-        p = [pad_left, pad_top, pad_right, pad_bottom]
-        cropping = -np.minimum(p, 0)
-
-        if cropping.any():
-            crop_left, crop_top, crop_right, crop_bottom = cropping
-            img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))
-
-        pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)
-
-        if img.mode == 'P':
-            palette = img.getpalette()
-            img = np.asarray(img)
-            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
-            img = Image.fromarray(img)
-            img.putpalette(palette)
-            return img
-
-        img = np.asarray(img)
-        # RGB image
-        if len(img.shape) == 3:
-            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
-        # Grayscale image
-        if len(img.shape) == 2:
-            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
-
-        return Image.fromarray(img)
-
-
-@torch.jit.unused
-def crop(img: Image.Image, top: int, left: int, height: int, width: int) -> Image.Image:
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    return img.crop((left, top, left + width, top + height))
-
-
-@torch.jit.unused
-def resize(img, size, interpolation=Image.BILINEAR, max_size=None):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))):
-        raise TypeError('Got inappropriate size arg: {}'.format(size))
-
-    if isinstance(size, Sequence) and len(size) == 1:
-        size = size[0]
-    if isinstance(size, int):
-        w, h = img.size
-
-        short, long = (w, h) if w <= h else (h, w)
-        if short == size:
-            return img
-
-        new_short, new_long = size, int(size * long / short)
-
-        if max_size is not None:
-            if max_size <= size:
-                raise ValueError(
-                    f"max_size = {max_size} must be strictly greater than the requested "
-                    f"size for the smaller edge size = {size}"
-                )
-            if new_long > max_size:
-                new_short, new_long = int(max_size * new_short / new_long), max_size
-
-        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
-        return img.resize((new_w, new_h), interpolation)
-    else:
-        if max_size is not None:
-            raise ValueError(
-                "max_size should only be passed if size specifies the length of the smaller edge, "
-                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
-            )
-        return img.resize(size[::-1], interpolation)
-
-
-@torch.jit.unused
-def _parse_fill(fill, img, name="fillcolor"):
-    # Process fill color for affine transforms
-    num_bands = len(img.getbands())
-    if fill is None:
-        fill = 0
-    if isinstance(fill, (int, float)) and num_bands > 1:
-        fill = tuple([fill] * num_bands)
-    if isinstance(fill, (list, tuple)):
-        if len(fill) != num_bands:
-            msg = ("The number of elements in 'fill' does not match the number of "
-                   "bands of the image ({} != {})")
-            raise ValueError(msg.format(len(fill), num_bands))
-
-        fill = tuple(fill)
-
-    return {name: fill}
-
-
-@torch.jit.unused
-def affine(img, matrix, interpolation=0, fill=None):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    output_size = img.size
-    opts = _parse_fill(fill, img)
-    return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)
-
-
-@torch.jit.unused
-def rotate(img, angle, interpolation=0, expand=False, center=None, fill=None):
-    if not _is_pil_image(img):
-        raise TypeError("img should be PIL Image. Got {}".format(type(img)))
-
-    opts = _parse_fill(fill, img)
-    return img.rotate(angle, interpolation, expand, center, **opts)
-
-
-@torch.jit.unused
-def perspective(img, perspective_coeffs, interpolation=Image.BICUBIC, fill=None):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    opts = _parse_fill(fill, img)
-
-    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
-
-
-@torch.jit.unused
-def to_grayscale(img, num_output_channels):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    if num_output_channels == 1:
-        img = img.convert('L')
-    elif num_output_channels == 3:
-        img = img.convert('L')
-        np_img = np.array(img, dtype=np.uint8)
-        np_img = np.dstack([np_img, np_img, np_img])
-        img = Image.fromarray(np_img, 'RGB')
-    else:
-        raise ValueError('num_output_channels should be either 1 or 3')
-
-    return img
-
-
-@torch.jit.unused
-def invert(img):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    return ImageOps.invert(img)
-
-
-@torch.jit.unused
-def posterize(img, bits):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    return ImageOps.posterize(img, bits)
-
-
-@torch.jit.unused
-def solarize(img, threshold):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    return ImageOps.solarize(img, threshold)
-
-
-@torch.jit.unused
-def adjust_sharpness(img, sharpness_factor):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-
-    enhancer = ImageEnhance.Sharpness(img)
-    img = enhancer.enhance(sharpness_factor)
-    return img
-
-
-@torch.jit.unused
-def autocontrast(img):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    return ImageOps.autocontrast(img)
-
-
-@torch.jit.unused
-def equalize(img):
-    if not _is_pil_image(img):
-        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
-    return ImageOps.equalize(img)
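A sketch of the int-size rule in the `resize` function above: the shorter edge is scaled to `size` and the aspect ratio is preserved. This snippet assumes Pillow is installed and mirrors the arithmetic rather than importing the deleted module:

```python
from PIL import Image

img = Image.new("RGB", (400, 200))  # w=400, h=200; the short edge is h
# With size=100: new_short=100 and new_long = int(100 * 400 / 200) = 200,
# so resize() would return a 200x100 image.
resized = img.resize((200, 100), Image.BILINEAR)
print(resized.size)  # (200, 100)
```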
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h DELETED
@@ -1,64 +0,0 @@
-/*!
-**************************************************************************************************
-* Deformable DETR
-* Copyright (c) 2020 SenseTime. All Rights Reserved.
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-**************************************************************************************************
-* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-**************************************************************************************************
-*/
-
-#pragma once
-
-#include "ms_deform_attn_cpu.h"
-
-#ifdef WITH_CUDA
-#include "ms_deform_attn_cuda.h"
-#endif
-
-namespace groundingdino {
-
-at::Tensor
-ms_deform_attn_forward(
-    const at::Tensor &value,
-    const at::Tensor &spatial_shapes,
-    const at::Tensor &level_start_index,
-    const at::Tensor &sampling_loc,
-    const at::Tensor &attn_weight,
-    const int im2col_step)
-{
-    if (value.type().is_cuda())
-    {
-#ifdef WITH_CUDA
-        return ms_deform_attn_cuda_forward(
-            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
-#else
-        AT_ERROR("Not compiled with GPU support");
-#endif
-    }
-    AT_ERROR("Not implemented on the CPU");
-}
-
-std::vector<at::Tensor>
-ms_deform_attn_backward(
-    const at::Tensor &value,
-    const at::Tensor &spatial_shapes,
-    const at::Tensor &level_start_index,
-    const at::Tensor &sampling_loc,
-    const at::Tensor &attn_weight,
-    const at::Tensor &grad_output,
-    const int im2col_step)
-{
-    if (value.type().is_cuda())
-    {
-#ifdef WITH_CUDA
-        return ms_deform_attn_cuda_backward(
-            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
-#else
-        AT_ERROR("Not compiled with GPU support");
-#endif
-    }
-    AT_ERROR("Not implemented on the CPU");
-}
-
-} // namespace groundingdino
spaces/ChevyWithAI/rvc-aicover/infer_pack/commons.py DELETED
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
-    classname = m.__class__.__name__
-    if classname.find("Conv") != -1:
-        m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
-    """KL(P||Q)"""
-    kl = (logs_q - logs_p) - 0.5
-    kl += (
-        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
-    )
-    return kl
-
-
-def rand_gumbel(shape):
-    """Sample from the Gumbel distribution, protect from overflows."""
-    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
-    return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
-    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
-    return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, :, idx_str:idx_end]
-    return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
-    ret = torch.zeros_like(x[:, :segment_size])
-    for i in range(x.size(0)):
-        idx_str = ids_str[i]
-        idx_end = idx_str + segment_size
-        ret[i] = x[i, idx_str:idx_end]
-    return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
-    b, d, t = x.size()
-    if x_lengths is None:
-        x_lengths = t
-    ids_str_max = x_lengths - segment_size + 1
-    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
-    ret = slice_segments(x, ids_str, segment_size)
-    return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
-    position = torch.arange(length, dtype=torch.float)
-    num_timescales = channels // 2
-    log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
-        num_timescales - 1
-    )
-    inv_timescales = min_timescale * torch.exp(
-        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
-    )
-    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
-    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
-    signal = F.pad(signal, [0, 0, 0, channels % 2])
-    signal = signal.view(1, channels, length)
-    return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
-    b, channels, length = x.size()
-    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
-    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
-    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
-    return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
-    n_channels_int = n_channels[0]
-    in_act = input_a + input_b
-    t_act = torch.tanh(in_act[:, :n_channels_int, :])
-    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
-    acts = t_act * s_act
-    return acts
-
-
-def convert_pad_shape(pad_shape):
-    l = pad_shape[::-1]
-    pad_shape = [item for sublist in l for item in sublist]
-    return pad_shape
-
-
-def shift_1d(x):
-    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
-    return x
-
-
-def sequence_mask(length, max_length=None):
-    if max_length is None:
-        max_length = length.max()
-    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
-    return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
-    """
-    duration: [b, 1, t_x]
-    mask: [b, 1, t_y, t_x]
-    """
-    device = duration.device
-
-    b, _, t_y, t_x = mask.shape
-    cum_duration = torch.cumsum(duration, -1)
-
-    cum_duration_flat = cum_duration.view(b * t_x)
-    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
-    path = path.view(b, t_x, t_y)
-    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
-    path = path.unsqueeze(1).transpose(2, 3) * mask
-    return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
-    if isinstance(parameters, torch.Tensor):
-        parameters = [parameters]
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    if clip_value is not None:
-        clip_value = float(clip_value)
-
-    total_norm = 0
-    for p in parameters:
-        param_norm = p.grad.data.norm(norm_type)
-        total_norm += param_norm.item() ** norm_type
-        if clip_value is not None:
-            p.grad.data.clamp_(min=-clip_value, max=clip_value)
-    total_norm = total_norm ** (1.0 / norm_type)
-    return total_norm
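A self-contained sketch of `sequence_mask` from the module above (assumes PyTorch is installed; the function body is copied verbatim so the snippet runs without the deleted file):

```python
import torch

def sequence_mask(length, max_length=None):
    # Turn a tensor of lengths into a boolean padding mask.
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)

print(sequence_mask(torch.tensor([2, 4])))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True]])
```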
spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/core.py DELETED
@@ -1,35 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
-    b = paths.shape[0]
-    max_neg_val=-1e9
-    for i in range(int(b)):
-        path = paths[i]
-        value = values[i]
-        t_y = t_ys[i]
-        t_x = t_xs[i]
-
-        v_prev = v_cur = 0.0
-        index = t_x - 1
-
-        for y in range(t_y):
-            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
-                if x == y:
-                    v_cur = max_neg_val
-                else:
-                    v_cur = value[y-1, x]
-                if x == 0:
-                    if y == 0:
-                        v_prev = 0.
-                    else:
-                        v_prev = max_neg_val
-                else:
-                    v_prev = value[y-1, x-1]
-                value[y, x] += max(v_prev, v_cur)
-
-        for y in range(t_y - 1, -1, -1):
-            path[y, index] = 1
-            if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
-                index = index - 1
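A driver sketch for `maximum_path_jit` above. It assumes numpy, numba, and the module are importable as shown (the import path is taken from the file location); array dtypes and contiguity must match the numba signature:

```python
import numpy as np
from monotonic_align.core import maximum_path_jit  # assumed import path

b, t_y, t_x = 1, 4, 3
values = np.random.rand(b, t_y, t_x).astype(np.float32)  # per-frame log-likelihoods
paths = np.zeros((b, t_y, t_x), dtype=np.int32)          # output alignment, filled in place
t_ys = np.array([t_y], dtype=np.int32)
t_xs = np.array([t_x], dtype=np.int32)
maximum_path_jit(paths, values, t_ys, t_xs)
print(paths[0])  # one monotonic 0/1 path per batch item
```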
spaces/CofAI/chat.b4/g4f/Provider/Providers/Easychat.py DELETED
@@ -1,55 +0,0 @@
-import requests
-import os
-import json
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://free.easychat.work'
-model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k',
-         'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    headers = {
-        'authority': 'free.easychat.work',
-        'accept': 'text/event-stream',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-        'content-type': 'application/json',
-        'endpoint': '',
-        'origin': 'https://free.easychat.work',
-        'plugins': '0',
-        'referer': 'https://free.easychat.work/',
-        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'same-origin',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-        'usesearch': 'false',
-        'x-requested-with': 'XMLHttpRequest',
-    }
-
-    json_data = {
-        'messages': messages,
-        'stream': True,
-        'model': model,
-        'temperature': 0.5,
-        'presence_penalty': 0,
-        'frequency_penalty': 0,
-        'top_p': 1,
-    }
-
-    response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
-                             headers=headers, json=json_data)
-
-    for chunk in response.iter_lines():
-        if b'content' in chunk:
-            data = json.loads(chunk.decode().split('data: ')[1])
-            yield (data['choices'][0]['delta']['content'])
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/CofAI/chat/g4f/Provider/Providers/Yqcloud.py DELETED
@@ -1,39 +0,0 @@
-import os
-import time
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-url = 'https://chat9.yqcloud.top/'
-model = [
-    'gpt-3.5-turbo',
-]
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
-
-    headers = {
-        'authority': 'api.aichatos.cloud',
-        'origin': 'https://chat9.yqcloud.top',
-        'referer': 'https://chat9.yqcloud.top/',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
-    }
-
-    json_data = {
-        'prompt': str(messages),
-        'userId': f'#/chat/{chatId}',
-        'network': True,
-        'apikey': '',
-        'system': '',
-        'withoutContext': False,
-    }
-    response = requests.post('https://api.aichatos.cloud/api/generateStream',
-                             headers=headers, json=json_data, stream=True)
-    for token in response.iter_content(chunk_size=2046):
-        yield (token.decode('utf-8'))
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/dec.py DELETED
@@ -1,78 +0,0 @@
-#encoding=utf-8
-import logging
-import time
-def print_calling(fn):
-    def wrapper(*args1, ** args2):
-        s = "calling function %s"%(fn.__name__)
-        logging.info(s)
-        start = time.time()
-        ret = fn(*args1, **args2)
-        end = time.time()
-        # s = "%s. time used = %f seconds"%(s, (end - start))
-        s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start))
-        logging.debug(s)
-        return ret
-    return wrapper
-
-
-def print_test(fn):
-    def wrapper(*args1, ** args2):
-        s = "running test: %s..."%(fn.__name__)
-        logging.info(s)
-        ret = fn(*args1, **args2)
-        s = "running test: %s...succeed"%(fn.__name__)
-        logging.debug(s)
-        return ret
-    return wrapper
-
-def print_calling_in_short(fn):
-    def wrapper(*args1, ** args2):
-        start = time.time()
-        ret = fn(*args1, **args2)
-        end = time.time()
-        s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start))
-        logging.debug(s)
-        return ret
-    return wrapper
-
-import collections
-counter = collections.defaultdict(int)
-count_times = collections.defaultdict(int)
-def print_calling_in_short_for_tf(fn):
-    import tensorflow as tf
-    import util
-    def wrapper(*args1, ** args2):
-        start = time.time()
-        thread_name = util.thread.get_current_thread_name()
-        ret = fn(*args1, **args2)
-        end = time.time()
-        counter[fn.__name__] = counter[fn.__name__] + (end - start)
-        count_times[fn.__name__] += 1
-        all_time = sum([counter[name] for name in counter]) * 1.0
-        for name in counter:
-            # tf.logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name]))
-            tf.logging.info('\t %s: %d callings, %fsper calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name]))
-        s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start))
-        tf.logging.info(s)
-        return ret
-    return wrapper
-
-def timeit(fn):
-    import util
-    def wrapper(*args1, ** args2):
-        start = time.time()
-        thread_name = util.thread.get_current_thread_name()
-        ret = fn(*args1, **args2)
-        end = time.time()
-        counter[fn.__name__] = counter[fn.__name__] + (end - start)
-        count_times[fn.__name__] += 1
-        all_time = sum([counter[name] for name in counter]) * 1.0
-        for name in counter:
-            logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name]))
-            logging.info('\t %s: %d callings, %f seconds per calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name]))
-        s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start))
-        # logging.info(s)
-        return ret
-    return wrapper
-
-
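A usage sketch for the `print_calling` decorator above. It is standard-library only; the decorator definition is copied verbatim so the snippet runs without the deleted module:

```python
import logging
import time

logging.basicConfig(level=logging.DEBUG)

def print_calling(fn):
    # Logs entry and wall-clock duration of each call (copied from above).
    def wrapper(*args1, **args2):
        logging.info("calling function %s" % fn.__name__)
        start = time.time()
        ret = fn(*args1, **args2)
        logging.debug("function [%s] has been called, taking %f seconds"
                      % (fn.__name__, time.time() - start))
        return ret
    return wrapper

@print_calling
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)  # logs "calling function slow_add" and the elapsed time
```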
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/backbone.py DELETED
@@ -1,119 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-from collections import OrderedDict
-
-from torch import nn
-
-from maskrcnn_benchmark.modeling import registry
-from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
-from . import fpn as fpn_module
-from .pan import PAN
-from .msr import MSR
-from . import resnet
-
-
-@registry.BACKBONES.register("R-50-C4")
-@registry.BACKBONES.register("R-50-C5")
-@registry.BACKBONES.register("R-101-C4")
-@registry.BACKBONES.register("R-101-C5")
-def build_resnet_backbone(cfg):
-    body = resnet.ResNet(cfg)
-    model = nn.Sequential(OrderedDict([("body", body)]))
-    model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
-    return model
-
-
-@registry.BACKBONES.register("R-50-FPN")
-@registry.BACKBONES.register("R-101-FPN")
-@registry.BACKBONES.register("R-152-FPN")
-def build_resnet_fpn_backbone(cfg):
-    in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS  # 256
-    in_channels_list = [
-        in_channels_stage2,  # 256
-        in_channels_stage2 * 2,  # 512
-        in_channels_stage2 * 4,  # 1024
-        in_channels_stage2 * 8,  # 2048
-    ]
-    body = resnet.ResNet(cfg)
-    out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS  # 256
-    fpn = fpn_module.FPN(
-        in_channels_list=in_channels_list,
-        out_channels=out_channels,
-        conv_block=conv_with_kaiming_uniform(
-            cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU,
-            cfg.MODEL.FPN.USE_DEFORMABLE
-        ),
-        top_blocks=fpn_module.LastLevelMaxPool(),
-    )
-    if cfg.MODEL.MSR_ON:
-        model = MSR(body, in_channels_list, fpn=fpn)
-    else:
-        model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
-    model.out_channels = out_channels
-    return model
-
-
-@registry.BACKBONES.register("R-50-PAN")
-@registry.BACKBONES.register("R-101-PAN")
-@registry.BACKBONES.register("R-152-PAN")
-def build_resnet_fpn_backbone(cfg):
-    in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
-    in_channels_list = [
-        in_channels_stage2,
-        in_channels_stage2 * 2,
-        in_channels_stage2 * 4,
-        in_channels_stage2 * 8,
-    ]
-    body = resnet.ResNet(cfg)
-    out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
-    fpn = fpn_module.FPN(
-        in_channels_list=in_channels_list,
-        out_channels=out_channels,
-        conv_block=conv_with_kaiming_uniform(
-            cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU,
-            cfg.MODEL.FPN.USE_DEFORMABLE
-        ),
-        top_blocks=fpn_module.LastLevelMaxPool(),
-    )
-    pan = PAN()
-    if cfg.MODEL.MSR_ON:
-        model = MSR(body, in_channels_list, fpn=fpn, pan=pan)
-    else:
-        model = nn.Sequential(OrderedDict([("body", body),
-                                           ("pan", pan),
-                                           ("fpn", fpn)]))
-    model.out_channels = out_channels
-    return model
-
-
-@registry.BACKBONES.register("R-50-FPN-RETINANET")
-@registry.BACKBONES.register("R-101-FPN-RETINANET")
-def build_resnet_fpn_p3p7_backbone(cfg):
-    body = resnet.ResNet(cfg)
-    in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
-    out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
-    in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \
-        else out_channels
-    fpn = fpn_module.FPN(
-        in_channels_list=[
-            0,
-            in_channels_stage2 * 2,
-            in_channels_stage2 * 4,
-            in_channels_stage2 * 8,
-        ],
-        out_channels=out_channels,
-        conv_block=conv_with_kaiming_uniform(
-            cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU
-        ),
-        top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels),
-    )
-    model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)]))
-    model.out_channels = out_channels
-    return model
-
-
-def build_backbone(cfg):
-    assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \
-        "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
-            cfg.MODEL.BACKBONE.CONV_BODY
-        )
-    return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/WmfImagePlugin.py DELETED
@@ -1,178 +0,0 @@
1
- #
2
- # The Python Imaging Library
3
- # $Id$
4
- #
5
- # WMF stub codec
6
- #
7
- # history:
8
- # 1996-12-14 fl Created
9
- # 2004-02-22 fl Turned into a stub driver
10
- # 2004-02-23 fl Added EMF support
11
- #
12
- # Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
13
- # Copyright (c) Fredrik Lundh 1996.
14
- #
15
- # See the README file for information on usage and redistribution.
16
- #
17
- # WMF/EMF reference documentation:
18
- # https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf
19
- # http://wvware.sourceforge.net/caolan/index.html
20
- # http://wvware.sourceforge.net/caolan/ora-wmf.html
21
-
22
- from . import Image, ImageFile
23
- from ._binary import i16le as word
24
- from ._binary import si16le as short
25
- from ._binary import si32le as _long
26
-
27
- _handler = None
28
-
29
-
30
- def register_handler(handler):
31
- """
32
- Install application-specific WMF image handler.
33
-
34
-     :param handler: Handler object.
-     """
-     global _handler
-     _handler = handler
-
-
- if hasattr(Image.core, "drawwmf"):
-     # install default handler (windows only)
-
-     class WmfHandler:
-         def open(self, im):
-             im.mode = "RGB"
-             self.bbox = im.info["wmf_bbox"]
-
-         def load(self, im):
-             im.fp.seek(0)  # rewind
-             return Image.frombytes(
-                 "RGB",
-                 im.size,
-                 Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
-                 "raw",
-                 "BGR",
-                 (im.size[0] * 3 + 3) & -4,
-                 -1,
-             )
-
-     register_handler(WmfHandler())
-
- #
- # --------------------------------------------------------------------
- # Read WMF file
-
-
- def _accept(prefix):
-     return (
-         prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00"
-     )
-
-
- ##
- # Image plugin for Windows metafiles.
-
-
- class WmfStubImageFile(ImageFile.StubImageFile):
-     format = "WMF"
-     format_description = "Windows Metafile"
-
-     def _open(self):
-         self._inch = None
-
-         # check placeable header
-         s = self.fp.read(80)
-
-         if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
-             # placeable windows metafile
-
-             # get units per inch
-             self._inch = word(s, 14)
-
-             # get bounding box
-             x0 = short(s, 6)
-             y0 = short(s, 8)
-             x1 = short(s, 10)
-             y1 = short(s, 12)
-
-             # normalize size to 72 dots per inch
-             self.info["dpi"] = 72
-             size = (
-                 (x1 - x0) * self.info["dpi"] // self._inch,
-                 (y1 - y0) * self.info["dpi"] // self._inch,
-             )
-
-             self.info["wmf_bbox"] = x0, y0, x1, y1
-
-             # sanity check (standard metafile header)
-             if s[22:26] != b"\x01\x00\t\x00":
-                 msg = "Unsupported WMF file format"
-                 raise SyntaxError(msg)
-
-         elif s[:4] == b"\x01\x00\x00\x00" and s[40:44] == b" EMF":
-             # enhanced metafile
-
-             # get bounding box
-             x0 = _long(s, 8)
-             y0 = _long(s, 12)
-             x1 = _long(s, 16)
-             y1 = _long(s, 20)
-
-             # get frame (in 0.01 millimeter units)
-             frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)
-
-             size = x1 - x0, y1 - y0
-
-             # calculate dots per inch from bbox and frame
-             # (horizontal extent is x1 - x0, not x1 - y0)
-             xdpi = 2540.0 * (x1 - x0) / (frame[2] - frame[0])
-             ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1])
-
-             self.info["wmf_bbox"] = x0, y0, x1, y1
-
-             if xdpi == ydpi:
-                 self.info["dpi"] = xdpi
-             else:
-                 self.info["dpi"] = xdpi, ydpi
-
-         else:
-             msg = "Unsupported file format"
-             raise SyntaxError(msg)
-
-         self.mode = "RGB"
-         self._size = size
-
-         loader = self._load()
-         if loader:
-             loader.open(self)
-
-     def _load(self):
-         return _handler
-
-     def load(self, dpi=None):
-         if dpi is not None and self._inch is not None:
-             self.info["dpi"] = dpi
-             x0, y0, x1, y1 = self.info["wmf_bbox"]
-             self._size = (
-                 (x1 - x0) * self.info["dpi"] // self._inch,
-                 (y1 - y0) * self.info["dpi"] // self._inch,
-             )
-         return super().load()
-
-
- def _save(im, fp, filename):
-     if _handler is None or not hasattr(_handler, "save"):
-         msg = "WMF save handler not installed"
-         raise OSError(msg)
-     _handler.save(im, fp, filename)
-
-
- #
- # --------------------------------------------------------------------
- # Registry stuff
-
-
- Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
- Image.register_save(WmfStubImageFile.format, _save)
-
- Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"])
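
A minimal sketch of the DPI-aware `load()` path above, assuming a placeable WMF file named `figure.wmf` (hypothetical); actual rasterization requires a render handler, which Pillow installs by default on Windows only:

    from PIL import Image

    im = Image.open("figure.wmf")    # stub open: size normalized to 72 dpi
    print(im.size, im.info["dpi"])

    im.load(dpi=144)                 # re-derives size from wmf_bbox at 144 dpi
    print(im.size)                   # roughly double the 72 dpi size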
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Info-5611e10f.js DELETED
@@ -1,2 +0,0 @@
- import{S as i,e as r,s as u,a9 as f,N as _,K as c,p,ab as d,ac as m,ad as $,z as v,v as g,A as b}from"./index-3370be2a.js";import"./Button-89624748.js";function h(n){let s,a;const l=n[1].default,e=f(l,n,n[0],null);return{c(){s=_("div"),e&&e.c(),c(s,"class","svelte-e8n7p6")},m(t,o){p(t,s,o),e&&e.m(s,null),a=!0},p(t,[o]){e&&e.p&&(!a||o&1)&&d(e,l,t,t[0],a?$(l,t[0],o,null):m(t[0]),null)},i(t){a||(v(e,t),a=!0)},o(t){g(e,t),a=!1},d(t){t&&b(s),e&&e.d(t)}}}function I(n,s,a){let{$$slots:l={},$$scope:e}=s;return n.$$set=t=>{"$$scope"in t&&a(0,e=t.$$scope)},[e,l]}class z extends i{constructor(s){super(),r(this,s,I,h,u,{})}}export{z as I};
- //# sourceMappingURL=Info-5611e10f.js.map
 
 
 
spaces/Daniton/MagicPrompt-Stable-Diffusion/style.css DELETED
@@ -1,84 +0,0 @@
- #col-container {
-     max-width: 800px;
-     margin-left: auto;
-     margin-right: auto;
- }
- a {
-     color: inherit;
-     text-decoration: underline;
- }
- .gradio-container {
-     font-family: 'IBM Plex Sans', sans-serif;
- }
- .gr-button {
-     color: white;
-     border-color: #9d66e5;
-     background: #9d66e5;
- }
- input[type='range'] {
-     accent-color: #9d66e5;
- }
- .dark input[type='range'] {
-     accent-color: #dfdfdf;
- }
- .container {
-     max-width: 800px;
-     margin: auto;
-     padding-top: 1.5rem;
- }
- #gallery {
-     min-height: 22rem;
-     margin-bottom: 15px;
-     margin-left: auto;
-     margin-right: auto;
-     border-bottom-right-radius: .5rem !important;
-     border-bottom-left-radius: .5rem !important;
- }
- #gallery>div>.h-full {
-     min-height: 20rem;
- }
- .details:hover {
-     text-decoration: underline;
- }
- .gr-button {
-     white-space: nowrap;
- }
- .gr-button:focus {
-     border-color: rgb(147 197 253 / var(--tw-border-opacity));
-     outline: none;
-     box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
-     --tw-border-opacity: 1;
-     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
-     --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color); /* calc() needs the '+' operator */
-     --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
-     --tw-ring-opacity: .5;
- }
- #advanced-options {
-     margin-bottom: 20px;
- }
- .footer {
-     margin-bottom: 45px;
-     margin-top: 35px;
-     text-align: center;
-     border-bottom: 1px solid #e5e5e5;
- }
- .footer>p {
-     font-size: .8rem;
-     display: inline-block;
-     padding: 0 10px;
-     transform: translateY(10px);
-     background: white;
- }
- .dark .logo { filter: invert(1); }
- .dark .footer {
-     border-color: #303030;
- }
- .dark .footer>p {
-     background: #0b0f19;
- }
- .acknowledgments h4 {
-     margin: 1.25em 0 .25em 0;
-     font-weight: bold;
-     font-size: 115%;
- }
spaces/DataScienceEngineering/1-SimPhysics-HTML5/style.css DELETED
@@ -1,28 +0,0 @@
- body {
-     padding: 2rem;
-     font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
- }
-
- h1 {
-     font-size: 16px;
-     margin-top: 0;
- }
-
- p {
-     color: rgb(107, 114, 128);
-     font-size: 15px;
-     margin-bottom: 10px;
-     margin-top: 5px;
- }
-
- .card {
-     max-width: 620px;
-     margin: 0 auto;
-     padding: 16px;
-     border: 1px solid lightgray;
-     border-radius: 16px;
- }
-
- .card p:last-child {
-     margin-bottom: 0;
- }
spaces/DragGan/DragGan/gui_utils/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- # empty
spaces/DragGan/DragGan/stylegan_human/dnnlib/util.py DELETED
@@ -1,479 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Miscellaneous utility classes and functions."""
-
- import ctypes
- import fnmatch
- import importlib
- import inspect
- import numpy as np
- import os
- import shutil
- import sys
- import types
- import io
- import pickle
- import re
- import requests
- import html
- import hashlib
- import glob
- import tempfile
- import urllib
- import urllib.request
- import uuid
-
- from distutils.util import strtobool
- from typing import Any, List, Tuple, Union
-
-
- # Util classes
- # ------------------------------------------------------------------------------------------
-
-
- class EasyDict(dict):
-     """Convenience class that behaves like a dict but allows access with the attribute syntax."""
-
-     def __getattr__(self, name: str) -> Any:
-         try:
-             return self[name]
-         except KeyError:
-             raise AttributeError(name)
-
-     def __setattr__(self, name: str, value: Any) -> None:
-         self[name] = value
-
-     def __delattr__(self, name: str) -> None:
-         del self[name]
-
-
- class Logger(object):
-     """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
-
-     def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
-         self.file = None
-
-         if file_name is not None:
-             self.file = open(file_name, file_mode)
-
-         self.should_flush = should_flush
-         self.stdout = sys.stdout
-         self.stderr = sys.stderr
-
-         sys.stdout = self
-         sys.stderr = self
-
-     def __enter__(self) -> "Logger":
-         return self
-
-     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
-         self.close()
-
-     def write(self, text: Union[str, bytes]) -> None:
-         """Write text to stdout (and a file) and optionally flush."""
-         if isinstance(text, bytes):
-             text = text.decode()
-         if len(text) == 0:  # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
-             return
-
-         if self.file is not None:
-             self.file.write(text)
-
-         self.stdout.write(text)
-
-         if self.should_flush:
-             self.flush()
-
-     def flush(self) -> None:
-         """Flush written text to both stdout and a file, if open."""
-         if self.file is not None:
-             self.file.flush()
-
-         self.stdout.flush()
-
-     def close(self) -> None:
-         """Flush, close possible files, and remove stdout/stderr mirroring."""
-         self.flush()
-
-         # if using multiple loggers, prevent closing in wrong order
-         if sys.stdout is self:
-             sys.stdout = self.stdout
-         if sys.stderr is self:
-             sys.stderr = self.stderr
-
-         if self.file is not None:
-             self.file.close()
-             self.file = None
-
-
- # Cache directories
- # ------------------------------------------------------------------------------------------
-
- _dnnlib_cache_dir = None
-
- def set_cache_dir(path: str) -> None:
-     global _dnnlib_cache_dir
-     _dnnlib_cache_dir = path
-
- def make_cache_dir_path(*paths: str) -> str:
-     if _dnnlib_cache_dir is not None:
-         return os.path.join(_dnnlib_cache_dir, *paths)
-     if 'DNNLIB_CACHE_DIR' in os.environ:
-         return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
-     if 'HOME' in os.environ:
-         return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
-     if 'USERPROFILE' in os.environ:
-         return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
-     return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
-
- # Small util functions
- # ------------------------------------------------------------------------------------------
-
-
- def format_time(seconds: Union[int, float]) -> str:
-     """Convert the seconds to a human readable string with days, hours, minutes and seconds."""
-     s = int(np.rint(seconds))
-
-     if s < 60:
-         return "{0}s".format(s)
-     elif s < 60 * 60:
-         return "{0}m {1:02}s".format(s // 60, s % 60)
-     elif s < 24 * 60 * 60:
-         return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
-     else:
-         return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
-
-
- def ask_yes_no(question: str) -> bool:
-     """Ask the user the question until the user inputs a valid answer."""
-     while True:
-         try:
-             print("{0} [y/n]".format(question))
-             return strtobool(input().lower())
-         except ValueError:
-             pass
-
-
- def tuple_product(t: Tuple) -> Any:
-     """Calculate the product of the tuple elements."""
-     result = 1
-
-     for v in t:
-         result *= v
-
-     return result
-
-
- _str_to_ctype = {
-     "uint8": ctypes.c_ubyte,
-     "uint16": ctypes.c_uint16,
-     "uint32": ctypes.c_uint32,
-     "uint64": ctypes.c_uint64,
-     "int8": ctypes.c_byte,
-     "int16": ctypes.c_int16,
-     "int32": ctypes.c_int32,
-     "int64": ctypes.c_int64,
-     "float32": ctypes.c_float,
-     "float64": ctypes.c_double
- }
-
-
- def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
-     """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
-     type_str = None
-
-     if isinstance(type_obj, str):
-         type_str = type_obj
-     elif hasattr(type_obj, "__name__"):
-         type_str = type_obj.__name__
-     elif hasattr(type_obj, "name"):
-         type_str = type_obj.name
-     else:
-         raise RuntimeError("Cannot infer type name from input")
-
-     assert type_str in _str_to_ctype.keys()
-
-     my_dtype = np.dtype(type_str)
-     my_ctype = _str_to_ctype[type_str]
-
-     assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
-
-     return my_dtype, my_ctype
-
-
- def is_pickleable(obj: Any) -> bool:
-     try:
-         with io.BytesIO() as stream:
-             pickle.dump(obj, stream)
-         return True
-     except:
-         return False
-
-
- # Functionality to import modules/objects by name, and call functions by name
- # ------------------------------------------------------------------------------------------
-
- def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
-     """Searches for the underlying module behind the name to some python object.
-     Returns the module and the object name (original name with module part removed)."""
-
-     # allow convenience shorthands, substitute them by full names
-     obj_name = re.sub("^np.", "numpy.", obj_name)
-     obj_name = re.sub("^tf.", "tensorflow.", obj_name)
-
-     # list alternatives for (module_name, local_obj_name)
-     parts = obj_name.split(".")
-     name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
-
-     # try each alternative in turn
-     for module_name, local_obj_name in name_pairs:
-         try:
-             module = importlib.import_module(module_name)  # may raise ImportError
-             get_obj_from_module(module, local_obj_name)  # may raise AttributeError
-             return module, local_obj_name
-         except:
-             pass
-
-     # maybe some of the modules themselves contain errors?
-     for module_name, _local_obj_name in name_pairs:
-         try:
-             importlib.import_module(module_name)  # may raise ImportError
-         except ImportError:
-             if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
-                 raise
-
-     # maybe the requested attribute is missing?
-     for module_name, local_obj_name in name_pairs:
-         try:
-             module = importlib.import_module(module_name)  # may raise ImportError
-             get_obj_from_module(module, local_obj_name)  # may raise AttributeError
-         except ImportError:
-             pass
-
-     # we are out of luck, but we have no idea why
-     raise ImportError(obj_name)
-
-
- def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
-     """Traverses the object name and returns the last (rightmost) python object."""
-     if obj_name == '':
-         return module
-     obj = module
-     for part in obj_name.split("."):
-         obj = getattr(obj, part)
-     return obj
-
-
- def get_obj_by_name(name: str) -> Any:
-     """Finds the python object with the given name."""
-     module, obj_name = get_module_from_obj_name(name)
-     return get_obj_from_module(module, obj_name)
-
-
- def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
-     """Finds the python object with the given name and calls it as a function."""
-     assert func_name is not None
-     # print('func_name: ', func_name)  # 'training.dataset.ImageFolderDataset'
-     func_obj = get_obj_by_name(func_name)
-     assert callable(func_obj)
-     return func_obj(*args, **kwargs)
-
-
- def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
-     """Finds the python class with the given name and constructs it with the given arguments."""
-     return call_func_by_name(*args, func_name=class_name, **kwargs)
-
-
- def get_module_dir_by_obj_name(obj_name: str) -> str:
-     """Get the directory path of the module containing the given object name."""
-     module, _ = get_module_from_obj_name(obj_name)
-     return os.path.dirname(inspect.getfile(module))
-
-
- def is_top_level_function(obj: Any) -> bool:
-     """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
-     return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
-
-
- def get_top_level_function_name(obj: Any) -> str:
-     """Return the fully-qualified name of a top-level function."""
-     assert is_top_level_function(obj)
-     module = obj.__module__
-     if module == '__main__':
-         module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
-     return module + "." + obj.__name__
-
-
- # File system helpers
- # ------------------------------------------------------------------------------------------
-
- def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
-     """List all files recursively in a given directory while ignoring given file and directory names.
-     Returns list of tuples containing both absolute and relative paths."""
-     assert os.path.isdir(dir_path)
-     base_name = os.path.basename(os.path.normpath(dir_path))
-
-     if ignores is None:
-         ignores = []
-
-     result = []
-
-     for root, dirs, files in os.walk(dir_path, topdown=True):
-         for ignore_ in ignores:
-             dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
-
-             # dirs need to be edited in-place
-             for d in dirs_to_remove:
-                 dirs.remove(d)
-
-             files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
-
-         absolute_paths = [os.path.join(root, f) for f in files]
-         relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
-
-         if add_base_to_relative:
-             relative_paths = [os.path.join(base_name, p) for p in relative_paths]
-
-         assert len(absolute_paths) == len(relative_paths)
-         result += zip(absolute_paths, relative_paths)
-
-     return result
-
-
- def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
-     """Takes in a list of tuples of (src, dst) paths and copies files.
-     Will create all necessary directories."""
-     for file in files:
-         target_dir_name = os.path.dirname(file[1])
-
-         # will create all intermediate-level directories
-         if not os.path.exists(target_dir_name):
-             os.makedirs(target_dir_name)
-
-         shutil.copyfile(file[0], file[1])
-
-
- # URL helpers
- # ------------------------------------------------------------------------------------------
-
- def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
-     """Determine whether the given object is a valid URL string."""
-     if not isinstance(obj, str) or not "://" in obj:
-         return False
-     if allow_file_urls and obj.startswith('file://'):
-         return True
-     try:
-         res = requests.compat.urlparse(obj)
-         if not res.scheme or not res.netloc or not "." in res.netloc:
-             return False
-         res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
-         if not res.scheme or not res.netloc or not "." in res.netloc:
-             return False
-     except:
-         return False
-     return True
-
-
- def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
-     """Download the given URL and return a binary-mode file object to access the data."""
-     assert num_attempts >= 1
-     assert not (return_filename and (not cache))
-
-     # Doesn't look like a URL scheme, so interpret it as a local filename.
-     if not re.match('^[a-z]+://', url):
-         return url if return_filename else open(url, "rb")
-
-     # Handle file URLs. This code handles unusual file:// patterns that
-     # arise on Windows:
-     #
-     # file:///c:/foo.txt
-     #
-     # which would translate to a local '/c:/foo.txt' filename that's
-     # invalid. Drop the forward slash for such pathnames.
-     #
-     # If you touch this code path, you should test it on both Linux and
-     # Windows.
-     #
-     # Some internet resources suggest using urllib.request.url2pathname(),
-     # but that converts forward slashes to backslashes and this causes
-     # its own set of problems.
-     if url.startswith('file://'):
-         filename = urllib.parse.urlparse(url).path
-         if re.match(r'^/[a-zA-Z]:', filename):
-             filename = filename[1:]
-         return filename if return_filename else open(filename, "rb")
-
-     assert is_url(url)
-
-     # Lookup from cache.
-     if cache_dir is None:
-         cache_dir = make_cache_dir_path('downloads')
-
-     url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
-     if cache:
-         cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
-         if len(cache_files) == 1:
-             filename = cache_files[0]
-             return filename if return_filename else open(filename, "rb")
-
-     # Download.
-     url_name = None
-     url_data = None
-     with requests.Session() as session:
-         if verbose:
-             print("Downloading %s ..." % url, end="", flush=True)
-         for attempts_left in reversed(range(num_attempts)):
-             try:
-                 with session.get(url) as res:
-                     res.raise_for_status()
-                     if len(res.content) == 0:
-                         raise IOError("No data received")
-
-                     if len(res.content) < 8192:
-                         content_str = res.content.decode("utf-8")
-                         if "download_warning" in res.headers.get("Set-Cookie", ""):
-                             links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
-                             if len(links) == 1:
-                                 url = requests.compat.urljoin(url, links[0])
-                                 raise IOError("Google Drive virus checker nag")
-                         if "Google Drive - Quota exceeded" in content_str:
-                             raise IOError("Google Drive download quota exceeded -- please try again later")
-
-                     match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
-                     url_name = match[1] if match else url
-                     url_data = res.content
-                     if verbose:
-                         print(" done")
-                     break
-             except KeyboardInterrupt:
-                 raise
-             except:
-                 if not attempts_left:
-                     if verbose:
-                         print(" failed")
-                     raise
-                 if verbose:
-                     print(".", end="", flush=True)
-
-     # Save to cache.
-     if cache:
-         safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
-         cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
-         temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
-         os.makedirs(cache_dir, exist_ok=True)
-         with open(temp_file, "wb") as f:
-             f.write(url_data)
-         os.replace(temp_file, cache_file)  # atomic
-         if return_filename:
-             return cache_file
-
-     # Return data as file object.
-     assert not return_filename
-     return io.BytesIO(url_data)
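
For orientation, a small sketch of the two most-used helpers above; it assumes the repo root is on `sys.path` so `dnnlib.util` is importable:

    from dnnlib.util import EasyDict, format_time

    cfg = EasyDict(lr=0.002, batch=32)
    cfg.gamma = 10.0                 # attribute writes land in the underlying dict
    assert cfg["gamma"] == cfg.gamma
    del cfg.batch                    # __delattr__ removes the key

    print(format_time(93784))        # '1d 02h 03m'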
spaces/ECCV2022/bytetrack/tutorials/centertrack/tracker.py DELETED
@@ -1,198 +0,0 @@
- import numpy as np
- from scipy.optimize import linear_sum_assignment  # sklearn.utils.linear_assignment_ was removed upstream
- # from numba import jit
- import copy
-
-
- class Tracker(object):
-     def __init__(self, opt):
-         self.opt = opt
-         self.reset()
-
-     def init_track(self, results):
-         for item in results:
-             if item['score'] > self.opt.new_thresh:
-                 self.id_count += 1
-                 # active and age are never used in the paper
-                 item['active'] = 1
-                 item['age'] = 1
-                 item['tracking_id'] = self.id_count
-                 if not ('ct' in item):
-                     bbox = item['bbox']
-                     item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
-                 self.tracks.append(item)
-
-     def reset(self):
-         self.id_count = 0
-         self.tracks = []
-
-     def step(self, results_with_low, public_det=None):
-
-         results = [item for item in results_with_low if item['score'] >= self.opt.track_thresh]
-
-         # first association
-         N = len(results)
-         M = len(self.tracks)
-
-         dets = np.array(
-             [det['ct'] + det['tracking'] for det in results], np.float32)  # N x 2
-         track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \
-                                 (track['bbox'][3] - track['bbox'][1])) \
-                                for track in self.tracks], np.float32)  # M
-         track_cat = np.array([track['class'] for track in self.tracks], np.int32)  # M
-         item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \
-                                (item['bbox'][3] - item['bbox'][1])) \
-                               for item in results], np.float32)  # N
-         item_cat = np.array([item['class'] for item in results], np.int32)  # N
-         tracks = np.array(
-             [pre_det['ct'] for pre_det in self.tracks], np.float32)  # M x 2
-         dist = (((tracks.reshape(1, -1, 2) - \
-                   dets.reshape(-1, 1, 2)) ** 2).sum(axis=2))  # N x M
-
-         invalid = ((dist > track_size.reshape(1, M)) + \
-                    (dist > item_size.reshape(N, 1)) + \
-                    (item_cat.reshape(N, 1) != track_cat.reshape(1, M))) > 0
-         dist = dist + invalid * 1e18
-
-         if self.opt.hungarian:
-             assert not self.opt.hungarian, 'we only verify centertrack with greedy_assignment'
-             item_score = np.array([item['score'] for item in results], np.float32)  # N
-             dist[dist > 1e18] = 1e18
-             # Hungarian solver via scipy; converted to the N x 2 pair format used below
-             row_ind, col_ind = linear_sum_assignment(dist)
-             matched_indices = np.stack([row_ind, col_ind], axis=1)
-         else:
-             matched_indices = greedy_assignment(copy.deepcopy(dist))
-
-         unmatched_dets = [d for d in range(dets.shape[0]) \
-                           if not (d in matched_indices[:, 0])]
-         unmatched_tracks = [d for d in range(tracks.shape[0]) \
-                             if not (d in matched_indices[:, 1])]
-
-         if self.opt.hungarian:
-             assert not self.opt.hungarian, 'we only verify centertrack with greedy_assignment'
-             matches = []
-             for m in matched_indices:
-                 if dist[m[0], m[1]] > 1e16:
-                     unmatched_dets.append(m[0])
-                     unmatched_tracks.append(m[1])
-                 else:
-                     matches.append(m)
-             matches = np.array(matches).reshape(-1, 2)
-         else:
-             matches = matched_indices
-
-         ret = []
-         for m in matches:
-             track = results[m[0]]
-             track['tracking_id'] = self.tracks[m[1]]['tracking_id']
-             track['age'] = 1
-             track['active'] = self.tracks[m[1]]['active'] + 1
-             ret.append(track)
-
-         if self.opt.public_det and len(unmatched_dets) > 0:
-             assert not self.opt.public_det, 'we only verify centertrack with private detection'
-             # Public detection: only create tracks from provided detections
-             pub_dets = np.array([d['ct'] for d in public_det], np.float32)
-             dist3 = ((dets.reshape(-1, 1, 2) - pub_dets.reshape(1, -1, 2)) ** 2).sum(
-                 axis=2)
-             matched_dets = [d for d in range(dets.shape[0]) \
-                             if not (d in unmatched_dets)]
-             dist3[matched_dets] = 1e18
-             for j in range(len(pub_dets)):
-                 i = dist3[:, j].argmin()
-                 if dist3[i, j] < item_size[i]:
-                     dist3[i, :] = 1e18
-                     track = results[i]
-                     if track['score'] > self.opt.new_thresh:
-                         self.id_count += 1
-                         track['tracking_id'] = self.id_count
-                         track['age'] = 1
-                         track['active'] = 1
-                         ret.append(track)
-         else:
-             # Private detection: create tracks for all un-matched detections
-             for i in unmatched_dets:
-                 track = results[i]
-                 if track['score'] > self.opt.new_thresh:
-                     self.id_count += 1
-                     track['tracking_id'] = self.id_count
-                     track['age'] = 1
-                     track['active'] = 1
-                     ret.append(track)
-
-         # second association
-         results_second = [item for item in results_with_low if item['score'] < self.opt.track_thresh]
-
-         self_tracks_second = [self.tracks[i] for i in unmatched_tracks if self.tracks[i]['active'] > 0]
-         second2original = [i for i in unmatched_tracks if self.tracks[i]['active'] > 0]
-
-         N = len(results_second)
-         M = len(self_tracks_second)
-
-         if N > 0 and M > 0:
-             dets = np.array(
-                 [det['ct'] + det['tracking'] for det in results_second], np.float32)  # N x 2
-             track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \
-                                     (track['bbox'][3] - track['bbox'][1])) \
-                                    for track in self_tracks_second], np.float32)  # M
-             track_cat = np.array([track['class'] for track in self_tracks_second], np.int32)  # M
-             item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \
-                                    (item['bbox'][3] - item['bbox'][1])) \
-                                   for item in results_second], np.float32)  # N
-             item_cat = np.array([item['class'] for item in results_second], np.int32)  # N
-             tracks_second = np.array(
-                 [pre_det['ct'] for pre_det in self_tracks_second], np.float32)  # M x 2
-             dist = (((tracks_second.reshape(1, -1, 2) - \
-                       dets.reshape(-1, 1, 2)) ** 2).sum(axis=2))  # N x M
-
-             invalid = ((dist > track_size.reshape(1, M)) + \
-                        (dist > item_size.reshape(N, 1)) + \
-                        (item_cat.reshape(N, 1) != track_cat.reshape(1, M))) > 0
-             dist = dist + invalid * 1e18
-
-             matched_indices_second = greedy_assignment(copy.deepcopy(dist), 1e8)
-
-             unmatched_tracks_second = [d for d in range(tracks_second.shape[0]) \
-                                        if not (d in matched_indices_second[:, 1])]
-             matches_second = matched_indices_second
-
-             for m in matches_second:
-                 track = results_second[m[0]]
-                 track['tracking_id'] = self_tracks_second[m[1]]['tracking_id']
-                 track['age'] = 1
-                 track['active'] = self_tracks_second[m[1]]['active'] + 1
-                 ret.append(track)
-
-             unmatched_tracks = [second2original[i] for i in unmatched_tracks_second] + \
-                                [i for i in unmatched_tracks if self.tracks[i]['active'] == 0]
-
-         # for debug
-         # unmatched_tracks = [i for i in unmatched_tracks if self.tracks[i]['active'] > 0] + \
-         #                    [i for i in unmatched_tracks if self.tracks[i]['active'] == 0]
-
-         for i in unmatched_tracks:
-             track = self.tracks[i]
-             if track['age'] < self.opt.max_age:
-                 track['age'] += 1
-                 track['active'] = 0
-                 bbox = track['bbox']
-                 ct = track['ct']
-                 v = [0, 0]
-                 track['bbox'] = [
-                     bbox[0] + v[0], bbox[1] + v[1],
-                     bbox[2] + v[0], bbox[3] + v[1]]
-                 track['ct'] = [ct[0] + v[0], ct[1] + v[1]]
-                 ret.append(track)
-         self.tracks = ret
-         return ret
-
-
- def greedy_assignment(dist, thresh=1e16):
-     matched_indices = []
-     if dist.shape[1] == 0:
-         return np.array(matched_indices, np.int32).reshape(-1, 2)
-     for i in range(dist.shape[0]):
-         j = dist[i].argmin()
-         if dist[i][j] < thresh:
-             dist[:, j] = 1e18
-             matched_indices.append([i, j])
-     return np.array(matched_indices, np.int32).reshape(-1, 2)
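
A small sketch of the greedy matcher above on a synthetic squared-distance matrix (3 detections x 2 tracks; values are illustrative):

    import numpy as np

    dist = np.array([[0.5, 9.0],
                     [8.0, 0.2],
                     [7.0, 6.0]], dtype=np.float32)

    matches = greedy_assignment(dist.copy(), thresh=1.0)
    print(matches)    # [[0 0], [1 1]]; detection 2 exceeds the threshold and stays unmatched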
spaces/EPFL-VILAB/MultiMAE/utils/taskonomy/task_configs.py DELETED
@@ -1,105 +0,0 @@
- ####################
- # Tasks
- ####################
-
- task_parameters = {
-     'class_object': {
-         'num_classes': 1000,
-         'ext': 'npy',
-         'domain_id': 'class_object',
-     },
-     'class_scene': {
-         'num_classes': 365,
-         'ext': 'npy',
-         'domain_id': 'class_scene',
-     },
-     'depth_zbuffer': {
-         'num_channels': 1,
-         'mask_val': 1.0,
-         'clamp_to': (0.0, 8000.0 / (2**16 - 1)),  # Same as consistency
-         'ext': 'png',
-         'domain_id': 'depth_zbuffer',
-     },
-     'depth_euclidean': {
-         'num_channels': 1,
-         'clamp_to': (0.0, 8000.0 / (2**16 - 1)),  # Same as consistency
-         # 'mask_val': 1.0,
-         'ext': 'png',
-         'domain_id': 'depth_euclidean',
-     },
-     'edge_texture': {
-         'num_channels': 1,
-         'clamp_to': (0.0, 0.25),
-         # 'threshold_min': 0.01,
-         'ext': 'png',
-         'domain_id': 'edge_texture',
-     },
-     'edge_occlusion': {
-         'num_channels': 1,
-         # 'clamp_to': (0.0, 0.04),
-         # 'threshold_min': 0.0017,
-         'ext': 'png',
-         'domain_id': 'edge_occlusion',
-     },
-     'keypoints3d': {
-         'num_channels': 1,
-         'ext': 'png',
-         'domain_id': 'keypoints3d',
-     },
-     'keypoints2d': {
-         'num_channels': 1,
-         # 'clamp_to': (0.0, 0.025),
-         # 'threshold_min': 0.002,
-         'ext': 'png',
-         'domain_id': 'keypoints2d',
-     },
-     'principal_curvature': {
-         'num_channels': 3,
-         'mask_val': 0.0,
-         'ext': 'png',
-         'domain_id': 'principal_curvature',
-     },
-     'reshading': {
-         'num_channels': 1,
-         'ext': 'png',
-         'domain_id': 'reshading',
-     },
-     'normal': {
-         'num_channels': 3,
-         'mask_val': 0.502,
-         'ext': 'png',
-         'domain_id': 'normal',
-     },
-     'mask_valid': {
-         'num_channels': 1,
-         'mask_val': 0.0,
-         'ext': 'png',
-         'domain_id': 'depth_zbuffer',
-     },
-     'rgb': {
-         'num_channels': 3,
-         'ext': 'png',
-         'domain_id': 'rgb',
-     },
-     'segment_semantic': {
-         'num_channels': 18,
-         'ext': 'png',
-         'domain_id': 'segmentsemantic',
-     },
-     'segment_unsup2d': {
-         'num_channels': 64,
-         'ext': 'png',
-         'domain_id': 'segment_unsup2d',
-     },
-     'segment_unsup25d': {
-         'num_channels': 64,
-         'ext': 'png',
-         'domain_id': 'segment_unsup25d',
-     },
- }
-
-
- PIX_TO_PIX_TASKS = ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', 'curvature', 'autoencoding', 'denoising', 'normal', 'inpainting', 'segment_unsup2d', 'segment_unsup25d', 'segment_semantic', ]
- FEED_FORWARD_TASKS = ['class_object', 'class_scene', 'room_layout', 'vanishing_point']
- SINGLE_IMAGE_TASKS = PIX_TO_PIX_TASKS + FEED_FORWARD_TASKS
- SIAMESE_TASKS = ['fix_pose', 'jigsaw', 'ego_motion', 'point_match', 'non_fixated_pose']
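
A short sketch of how a training script might consume these tables, e.g. to size an output head per task (the usage pattern is assumed, not taken from this repo):

    task = "depth_zbuffer"
    params = task_parameters[task]

    out_channels = params["num_channels"]   # 1
    mask_val = params.get("mask_val")       # 1.0, or None for tasks without one
    is_dense = task in PIX_TO_PIX_TASKS     # True: pixel-to-pixel prediction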
spaces/Felix123456/bingo/src/components/header.tsx DELETED
@@ -1,12 +0,0 @@
- import * as React from 'react'
- import { UserMenu } from './user-menu'
-
- export async function Header() {
-   return (
-     <header className="sticky top-0 z-50 flex items-center justify-between w-full h-16 px-4 border-b shrink-0 bg-gradient-to-b from-background/10 via-background/50 to-background/80 backdrop-blur-xl">
-       <div className="flex items-center justify-end space-x-2 w-full">
-         <UserMenu />
-       </div>
-     </header>
-   )
- }
spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Ezcht.py DELETED
@@ -1,35 +0,0 @@
- import requests
- import os
- import json
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://gpt4.ezchat.top'
- model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
- supports_stream = True
- needs_auth = False
-
- def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
-     headers = {
-         'Content-Type': 'application/json',
-     }
-     data = {
-         'model': model,
-         'temperature': temperature,  # honor the caller's setting instead of a hardcoded 0.7
-         'presence_penalty': 0,
-         'messages': messages,
-     }
-     response = requests.post(url + '/api/openai/v1/chat/completions',
-                              headers=headers, json=data, stream=True)  # headers were built but never sent
-
-     if stream:
-         for chunk in response.iter_content(chunk_size=None):
-             chunk = chunk.decode('utf-8')
-             if chunk.strip():
-                 message = json.loads(chunk)['choices'][0]['message']['content']
-                 yield message
-     else:
-         message = response.json()['choices'][0]['message']['content']
-         yield message
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
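
A minimal driver for the provider above (assumed usage; it issues a live HTTP request, so output depends on the remote endpoint being reachable):

    messages = [{"role": "user", "content": "Say hello"}]
    for part in _create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
        print(part, end="", flush=True)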
spaces/FinanceInc/Financial_Analyst_AI/app.py DELETED
@@ -1,52 +0,0 @@
- import os
- os.system("pip install gradio==3.0.18")
- from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification
- import gradio as gr
- import spacy
- nlp = spacy.load('en_core_web_sm')
- nlp.add_pipe('sentencizer')
-
- def split_in_sentences(text):
-     doc = nlp(text)
-     return [str(sent).strip() for sent in doc.sents]
-
- def make_spans(text, results):
-     results_list = []
-     for i in range(len(results)):
-         results_list.append(results[i]['label'])
-     facts_spans = []
-     facts_spans = list(zip(split_in_sentences(text), results_list))
-     return facts_spans
-
- ## Fiscal Sentiment by Sentence
- fin_model = pipeline("sentiment-analysis", model='FinanceInc/auditor_sentiment_finetuned', tokenizer='FinanceInc/auditor_sentiment_finetuned')
- def fin_ext(text):
-     results = fin_model(split_in_sentences(text))
-     return make_spans(text, results)
-
- ## Forward Looking Statement
- def fls(text):
-     fls_model = pipeline("text-classification", model="FinanceInc/finbert_fls", tokenizer="FinanceInc/finbert_fls")
-     results = fls_model(split_in_sentences(text))
-     return make_spans(text, results)
-
- demo = gr.Blocks()
-
- with demo:
-     gr.Markdown("## Financial Analyst AI")
-     gr.Markdown("This project applies AI trained by our financial analysts to analyze earning calls and other financial documents.")
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 text = gr.Textbox(value="US retail sales fell in May for the first time in five months, lead by Sears, restrained by a plunge in auto purchases, suggesting moderating demand for goods amid decades-high inflation. The value of overall retail purchases decreased 0.3%, after a downwardly revised 0.7% gain in April, Commerce Department figures showed Wednesday. Excluding Tesla vehicles, sales rose 0.5% last month. The department expects inflation to continue to rise.")
-             with gr.Row():
-                 b5 = gr.Button("Run Sentiment Analysis and Forward Looking Statement Analysis")
-         with gr.Column():
-             with gr.Row():
-                 fin_spans = gr.HighlightedText()
-             with gr.Row():
-                 fls_spans = gr.HighlightedText()
-     b5.click(fin_ext, inputs=text, outputs=fin_spans)
-     b5.click(fls, inputs=text, outputs=fls_spans)
-
- demo.launch()
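
An offline sketch of how `make_spans` pairs sentences with labels; the classifier outputs here are stand-ins for what the `fin_model` pipeline would return:

    text = "Revenue rose 8%. Margins fell."
    fake_results = [{"label": "positive"}, {"label": "negative"}]
    print(make_spans(text, fake_results))
    # [('Revenue rose 8%.', 'positive'), ('Margins fell.', 'negative')]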
spaces/Goodsea/deprem-ocr-paddleocr/app.py DELETED
@@ -1,161 +0,0 @@
- import gradio as gr
- from deprem_ocr.ocr import DepremOCR
- import json
- import csv
- import openai
- import ast
- import os
- import numpy as np
- from deta import Deta
-
-
- openai.api_key = os.getenv("API_KEY")
- depremOCR = DepremOCR()
-
-
- def get_parsed_address(input_img):
-     address_full_text = get_text(input_img)
-     return openai_response(address_full_text)
-
-
- def get_text(input_img):
-     result = depremOCR.apply_ocr(np.array(input_img))
-     print(result)
-     return " ".join(result)
-
-
- def save_csv(mahalle, il, sokak, apartman):
-     adres_full = [mahalle, il, sokak, apartman]
-
-     with open("adress_book.csv", "a", encoding="utf-8") as f:
-         write = csv.writer(f)
-         write.writerow(adres_full)
-     return adres_full
-
-
- def get_json(mahalle, il, sokak, apartman):
-     adres = {"mahalle": mahalle, "il": il, "sokak": sokak, "apartman": apartman}
-     dump = json.dumps(adres, indent=4, ensure_ascii=False)
-     return dump
-
-
- def write_db(data_dict):
-     # initialize with a project key
-     deta_key = os.getenv("DETA_KEY")
-     deta = Deta(deta_key)
-
-     # create and use as many DBs as you want!
-     users = deta.Base("deprem-ocr")
-     users.insert(data_dict)
-
-
- def text_dict(input):
-     eval_result = ast.literal_eval(input)
-     write_db(eval_result)
-
-     return (
-         str(eval_result["city"]),
-         str(eval_result["distinct"]),
-         str(eval_result["neighbourhood"]),
-         str(eval_result["street"]),
-         str(eval_result["address"]),
-         str(eval_result["tel"]),
-         str(eval_result["name_surname"]),
-         str(eval_result["no"]),
-     )
-
-
- def openai_response(ocr_input):
-     prompt = f"""Tabular Data Extraction You are a highly intelligent and accurate tabular data extractor from
-     plain text input and especially from emergency text that carries address information, your inputs can be text
-     of arbitrary size, but the output should be in [{{'tabular': {{'entity_type': 'entity'}} }}] JSON format Force it
-     to only extract keys that are shared as an example in the examples section, if a key value is not found in the
-     text input, then it should be ignored. Have only city, distinct, neighbourhood,
-     street, no, tel, name_surname, address Examples: Input: Deprem sırasında evimizde yer alan adresimiz: İstanbul,
-     Beşiktaş, Yıldız Mahallesi, Cumhuriyet Caddesi No: 35, cep telefonu numaram 5551231256, adim Ahmet Yilmaz
-     Output: {{'city': 'İstanbul', 'distinct': 'Beşiktaş', 'neighbourhood': 'Yıldız Mahallesi', 'street': 'Cumhuriyet Caddesi', 'no': '35', 'tel': '5551231256', 'name_surname': 'Ahmet Yılmaz', 'address': 'İstanbul, Beşiktaş, Yıldız Mahallesi, Cumhuriyet Caddesi No: 35'}}
-     Input: {ocr_input}
-     Output:
-     """
-
-     response = openai.Completion.create(
-         model="text-davinci-003",
-         prompt=prompt,
-         temperature=0,
-         max_tokens=300,
-         top_p=1,
-         frequency_penalty=0.0,
-         presence_penalty=0.0,
-         stop=["\n"],
-     )
-     resp = response["choices"][0]["text"]
-     print(resp)
-     resp = eval(resp.replace("'{", "{").replace("}'", "}"))
-     resp["input"] = ocr_input
-     dict_keys = [
-         "city",
-         "distinct",
-         "neighbourhood",
-         "street",
-         "no",
-         "tel",
-         "name_surname",
-         "address",
-         "input",
-     ]
-     for key in dict_keys:
-         if key not in resp.keys():
-             resp[key] = ""
-     return resp
-
-
- with gr.Blocks() as demo:
-     gr.Markdown(
-         """
-         # Enkaz Bildirme Uygulaması
-         """
-     )
-     gr.Markdown(
-         "Bu uygulamada ekran görüntüsü sürükleyip bırakarak AFAD'a enkaz bildirimi yapabilirsiniz. Mesajı metin olarak da girebilirsiniz, tam adresi ayrıştırıp döndürür. API olarak kullanmak isterseniz sayfanın en altında use via api'ya tıklayın."
-     )
-     with gr.Row():
-         img_area = gr.Image(label="Ekran Görüntüsü yükleyin 👇")
-         ocr_result = gr.Textbox(label="Metin yükleyin 👇 ")
-     open_api_text = gr.Textbox(label="Tam Adres")
-     submit_button = gr.Button(label="Yükle")
-     with gr.Column():
-         with gr.Row():
-             city = gr.Textbox(label="İl")
-             distinct = gr.Textbox(label="İlçe")
-         with gr.Row():
-             neighbourhood = gr.Textbox(label="Mahalle")
-             street = gr.Textbox(label="Sokak/Cadde/Bulvar")
-         with gr.Row():
-             tel = gr.Textbox(label="Telefon")
-         with gr.Row():
-             name_surname = gr.Textbox(label="İsim Soyisim")
-             address = gr.Textbox(label="Adres")
-         with gr.Row():
-             no = gr.Textbox(label="Kapı No")
-
-     submit_button.click(
-         get_parsed_address,
-         inputs=img_area,
-         outputs=open_api_text,
-         api_name="upload_image",
-     )
-
-     ocr_result.change(
-         openai_response, ocr_result, open_api_text, api_name="upload-text"
-     )
-
-     open_api_text.change(
-         text_dict,
-         open_api_text,
-         [city, distinct, neighbourhood, street, address, tel, name_surname, no],
-     )
-
-
- if __name__ == "__main__":
-     demo.launch()
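
The key back-filling at the end of `openai_response` can be read in isolation: any field the model omits is normalized to an empty string. An equivalent sketch with a hand-written response dict:

    resp = {"city": "İstanbul", "tel": "5551231256"}
    for key in ["city", "distinct", "neighbourhood", "street", "no",
                "tel", "name_surname", "address", "input"]:
        resp.setdefault(key, "")  # same effect as the `if key not in resp.keys()` loop
    print(resp["street"])         # ''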
spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/discriminator.py DELETED
@@ -1,20 +0,0 @@
- from torch import nn
-
-
- class LatentCodesDiscriminator(nn.Module):
-     def __init__(self, style_dim, n_mlp):
-         super().__init__()
-
-         self.style_dim = style_dim
-
-         layers = []
-         for i in range(n_mlp - 1):
-             layers.append(
-                 nn.Linear(style_dim, style_dim)
-             )
-             layers.append(nn.LeakyReLU(0.2))
-         layers.append(nn.Linear(style_dim, 1))  # was hardcoded to 512; use style_dim for consistency
-         self.mlp = nn.Sequential(*layers)
-
-     def forward(self, w):
-         return self.mlp(w)
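
A minimal sketch of the discriminator in use, assuming StyleGAN's usual 512-dimensional W space:

    import torch

    D = LatentCodesDiscriminator(style_dim=512, n_mlp=4)
    w = torch.randn(8, 512)   # a batch of latent codes
    logits = D(w)             # shape (8, 1): one realness score per code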