wanghaofan committed
Commit d489804 · verified · 1 Parent(s): f7f6b57

Upload 6 files

controlnet_aux/anyline/__init__.py ADDED
@@ -0,0 +1,118 @@
+ # code based on https://github.com/TheMistoAI/ComfyUI-Anyline/blob/main/anyline.py
+ import os
+
+ import cv2
+ import numpy as np
+ import torch
+ from einops import rearrange
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+ from skimage import morphology
+
+ from ..teed.ted import TED
+ from ..util import HWC3, resize_image, safe_step
+
+
+ class AnylineDetector:
+     def __init__(self, model):
+         self.model = model
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_or_path, filename=None, subfolder=None):
+         if os.path.isdir(pretrained_model_or_path):
+             model_path = os.path.join(pretrained_model_or_path, filename)
+         else:
+             model_path = hf_hub_download(
+                 pretrained_model_or_path, filename, subfolder=subfolder
+             )
+
+         model = TED()
+         model.load_state_dict(torch.load(model_path, map_location="cpu"))
+
+         return cls(model)
+
+     def to(self, device):
+         self.model.to(device)
+         return self
+
+     def __call__(
+         self,
+         input_image,
+         detect_resolution=1280,
+         gaussian_sigma=2.0,
+         intensity_threshold=3,
+         output_type="pil",
+     ):
+         device = next(iter(self.model.parameters())).device
+
+         if not isinstance(input_image, np.ndarray):
+             input_image = np.array(input_image, dtype=np.uint8)
+             output_type = output_type or "pil"
+         else:
+             output_type = output_type or "np"
+
+         original_height, original_width, _ = input_image.shape
+
+         input_image = HWC3(input_image)
+         input_image = resize_image(input_image, detect_resolution)
+
+         assert input_image.ndim == 3
+         height, width, _ = input_image.shape
+         # branch 1: coarse edge map from the TEED (MTEED) model
+         with torch.no_grad():
+             image_teed = torch.from_numpy(input_image.copy()).float().to(device)
+             image_teed = rearrange(image_teed, "h w c -> 1 c h w")
+             edges = self.model(image_teed)
+             edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
+             edges = [
+                 cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR)
+                 for e in edges
+             ]
+             edges = np.stack(edges, axis=2)
+             edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
+             edge = safe_step(edge, 2)
+             edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
+
+         mteed_result = edge
+         mteed_result = HWC3(mteed_result)
+
+         # branch 2: fine line art from local contrast (Gaussian blur minus image)
+         x = input_image.astype(np.float32)
+         g = cv2.GaussianBlur(x, (0, 0), gaussian_sigma)
+         intensity = np.min(g - x, axis=2).clip(0, 255)
+         intensity /= max(16, np.median(intensity[intensity > intensity_threshold]))
+         intensity *= 127
+         lineart_result = intensity.clip(0, 255).astype(np.uint8)
+
+         lineart_result = HWC3(lineart_result)
+
+         lineart_result = self.get_intensity_mask(
+             lineart_result, lower_bound=0, upper_bound=255
+         )
+
+         cleaned = morphology.remove_small_objects(
+             lineart_result.astype(bool), min_size=36, connectivity=1
+         )
+         lineart_result = lineart_result * cleaned
+         final_result = self.combine_layers(mteed_result, lineart_result)
+
+         final_result = cv2.resize(
+             final_result,
+             (original_width, original_height),
+             interpolation=cv2.INTER_LINEAR,
+         )
+
+         if output_type == "pil":
+             final_result = Image.fromarray(final_result)
+
+         return final_result
+
+     def get_intensity_mask(self, image_array, lower_bound, upper_bound):
+         mask = image_array[:, :, 0]
+         mask = np.where((mask >= lower_bound) & (mask <= upper_bound), mask, 0)
+         mask = np.expand_dims(mask, 2).repeat(3, axis=2)
+         return mask
+
+     def combine_layers(self, base_layer, top_layer):
+         # screen-style blend (1 - (1 - top) * (1 - base)), applied only where
+         # the top (line art) layer is non-zero; elsewhere keep the base layer
+         mask = top_layer.astype(bool)
+         temp = 1 - (1 - top_layer) * (1 - base_layer)
+         result = base_layer * (~mask) + temp * mask
+         return result
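
For reference, a minimal usage sketch of the detector above. The repo id, subfolder, and filename below are placeholders for illustration, not confirmed by this commit; point `from_pretrained` at wherever the MTEED checkpoint actually lives.

    import torch
    from PIL import Image
    from controlnet_aux.anyline import AnylineDetector

    # repo id / filename are assumptions -- substitute the real checkpoint location
    anyline = AnylineDetector.from_pretrained(
        "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
    )
    anyline = anyline.to("cuda" if torch.cuda.is_available() else "cpu")

    image = Image.open("input.png").convert("RGB")
    edge_map = anyline(image, detect_resolution=1280)  # PIL image by default
    edge_map.save("anyline_edges.png")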
controlnet_aux/teed/Fsmish.py ADDED
@@ -0,0 +1,19 @@
+ """
+ Script based on:
+ Wang, Xueliang, Honge Ren, and Achuan Wang.
+ "Smish: A Novel Activation Function for Deep Learning Methods."
+ Electronics 11.4 (2022): 540.
+ """
+
+ import torch
+
+
+ @torch.jit.script
+ def smish(input):
+     """
+     Applies the smish function element-wise:
+     smish(x) = x * tanh(ln(1 + sigmoid(x)))
+     See additional documentation for the Smish class.
+     """
+     return input * torch.tanh(torch.log(1 + torch.sigmoid(input)))
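
A quick sanity check of the corrected formula above: softplus(x) = ln(1 + e^x), and Smish replaces e^x with sigmoid(x) inside the log, so it is not x * tanh(softplus(x)). The snippet below is an illustrative check, not part of the upload, verifying the scripted function against a log1p-based reference:

    import torch
    from controlnet_aux.teed.Fsmish import smish

    x = torch.linspace(-5, 5, steps=11)

    # reference written with log1p for numerical clarity
    reference = x * torch.tanh(torch.log1p(torch.sigmoid(x)))

    assert torch.allclose(smish(x), reference, atol=1e-6)
    print(smish(torch.tensor([0.0])))  # smish(0) == 0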
controlnet_aux/teed/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2022 Xavier Soria Poma
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
controlnet_aux/teed/Xsmish.py ADDED
@@ -0,0 +1,41 @@
+ """
+ Script based on:
+ Wang, Xueliang, Honge Ren, and Achuan Wang.
+ "Smish: A Novel Activation Function for Deep Learning Methods."
+ Electronics 11.4 (2022): 540.
+ smish(x) = x * tanh(ln(1 + sigmoid(x)))
+ """
+
+ from torch import nn
+
+ from .Fsmish import smish
+
+
+ class Smish(nn.Module):
+     """
+     Applies the smish function element-wise:
+     smish(x) = x * tanh(ln(1 + sigmoid(x)))
+     Shape:
+         - Input: (N, *) where * means any number of additional
+           dimensions
+         - Output: (N, *), same shape as the input
+     Examples:
+         >>> m = Smish()
+         >>> input = torch.randn(2)
+         >>> output = m(input)
+     Reference (docstring adapted from):
+     https://pytorch.org/docs/stable/generated/torch.nn.Mish.html
+     """
+
+     def __init__(self):
+         """
+         Init method.
+         """
+         super().__init__()
+
+     def forward(self, input):
+         """
+         Forward pass of the function.
+         """
+         return smish(input)
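
The module wrapper can be dropped into an nn.Sequential like any other activation; a brief illustrative check that it simply delegates to the functional form:

    import torch
    from controlnet_aux.teed.Fsmish import smish
    from controlnet_aux.teed.Xsmish import Smish

    m = Smish()
    x = torch.randn(2, 3)
    assert torch.equal(m(x), smish(x))  # module output matches the function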
controlnet_aux/teed/__init__.py ADDED
@@ -0,0 +1,84 @@
+ import os
+
+ import cv2
+ import numpy as np
+ import torch
+ from einops import rearrange
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+
+ from ..util import HWC3, resize_image, safe_step
+ from .ted import TED
+
+
+ class TEEDdetector:
+     def __init__(self, model):
+         self.model = model
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_or_path, filename=None, subfolder=None):
+         if os.path.isdir(pretrained_model_or_path):
+             model_path = os.path.join(pretrained_model_or_path, filename)
+         else:
+             model_path = hf_hub_download(
+                 pretrained_model_or_path, filename, subfolder=subfolder
+             )
+
+         model = TED()
+         model.load_state_dict(torch.load(model_path, map_location="cpu"))
+
+         return cls(model)
+
+     def to(self, device):
+         self.model.to(device)
+         return self
+
+     def __call__(
+         self,
+         input_image,
+         detect_resolution=512,
+         safe_steps=2,
+         output_type="pil",
+     ):
+         device = next(iter(self.model.parameters())).device
+         if not isinstance(input_image, np.ndarray):
+             input_image = np.array(input_image, dtype=np.uint8)
+             output_type = output_type or "pil"
+         else:
+             output_type = output_type or "np"
+
+         original_height, original_width, _ = input_image.shape
+
+         input_image = HWC3(input_image)
+         input_image = resize_image(input_image, detect_resolution)
+
+         assert input_image.ndim == 3
+         height, width, _ = input_image.shape
+         with torch.no_grad():
+             image_teed = torch.from_numpy(input_image.copy()).float().to(device)
+             image_teed = rearrange(image_teed, "h w c -> 1 c h w")
+             edges = self.model(image_teed)
+             edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
+             edges = [
+                 cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR)
+                 for e in edges
+             ]
+             edges = np.stack(edges, axis=2)
+             edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
+             if safe_steps != 0:
+                 edge = safe_step(edge, safe_steps)
+             edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
+
+         detected_map = edge
+         detected_map = HWC3(detected_map)
+
+         detected_map = cv2.resize(
+             detected_map,
+             (original_width, original_height),
+             interpolation=cv2.INTER_LINEAR,
+         )
+
+         if output_type == "pil":
+             detected_map = Image.fromarray(detected_map)
+
+         return detected_map
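
A minimal usage sketch for TEEDdetector, mirroring the AnylineDetector example earlier; the repo id and filename are assumptions for illustration, so adjust them to the actual TEED checkpoint:

    import torch
    from PIL import Image
    from controlnet_aux.teed import TEEDdetector

    # placeholder checkpoint location -- substitute the real repo/filename
    teed = TEEDdetector.from_pretrained("fal-ai/teed", filename="5_model.pth")
    teed = teed.to("cuda" if torch.cuda.is_available() else "cpu")

    image = Image.open("input.png").convert("RGB")
    edge_map = teed(image, detect_resolution=512, safe_steps=2)
    edge_map.save("teed_edges.png")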
controlnet_aux/teed/ted.py ADDED
@@ -0,0 +1,332 @@
+ # Original from: https://github.com/xavysp/TEED
+ # TEED is a Tiny but Efficient Edge Detector; it comes from LDC-B3
+ # with a slight modification.
+ # Parameter counts: LDC: 155,665; TED: ~58K
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from .Fsmish import smish as Fsmish
+ from .Xsmish import Smish
+
+
+ def weight_init(m):
+     if isinstance(m, (nn.Conv2d,)):
+         torch.nn.init.xavier_normal_(m.weight, gain=1.0)
+
+         if m.bias is not None:
+             torch.nn.init.zeros_(m.bias)
+
+     # for fusion layer
+     if isinstance(m, (nn.ConvTranspose2d,)):
+         torch.nn.init.xavier_normal_(m.weight, gain=1.0)
+         if m.bias is not None:
+             torch.nn.init.zeros_(m.bias)
+
+
+ class CoFusion(nn.Module):
+     # from LDC
+
+     def __init__(self, in_ch, out_ch):
+         super(CoFusion, self).__init__()
+         self.conv1 = nn.Conv2d(
+             in_ch, 32, kernel_size=3, stride=1, padding=1
+         )  # before 64
+         self.conv3 = nn.Conv2d(
+             32, out_ch, kernel_size=3, stride=1, padding=1
+         )  # before 64 instead of 32
+         self.relu = nn.ReLU()
+         self.norm_layer1 = nn.GroupNorm(4, 32)  # before 64
+
+     def forward(self, x):
+         attn = self.relu(self.norm_layer1(self.conv1(x)))
+         attn = F.softmax(self.conv3(attn), dim=1)
+         return ((x * attn).sum(1)).unsqueeze(1)
+
+
+ class CoFusion2(nn.Module):
+     # TEDv14-3
+     def __init__(self, in_ch, out_ch):
+         super(CoFusion2, self).__init__()
+         self.conv1 = nn.Conv2d(
+             in_ch, 32, kernel_size=3, stride=1, padding=1
+         )  # before 64
+         self.conv3 = nn.Conv2d(
+             32, out_ch, kernel_size=3, stride=1, padding=1
+         )  # before 64 instead of 32
+         self.smish = Smish()  # nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         attn = self.conv1(self.smish(x))
+         attn = self.conv3(self.smish(attn))
+
+         return ((x * attn).sum(1)).unsqueeze(1)
+
+
+ class DoubleFusion(nn.Module):
+     # TED fusion before the final edge map prediction
+     def __init__(self, in_ch, out_ch):
+         super(DoubleFusion, self).__init__()
+         self.DWconv1 = nn.Conv2d(
+             in_ch, in_ch * 8, kernel_size=3, stride=1, padding=1, groups=in_ch
+         )  # before 64
+         self.PSconv1 = nn.PixelShuffle(1)
+
+         self.DWconv2 = nn.Conv2d(
+             24, 24 * 1, kernel_size=3, stride=1, padding=1, groups=24
+         )  # before 64 instead of 32
+
+         self.AF = Smish()  # XAF() # nn.Tanh() # Smish()
+
+     def forward(self, x):
+         attn = self.PSconv1(
+             self.DWconv1(self.AF(x))
+         )  # TEED best res TEDv14 [8, 32, 352, 352]
+
+         attn2 = self.PSconv1(
+             self.DWconv2(self.AF(attn))
+         )  # TEED best res TEDv14 [8, 3, 352, 352]
+
+         return Fsmish(((attn2 + attn).sum(1)).unsqueeze(1))  # TED best res
+
+
+ class _DenseLayer(nn.Sequential):
+     def __init__(self, input_features, out_features):
+         super(_DenseLayer, self).__init__()
+
+         self.add_module(
+             "conv1",
+             nn.Conv2d(
+                 input_features,
+                 out_features,
+                 kernel_size=3,
+                 stride=1,
+                 padding=2,
+                 bias=True,
+             ),
+         )
+         self.add_module("smish1", Smish())
+         self.add_module(
+             "conv2",
+             nn.Conv2d(out_features, out_features, kernel_size=3, stride=1, bias=True),
+         )
+
+     def forward(self, x):
+         x1, x2 = x
+
+         new_features = super(_DenseLayer, self).forward(Fsmish(x1))  # F.relu()
+
+         return 0.5 * (new_features + x2), x2
+
+
+ class _DenseBlock(nn.Sequential):
+     def __init__(self, num_layers, input_features, out_features):
+         super(_DenseBlock, self).__init__()
+         for i in range(num_layers):
+             layer = _DenseLayer(input_features, out_features)
+             self.add_module("denselayer%d" % (i + 1), layer)
+             input_features = out_features
+
+
+ class UpConvBlock(nn.Module):
+     def __init__(self, in_features, up_scale):
+         super(UpConvBlock, self).__init__()
+         self.up_factor = 2
+         self.constant_features = 16
+
+         layers = self.make_deconv_layers(in_features, up_scale)
+         assert layers is not None, layers
+         self.features = nn.Sequential(*layers)
+
+     def make_deconv_layers(self, in_features, up_scale):
+         layers = []
+         all_pads = [0, 0, 1, 3, 7]
+         for i in range(up_scale):
+             kernel_size = 2**up_scale
+             pad = all_pads[up_scale]  # kernel_size - 1
+             out_features = self.compute_out_features(i, up_scale)
+             layers.append(nn.Conv2d(in_features, out_features, 1))
+             layers.append(Smish())
+             layers.append(
+                 nn.ConvTranspose2d(
+                     out_features, out_features, kernel_size, stride=2, padding=pad
+                 )
+             )
+             in_features = out_features
+         return layers
+
+     def compute_out_features(self, idx, up_scale):
+         return 1 if idx == up_scale - 1 else self.constant_features
+
+     def forward(self, x):
+         return self.features(x)
+
+
+ class SingleConvBlock(nn.Module):
+     def __init__(self, in_features, out_features, stride, use_ac=False):
+         super(SingleConvBlock, self).__init__()
+         self.use_ac = use_ac
+         self.conv = nn.Conv2d(in_features, out_features, 1, stride=stride, bias=True)
+         if self.use_ac:
+             self.smish = Smish()
+
+     def forward(self, x):
+         x = self.conv(x)
+         if self.use_ac:
+             return self.smish(x)
+         else:
+             return x
+
+
+ class DoubleConvBlock(nn.Module):
+     def __init__(
+         self, in_features, mid_features, out_features=None, stride=1, use_act=True
+     ):
+         super(DoubleConvBlock, self).__init__()
+
+         self.use_act = use_act
+         if out_features is None:
+             out_features = mid_features
+         self.conv1 = nn.Conv2d(in_features, mid_features, 3, padding=1, stride=stride)
+         self.conv2 = nn.Conv2d(mid_features, out_features, 3, padding=1)
+         self.smish = Smish()  # nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         x = self.conv1(x)
+         x = self.smish(x)
+         x = self.conv2(x)
+         if self.use_act:
+             x = self.smish(x)
+         return x
+
+
+ class TED(nn.Module):
+     """Definition of the Tiny and Efficient Edge Detector model."""
+
+     def __init__(self):
+         super(TED, self).__init__()
+         self.block_1 = DoubleConvBlock(
+             3,
+             16,
+             16,
+             stride=2,
+         )
+         self.block_2 = DoubleConvBlock(16, 32, use_act=False)
+         self.dblock_3 = _DenseBlock(1, 32, 48)  # [32,48,100,100] before (2, 32, 64)
+
+         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+         # skip1 connection, see fig. 2
+         self.side_1 = SingleConvBlock(16, 32, 2)
+
+         # skip2 connection, see fig. 2
+         self.pre_dense_3 = SingleConvBlock(32, 48, 1)  # before (32, 64, 1)
+
+         # USNet
+         self.up_block_1 = UpConvBlock(16, 1)
+         self.up_block_2 = UpConvBlock(32, 1)
+         self.up_block_3 = UpConvBlock(48, 2)  # (32, 64, 1)
+
+         self.block_cat = DoubleFusion(3, 3)  # TEED: DoubleFusion
+
+         self.apply(weight_init)
+
+     def slice(self, tensor, slice_shape):
+         t_shape = tensor.shape
+         img_h, img_w = slice_shape
+         if img_w != t_shape[-1] or img_h != t_shape[2]:
+             new_tensor = F.interpolate(
+                 tensor, size=(img_h, img_w), mode="bicubic", align_corners=False
+             )
+         else:
+             new_tensor = tensor
+         return new_tensor
+
+     def resize_input(self, tensor):
+         t_shape = tensor.shape
+         if t_shape[2] % 8 != 0 or t_shape[3] % 8 != 0:
+             img_w = ((t_shape[3] // 8) + 1) * 8
+             img_h = ((t_shape[2] // 8) + 1) * 8
+             new_tensor = F.interpolate(
+                 tensor, size=(img_h, img_w), mode="bicubic", align_corners=False
+             )
+         else:
+             new_tensor = tensor
+         return new_tensor
+
+     @staticmethod
+     def crop_bdcn(data1, h, w, crop_h, crop_w):
+         # Based on the BDCN implementation @ https://github.com/pkuCactus/BDCN
+         _, _, h1, w1 = data1.size()
+         assert h <= h1 and w <= w1
+         data = data1[:, :, crop_h : crop_h + h, crop_w : crop_w + w]
+         return data
+
+     def forward(self, x, single_test=False):
+         assert x.ndim == 4, x.shape
+         # suppose the image size is 352x352
+
+         # Block 1
+         block_1 = self.block_1(x)  # [8,16,176,176]
+         block_1_side = self.side_1(block_1)  # 16 [8,32,88,88]
+
+         # Block 2
+         block_2 = self.block_2(block_1)  # 32 # [8,32,176,176]
+         block_2_down = self.maxpool(block_2)  # [8,32,88,88]
+         block_2_add = block_2_down + block_1_side  # [8,32,88,88]
+
+         # Block 3
+         block_3_pre_dense = self.pre_dense_3(
+             block_2_down
+         )  # [8,48,88,88] block 3 L connection
+         block_3, _ = self.dblock_3([block_2_add, block_3_pre_dense])  # [8,48,88,88]
+
+         # upsampling blocks
+         out_1 = self.up_block_1(block_1)
+         out_2 = self.up_block_2(block_2)
+         out_3 = self.up_block_3(block_3)
+
+         results = [out_1, out_2, out_3]
+
+         # concatenate multiscale outputs
+         block_cat = torch.cat(results, dim=1)  # Bx3xHxW
+         block_cat = self.block_cat(block_cat)  # Bx1xHxW DoubleFusion
+
+         results.append(block_cat)
+         return results
+
+
+ if __name__ == "__main__":
+     batch_size = 8
+     img_height = 352
+     img_width = 352
+
+     # device = "cuda" if torch.cuda.is_available() else "cpu"
+     device = "cpu"
+     input = torch.rand(batch_size, 3, img_height, img_width).to(device)
+     # target = torch.rand(batch_size, 1, img_height, img_width).to(device)
+     print(f"input shape: {input.shape}")
+     model = TED().to(device)
+     output = model(input)
+     print(f"output shapes: {[t.shape for t in output]}")
+
+     # for i in range(20000):
+     #     print(i)
+     #     output = model(input)
+     #     loss = nn.MSELoss()(output[-1], target)
+     #     loss.backward()
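
Since the header comment cites a parameter budget (~58K), here is a quick illustrative way to check it against the model as built (run from an environment where the package is importable):

    from controlnet_aux.teed.ted import TED

    model = TED()
    n_params = sum(p.numel() for p in model.parameters())
    print(f"TED parameters: {n_params:,}")  # expected in the ~58K range per the header comment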