diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..75b75275b3fe0ca8de7ea9b3a6df086a898dc0aa
--- /dev/null
+++ b/app.py
@@ -0,0 +1,238 @@
+from mmseg.apis import init_model
+from typing import List
+from glob import glob
+from cloud_adapter.cloud_adapter_dinov2 import CloudAdapterDinoVisionTransformer  # imported for its side effect: registers the backbone with the mmseg registry
+import numpy as np
+from PIL import Image
+from mmseg.models.segmentors.encoder_decoder import EncoderDecoder
+import gradio as gr
+import torch
+import os
+
+
+class CloudAdapterGradio:
+ def __init__(self, config_path=None, checkpoint_path=None, device="cpu", example_inputs=None, num_classes=2, palette=None):
+ self.config_path = config_path
+ self.checkpoint_path = checkpoint_path
+ self.device = device
+ self.model: EncoderDecoder = init_model(
+ self.config_path, self.checkpoint_path, device=self.device)
+ self.model.eval()
+ self.example_inputs = example_inputs
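+        # Binary checkpoints are trained on 256x256 crops, the multi-class ones on 512x512
+        # (see the configs under cloud-adapter-configs/).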
+ self.img_size = 256 if num_classes == 2 else 512
+ self.palette = palette
+ self.legend = self.html_legend(num_classes=num_classes)
+ self.name_mapping = {
+ "KappaMask": "kappamask",
+ "CDNetv1": "cdnetv1",
+ "CDNetv2": "cdnetv2",
+ "HRCloudNet": "hrcloudnet",
+ "MCDNet": "mcdnet",
+ "SCNN": "scnn",
+ "DBNet": "dbnet",
+ "UNetMobv2": "unetmobv2",
+ "Cloud-Adapter": "cloud-adapter",
+ }
+
+ self.create_ui()
+
+ def html_legend(self, num_classes=2):
+ if num_classes == 2:
+ return """
+
+ """
+ return """
+
+"""
+
+ def create_ui(self):
+ with gr.Row():
+            # Left side: input image and run button
+            with gr.Column(scale=1):  # left column
+ in_image = gr.Image(
+ label='Input Image',
+ sources='upload',
+ elem_classes='input_image',
+ interactive=True,
+ type="pil",
+ )
+ with gr.Row():
+ run_button = gr.Button(
+ 'Run',
+ variant="primary",
+ )
+                # Example inputs list
+ gr.Examples(
+ examples=self.example_inputs,
+ inputs=in_image,
+ label="Example Inputs"
+ )
+
+            # Right side: output image
+            with gr.Column(scale=1):  # right column
+                with gr.Column():
+                    # Output image
+ out_image = gr.Image(
+ label='Output Image',
+ elem_classes='output_image',
+ interactive=False
+ )
+                    # Legend
+ legend = gr.HTML(
+ value=self.legend,
+ elem_classes="output_legend",
+ )
+
+        # Button click: run inference on the input image
+ run_button.click(
+ self.inference,
+ inputs=in_image,
+ outputs=out_image,
+ )
+
+ @torch.no_grad()
+ def inference(self, image: Image.Image) -> Image.Image:
+ return self.cloud_adapter_forward(image)
+
+ @torch.no_grad()
+ def cloud_adapter_forward(self, image: Image.Image) -> Image.Image:
+ """
+ Cloud Adapter Inference
+ """
+ ori_size = image.size
+ image = image.resize((self.img_size, self.img_size),
+ resample=Image.Resampling.BILINEAR)
+        image = np.array(image)
+        # Min-max normalize to [0, 1]; guard against constant-valued inputs.
+        denom = np.max(image) - np.min(image)
+        image = (image - np.min(image)) / (denom if denom > 0 else 1.0)
+
+ image = torch.from_numpy(image).unsqueeze(0).to(self.device)
+ image = image.permute(0, 3, 1, 2).float()
+
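+        # model.predict returns a list of SegDataSample objects; pred_sem_seg.data holds the per-pixel class indices.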
+ outs = self.model.predict(image)
+ pred_mask = outs[0].pred_sem_seg.data.cpu().numpy().astype(np.uint8)
+
+ im = Image.fromarray(pred_mask[0]).convert("P")
+ im.putpalette(self.palette)
+
+ del image
+ del outs
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ return im.resize(ori_size, resample=Image.Resampling.BILINEAR)
+
+
+def get_palette(dataset_name: str) -> List[int]:
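+    # Flat [R, G, B, R, G, B, ...] list, one triplet per class, as expected by PIL's Image.putpalette.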
+ if dataset_name in ["cloudsen12_high_l1c", "cloudsen12_high_l2a"]:
+ return [79, 253, 199, 77, 2, 115, 251, 255, 41, 221, 53, 223]
+ if dataset_name == "l8_biome":
+ return [79, 253, 199, 221, 53, 223, 251, 255, 41, 77, 2, 115]
+ if dataset_name in ["gf12ms_whu_gf1", "gf12ms_whu_gf2", "hrc_whu"]:
+ return [79, 253, 199, 77, 2, 115]
+    raise ValueError(f"Unsupported dataset_name: {dataset_name}")
+
+
+if __name__ == '__main__':
+ title = 'Cloud Segmentation for Remote Sensing Images'
+ custom_css = """
+h1 {
+ text-align: center;
+ font-size: 24px;
+ font-weight: bold;
+ margin-bottom: 20px;
+}
+"""
+ hrc_whu_examples = glob("example_inputs/hrc_whu/*")
+ gf1_examples = glob("example_inputs/gf1/*")
+ gf2_examples = glob("example_inputs/gf2/*")
+ l1c_examples = glob("example_inputs/l1c/*")
+ l2a_examples = glob("example_inputs/l2a/*")
+ l8_examples = glob("example_inputs/l8/*")
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    with gr.Blocks(analytics_enabled=False, title=title, css=custom_css) as demo:
+ gr.Markdown(f'# {title}')
+ with gr.Tabs():
+ with gr.TabItem('Google Earth'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/binary_classes_256x256.py",
+ checkpoint_path="checkpoints/cloud-adapter/hrc_whu_full_weight.pth",
+ device=device,
+ example_inputs=hrc_whu_examples,
+ num_classes=2,
+ palette=get_palette("hrc_whu"),
+ )
+ with gr.TabItem('Gaofen-1'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/binary_classes_256x256.py",
+ checkpoint_path="checkpoints/cloud-adapter/gf1_full_weight.pth",
+ device=device,
+ example_inputs=gf1_examples,
+ num_classes=2,
+ palette=get_palette("gf12ms_whu_gf1"),
+ )
+ with gr.TabItem('Gaofen-2'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/binary_classes_256x256.py",
+ checkpoint_path="checkpoints/cloud-adapter/gf2_full_weight.pth",
+ device=device,
+ example_inputs=gf2_examples,
+ num_classes=2,
+ palette=get_palette("gf12ms_whu_gf2"),
+ )
+
+ with gr.TabItem('Sentinel-2 (L1C)'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/multi_classes_512x512.py",
+ checkpoint_path="checkpoints/cloud-adapter/l1c_full_weight.pth",
+ device=device,
+ example_inputs=l1c_examples,
+ num_classes=4,
+ palette=get_palette("cloudsen12_high_l1c"),
+ )
+ with gr.TabItem('Sentinel-2 (L2A)'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/multi_classes_512x512.py",
+ checkpoint_path="checkpoints/cloud-adapter/l2a_full_weight.pth",
+ device=device,
+ example_inputs=l2a_examples,
+ num_classes=4,
+ palette=get_palette("cloudsen12_high_l2a"),
+ )
+ with gr.TabItem('Landsat-8'):
+ CloudAdapterGradio(
+ config_path="cloud-adapter-configs/multi_classes_512x512.py",
+ checkpoint_path="checkpoints/cloud-adapter/l8_full_weight.pth",
+ device=device,
+ example_inputs=l8_examples,
+ num_classes=4,
+ palette=get_palette("l8_biome"),
+ )
+
+ demo.launch(share=True, debug=True)
diff --git a/checkpoints/cloud-adapter/gf1_full_weight.pth b/checkpoints/cloud-adapter/gf1_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..577d9c5ac966427d1736225657ae103b05e32b09
--- /dev/null
+++ b/checkpoints/cloud-adapter/gf1_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89e224c3b518fc8f59874f85a8c01a470cdbe4d602e22caf7f1ad1ededa2899e
+size 1326991459
diff --git a/checkpoints/cloud-adapter/gf2_full_weight.pth b/checkpoints/cloud-adapter/gf2_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ad545d42c6daeda6d9235a2383dd93c4776af6c7
--- /dev/null
+++ b/checkpoints/cloud-adapter/gf2_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fd14e29f523e988743bc50e915816c80a69a526b032dab07e85433ace409d2f
+size 1311500515
diff --git a/checkpoints/cloud-adapter/hrc_whu_full_weight.pth b/checkpoints/cloud-adapter/hrc_whu_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..97ec704ae569cbffaa1ca46bc2bfe52075d75b7c
--- /dev/null
+++ b/checkpoints/cloud-adapter/hrc_whu_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:755b48d21763339284f4a9b6051c8dfb83a98babca75b39d6304b6f3e82f6c85
+size 1316424759
diff --git a/checkpoints/cloud-adapter/l1c_full_weight.pth b/checkpoints/cloud-adapter/l1c_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ced09f47e0b25dcfb39b5166f0dee3e56b55b485
--- /dev/null
+++ b/checkpoints/cloud-adapter/l1c_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d4bb1dd99e1995450894b985a8fb29b6b931419e3ea08674e1420a5a044804
+size 1332592483
diff --git a/checkpoints/cloud-adapter/l2a_full_weight.pth b/checkpoints/cloud-adapter/l2a_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8308c1b76915b6488f39bfc5b90b6a2f11ed3058
--- /dev/null
+++ b/checkpoints/cloud-adapter/l2a_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d88e426ff6222b6807315060db1ee1b65a2f98de85a62bf7d3814ba846427bd0
+size 1327383395
diff --git a/checkpoints/cloud-adapter/l8_full_weight.pth b/checkpoints/cloud-adapter/l8_full_weight.pth
new file mode 100644
index 0000000000000000000000000000000000000000..90e492b02ed6e3d703ef50a29ed2b771debf02f3
--- /dev/null
+++ b/checkpoints/cloud-adapter/l8_full_weight.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:268ff0297fa37cde78ec16884f33a126d4e7c37b6e37c69d6b18a0ba258a0cee
+size 1461519710
diff --git a/cloud-adapter-configs/binary_classes_256x256.py b/cloud-adapter-configs/binary_classes_256x256.py
new file mode 100644
index 0000000000000000000000000000000000000000..af42b64247ea02275b0ae161b7921d01a2a3cdc8
--- /dev/null
+++ b/cloud-adapter-configs/binary_classes_256x256.py
@@ -0,0 +1,205 @@
+crop_size = (
+ 256,
+ 256,
+)
+model = dict(
+ backbone=dict(
+ adapter_index=[
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ ],
+ block_chunks=0,
+ depth=24,
+ embed_dim=1024,
+ ffn_bias=True,
+ ffn_layer='mlp',
+ has_cat=False,
+ img_size=512,
+ init_values=1e-05,
+ mlp_ratio=4,
+ num_heads=16,
+ cloud_adapter_config=dict(
+ cnn_type='pmaa',
+ context_dim=64,
+ depth=4,
+ emd_dim=1024,
+ global_groups=1,
+ hidden_channels=64,
+ int_type='convnext',
+ local_groups=1,
+ num_layers=24,
+ rank_dim=16,
+ return_last_feature=False,
+ return_multi_feats=False,
+ type='CloudAdapter'),
+ patch_size=16,
+ proj_bias=True,
+ qkv_bias=True,
+ type='CloudAdapterDinoVisionTransformer'),
+ data_preprocessor=dict(
+ bgr_to_rgb=True,
+ mean=[
+ 123.675,
+ 116.28,
+ 103.53,
+ ],
+ pad_val=0,
+ seg_pad_val=255,
+ size=(
+ 512,
+ 512,
+ ),
+ std=[
+ 58.395,
+ 57.12,
+ 57.375,
+ ],
+ type='SegDataPreProcessor'),
+ decode_head=dict(
+ align_corners=False,
+ enforce_decoder_input_project=False,
+ feat_channels=256,
+ in_channels=[
+ 1024,
+ 1024,
+ 1024,
+ 1024,
+ ],
+ loss_cls=dict(
+ class_weight=[
+ 1.0,
+ 1.0,
+ 1.0,
+ 1.0,
+ 0.1,
+ ],
+ loss_weight=2.0,
+ reduction='mean',
+ type='mmdet.CrossEntropyLoss',
+ use_sigmoid=False),
+ loss_dice=dict(
+ activate=True,
+ eps=1.0,
+ loss_weight=5.0,
+ naive_dice=True,
+ reduction='mean',
+ type='mmdet.DiceLoss',
+ use_sigmoid=True),
+ loss_mask=dict(
+ loss_weight=5.0,
+ reduction='mean',
+ type='mmdet.CrossEntropyLoss',
+ use_sigmoid=True),
+ num_classes=2,
+ num_queries=100,
+ num_transformer_feat_level=3,
+ out_channels=256,
+ pixel_decoder=dict(
+ act_cfg=dict(type='ReLU'),
+ encoder=dict(
+ init_cfg=None,
+ layer_cfg=dict(
+ ffn_cfg=dict(
+ act_cfg=dict(inplace=True, type='ReLU'),
+ embed_dims=256,
+ feedforward_channels=1024,
+ ffn_drop=0.0,
+ num_fcs=2),
+ self_attn_cfg=dict(
+ batch_first=True,
+ dropout=0.0,
+ embed_dims=256,
+ im2col_step=64,
+ init_cfg=None,
+ norm_cfg=None,
+ num_heads=8,
+ num_levels=3,
+ num_points=4)),
+ num_layers=6),
+ init_cfg=None,
+ norm_cfg=dict(num_groups=32, type='GN'),
+ num_outs=3,
+ positional_encoding=dict(normalize=True, num_feats=128),
+ type='mmdet.MSDeformAttnPixelDecoder'),
+ positional_encoding=dict(normalize=True, num_feats=128),
+ strides=[
+ 4,
+ 8,
+ 16,
+ 32,
+ ],
+ train_cfg=dict(
+ assigner=dict(
+ match_costs=[
+ dict(type='mmdet.ClassificationCost', weight=2.0),
+ dict(
+ type='mmdet.CrossEntropyLossCost',
+ use_sigmoid=True,
+ weight=5.0),
+ dict(
+ eps=1.0,
+ pred_act=True,
+ type='mmdet.DiceCost',
+ weight=5.0),
+ ],
+ type='mmdet.HungarianAssigner'),
+ importance_sample_ratio=0.75,
+ num_points=12544,
+ oversample_ratio=3.0,
+ sampler=dict(type='mmdet.MaskPseudoSampler')),
+ transformer_decoder=dict(
+ init_cfg=None,
+ layer_cfg=dict(
+ cross_attn_cfg=dict(
+ attn_drop=0.0,
+ batch_first=True,
+ dropout_layer=None,
+ embed_dims=256,
+ num_heads=8,
+ proj_drop=0.0),
+ ffn_cfg=dict(
+ act_cfg=dict(inplace=True, type='ReLU'),
+ add_identity=True,
+ dropout_layer=None,
+ embed_dims=256,
+ feedforward_channels=2048,
+ ffn_drop=0.0,
+ num_fcs=2),
+ self_attn_cfg=dict(
+ attn_drop=0.0,
+ batch_first=True,
+ dropout_layer=None,
+ embed_dims=256,
+ num_heads=8,
+ proj_drop=0.0)),
+ num_layers=9,
+ return_intermediate=True),
+ type='Mask2FormerHead'),
+ test_cfg=dict(mode='whole'),
+ train_cfg=dict(),
+ type='EncoderDecoder')
+
+
+
diff --git a/cloud-adapter-configs/multi_classes_512x512.py b/cloud-adapter-configs/multi_classes_512x512.py
new file mode 100644
index 0000000000000000000000000000000000000000..aac53dfb157e5536c94db334655f316864790386
--- /dev/null
+++ b/cloud-adapter-configs/multi_classes_512x512.py
@@ -0,0 +1,205 @@
+crop_size = (
+ 512,
+ 512,
+)
+model = dict(
+ backbone=dict(
+ adapter_index=[
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ ],
+ block_chunks=0,
+ depth=24,
+ embed_dim=1024,
+ ffn_bias=True,
+ ffn_layer='mlp',
+ has_cat=False,
+ img_size=512,
+ init_values=1e-05,
+ mlp_ratio=4,
+ num_heads=16,
+ cloud_adapter_config=dict(
+ cnn_type='pmaa',
+ context_dim=64,
+ depth=4,
+ emd_dim=1024,
+ global_groups=1,
+ hidden_channels=64,
+ int_type='convnext',
+ local_groups=1,
+ num_layers=24,
+ rank_dim=16,
+ return_last_feature=False,
+ return_multi_feats=False,
+ type='CloudAdapter'),
+ patch_size=16,
+ proj_bias=True,
+ qkv_bias=True,
+ type='CloudAdapterDinoVisionTransformer'),
+ data_preprocessor=dict(
+ bgr_to_rgb=True,
+ mean=[
+ 123.675,
+ 116.28,
+ 103.53,
+ ],
+ pad_val=0,
+ seg_pad_val=255,
+ size=(
+ 512,
+ 512,
+ ),
+ std=[
+ 58.395,
+ 57.12,
+ 57.375,
+ ],
+ type='SegDataPreProcessor'),
+ decode_head=dict(
+ align_corners=False,
+ enforce_decoder_input_project=False,
+ feat_channels=256,
+ in_channels=[
+ 1024,
+ 1024,
+ 1024,
+ 1024,
+ ],
+ loss_cls=dict(
+ class_weight=[
+ 1.0,
+ 1.0,
+ 1.0,
+ 1.0,
+ 0.1,
+ ],
+ loss_weight=2.0,
+ reduction='mean',
+ type='mmdet.CrossEntropyLoss',
+ use_sigmoid=False),
+ loss_dice=dict(
+ activate=True,
+ eps=1.0,
+ loss_weight=5.0,
+ naive_dice=True,
+ reduction='mean',
+ type='mmdet.DiceLoss',
+ use_sigmoid=True),
+ loss_mask=dict(
+ loss_weight=5.0,
+ reduction='mean',
+ type='mmdet.CrossEntropyLoss',
+ use_sigmoid=True),
+ num_classes=4,
+ num_queries=100,
+ num_transformer_feat_level=3,
+ out_channels=256,
+ pixel_decoder=dict(
+ act_cfg=dict(type='ReLU'),
+ encoder=dict(
+ init_cfg=None,
+ layer_cfg=dict(
+ ffn_cfg=dict(
+ act_cfg=dict(inplace=True, type='ReLU'),
+ embed_dims=256,
+ feedforward_channels=1024,
+ ffn_drop=0.0,
+ num_fcs=2),
+ self_attn_cfg=dict(
+ batch_first=True,
+ dropout=0.0,
+ embed_dims=256,
+ im2col_step=64,
+ init_cfg=None,
+ norm_cfg=None,
+ num_heads=8,
+ num_levels=3,
+ num_points=4)),
+ num_layers=6),
+ init_cfg=None,
+ norm_cfg=dict(num_groups=32, type='GN'),
+ num_outs=3,
+ positional_encoding=dict(normalize=True, num_feats=128),
+ type='mmdet.MSDeformAttnPixelDecoder'),
+ positional_encoding=dict(normalize=True, num_feats=128),
+ strides=[
+ 4,
+ 8,
+ 16,
+ 32,
+ ],
+ train_cfg=dict(
+ assigner=dict(
+ match_costs=[
+ dict(type='mmdet.ClassificationCost', weight=2.0),
+ dict(
+ type='mmdet.CrossEntropyLossCost',
+ use_sigmoid=True,
+ weight=5.0),
+ dict(
+ eps=1.0,
+ pred_act=True,
+ type='mmdet.DiceCost',
+ weight=5.0),
+ ],
+ type='mmdet.HungarianAssigner'),
+ importance_sample_ratio=0.75,
+ num_points=12544,
+ oversample_ratio=3.0,
+ sampler=dict(type='mmdet.MaskPseudoSampler')),
+ transformer_decoder=dict(
+ init_cfg=None,
+ layer_cfg=dict(
+ cross_attn_cfg=dict(
+ attn_drop=0.0,
+ batch_first=True,
+ dropout_layer=None,
+ embed_dims=256,
+ num_heads=8,
+ proj_drop=0.0),
+ ffn_cfg=dict(
+ act_cfg=dict(inplace=True, type='ReLU'),
+ add_identity=True,
+ dropout_layer=None,
+ embed_dims=256,
+ feedforward_channels=2048,
+ ffn_drop=0.0,
+ num_fcs=2),
+ self_attn_cfg=dict(
+ attn_drop=0.0,
+ batch_first=True,
+ dropout_layer=None,
+ embed_dims=256,
+ num_heads=8,
+ proj_drop=0.0)),
+ num_layers=9,
+ return_intermediate=True),
+ type='Mask2FormerHead'),
+ test_cfg=dict(mode='whole'),
+ train_cfg=dict(),
+ type='EncoderDecoder')
+
+
+
diff --git a/cloud_adapter/__init__.py b/cloud_adapter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/cloud_adapter/__pycache__/__init__.cpython-38.pyc b/cloud_adapter/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..609e817548bfa0e9d5d86d5c45da5ededd12cece
Binary files /dev/null and b/cloud_adapter/__pycache__/__init__.cpython-38.pyc differ
diff --git a/cloud_adapter/__pycache__/cloud_adapter.cpython-38.pyc b/cloud_adapter/__pycache__/cloud_adapter.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c293b640183e6448b0ed06a102ba5f6d003707a
Binary files /dev/null and b/cloud_adapter/__pycache__/cloud_adapter.cpython-38.pyc differ
diff --git a/cloud_adapter/__pycache__/cloud_adapter_dinov2.cpython-38.pyc b/cloud_adapter/__pycache__/cloud_adapter_dinov2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16c434c147142d948c840b6d8f961cc636802374
Binary files /dev/null and b/cloud_adapter/__pycache__/cloud_adapter_dinov2.cpython-38.pyc differ
diff --git a/cloud_adapter/__pycache__/dino_v2.cpython-38.pyc b/cloud_adapter/__pycache__/dino_v2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d814355d65d68b7e2a71ee36543c6814d517c71a
Binary files /dev/null and b/cloud_adapter/__pycache__/dino_v2.cpython-38.pyc differ
diff --git a/cloud_adapter/__pycache__/utils.cpython-38.pyc b/cloud_adapter/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65f895629042cd8354a1c1f8d7cad2dce66fb09f
Binary files /dev/null and b/cloud_adapter/__pycache__/utils.cpython-38.pyc differ
diff --git a/cloud_adapter/cdnetv1.py b/cloud_adapter/cdnetv1.py
new file mode 100644
index 0000000000000000000000000000000000000000..081151455cbde1a92b028e7ada73a25cb88e0f09
--- /dev/null
+++ b/cloud_adapter/cdnetv1.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+# @Time    : 2024/7/24 11:36 AM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : cdnetv1.py
+# @Software: PyCharm
+
+"""Cloud detection Network"""
+
+"""Cloud detection Network"""
+
+"""
+This is the implementation of CDnetV1 without multi-scale inputs. This implementation uses ResNet by default.
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+affine_par = True
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+ "3x3 convolution with padding"
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(BasicBlock, self).__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
+ self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
+ for i in self.bn1.parameters():
+ i.requires_grad = False
+
+ padding = dilation
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
+ padding=padding, bias=False, dilation=dilation)
+ self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
+ for i in self.bn2.parameters():
+ i.requires_grad = False
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
+ for i in self.bn3.parameters():
+ i.requires_grad = False
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Classifier_Module(nn.Module):
+
+ def __init__(self, dilation_series, padding_series, num_classes):
+ super(Classifier_Module, self).__init__()
+ self.conv2d_list = nn.ModuleList()
+ for dilation, padding in zip(dilation_series, padding_series):
+ self.conv2d_list.append(
+ nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
+
+ for m in self.conv2d_list:
+ m.weight.data.normal_(0, 0.01)
+
+ def forward(self, x):
+ out = self.conv2d_list[0](x)
+ for i in range(len(self.conv2d_list) - 1):
+ out += self.conv2d_list[i + 1](x)
+ return out
+
+
+class _ConvBNReLU(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
+ dilation=1, groups=1, norm_layer=nn.BatchNorm2d):
+ super(_ConvBNReLU, self).__init__()
+ self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
+ self.bn = norm_layer(out_channels)
+ self.relu = nn.ReLU(True)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.relu(x)
+ return x
+
+
+class _ASPPConv(nn.Module):
+ def __init__(self, in_channels, out_channels, atrous_rate, norm_layer):
+ super(_ASPPConv, self).__init__()
+ self.block = nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class _AsppPooling(nn.Module):
+ def __init__(self, in_channels, out_channels, norm_layer):
+ super(_AsppPooling, self).__init__()
+ self.gap = nn.Sequential(
+ nn.AdaptiveAvgPool2d(1),
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ size = x.size()[2:]
+ pool = self.gap(x)
+ out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
+ return out
+
+
+class _ASPP(nn.Module):
+ def __init__(self, in_channels, atrous_rates, norm_layer):
+ super(_ASPP, self).__init__()
+ out_channels = 512 # changed from 256
+ self.b0 = nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ rate1, rate2, rate3 = tuple(atrous_rates)
+ self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer)
+ self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer)
+ self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer)
+ self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer)
+
+ # self.project = nn.Sequential(
+ # nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
+ # norm_layer(out_channels),
+ # nn.ReLU(True),
+ # nn.Dropout(0.5))
+ self.dropout2d = nn.Dropout2d(0.3)
+
+ def forward(self, x):
+ feat1 = self.dropout2d(self.b0(x))
+ feat2 = self.dropout2d(self.b1(x))
+ feat3 = self.dropout2d(self.b2(x))
+ feat4 = self.dropout2d(self.b3(x))
+ feat5 = self.dropout2d(self.b4(x))
+ x = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ # x = self.project(x)
+ return x
+
+
+class _FPM(nn.Module):
+ def __init__(self, in_channels, num_classes, norm_layer=nn.BatchNorm2d):
+ super(_FPM, self).__init__()
+ self.aspp = _ASPP(in_channels, [6, 12, 18], norm_layer=norm_layer)
+ # self.dropout2d = nn.Dropout2d(0.5)
+
+ def forward(self, x):
+ x = torch.cat((x, self.aspp(x)), dim=1)
+ # x = self.dropout2d(x) # added
+ return x
+
+
+class BR(nn.Module):
+ def __init__(self, num_classes, stride=1, downsample=None):
+ super(BR, self).__init__()
+ self.conv1 = conv3x3(num_classes, num_classes * 16, stride)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(num_classes * 16, num_classes)
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out += residual
+
+ return out
+
+
+class CDnetV1(nn.Module):
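+    # Dilated-ResNet backbone followed by feature pyramid modules (_FPM) and residual refinement (BR) blocks
+    # that fuse predictions at 1/8 and 1/4 resolution before upsampling back to the input size.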
+    def __init__(self, in_channels=3, block=Bottleneck, layers=[3, 4, 6, 3], num_classes=21, aux=True):
+ self.inplanes = 64
+ self.aux = aux
+ super().__init__()
+ # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+ # self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
+
+ self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
+ self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(64, affine=affine_par)
+ self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(64, affine=affine_par)
+
+ for i in self.bn1.parameters():
+ i.requires_grad = False
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
+ # self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
+
+ self.res5_con1x1 = nn.Sequential(
+ nn.Conv2d(1024 + 2048, 512, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(512),
+ nn.ReLU(True)
+ )
+
+ self.fpm1 = _FPM(512, num_classes)
+ self.fpm2 = _FPM(512, num_classes)
+ self.fpm3 = _FPM(256, num_classes)
+
+ self.br1 = BR(num_classes)
+ self.br2 = BR(num_classes)
+ self.br3 = BR(num_classes)
+ self.br4 = BR(num_classes)
+ self.br5 = BR(num_classes)
+ self.br6 = BR(num_classes)
+ self.br7 = BR(num_classes)
+
+ self.predict1 = self._predict_layer(512 * 6, num_classes)
+ self.predict2 = self._predict_layer(512 * 6, num_classes)
+ self.predict3 = self._predict_layer(512 * 5 + 256, num_classes)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, 0.01)
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+ # for i in m.parameters():
+ # i.requires_grad = False
+
+ def _predict_layer(self, in_channels, num_classes):
+ return nn.Sequential(nn.Conv2d(in_channels, 256, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(256),
+ nn.ReLU(True),
+ nn.Dropout2d(0.1),
+ nn.Conv2d(256, num_classes, kernel_size=3, stride=1, padding=1, bias=True))
+
+ def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes, planes * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(planes * block.expansion, affine=affine_par))
+ for i in downsample._modules['1'].parameters():
+ i.requires_grad = False
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes, dilation=dilation))
+
+ return nn.Sequential(*layers)
+
+ # def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
+ # return block(dilation_series,padding_series,num_classes)
+
+ def base_forward(self, x):
+ x = self.relu(self.bn1(self.conv1(x)))
+ size_conv1 = x.size()[2:]
+ x = self.relu(self.bn2(self.conv2(x)))
+ x = self.relu(self.bn3(self.conv3(x)))
+ x = self.maxpool(x)
+ x = self.layer1(x)
+ res2 = x
+ x = self.layer2(x)
+ res3 = x
+ x = self.layer3(x)
+ res4 = x
+ x = self.layer4(x)
+ x = self.res5_con1x1(torch.cat([x, res4], dim=1))
+
+ return x, res3, res2, size_conv1
+
+ def forward(self, x):
+ size = x.size()[2:]
+ score1, score2, score3, size_conv1 = self.base_forward(x)
+ # outputs = list()
+ score1 = self.fpm1(score1)
+ score1 = self.predict1(score1) # 1/8
+ predict1 = score1
+ score1 = self.br1(score1)
+
+ score2 = self.fpm2(score2)
+ score2 = self.predict2(score2) # 1/8
+ predict2 = score2
+
+ # first fusion
+ score2 = self.br2(score2) + score1
+ score2 = self.br3(score2)
+
+ score3 = self.fpm3(score3)
+ score3 = self.predict3(score3) # 1/4
+ predict3 = score3
+ score3 = self.br4(score3)
+
+ # second fusion
+ size_score3 = score3.size()[2:]
+ score3 = score3 + F.interpolate(score2, size_score3, mode='bilinear', align_corners=True)
+ score3 = self.br5(score3)
+
+ # upsampling + BR
+ score3 = F.interpolate(score3, size_conv1, mode='bilinear', align_corners=True)
+ score3 = self.br6(score3)
+ score3 = F.interpolate(score3, size, mode='bilinear', align_corners=True)
+ score3 = self.br7(score3)
+
+ # if self.aux:
+ # auxout = self.dsn(mid)
+ # auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
+ # #outputs.append(auxout)
+ return score3
+ # return score3, predict1, predict2, predict3
+
+
+if __name__ == '__main__':
+ model = CDnetV1(num_classes=21)
+ fake_image = torch.randn(2, 3, 224, 224)
+    outputs = model(fake_image)
+    # forward() returns a single tensor; the commented-out multi-output return additionally yields
+    # predict1/predict2/predict3 with shapes [2, 21, 29, 29], [2, 21, 29, 29] and [2, 21, 57, 57].
+    print(outputs.shape)
+    # torch.Size([2, 21, 224, 224])
\ No newline at end of file
diff --git a/cloud_adapter/cdnetv2.py b/cloud_adapter/cdnetv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6fbdeecff5f3cd7d9d1fda9613909d4d85b5cc5
--- /dev/null
+++ b/cloud_adapter/cdnetv2.py
@@ -0,0 +1,693 @@
+# -*- coding: utf-8 -*-
+# @Time    : 2024/7/24 3:41 PM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : cdnetv2.py
+# @Software: PyCharm
+
+"""Cloud detection Network"""
+
+"""
+This is the implementation of CDnetV2 without multi-scale inputs. This implementation uses ResNet by default.
+"""
+# nn.GroupNorm
+
+import torch
+# import torch.nn as nn
+import torch.nn.functional as F
+from torch import nn
+
+affine_par = True
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+ "3x3 convolution with padding"
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(BasicBlock, self).__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
+ self.bn1 = nn.BatchNorm2d(planes, affine=affine_par)
+ for i in self.bn1.parameters():
+ i.requires_grad = False
+
+ padding = dilation
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
+ padding=padding, bias=False, dilation=dilation)
+ self.bn2 = nn.BatchNorm2d(planes, affine=affine_par)
+ for i in self.bn2.parameters():
+ i.requires_grad = False
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * 4, affine=affine_par)
+ for i in self.bn3.parameters():
+ i.requires_grad = False
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+ # self.layerx_1 = Bottleneck_nosample(64, 64, stride=1, dilation=1)
+ # self.layerx_2 = Bottleneck(256, 64, stride=1, dilation=1, downsample=None)
+ # self.layerx_3 = Bottleneck_downsample(256, 64, stride=2, dilation=1)
+
+
+class Res_block_1(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes=64, planes=64, stride=1, dilation=1):
+ super(Res_block_1, self).__init__()
+
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(planes, planes, kernel_size=3, stride=1,
+ padding=1, bias=False, dilation=1),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv3 = nn.Sequential(
+ nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False),
+ nn.GroupNorm(8, planes * 4))
+
+ self.relu = nn.ReLU(inplace=True)
+
+ self.down_sample = nn.Sequential(
+ nn.Conv2d(inplanes, planes * 4,
+ kernel_size=1, stride=1, bias=False),
+ nn.GroupNorm(8, planes * 4))
+
+ def forward(self, x):
+ # residual = x
+
+ out = self.conv1(x)
+ out = self.conv2(out)
+ out = self.conv3(out)
+ residual = self.down_sample(x)
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Res_block_2(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes=256, planes=64, stride=1, dilation=1):
+ super(Res_block_2, self).__init__()
+
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(planes, planes, kernel_size=3, stride=1,
+ padding=1, bias=False, dilation=1),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv3 = nn.Sequential(
+ nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False),
+ nn.GroupNorm(8, planes * 4))
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.conv2(out)
+ out = self.conv3(out)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Res_block_3(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes=256, planes=64, stride=1, dilation=1):
+ super(Res_block_3, self).__init__()
+
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(planes, planes, kernel_size=3, stride=1,
+ padding=1, bias=False, dilation=1),
+ nn.GroupNorm(8, planes),
+ nn.ReLU(inplace=True))
+
+ self.conv3 = nn.Sequential(
+ nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False),
+ nn.GroupNorm(8, planes * 4))
+
+ self.relu = nn.ReLU(inplace=True)
+
+ self.downsample = nn.Sequential(
+ nn.Conv2d(inplanes, planes * 4,
+ kernel_size=1, stride=stride, bias=False),
+ nn.GroupNorm(8, planes * 4))
+
+ def forward(self, x):
+ # residual = x
+
+ out = self.conv1(x)
+ out = self.conv2(out)
+ out = self.conv3(out)
+ # residual = self.downsample(x)
+ out += self.downsample(x)
+ out = self.relu(out)
+
+ return out
+
+
+class Classifier_Module(nn.Module):
+
+ def __init__(self, dilation_series, padding_series, num_classes):
+ super(Classifier_Module, self).__init__()
+ self.conv2d_list = nn.ModuleList()
+ for dilation, padding in zip(dilation_series, padding_series):
+ self.conv2d_list.append(
+ nn.Conv2d(2048, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
+
+ for m in self.conv2d_list:
+ m.weight.data.normal_(0, 0.01)
+
+ def forward(self, x):
+ out = self.conv2d_list[0](x)
+ for i in range(len(self.conv2d_list) - 1):
+ out += self.conv2d_list[i + 1](x)
+ return out
+
+
+class _ConvBNReLU(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
+ dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d):
+ super(_ConvBNReLU, self).__init__()
+ self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
+ self.bn = norm_layer(out_channels)
+ self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.bn(x)
+ x = self.relu(x)
+ return x
+
+
+class _ASPPConv(nn.Module):
+ def __init__(self, in_channels, out_channels, atrous_rate, norm_layer):
+ super(_ASPPConv, self).__init__()
+ self.block = nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class _AsppPooling(nn.Module):
+ def __init__(self, in_channels, out_channels, norm_layer):
+ super(_AsppPooling, self).__init__()
+ self.gap = nn.Sequential(
+ nn.AdaptiveAvgPool2d(1),
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ size = x.size()[2:]
+ pool = self.gap(x)
+ out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
+ return out
+
+
+class _ASPP(nn.Module):
+ def __init__(self, in_channels, atrous_rates, norm_layer):
+ super(_ASPP, self).__init__()
+ out_channels = 256
+ self.b0 = nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ rate1, rate2, rate3 = tuple(atrous_rates)
+ self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer)
+ self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer)
+ self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer)
+ self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer)
+
+ self.project = nn.Sequential(
+ nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True),
+ nn.Dropout(0.5)
+ )
+
+ def forward(self, x):
+ feat1 = self.b0(x)
+ feat2 = self.b1(x)
+ feat3 = self.b2(x)
+ feat4 = self.b3(x)
+ feat5 = self.b4(x)
+ x = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
+ x = self.project(x)
+ return x
+
+
+class _DeepLabHead(nn.Module):
+ def __init__(self, num_classes, c1_channels=256, norm_layer=nn.BatchNorm2d):
+ super(_DeepLabHead, self).__init__()
+ self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer)
+ self.c1_block = _ConvBNReLU(c1_channels, 48, 3, padding=1, norm_layer=norm_layer)
+ self.block = nn.Sequential(
+ _ConvBNReLU(304, 256, 3, padding=1, norm_layer=norm_layer),
+ nn.Dropout(0.5),
+ _ConvBNReLU(256, 256, 3, padding=1, norm_layer=norm_layer),
+ nn.Dropout(0.1),
+ nn.Conv2d(256, num_classes, 1))
+
+ def forward(self, x, c1):
+ size = c1.size()[2:]
+ c1 = self.c1_block(c1)
+ x = self.aspp(x)
+ x = F.interpolate(x, size, mode='bilinear', align_corners=True)
+ return self.block(torch.cat([x, c1], dim=1))
+
+
+class _CARM(nn.Module):
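+    # Channel attention: squeeze with both average and max pooling, excite through two small MLPs,
+    # and gate the input feature map with the resulting sigmoid weights.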
+ def __init__(self, in_planes, ratio=8):
+ super(_CARM, self).__init__()
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
+ self.max_pool = nn.AdaptiveMaxPool2d(1)
+
+ self.fc1_1 = nn.Linear(in_planes, in_planes // ratio)
+ self.fc1_2 = nn.Linear(in_planes // ratio, in_planes)
+
+ self.fc2_1 = nn.Linear(in_planes, in_planes // ratio)
+ self.fc2_2 = nn.Linear(in_planes // ratio, in_planes)
+ self.relu = nn.ReLU(True)
+
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, x):
+ avg_out = self.avg_pool(x)
+ avg_out = avg_out.view(avg_out.size(0), -1)
+ avg_out = self.fc1_2(self.relu(self.fc1_1(avg_out)))
+
+ max_out = self.max_pool(x)
+ max_out = max_out.view(max_out.size(0), -1)
+ max_out = self.fc2_2(self.relu(self.fc2_1(max_out)))
+
+ max_out_size = max_out.size()[1]
+ avg_out = torch.reshape(avg_out, (-1, max_out_size, 1, 1))
+ max_out = torch.reshape(max_out, (-1, max_out_size, 1, 1))
+
+ out = self.sigmoid(avg_out + max_out)
+
+ x = out * x
+ return x
+
+
+class FSFB_CH(nn.Module):
+ def __init__(self, in_planes, num, ratio=8):
+ super(FSFB_CH, self).__init__()
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
+ self.max_pool = nn.AdaptiveMaxPool2d(1)
+
+ self.fc1_1 = nn.Linear(in_planes, in_planes // ratio)
+ self.fc1_2 = nn.Linear(in_planes // ratio, num * in_planes)
+
+ self.fc2_1 = nn.Linear(in_planes, in_planes // ratio)
+ self.fc2_2 = nn.Linear(in_planes // ratio, num * in_planes)
+ self.relu = nn.ReLU(True)
+
+ self.fc3 = nn.Linear(num * in_planes, 2 * num * in_planes)
+ self.fc4 = nn.Linear(2 * num * in_planes, 2 * num * in_planes)
+ self.fc5 = nn.Linear(2 * num * in_planes, num * in_planes)
+
+ self.softmax = nn.Softmax(dim=3)
+
+ def forward(self, x, num):
+ avg_out = self.avg_pool(x)
+ avg_out = avg_out.view(avg_out.size(0), -1)
+ avg_out = self.fc1_2(self.relu(self.fc1_1(avg_out)))
+
+ max_out = self.max_pool(x)
+ max_out = max_out.view(max_out.size(0), -1)
+ max_out = self.fc2_2(self.relu(self.fc2_1(max_out)))
+
+ out = avg_out + max_out
+ out = self.relu(self.fc3(out))
+ out = self.relu(self.fc4(out))
+ out = self.relu(self.fc5(out)) # (N, num*in_planes)
+
+ out_size = out.size()[1]
+ out = torch.reshape(out, (-1, out_size // num, 1, num)) # (N, in_planes, 1, num )
+ out = self.softmax(out)
+
+ channel_scale = torch.chunk(out, num, dim=3) # (N, in_planes, 1, 1 )
+
+ return channel_scale
+
+
+class FSFB_SP(nn.Module):
+ def __init__(self, num, norm_layer=nn.BatchNorm2d):
+ super(FSFB_SP, self).__init__()
+ self.conv = nn.Sequential(
+ nn.Conv2d(2, 2 * num, kernel_size=3, padding=1, bias=False),
+ norm_layer(2 * num),
+ nn.ReLU(True),
+ nn.Conv2d(2 * num, 4 * num, kernel_size=3, padding=1, bias=False),
+ norm_layer(4 * num),
+ nn.ReLU(True),
+ nn.Conv2d(4 * num, 4 * num, kernel_size=3, padding=1, bias=False),
+ norm_layer(4 * num),
+ nn.ReLU(True),
+ nn.Conv2d(4 * num, 2 * num, kernel_size=3, padding=1, bias=False),
+ norm_layer(2 * num),
+ nn.ReLU(True),
+ nn.Conv2d(2 * num, num, kernel_size=3, padding=1, bias=False)
+ )
+ self.softmax = nn.Softmax(dim=1)
+
+ def forward(self, x, num):
+ avg_out = torch.mean(x, dim=1, keepdim=True)
+ max_out, _ = torch.max(x, dim=1, keepdim=True)
+ x = torch.cat([avg_out, max_out], dim=1)
+ x = self.conv(x)
+ x = self.softmax(x)
+ spatial_scale = torch.chunk(x, num, dim=1)
+ return spatial_scale
+
+
+##################################################################################################################
+
+
+class _HFFM(nn.Module):
+ def __init__(self, in_channels, atrous_rates, norm_layer=nn.BatchNorm2d):
+ super(_HFFM, self).__init__()
+ out_channels = 256
+ self.b0 = nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ norm_layer(out_channels),
+ nn.ReLU(True)
+ )
+
+ rate1, rate2, rate3 = tuple(atrous_rates)
+ self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer)
+ self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer)
+ self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer)
+ self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer)
+ self.carm = _CARM(in_channels)
+ self.sa = FSFB_SP(4, norm_layer)
+ self.ca = FSFB_CH(out_channels, 4, 8)
+
+ def forward(self, x, num):
+ x = self.carm(x)
+ # feat1 = self.b0(x)
+ feat1 = self.b1(x)
+ feat2 = self.b2(x)
+ feat3 = self.b3(x)
+ feat4 = self.b4(x)
+ feat = feat1 + feat2 + feat3 + feat4
+ spatial_atten = self.sa(feat, num)
+ channel_atten = self.ca(feat, num)
+
+ feat_ca = channel_atten[0] * feat1 + channel_atten[1] * feat2 + channel_atten[2] * feat3 + channel_atten[
+ 3] * feat4
+ feat_sa = spatial_atten[0] * feat1 + spatial_atten[1] * feat2 + spatial_atten[2] * feat3 + spatial_atten[
+ 3] * feat4
+ feat_sa = feat_sa + feat_ca
+
+ return feat_sa
+
+
+class _AFFM(nn.Module):
+ def __init__(self, in_channels=256, norm_layer=nn.BatchNorm2d):
+ super(_AFFM, self).__init__()
+
+ self.sa = FSFB_SP(2, norm_layer)
+ self.ca = FSFB_CH(in_channels, 2, 8)
+ self.carm = _CARM(in_channels)
+
+ def forward(self, feat1, feat2, hffm, num):
+ feat = feat1 + feat2
+ spatial_atten = self.sa(feat, num)
+ channel_atten = self.ca(feat, num)
+
+ feat_ca = channel_atten[0] * feat1 + channel_atten[1] * feat2
+ feat_sa = spatial_atten[0] * feat1 + spatial_atten[1] * feat2
+ output = self.carm(feat_sa + feat_ca + hffm)
+ # output = self.carm (feat_sa + hffm)
+
+ return output, channel_atten, spatial_atten
+
+
+class block_Conv3x3(nn.Module):
+ def __init__(self, in_channels):
+ super(block_Conv3x3, self).__init__()
+ self.block = nn.Sequential(
+ nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=1, bias=False),
+ nn.BatchNorm2d(256),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class CDnetV2(nn.Module):
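+    # ResNet-style encoder whose multi-scale features are fused by the _HFFM block and a cascade of
+    # _AFFM blocks with channel/spatial attention, plus an auxiliary prediction head (dsn2).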
+    def __init__(self, in_channels=3, block=Bottleneck, layers=[3, 4, 6, 3], num_classes=21, aux=True):
+ self.inplanes = 256 # change
+ self.aux = aux
+ super().__init__()
+ # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+ # self.bn1 = nn.BatchNorm2d(64, affine = affine_par)
+
+ self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64, affine=affine_par)
+
+ self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(64, affine=affine_par)
+
+ self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(64, affine=affine_par)
+
+ self.relu = nn.ReLU(inplace=True)
+
+ self.dropout = nn.Dropout(0.3)
+ for i in self.bn1.parameters():
+ i.requires_grad = False
+
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
+
+ # self.layer1 = self._make_layer(block, 64, layers[0])
+
+ self.layerx_1 = Res_block_1(64, 64, stride=1, dilation=1)
+ self.layerx_2 = Res_block_2(256, 64, stride=1, dilation=1)
+ self.layerx_3 = Res_block_3(256, 64, stride=2, dilation=1)
+
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
+ # self.layer5 = self._make_pred_layer(Classifier_Module, [6,12,18,24],[6,12,18,24],num_classes)
+
+ self.hffm = _HFFM(2048, [6, 12, 18])
+ self.affm_1 = _AFFM()
+ self.affm_2 = _AFFM()
+ self.affm_3 = _AFFM()
+ self.affm_4 = _AFFM()
+ self.carm = _CARM(256)
+
+ self.con_layer1_1 = block_Conv3x3(256)
+ self.con_res2 = block_Conv3x3(256)
+ self.con_res3 = block_Conv3x3(512)
+ self.con_res4 = block_Conv3x3(1024)
+ self.con_res5 = block_Conv3x3(2048)
+
+ self.dsn1 = nn.Sequential(
+ nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0)
+ )
+
+ self.dsn2 = nn.Sequential(
+ nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0)
+ )
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, 0.01)
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+ # for i in m.parameters():
+ # i.requires_grad = False
+
+ # self.inplanes = 256 # change
+
+ def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes, planes * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ nn.BatchNorm2d(planes * block.expansion, affine=affine_par))
+ for i in downsample._modules['1'].parameters():
+ i.requires_grad = False
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes, dilation=dilation))
+
+ return nn.Sequential(*layers)
+
+ # def _make_pred_layer(self,block, dilation_series, padding_series,num_classes):
+ # return block(dilation_series,padding_series,num_classes)
+
+ def base_forward(self, x):
+ x = self.relu(self.bn1(self.conv1(x))) # 1/2
+ x = self.relu(self.bn2(self.conv2(x)))
+ x = self.relu(self.bn3(self.conv3(x)))
+ x = self.maxpool(x) # 1/4
+
+ # x = self.layer1(x) # 1/8
+
+ # layer1
+ x = self.layerx_1(x) # 1/4
+ layer1_0 = x
+
+ x = self.layerx_2(x) # 1/4
+ layer1_0 = self.con_layer1_1(x + layer1_0) # 256
+ size_layer1_0 = layer1_0.size()[2:]
+
+ x = self.layerx_3(x) # 1/8
+ res2 = self.con_res2(x) # 256
+ size_res2 = res2.size()[2:]
+
+ # layer2-4
+ x = self.layer2(x) # 1/16
+ res3 = self.con_res3(x) # 256
+ x = self.layer3(x) # 1/16
+
+ res4 = self.con_res4(x) # 256
+ x = self.layer4(x) # 1/16
+ res5 = self.con_res5(x) # 256
+
+ # x = self.res5_con1x1(torch.cat([x, res4], dim=1))
+ return layer1_0, res2, res3, res4, res5, x, size_layer1_0, size_res2
+
+ # return res2, res3, res4, res5, x, layer_1024, size_res2
+
+ def forward(self, x):
+ # size = x.size()[2:]
+ layer1_0, res2, res3, res4, res5, layer4, size_layer1_0, size_res2 = self.base_forward(x)
+
+ hffm = self.hffm(layer4, 4) # 256 HFFM
+ res5 = res5 + hffm
+ aux_feature = res5 # loss_aux
+ # res5 = self.carm(res5)
+ res5, _, _ = self.affm_1(res4, res5, hffm, 2) # 1/16
+ # aux_feature = res5
+ res5, _, _ = self.affm_2(res3, res5, hffm, 2) # 1/16
+
+ res5 = F.interpolate(res5, size_res2, mode='bilinear', align_corners=True)
+ res5, _, _ = self.affm_3(res2, res5, F.interpolate(hffm, size_res2, mode='bilinear', align_corners=True), 2)
+
+ res5 = F.interpolate(res5, size_layer1_0, mode='bilinear', align_corners=True)
+ res5, _, _ = self.affm_4(layer1_0, res5,
+ F.interpolate(hffm, size_layer1_0, mode='bilinear', align_corners=True), 2)
+
+ output = self.dsn1(res5)
+
+        size = x.size()[2:]
+        pred = F.interpolate(output, size, mode='bilinear', align_corners=True)
+        if self.aux:
+            # Auxiliary prediction from the HFFM-enhanced res5 features, upsampled to the input size.
+            auxout = self.dsn2(aux_feature)
+            pred_aux = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
+            return pred, pred_aux
+        return pred
+
+
+if __name__ == '__main__':
+ model = CDnetV2(num_classes=3)
+ fake_image = torch.rand(2, 3, 256, 256)
+ output = model(fake_image)
+ for out in output:
+ print(out.shape)
+ # torch.Size([2, 3, 256, 256])
+ # torch.Size([2, 3, 256, 256])
\ No newline at end of file
diff --git a/cloud_adapter/cloud_adapter.py b/cloud_adapter/cloud_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..782b87d961051bb2fd11328a498d95fc5a4f6c3f
--- /dev/null
+++ b/cloud_adapter/cloud_adapter.py
@@ -0,0 +1,590 @@
+import math
+from functools import partial
+from typing import List
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange
+from mmseg.models.builder import MODELS
+from timm.layers import (CondConv2d, DropPath, create_act_layer, create_conv2d,
+                         get_condconv_initializer, get_norm_act_layer, trunc_normal_)
+from torch import einsum, nn
+
+
+class LoRaMLP(nn.Module):
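+    # Low-rank (LoRA-style) factorization of a linear projection: in_dim -> rank_dim -> out_dim, bias-free.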
+ def __init__(self, in_dim, out_dim, rank_dim=8):
+ super().__init__()
+ self.loramlp = nn.Sequential(
+ nn.Linear(in_dim, rank_dim, bias=False),
+ nn.Linear(rank_dim, out_dim, bias=False),
+ )
+
+ def forward(self, x):
+ return self.loramlp(x)
+
+
+class CrossAttention(nn.Module):
+ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, rank_dim=None):
+ super().__init__()
+ inner_dim = dim_head * heads # 512
+ context_dim = query_dim if context_dim is None else context_dim
+
+ self.scale = dim_head ** -0.5
+ self.heads = heads
+
+ if not rank_dim:
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
+ self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
+ self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
+
+ self.to_out = nn.Linear(inner_dim, query_dim, bias=False)
+ else:
+ self.to_q = LoRaMLP(query_dim, inner_dim, rank_dim=rank_dim)
+ self.to_k = LoRaMLP(context_dim, inner_dim, rank_dim=rank_dim)
+ self.to_v = LoRaMLP(context_dim, inner_dim, rank_dim=rank_dim)
+
+ self.to_out = LoRaMLP(inner_dim, query_dim, rank_dim=rank_dim)
+
+ def forward(self, x, context):
+ h = self.heads
+
+ q = self.to_q(x)
+ k = self.to_k(context)
+ v = self.to_v(context)
+
+ q, k, v = map(lambda t: rearrange(
+ t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
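+        # Scaled dot-product attention scores between the query tokens (from x) and the key/value tokens (from context).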
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+ attn = sim.softmax(dim=-1)
+
+ out = einsum('b i j, b j d -> b i d', attn, v)
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+
+ return self.to_out(out)
+
+
+def num_groups(group_size, channels):
+ if not group_size:
+ return 1
+ else:
+ assert channels % group_size == 0
+ return channels // group_size
+
+
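+# Google-style weight initialization (as in timm's EfficientNet builder): fan-out normal init for convs,
+# ones/zeros for BatchNorm, and a uniform range for linear layers.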
+def _init_weight_goog(m, n='', fix_group_fanout=True):
+ if isinstance(m, CondConv2d):
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ if fix_group_fanout:
+ fan_out //= m.groups
+ init_weight_fn = get_condconv_initializer(
+ lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
+ init_weight_fn(m.weight)
+ if m.bias is not None:
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.Conv2d):
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ if fix_group_fanout:
+ fan_out //= m.groups
+ nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out))
+ if m.bias is not None:
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.ones_(m.weight)
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.Linear):
+ fan_out = m.weight.size(0)
+ fan_in = 0
+ if 'routing_fn' in n:
+ fan_in = m.weight.size(1)
+ init_range = 1.0 / math.sqrt(fan_in + fan_out)
+ nn.init.uniform_(m.weight, -init_range, init_range)
+ if m.bias is not None:
+ nn.init.zeros_(m.bias)
+
+
+class DepthwiseSeparableConv(nn.Module):
+ def __init__(
+ self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
+ noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
+ se_layer=None, drop_path_rate=0.):
+ super(DepthwiseSeparableConv, self).__init__()
+ norm_act_layer = get_norm_act_layer(norm_layer)
+ groups = num_groups(group_size, in_chs)
+ self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
+ self.has_pw_act = pw_act
+
+ self.conv_dw = create_conv2d(
+ in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, groups=groups)
+ self.bn1 = norm_act_layer(in_chs, inplace=True)
+
+ self.se = se_layer(
+ in_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+ self.conv_pw = create_conv2d(
+ in_chs, out_chs, pw_kernel_size, padding=pad_type)
+ self.bn2 = norm_act_layer(
+ out_chs, inplace=True, apply_act=self.has_pw_act)
+ self.drop_path = DropPath(
+ drop_path_rate) if drop_path_rate else nn.Identity()
+
+ def feature_info(self, location):
+ if location == 'expansion':
+ return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+ else:
+ return dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
+
+ def forward(self, x):
+ shortcut = x
+ x = self.conv_dw(x)
+ x = self.bn1(x)
+ x = self.se(x)
+ x = self.conv_pw(x)
+ x = self.bn2(x)
+ if self.has_skip:
+ x = self.drop_path(x) + shortcut
+ return x
+
+
+class PMAAConvBlock(nn.Module):
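+    # Lightweight convolutional context branch: a 1x1 depthwise-separable projection followed by a pyramid of
+    # stride-2 depthwise-separable blocks (or max-pooling), returning the multi-scale features, the last one,
+    # or their pooled sum depending on the flags.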
+ def __init__(self, in_channels=3, hidden_channels=256, depth=4, norm=nn.BatchNorm2d, act=nn.ReLU, return_multi_feats=False, return_last_feature=True, has_stem=True, has_block=True):
+ super().__init__()
+ self.return_last_feature = return_last_feature
+ self.depth = depth
+ self.has_stem = has_stem
+ self.return_multi_feats = return_multi_feats
+
+ self.proj_1x1 = DepthwiseSeparableConv(
+ in_channels, hidden_channels, dw_kernel_size=1, norm_layer=norm, act_layer=act)
+
+ self.spp_dw = nn.ModuleList()
+
+ if has_stem:
+ self.spp_dw.append(
+ DepthwiseSeparableConv(hidden_channels, hidden_channels, dw_kernel_size=3,
+ stride=1, group_size=hidden_channels, pad_type="same")
+ )
+ else:
+ self.spp_dw.append(nn.Identity())
+
+ if has_block:
+ for _ in range(self.depth):
+ self.spp_dw.append(
+ DepthwiseSeparableConv(
+ hidden_channels, hidden_channels, dw_kernel_size=3, stride=2, group_size=hidden_channels
+ )
+ )
+ else:
+ for _ in range(self.depth):
+ self.spp_dw.append(
+ nn.MaxPool2d(kernel_size=2, stride=2)
+ )
+ self._init_weights()
+
+ def forward(self, x):
+ B, C, H, W = x.shape
+ output1 = self.proj_1x1(x)
+ output = [self.spp_dw[0](output1)]
+
+ for k in range(1, self.depth+1):
+ out_k = self.spp_dw[k](output[-1])
+ output.append(out_k)
+
+ if self.return_multi_feats:
+ return output[1:]
+ else:
+ if self.return_last_feature:
+ return output[-1]
+ global_f = torch.zeros(
+ output[-1].shape, requires_grad=True, device=output1.device)
+ for fea in output:
+ global_f = global_f + F.adaptive_avg_pool2d(
+ fea, output_size=output[-1].shape[-2:]
+ )
+ return global_f
+
+ def _init_weights(self):
+ init_fn = _init_weight_goog
+ for n, m in self.named_modules():
+ init_fn(m, n)
+
+
+class ConvnextInteractiveModule(nn.Module):
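+ # Injects CNN context into the ViT tokens via cross-attention: the tokens act
+ # as queries and the (resized, flattened) CNN feature map provides keys/values;
+ # the result is added back to the tokens as a residual.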
+ def __init__(self, emd_dim=1024, context_dim=256, rank_dim=None):
+ super().__init__()
+ self.attn = CrossAttention(emd_dim, context_dim, rank_dim=rank_dim)
+
+ def forward(self, x, cache, index):
+ # x: (N, B, C), e.g. 1024 x 2 x 1024
+ if isinstance(cache, (list, tuple)):
+ # cache holds 4 feature scales; layers 0-5 use scale 0, 6-11 scale 1, 12-17 scale 2, 18-23 scale 3
+ cache = cache[index]
+ cache = F.interpolate(
+ cache, (int(math.sqrt(x.shape[0])), int(math.sqrt(x.shape[0]))), mode="bilinear", align_corners=False
+ )
+ cache = cache.flatten(2) # B C N
+ cache = cache.permute(2, 0, 1) # N B C
+
+ # Reshape: batch first
+ x = x.permute(1, 0, 2) # B N C
+ cache = cache.permute(1, 0, 2) # B N C
+ return (x + self.attn(x, cache)).permute(1, 0, 2)
+
+
+class PMAAInteractiveModule(nn.Module):
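+ # Gated fusion of ViT tokens and CNN context: the tokens are embedded locally,
+ # the context is projected to the token dimension twice (a sigmoid gate and a
+ # content term), and the output is local_feat * gate + global_feat.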
+ def __init__(self,
+ emd_dim=1024,
+ context_dim=64,
+ kernel: int = 1,
+ norm=nn.BatchNorm2d,
+ local_groups=32,
+ global_groups=2,
+ return_multi_feats=False,
+ ):
+ super().__init__()
+ self.return_multi_feats = return_multi_feats
+ self.local_embedding = nn.Sequential(
+ nn.Conv2d(emd_dim, emd_dim, kernel, groups=local_groups,
+ padding=int((kernel - 1) / 2), bias=False),
+ norm(emd_dim)
+ )
+ self.global_embedding = nn.Sequential(
+ nn.Conv2d(context_dim, emd_dim, kernel, groups=global_groups,
+ padding=int((kernel - 1) / 2), bias=False),
+ norm(emd_dim)
+ )
+ self.global_act = nn.Sequential(
+ nn.Conv2d(context_dim, emd_dim, kernel, groups=global_groups,
+ padding=int((kernel - 1) / 2), bias=False),
+ norm(emd_dim)
+ )
+ self.act = nn.Sigmoid()
+ self._init_weights()
+
+ def _init_weights(self):
+ init_fn = _init_weight_goog
+ for n, m in self.named_modules():
+ init_fn(m, n)
+
+ def forward(self, x, cache, index):
+ if isinstance(cache, (list, tuple)):
+ cache = cache[index]
+ N, B, C = x.shape
+ H = W = int(math.sqrt(N))
+ # reshape x -> B, C, H, W
+ x = x.permute(1, 2, 0).reshape(B, C, H, W)
+ local_feat = self.local_embedding(x) # 32
+ global_act = self.global_act(cache)
+ sig_act = F.interpolate(self.act(global_act), size=(H, W)) # 32
+
+ global_feat = self.global_embedding(cache)
+ global_feat = F.interpolate(global_feat, size=(H, W)) # 32
+
+ out = local_feat * sig_act + global_feat
+
+ return out.permute(2, 3, 0, 1).reshape(N, B, C)
+
+
+class LayerNorm(nn.Module):
+ r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
+ shape (batch_size, height, width, channels) while channels_first corresponds to inputs
+ with shape (batch_size, channels, height, width).
+ """
+
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
+ self.eps = eps
+ self.data_format = data_format
+ if self.data_format not in ["channels_last", "channels_first"]:
+ raise NotImplementedError
+ self.normalized_shape = (normalized_shape, )
+
+ def forward(self, x):
+ if self.data_format == "channels_last":
+ return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+ elif self.data_format == "channels_first":
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
+
+
+class Block(nn.Module):
+ r""" ConvNeXt Block. There are two equivalent implementations:
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
+ We use (2) as we find it slightly faster in PyTorch
+
+ Args:
+ dim (int): Number of input channels.
+ drop_path (float): Stochastic depth rate. Default: 0.0
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+ """
+
+ def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
+ super().__init__()
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7,
+ padding=3, groups=dim) # depthwise conv
+ self.norm = LayerNorm(dim, eps=1e-6)
+ # pointwise/1x1 convs, implemented with linear layers
+ self.pwconv1 = nn.Linear(dim, 4 * dim)
+ self.act = nn.GELU()
+ self.pwconv2 = nn.Linear(4 * dim, dim)
+ self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
+ requires_grad=True) if layer_scale_init_value > 0 else None
+ self.drop_path = DropPath(
+ drop_path) if drop_path > 0. else nn.Identity()
+
+ def forward(self, x):
+ input = x
+ x = self.dwconv(x)
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
+ x = self.norm(x)
+ x = self.pwconv1(x)
+ x = self.act(x)
+ x = self.pwconv2(x)
+ if self.gamma is not None:
+ x = self.gamma * x
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
+
+ x = input + self.drop_path(x)
+ return x
+
+
+class ConvNeXt(nn.Module):
+ r""" ConvNeXt
+ A PyTorch impl of : `A ConvNet for the 2020s` -
+ https://arxiv.org/pdf/2201.03545.pdf
+
+ Args:
+ in_chans (int): Number of input image channels. Default: 3
+ num_classes (int): Number of classes for classification head. Default: 1000
+ depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
+ dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
+ drop_path_rate (float): Stochastic depth rate. Default: 0.
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
+ head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
+ """
+
+ def __init__(self, in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
+ drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3],
+ return_multi_feats=False,
+ return_last_feature=True
+ ):
+ super().__init__()
+ self.return_last_feature = return_last_feature
+ self.return_multi_feats = return_multi_feats
+
+ # stem and 3 intermediate downsampling conv layers
+ self.downsample_layers = nn.ModuleList()
+ stem = nn.Sequential(
+ nn.Conv2d(in_chans, dims[0], kernel_size=2, stride=2),
+ LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
+ )
+ self.downsample_layers.append(stem)
+ for i in range(3):
+ downsample_layer = nn.Sequential(
+ LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
+ nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
+ )
+ self.downsample_layers.append(downsample_layer)
+
+ # 4 feature resolution stages, each consisting of multiple residual blocks
+ self.stages = nn.ModuleList()
+ dp_rates = [x.item()
+ for x in torch.linspace(0, drop_path_rate, sum(depths))]
+ cur = 0
+ for i in range(4):
+ stage = nn.Sequential(
+ *[Block(dim=dims[i], drop_path=dp_rates[cur + j],
+ layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]
+ )
+ self.stages.append(stage)
+ cur += depths[i]
+
+ self.out_indices = out_indices
+
+ norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
+ for i_layer in range(4):
+ layer = norm_layer(dims[i_layer])
+ layer_name = f'norm{i_layer}'
+ self.add_module(layer_name, layer)
+
+ self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, (nn.Conv2d, nn.Linear)):
+ trunc_normal_(m.weight, std=.02)
+ nn.init.constant_(m.bias, 0)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+
+ def _init_weights(m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ if isinstance(pretrained, str):
+ self.apply(_init_weights)
+ # logger = get_root_logger()
+ # load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ self.apply(_init_weights)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward_features(self, x):
+ outs = []
+ for i in range(4):
+ x = self.downsample_layers[i](x)
+ x = self.stages[i](x)
+ if i in self.out_indices:
+ norm_layer = getattr(self, f'norm{i}')
+ x_out = norm_layer(x)
+ outs.append(x_out)
+ if self.return_multi_feats:
+ return tuple(outs)
+ if self.return_last_feature:
+ return outs[-1]
+ global_f = torch.zeros(
+ outs[-1].shape, requires_grad=True, device=outs[-1].device)
+ for fea in outs:
+ global_f = global_f + F.adaptive_avg_pool2d(
+ fea, output_size=outs[-1].shape[-2:]
+ )
+ return global_f
+
+ def forward(self, x):
+ x = self.forward_features(x)
+ return x
+
+
+class NoAdaptingModule(nn.Identity):
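+ # Pass-through module: returns the ViT tokens unchanged (the "no_adapting" baseline).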
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, x, cache, layer):
+ return x
+
+
+@MODELS.register_module()
+class CloudAdapter(nn.Module):
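+ # Cloud-Adapter: (1) a lightweight CNN branch (`cnn`, either a small ConvNeXt or
+ # a PMAA conv block) that extracts spatial context from the input image, and
+ # (2) one interactive module per transformer layer (`net`) that injects this
+ # context into the frozen ViT's tokens during the forward pass.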
+ def __init__(self,
+ cnn_type="convnext", # convnext or mobilenet
+ int_type="convnext", # cross_attention or
+ # 共同的参数 start
+ emd_dim=1024,
+ num_layers=24,
+
+ # 先判断是否返回多特征,之后再判断是否进行特征融合
+ return_multi_feats=True,
+ return_last_feature=False,
+
+ # 共同的参数 end
+
+ # pmaa 提取单个特征 or 多尺寸特征 start
+ hidden_channels=256,
+ depth=4,
+ norm=nn.BatchNorm2d,
+ act=nn.ReLU,
+ # pmaa 提取单个特征 or 多尺寸特征 end
+
+ # pmaa net start
+ local_groups=1,
+ global_groups=1,
+ # pmaa net end
+
+ # convnext 提取单个特征 or 多尺寸特征 start
+ context_dim=256,
+ rank_dim=None,
+ # convnext 提取单个特征 or 多尺寸特征 end,
+ has_stem=True,
+ has_block=True,
+ ):
+ super().__init__()
+ self.cnn = nn.Identity()
+ self.net = nn.Identity()
+ if cnn_type == "pmaa":
+ self.cnn = PMAAConvBlock(
+ hidden_channels=hidden_channels,
+ depth=depth,
+ norm=norm,
+ act=act,
+ return_multi_feats=return_multi_feats,
+ return_last_feature=return_last_feature,
+ has_stem=has_stem,
+ has_block=has_block
+ )
+ elif cnn_type == "convnext":
+ self.cnn = ConvNeXt(depths=[1]*4,
+ dims=[context_dim]*4,
+ return_multi_feats=return_multi_feats,
+ return_last_feature=return_last_feature
+ )
+
+ else:
+ raise ValueError(
+ f"cnn_type must be in ['convnext', 'pmaa'], but got {cnn_type}")
+
+ if int_type == "convnext":
+ self.net = nn.ModuleList(
+ ConvnextInteractiveModule(emd_dim, context_dim, rank_dim)
+ for _ in range(num_layers)
+ )
+ elif int_type == "pmaa":
+ self.net = nn.ModuleList(
+ PMAAInteractiveModule(
+ emd_dim, context_dim, local_groups=local_groups, global_groups=global_groups)
+ for _ in range(num_layers)
+ )
+
+ elif int_type == "no_adapting":
+ self.net = nn.ModuleList(
+ NoAdaptingModule() for _ in range(num_layers)
+ )
+ else:
+ raise ValueError(
+ f"int_type must be in ['convnext', 'pmaa', 'no_adapting'], but got {int_type}")
+
+ def forward(self, feats, layer, batch_first=True, has_cls_token=True, cache=None):
+ if batch_first:
+ feats = feats.permute(1, 0, 2) # (N+1, B, C), e.g. 1025 x 2 x 1024 including the cls token
+ if has_cls_token:
+ cls_token, feats = torch.tensor_split(feats, [1], dim=0)
+ # feats: (N, B, C), e.g. 1024 x 2 x 1024 without the cls token
+ # map the adapter call index to one of the 4 cached feature scales
+ feats = self.net[layer].forward(
+ feats, cache, layer // (len(self.net) // 4))
+
+ if has_cls_token:
+ feats = torch.cat([cls_token, feats], dim=0)
+ if batch_first:
+ feats = feats.permute(1, 0, 2)
+ return feats
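+
+# Example adapter config (illustrative sketch only, assuming the usual mmengine/mmseg
+# registry convention of selecting the class via its registered name; the keys simply
+# mirror the __init__ arguments above):
+# cloud_adapter_config = dict(
+#     type="CloudAdapter",
+#     cnn_type="pmaa", int_type="pmaa",
+#     emd_dim=1024, num_layers=24,
+#     context_dim=64, local_groups=1, global_groups=1,
+#     return_multi_feats=True, return_last_feature=False,
+# )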
+
diff --git a/cloud_adapter/cloud_adapter_dinov2.py b/cloud_adapter/cloud_adapter_dinov2.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8fadbae95e1a15e9a78b9582c629024ee32c0f
--- /dev/null
+++ b/cloud_adapter/cloud_adapter_dinov2.py
@@ -0,0 +1,115 @@
+from mmseg.models.builder import BACKBONES, MODELS
+from torch import nn as nn
+from .cloud_adapter import CloudAdapter
+from .dino_v2 import DinoVisionTransformer
+from .utils import set_requires_grad, set_train
+import torch
+import torch.nn.functional as F
+
+
+@BACKBONES.register_module()
+class CloudAdapterDinoVisionTransformer(DinoVisionTransformer):
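+ # DINOv2 backbone wrapped with the Cloud-Adapter: the CNN branch runs once per
+ # image to build a feature cache, and after each block listed in adapter_index
+ # the ViT tokens are fused with that cache. The forward pass returns multi-scale
+ # feature maps, optionally concatenated with the cache (has_cat).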
+ def __init__(
+ self,
+ cloud_adapter_config=None,
+ has_cat=False,
+ # denser alternative: [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, ...]
+ adapter_index=[0, 6, 12, 18], # indices of the transformer blocks after which the adapter is applied
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.cloud_adapter: CloudAdapter = MODELS.build(cloud_adapter_config)
+ self.has_cat = has_cat
+ self.adapter_index = adapter_index
+
+ def forward_features(self, x, masks=None):
+ B, _, h, w = x.shape
+ cache = self.cloud_adapter.cnn(x) # multi-scale features or a single fused feature from the CNN branch
+ H, W = h // self.patch_size, w // self.patch_size
+ x = self.prepare_tokens_with_masks(x, masks)
+ outs = []
+ cur_idx = 0 # index of the current interaction (adapter) module
+ for idx, blk in enumerate(self.blocks):
+ x = blk(x)
+ if idx in self.adapter_index:
+ x = self.cloud_adapter.forward(
+ x,
+ cur_idx,
+ batch_first=True,
+ has_cls_token=True,
+ cache=cache,
+ )
+ cur_idx += 1
+ if idx in self.out_indices:
+ outs.append(
+ x[:, 1:, :].permute(0, 2, 1).reshape(
+ B, -1, H, W).contiguous()
+ )
+ return outs, cache
+
+ def process_cache(self, ret, cache):
+ cache = F.interpolate(
+ cache, size=(ret.shape[-2], ret.shape[-1]), mode="bilinear", align_corners=False)
+ return cache
+
+ def forward(self, *args, **kwargs):
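+ # Build a simple feature pyramid from the ViT outputs by resizing the four
+ # selected feature maps to 4x, 2x, 1x, and 0.5x of the patch-grid resolution,
+ # optionally concatenating the CNN cache at each level (has_cat).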
+ ret, cache = self.forward_features(*args, **kwargs)
+ if isinstance(ret[0], torch.Tensor):
+ ret[0] = F.interpolate(
+ ret[0], scale_factor=4, mode="bilinear", align_corners=False
+ )
+ ret[1] = F.interpolate(
+ ret[1], scale_factor=2, mode="bilinear", align_corners=False
+ )
+ ret[3] = F.interpolate(
+ ret[3], scale_factor=0.5, mode="bilinear", align_corners=False
+ )
+ if self.has_cat:
+ if isinstance(cache, (tuple, list)):
+ ret[0] = torch.cat((ret[0], cache[0]), dim=1)
+ ret[1] = torch.cat((ret[1], cache[1]), dim=1)
+ ret[2] = torch.cat((ret[2], cache[2]), dim=1)
+ ret[3] = torch.cat((ret[3], cache[3]), dim=1)
+ else:
+ ret[0] = torch.cat((ret[0], self.process_cache(ret[0], cache)), dim=1)
+ ret[1] = torch.cat((ret[1], self.process_cache(ret[1], cache)), dim=1)
+ ret[2] = torch.cat((ret[2], self.process_cache(ret[2], cache)), dim=1)
+ ret[3] = torch.cat((ret[3], self.process_cache(ret[3], cache)), dim=1)
+ # ret[0] = torch.cat(ret[0], cache[0], dim=1) # bs 1024 128 128, bs 256 128 128
+ else:
+ ret[0][0] = F.interpolate(
+ ret[0][0], scale_factor=4, mode="bilinear", align_corners=False
+ )
+ ret[0][1] = F.interpolate(
+ ret[0][1], scale_factor=2, mode="bilinear", align_corners=False
+ )
+ ret[0][3] = F.interpolate(
+ ret[0][3], scale_factor=0.5, mode="bilinear", align_corners=False
+ )
+ if self.has_cat:
+ if isinstance(cache, (tuple, list)):
+ ret[0][0] = torch.cat((ret[0][0], cache[0]), dim=1)
+ ret[0][1] = torch.cat((ret[0][1], cache[1]), dim=1)
+ ret[0][2] = torch.cat((ret[0][2], cache[2]), dim=1)
+ ret[0][3] = torch.cat((ret[0][3], cache[3]), dim=1)
+ else:
+ ret[0][0] = torch.cat((ret[0][0], self.process_cache(ret[0][0], cache)), dim=1)
+ ret[0][1] = torch.cat((ret[0][1], self.process_cache(ret[0][1], cache)), dim=1)
+ ret[0][2] = torch.cat((ret[0][2], self.process_cache(ret[0][2], cache)), dim=1)
+ ret[0][3] = torch.cat((ret[0][3], self.process_cache(ret[0][3], cache)), dim=1)
+ return ret
+
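+ # Freeze the DINOv2 backbone during training: only the cloud_adapter sub-module
+ # is set to require gradients and kept in train mode.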
+ def train(self, mode: bool = True):
+ if not mode:
+ return super().train(mode)
+ set_requires_grad(self, ["cloud_adapter"])
+ set_train(self, ["cloud_adapter"])
+
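+ # Checkpoints only keep the adapter weights: every key that does not contain
+ # "cloud_adapter" is dropped from the returned state dict.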
+ def state_dict(self, destination, prefix, keep_vars):
+ state = super().state_dict(destination, prefix, keep_vars)
+ keys = [k for k in state.keys() if "cloud_adapter" not in k]
+ for key in keys:
+ state.pop(key)
+ if key in destination:
+ destination.pop(key)
+ return state
diff --git a/cloud_adapter/dbnet.py b/cloud_adapter/dbnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1dfa46e11f5befabfc4a0731fe8272bb0d5eb56
--- /dev/null
+++ b/cloud_adapter/dbnet.py
@@ -0,0 +1,680 @@
+# -*- coding: utf-8 -*-
+# @Time : 2024/7/26 上午11:19
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : dbnet.py
+# @Software: PyCharm
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from einops import rearrange
+
+
+# from models.Transformer.ViT import truncated_normal_
+
+# Decoder refinement convolution module (SBR)
+class SBR(nn.Module):
+ def __init__(self, in_ch):
+ super(SBR, self).__init__()
+ self.conv1x3 = nn.Sequential(
+ nn.Conv2d(in_ch, in_ch, kernel_size=(1, 3), stride=1, padding=(0, 1)),
+ nn.BatchNorm2d(in_ch),
+ nn.ReLU(True)
+ )
+ self.conv3x1 = nn.Sequential(
+ nn.Conv2d(in_ch, in_ch, kernel_size=(3, 1), stride=1, padding=(1, 0)),
+ nn.BatchNorm2d(in_ch),
+ nn.ReLU(True)
+ )
+
+ def forward(self, x):
+ out = self.conv3x1(self.conv1x3(x)) # apply the 1x3 conv first, then feed its result into the 3x1 conv
+ return out + x
+
+
+# Downsampling conv module for stages 1, 2, 3
+class c_stage123(nn.Module):
+ def __init__(self, in_chans, out_chans):
+ super().__init__()
+ self.stage123 = nn.Sequential(
+ nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=3, stride=2, padding=1),
+ nn.BatchNorm2d(out_chans),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=out_chans, out_channels=out_chans, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(out_chans),
+ nn.ReLU(),
+ )
+ self.conv1x1_123 = nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=1)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ def forward(self, x):
+ stage123 = self.stage123(x) # 3x3 convs, 2x downsampling: 3*224*224 --> 64*112*112
+ max = self.maxpool(x) # max pooling, 2x downsampling: 3*224*224 --> 3*112*112
+ max = self.conv1x1_123(max) # 1x1 conv: 3*112*112 --> 64*112*112
+ stage123 = stage123 + max # residual connection (relies on broadcasting)
+ return stage123
+
+
+# Downsampling conv module for stages 4 and 5
+class c_stage45(nn.Module):
+ def __init__(self, in_chans, out_chans):
+ super().__init__()
+ self.stage45 = nn.Sequential(
+ nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=3, stride=2, padding=1),
+ nn.BatchNorm2d(out_chans),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=out_chans, out_channels=out_chans, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(out_chans),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=out_chans, out_channels=out_chans, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(out_chans),
+ nn.ReLU(),
+ )
+ self.conv1x1_45 = nn.Conv2d(in_channels=in_chans, out_channels=out_chans, kernel_size=1)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ def forward(self, x):
+ stage45 = self.stage45(x) # 3x3 conv block, 2x downsampling
+ max = self.maxpool(x) # max pooling, 2x downsampling
+ max = self.conv1x1_45(max) # 1x1 conv to adjust the channel count
+ stage45 = stage45 + max # residual connection
+ return stage45
+
+
+class Identity(nn.Module): # identity mapping
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, x):
+ return x
+
+
+# Lightweight depthwise-separable convolution module
+class DepthwiseConv2d(nn.Module): # used inside the self-attention layers
+ def __init__(self, in_chans, out_chans, kernel_size=1, stride=1, padding=0, dilation=1):
+ super().__init__()
+ # depthwise conv
+ self.depthwise = nn.Conv2d(
+ in_channels=in_chans,
+ out_channels=in_chans,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation, # dilation rate of the depthwise conv
+ groups=in_chans # one group per input channel (depthwise)
+ )
+ # batch norm
+ self.bn = nn.BatchNorm2d(num_features=in_chans)
+
+ # pointwise conv
+ self.pointwise = nn.Conv2d(
+ in_channels=in_chans,
+ out_channels=out_chans,
+ kernel_size=1
+ )
+
+ def forward(self, x):
+ x = self.depthwise(x)
+ x = self.bn(x)
+ x = self.pointwise(x)
+ return x
+
+
+# residual skip connection
+class Residual(nn.Module):
+ def __init__(self, fn):
+ super().__init__()
+ self.fn = fn
+
+ def forward(self, input, **kwargs):
+ x = self.fn(input, **kwargs)
+ return (x + input)
+
+
+# Pre-normalization wrapper: applies LayerNorm before the wrapped layer
+class PreNorm(nn.Module):
+ def __init__(self, dim, fn):
+ super().__init__()
+ self.norm = nn.LayerNorm(dim)
+ self.fn = fn
+
+ def forward(self, input, **kwargs):
+ return self.fn(self.norm(input), **kwargs)
+
+
+# The FeedForward layer increases the representational capacity
+class FeedForward(nn.Module):
+ def __init__(self, dim, hidden_dim, dropout=0.):
+ super().__init__()
+ self.net = nn.Sequential(
+ nn.Linear(in_features=dim, out_features=hidden_dim),
+ nn.GELU(),
+ nn.Dropout(dropout),
+ nn.Linear(in_features=hidden_dim, out_features=dim),
+ nn.Dropout(dropout)
+ )
+
+ def forward(self, input):
+ return self.net(input)
+
+
+class ConvAttnetion(nn.Module):
+ '''
+ Uses depthwise-separable Conv2d projections to produce q, k, v instead of the linear projections used in ViT.
+ '''
+
+ def __init__(self, dim, img_size, heads=8, dim_head=64, kernel_size=3, q_stride=1, k_stride=1, v_stride=1,
+ dropout=0., last_stage=False):
+ super().__init__()
+ self.last_stage = last_stage
+ self.img_size = img_size
+ inner_dim = dim_head * heads # 512
+ project_out = not (heads == 1 and dim_head == dim)
+
+ self.heads = heads
+ self.scale = dim_head ** (-0.5)
+
+ pad = (kernel_size - q_stride) // 2
+
+ self.to_q = DepthwiseConv2d(in_chans=dim, out_chans=inner_dim, kernel_size=kernel_size, stride=q_stride,
+ padding=pad) # 自注意力机制
+ self.to_k = DepthwiseConv2d(in_chans=dim, out_chans=inner_dim, kernel_size=kernel_size, stride=k_stride,
+ padding=pad)
+ self.to_v = DepthwiseConv2d(in_chans=dim, out_chans=inner_dim, kernel_size=kernel_size, stride=v_stride,
+ padding=pad)
+
+ self.to_out = nn.Sequential(
+ nn.Linear(
+ in_features=inner_dim,
+ out_features=dim
+ ),
+ nn.Dropout(dropout)
+ ) if project_out else Identity()
+
+ def forward(self, x):
+ b, n, c, h = *x.shape, self.heads # unpack (b, n, c) from x.shape and append the number of heads
+
+ # print(x.shape)
+ # print('+++++++++++++++++++++++++++++++++')
+
+ # this branch is unused in DBNet (last_stage is never set to True)
+ if self.last_stage:
+ cls_token = x[:, 0]
+ # print(cls_token.shape)
+ # print('+++++++++++++++++++++++++++++++++')
+ x = x[:, 1:] # drop the first token (cls token)
+
+ cls_token = rearrange(torch.unsqueeze(cls_token, dim=1), 'b n (h d) -> b h n d', h=h)
+
+ # rearrange reorders the tensor dimensions (an einops replacement for reshape/view/transpose/permute)
+ x = rearrange(x, 'b (l w) n -> b n l w', l=self.img_size, w=self.img_size) # [1, 3136, 64]-->1*64*56*56
+ # batch_size, N (channels), h, w
+
+ q = self.to_q(x) # 1*64*56*56-->1*64*56*56
+ # print(q.shape)
+ # print('++++++++++++++')
+ q = rearrange(q, 'b (h d) l w -> b h (l w) d', h=h) # 1*64*56*56-->1*1*3136*64
+ # print(q.shape)
+ # print('=====================')
+ # batch_size,head,h*w,dim_head
+
+ k = self.to_k(x) # same operations as for q
+ k = rearrange(k, 'b (h d) l w -> b h (l w) d', h=h)
+ # batch_size,head,h*w,dim_head
+
+ v = self.to_v(x) # same operations as for q
+ # print(v.shape)
+ # print('[[[[[[[[[[[[[[[[[[[[[[[[[[[[')
+ v = rearrange(v, 'b (h d) l w -> b h (l w) d', h=h)
+ # print(v.shape)
+ # print(']]]]]]]]]]]]]]]]]]]]]]]]]]]')
+ # batch_size,head,h*w,dim_head
+
+ if self.last_stage:
+ # print(q.shape)
+ # print('================')
+ q = torch.cat([cls_token, q], dim=2)
+ # print(q.shape)
+ # print('++++++++++++++++++')
+ v = torch.cat([cls_token, v], dim=2)
+ k = torch.cat([cls_token, k], dim=2)
+
+ # calculate attention by matmul + scale
+ # permute:(batch_size,head,dim_head,h*w
+ # print(k.shape)
+ # print('++++++++++++++++++++')
+ k = k.permute(0, 1, 3, 2) # 1*1*3136*64-->1*1*64*3136
+ # print(k.shape)
+ # print('====================')
+ attention = (q.matmul(k)) # 1*1*3136*3136
+ # print(attention.shape)
+ # print('--------------------')
+ attention = attention * self.scale # scale the logits so the softmax stays well-conditioned (avoids vanishing/exploding gradients)
+ # print(attention.shape)
+ # print('####################')
+ # pass a softmax
+ attention = F.softmax(attention, dim=-1)
+ # print(attention.shape)
+ # print('********************')
+
+ # matmul v
+ # attention.matmul(v):(batch_size,head,h*w,dim_head)
+ # permute:(batch_size,h*w,head,dim_head)
+ out = (attention.matmul(v)).permute(0, 2, 1, 3).reshape(b, n,
+ c) # 1*3136*64: multiply the attention weights with the values, then reshape to (batch_size, sequence_length, embed_dim)
+
+ # linear project
+ out = self.to_out(out)
+ return out
+
+
+# Reshape Layers
+class Rearrange(nn.Module):
+ def __init__(self, string, h, w):
+ super().__init__()
+ self.string = string
+ self.h = h
+ self.w = w
+
+ def forward(self, input):
+
+ if self.string == 'b c h w -> b (h w) c':
+ N, C, H, W = input.shape
+ # print(input.shape)
+ x = torch.reshape(input, shape=(N, -1, self.h * self.w)).permute(0, 2, 1)
+ # print(x.shape)
+ # print('+++++++++++++++++++')
+ if self.string == 'b (h w) c -> b c h w':
+ N, _, C = input.shape
+ # print(input.shape)
+ x = torch.reshape(input, shape=(N, self.h, self.w, -1)).permute(0, 3, 1, 2)
+ # print(x.shape)
+ # print('=====================')
+ return x
+
+
+# Transformer layers
+class Transformer(nn.Module):
+ def __init__(self, dim, img_size, depth, heads, dim_head, mlp_dim, dropout=0., last_stage=False):
+ super().__init__()
+ self.layers = nn.ModuleList([ # ModuleList registers the sub-modules so their parameters are tracked
+ nn.ModuleList([
+ PreNorm(dim=dim, fn=ConvAttnetion(dim, img_size, heads=heads, dim_head=dim_head, dropout=dropout,
+ last_stage=last_stage)), # pre-norm wrapped attention
+ PreNorm(dim=dim, fn=FeedForward(dim=dim, hidden_dim=mlp_dim, dropout=dropout))
+ ]) for _ in range(depth)
+ ])
+
+ def forward(self, x):
+ for attn, ff in self.layers:
+ x = x + attn(x)
+ x = x + ff(x)
+ return x
+
+
+class DBNet(nn.Module): # main model
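+ # Dual-branch segmentation network: a CvT-style transformer branch (conv token
+ # embeddings + transformer stages) and a plain CNN branch, fused at each stage by
+ # the CTmerge blocks; a depthwise-separable decoder with SBR refinement upsamples
+ # back to the input resolution and the 1x1 head produces per-class logits.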
+ def __init__(self, img_size, in_channels, num_classes, dim=64, kernels=[7, 3, 3, 3], strides=[4, 2, 2, 2],
+ heads=[1, 3, 6, 6],
+ depth=[1, 2, 10, 10], pool='cls', dropout=0., emb_dropout=0., scale_dim=4, ):
+ super().__init__()
+
+ assert pool in ['cls', 'mean'], 'pool type must be either cls or mean pooling'
+ self.pool = pool
+ self.dim = dim
+
+ # stage1
+ # k:7 s:4 in: 1, 64, 56, 56 out: 1, 3136, 64
+ self.stage1_conv_embed = nn.Sequential(
+ nn.Conv2d( # 1*3*224*224-->[1, 64, 56, 56]
+ in_channels=in_channels,
+ out_channels=dim,
+ kernel_size=kernels[0],
+ stride=strides[0],
+ padding=2
+ ),
+ Rearrange('b c h w -> b (h w) c', h=img_size // 4, w=img_size // 4), # [1, 64, 56, 56]-->[1, 3136, 64]
+ nn.LayerNorm(dim) # LayerNorm over the embedding dimension
+ )
+
+ self.stage1_transformer = nn.Sequential(
+ Transformer( #
+ dim=dim,
+ img_size=img_size // 4,
+ depth=depth[0], # number of transformer blocks in this stage
+ heads=heads[0],
+ dim_head=self.dim, # dimension of each attention head (usually embed_dim // heads)
+ mlp_dim=dim * scale_dim, # hidden size of the feed-forward network (embed_dim * scale factor)
+ dropout=dropout,
+ # last_stage=last_stage # flag marking whether this is the final transformer stage
+ ),
+ Rearrange('b (h w) c -> b c h w', h=img_size // 4, w=img_size // 4)
+ )
+
+ # stage2
+ # k:3 s:2 in: 1, 192, 28, 28 out: 1, 784, 192
+ in_channels = dim
+ scale = heads[1] // heads[0]
+ dim = scale * dim
+
+ self.stage2_conv_embed = nn.Sequential(
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=dim,
+ kernel_size=kernels[1],
+ stride=strides[1],
+ padding=1
+ ),
+ Rearrange('b c h w -> b (h w) c', h=img_size // 8, w=img_size // 8),
+ nn.LayerNorm(dim)
+ )
+
+ self.stage2_transformer = nn.Sequential(
+ Transformer(
+ dim=dim,
+ img_size=img_size // 8,
+ depth=depth[1],
+ heads=heads[1],
+ dim_head=self.dim,
+ mlp_dim=dim * scale_dim,
+ dropout=dropout
+ ),
+ Rearrange('b (h w) c -> b c h w', h=img_size // 8, w=img_size // 8)
+ )
+
+ # stage3
+ in_channels = dim
+ scale = heads[2] // heads[1]
+ dim = scale * dim
+
+ self.stage3_conv_embed = nn.Sequential(
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=dim,
+ kernel_size=kernels[2],
+ stride=strides[2],
+ padding=1
+ ),
+ Rearrange('b c h w -> b (h w) c', h=img_size // 16, w=img_size // 16),
+ nn.LayerNorm(dim)
+ )
+
+ self.stage3_transformer = nn.Sequential(
+ Transformer(
+ dim=dim,
+ img_size=img_size // 16,
+ depth=depth[2],
+ heads=heads[2],
+ dim_head=self.dim,
+ mlp_dim=dim * scale_dim,
+ dropout=dropout
+ ),
+ Rearrange('b (h w) c -> b c h w', h=img_size // 16, w=img_size // 16)
+ )
+
+ # stage4
+ in_channels = dim
+ scale = heads[3] // heads[2]
+ dim = scale * dim
+
+ self.stage4_conv_embed = nn.Sequential(
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=dim,
+ kernel_size=kernels[3],
+ stride=strides[3],
+ padding=1
+ ),
+ Rearrange('b c h w -> b (h w) c', h=img_size // 32, w=img_size // 32),
+ nn.LayerNorm(dim)
+ )
+
+ self.stage4_transformer = nn.Sequential(
+ Transformer(
+ dim=dim, img_size=img_size // 32,
+ depth=depth[3],
+ heads=heads[3],
+ dim_head=self.dim,
+ mlp_dim=dim * scale_dim,
+ dropout=dropout,
+ ),
+ Rearrange('b (h w) c -> b c h w', h=img_size // 32, w=img_size // 32)
+ )
+
+ ### CNN Branch ###
+ self.c_stage1 = c_stage123(in_chans=3, out_chans=64)
+ self.c_stage2 = c_stage123(in_chans=64, out_chans=128)
+ self.c_stage3 = c_stage123(in_chans=128, out_chans=384)
+ self.c_stage4 = c_stage45(in_chans=384, out_chans=512)
+ self.c_stage5 = c_stage45(in_chans=512, out_chans=1024)
+ self.c_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ self.up_conv1 = nn.Conv2d(in_channels=192, out_channels=128, kernel_size=1)
+ self.up_conv2 = nn.Conv2d(in_channels=384, out_channels=512, kernel_size=1)
+
+ ### CTmerge ###
+ self.CTmerge1 = nn.Sequential(
+ nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(64),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(64),
+ nn.ReLU(),
+ )
+ self.CTmerge2 = nn.Sequential(
+ nn.Conv2d(in_channels=320, out_channels=128, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(128),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(128),
+ nn.ReLU(),
+ )
+ self.CTmerge3 = nn.Sequential(
+ nn.Conv2d(in_channels=768, out_channels=512, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(512),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=512, out_channels=384, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(384),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(384),
+ nn.ReLU(),
+ )
+
+ self.CTmerge4 = nn.Sequential(
+ nn.Conv2d(in_channels=896, out_channels=640, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(640),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=640, out_channels=512, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(512),
+ nn.ReLU(),
+ nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
+ nn.BatchNorm2d(512),
+ nn.ReLU(),
+ )
+
+ # decoder
+ self.decoder4 = nn.Sequential(
+ DepthwiseConv2d(
+ in_chans=1408,
+ out_chans=1024,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ DepthwiseConv2d(
+ in_chans=1024,
+ out_chans=512,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ nn.GELU()
+ )
+ self.decoder3 = nn.Sequential(
+ DepthwiseConv2d(
+ in_chans=896,
+ out_chans=512,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ DepthwiseConv2d(
+ in_chans=512,
+ out_chans=384,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ nn.GELU()
+ )
+
+ self.decoder2 = nn.Sequential(
+ DepthwiseConv2d(
+ in_chans=576,
+ out_chans=256,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ DepthwiseConv2d(
+ in_chans=256,
+ out_chans=192,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ nn.GELU()
+ )
+
+ self.decoder1 = nn.Sequential(
+ DepthwiseConv2d(
+ in_chans=256,
+ out_chans=64,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ DepthwiseConv2d(
+ in_chans=64,
+ out_chans=16,
+ kernel_size=3,
+ stride=1,
+ padding=1
+ ),
+ nn.GELU()
+ )
+ self.sbr4 = SBR(512)
+ self.sbr3 = SBR(384)
+ self.sbr2 = SBR(192)
+ self.sbr1 = SBR(16)
+
+ self.head = nn.Conv2d(in_channels=16, out_channels=num_classes, kernel_size=1)
+
+ def forward(self, input):
+ ### encoder ###
+ # stage1 = ts1 cat cs1
+ # t_s1 = self.t_stage1(input)
+ # print(input.shape)
+ # print('++++++++++++++++++++++')
+
+ t_s1 = self.stage1_conv_embed(input) # 1*3*224*224-->1*3136*64
+
+ # print(t_s1.shape)
+ # print('======================')
+
+ t_s1 = self.stage1_transformer(t_s1) # 1*3136*64-->1*64*56*56
+
+ # print(t_s1.shape)
+ # print('----------------------')
+
+ c_s1 = self.c_stage1(input) # 1*3*224*224-->1*64*112*112
+
+ # print(c_s1.shape)
+ # print('!!!!!!!!!!!!!!!!!!!!!!!')
+
+ stage1 = self.CTmerge1(torch.cat([t_s1, self.c_max(c_s1)], dim=1)) # 1*64*56*56 # fuse the two branches
+
+ # print(stage1.shape)
+ # print('[[[[[[[[[[[[[[[[[[[[[[[')
+
+ # stage2 = ts2 up cs2
+ # t_s2 = self.t_stage2(stage1)
+ t_s2 = self.stage2_conv_embed(stage1) # 1*64*56*56-->1*784*192 # stage2_conv_embed turns the feature map into a token sequence
+
+ # print(t_s2.shape)
+ # print('[[[[[[[[[[[[[[[[[[[[[[[')
+ t_s2 = self.stage2_transformer(t_s2) # 1*784*192-->1*192*28*28
+ # print(t_s2.shape)
+ # print('+++++++++++++++++++++++++')
+
+ c_s2 = self.c_stage2(c_s1) # 1*64*112*112-->1*128*56*56
+ stage2 = self.CTmerge2(
+ torch.cat([c_s2, F.interpolate(t_s2, size=c_s2.size()[2:], mode='bilinear', align_corners=True)],
+ dim=1)) # bilinear interpolation; 1*128*56*56
+
+ # stage3 = ts3 cat cs3
+ # t_s3 = self.t_stage3(t_s2)
+ t_s3 = self.stage3_conv_embed(t_s2) # 1*192*28*28-->1*196*384
+ # print(t_s3.shape)
+ # print('///////////////////////')
+ t_s3 = self.stage3_transformer(t_s3) # 1*196*384-->1*384*14*14
+ # print(t_s3.shape)
+ # print('....................')
+ c_s3 = self.c_stage3(stage2) # 1*128*56*56-->1*384*28*28
+ stage3 = self.CTmerge3(torch.cat([t_s3, self.c_max(c_s3)], dim=1)) # 1*384*14*14
+
+ # stage4 = ts4 up cs4
+ # t_s4 = self.t_stage4(stage3)
+ t_s4 = self.stage4_conv_embed(stage3) # 1*384*14*14-->1*49*384
+ # print(t_s4.shape)
+ # print(';;;;;;;;;;;;;;;;;;;;;;;')
+ t_s4 = self.stage4_transformer(t_s4) # 1*49*384-->1*384*7*7
+ # print(t_s4.shape)
+ # print('::::::::::::::::::::')
+
+ c_s4 = self.c_stage4(c_s3) # 1*384*28*28-->1*512*14*14
+ stage4 = self.CTmerge4(
+ torch.cat([c_s4, F.interpolate(t_s4, size=c_s4.size()[2:], mode='bilinear', align_corners=True)],
+ dim=1)) # 1*512*14*14
+
+ # cs5
+ c_s5 = self.c_stage5(stage4) # 1*512*14*14-->1*1024*7*7
+
+ ### decoder ###
+ decoder4 = torch.cat([c_s5, t_s4], dim=1) # 1*1408*7*7
+ decoder4 = self.decoder4(decoder4) # 1*1408*7*7-->1*512*7*7
+ decoder4 = F.interpolate(decoder4, size=c_s3.size()[2:], mode='bilinear',
+ align_corners=True) # 1*512*7*7-->1*512*28*28
+ decoder4 = self.sbr4(decoder4) # 1*512*28*28
+ # print(decoder4.shape)
+
+ decoder3 = torch.cat([decoder4, c_s3], dim=1) # 1*896*28*28
+ decoder3 = self.decoder3(decoder3) # 1*384*28*28
+ decoder3 = F.interpolate(decoder3, size=t_s2.size()[2:], mode='bilinear', align_corners=True) # 1*384*28*28
+ decoder3 = self.sbr3(decoder3) # 1*384*28*28
+ # print(decoder3.shape)
+
+ decoder2 = torch.cat([decoder3, t_s2], dim=1) # 1*576*28*28
+ decoder2 = self.decoder2(decoder2) # 1*192*28*28
+ decoder2 = F.interpolate(decoder2, size=c_s1.size()[2:], mode='bilinear', align_corners=True) # 1*192*112*112
+ decoder2 = self.sbr2(decoder2) # 1*192*112*112
+ # print(decoder2.shape)
+
+ decoder1 = torch.cat([decoder2, c_s1], dim=1) # 1*256*112*112
+ decoder1 = self.decoder1(decoder1) # 1*16*112*112
+ # print(decoder1.shape)
+ final = F.interpolate(decoder1, size=input.size()[2:], mode='bilinear', align_corners=True) # 1*16*224*224
+ # print(final.shape)
+ # final = self.sbr1(decoder1)
+ # print(final.shape)
+ final = self.head(final) # 1*3*224*224
+
+ return final
+
+
+if __name__ == '__main__':
+ x = torch.rand(1, 3, 224, 224).cuda()
+ model = DBNet(img_size=224, in_channels=3, num_classes=7).cuda()
+ y = model(x)
+ print(y.shape)
+ # torch.Size([1, 7, 224, 224])
\ No newline at end of file
diff --git a/cloud_adapter/dino_layers/__init__.py b/cloud_adapter/dino_layers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0498f467b29e3b2d226f6e5635172f13359b13ea
--- /dev/null
+++ b/cloud_adapter/dino_layers/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+from .dino_head import DINOHead
+from .mlp import Mlp
+from .patch_embed import PatchEmbed
+from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused
+from .block import NestedTensorBlock, drop_add_residual_stochastic_depth
+from .attention import MemEffAttention
\ No newline at end of file
diff --git a/cloud_adapter/dino_layers/__pycache__/__init__.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15b97bfd150ab54d91154797abe495b5840cfc64
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/__init__.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/attention.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/attention.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..273a8448cbf71d3dfad057fb06fb4c173abae044
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/attention.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/block.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/block.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b44d5fb811090aff9247c738e9117c7fa8ccd423
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/block.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/dino_head.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/dino_head.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09204aa408f4e40d0f52b33b35212de13503da31
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/dino_head.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/drop_path.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/drop_path.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70e796f308762c79e34d4e575c10afb7dd618ebf
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/drop_path.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/layer_scale.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/layer_scale.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e882de4b9422593d2245d04595fe8d12e2ba4d8
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/layer_scale.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/mlp.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/mlp.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..235537269443cff2e7f2c744170486f3ac33420c
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/mlp.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/patch_embed.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/patch_embed.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..637ae567241fab3d12aaa31006a1fa794bb888bf
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/patch_embed.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/__pycache__/swiglu_ffn.cpython-38.pyc b/cloud_adapter/dino_layers/__pycache__/swiglu_ffn.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6454aaaa8d96bcbc05d70d627fe48ec2a45709b
Binary files /dev/null and b/cloud_adapter/dino_layers/__pycache__/swiglu_ffn.cpython-38.pyc differ
diff --git a/cloud_adapter/dino_layers/attention.py b/cloud_adapter/dino_layers/attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fb76ef2816164729a58cceb18d0f000cfb18777
--- /dev/null
+++ b/cloud_adapter/dino_layers/attention.py
@@ -0,0 +1,89 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
+
+import logging
+import os
+import warnings
+
+from torch import Tensor
+from torch import nn
+
+
+logger = logging.getLogger("dinov2")
+
+
+XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None
+try:
+ if XFORMERS_ENABLED:
+ from xformers.ops import memory_efficient_attention, unbind
+
+ XFORMERS_AVAILABLE = True
+ warnings.warn("xFormers is available (Attention)")
+ else:
+ warnings.warn("xFormers is disabled (Attention)")
+ raise ImportError
+except ImportError:
+ XFORMERS_AVAILABLE = False
+ warnings.warn("xFormers is not available (Attention)")
+
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = False,
+ proj_bias: bool = True,
+ attn_drop: float = 0.0,
+ proj_drop: float = 0.0,
+ ) -> None:
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = head_dim**-0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim, bias=proj_bias)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x: Tensor) -> Tensor:
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+
+ q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
+ attn = q @ k.transpose(-2, -1)
+
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class MemEffAttention(Attention):
+ def forward(self, x: Tensor, attn_bias=None) -> Tensor:
+ if not XFORMERS_AVAILABLE:
+ if attn_bias is not None:
+ raise AssertionError("xFormers is required for using nested tensors")
+ return super().forward(x)
+
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
+
+ q, k, v = unbind(qkv, 2)
+
+ x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
+ x = x.reshape([B, N, C])
+
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
diff --git a/cloud_adapter/dino_layers/block.py b/cloud_adapter/dino_layers/block.py
new file mode 100644
index 0000000000000000000000000000000000000000..930787b262faac4f2264797496faff75ac56b7cc
--- /dev/null
+++ b/cloud_adapter/dino_layers/block.py
@@ -0,0 +1,260 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
+
+import logging
+import os
+from typing import Callable, List, Any, Tuple, Dict
+import warnings
+
+import torch
+from torch import nn, Tensor
+
+from .attention import Attention, MemEffAttention
+from .drop_path import DropPath
+from .layer_scale import LayerScale
+from .mlp import Mlp
+
+
+logger = logging.getLogger("dinov2")
+
+
+XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None
+try:
+ if XFORMERS_ENABLED:
+ from xformers.ops import fmha, scaled_index_add, index_select_cat
+
+ XFORMERS_AVAILABLE = True
+ warnings.warn("xFormers is available (Block)")
+ else:
+ warnings.warn("xFormers is disabled (Block)")
+ raise ImportError
+except ImportError:
+ XFORMERS_AVAILABLE = False
+
+ warnings.warn("xFormers is not available (Block)")
+
+
+class Block(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ qkv_bias: bool = False,
+ proj_bias: bool = True,
+ ffn_bias: bool = True,
+ drop: float = 0.0,
+ attn_drop: float = 0.0,
+ init_values=None,
+ drop_path: float = 0.0,
+ act_layer: Callable[..., nn.Module] = nn.GELU,
+ norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
+ attn_class: Callable[..., nn.Module] = Attention,
+ ffn_layer: Callable[..., nn.Module] = Mlp,
+ ) -> None:
+ super().__init__()
+ # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
+ self.norm1 = norm_layer(dim)
+ self.attn = attn_class(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ proj_bias=proj_bias,
+ attn_drop=attn_drop,
+ proj_drop=drop,
+ )
+ self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = ffn_layer(
+ in_features=dim,
+ hidden_features=mlp_hidden_dim,
+ act_layer=act_layer,
+ drop=drop,
+ bias=ffn_bias,
+ )
+ self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
+
+ self.sample_drop_ratio = drop_path
+
+ def forward(self, x: Tensor) -> Tensor:
+ def attn_residual_func(x: Tensor) -> Tensor:
+ return self.ls1(self.attn(self.norm1(x)))
+
+ def ffn_residual_func(x: Tensor) -> Tensor:
+ return self.ls2(self.mlp(self.norm2(x)))
+
+ if self.training and self.sample_drop_ratio > 0.1:
+ # the overhead is compensated only for a drop path rate larger than 0.1
+ x = drop_add_residual_stochastic_depth(
+ x,
+ residual_func=attn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ )
+ x = drop_add_residual_stochastic_depth(
+ x,
+ residual_func=ffn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ )
+ elif self.training and self.sample_drop_ratio > 0.0:
+ x = x + self.drop_path1(attn_residual_func(x))
+ x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
+ else:
+ x = x + attn_residual_func(x)
+ x = x + ffn_residual_func(x)
+ return x
+
+
+def drop_add_residual_stochastic_depth(
+ x: Tensor,
+ residual_func: Callable[[Tensor], Tensor],
+ sample_drop_ratio: float = 0.0,
+) -> Tensor:
+ # 1) extract subset using permutation
+ b, n, d = x.shape
+ sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
+ brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
+ x_subset = x[brange]
+
+ # 2) apply residual_func to get residual
+ residual = residual_func(x_subset)
+
+ x_flat = x.flatten(1)
+ residual = residual.flatten(1)
+
+ residual_scale_factor = b / sample_subset_size
+
+ # 3) add the residual
+ x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
+ return x_plus_residual.view_as(x)
+
+
+def get_branges_scales(x, sample_drop_ratio=0.0):
+ b, n, d = x.shape
+ sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
+ brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
+ residual_scale_factor = b / sample_subset_size
+ return brange, residual_scale_factor
+
+
+def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
+ if scaling_vector is None:
+ x_flat = x.flatten(1)
+ residual = residual.flatten(1)
+ x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
+ else:
+ x_plus_residual = scaled_index_add(
+ x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
+ )
+ return x_plus_residual
+
+
+attn_bias_cache: Dict[Tuple, Any] = {}
+
+
+def get_attn_bias_and_cat(x_list, branges=None):
+ """
+ this will perform the index select, cat the tensors, and provide the attn_bias from cache
+ """
+ batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
+ all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
+ if all_shapes not in attn_bias_cache.keys():
+ seqlens = []
+ for b, x in zip(batch_sizes, x_list):
+ for _ in range(b):
+ seqlens.append(x.shape[1])
+ attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
+ attn_bias._batch_sizes = batch_sizes
+ attn_bias_cache[all_shapes] = attn_bias
+
+ if branges is not None:
+ cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
+ else:
+ tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
+ cat_tensors = torch.cat(tensors_bs1, dim=1)
+
+ return attn_bias_cache[all_shapes], cat_tensors
+
+
+def drop_add_residual_stochastic_depth_list(
+ x_list: List[Tensor],
+ residual_func: Callable[[Tensor, Any], Tensor],
+ sample_drop_ratio: float = 0.0,
+ scaling_vector=None,
+) -> Tensor:
+ # 1) generate random set of indices for dropping samples in the batch
+ branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
+ branges = [s[0] for s in branges_scales]
+ residual_scale_factors = [s[1] for s in branges_scales]
+
+ # 2) get attention bias and index+concat the tensors
+ attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
+
+ # 3) apply residual_func to get residual, and split the result
+ residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
+
+ outputs = []
+ for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
+ outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
+ return outputs
+
+
+class NestedTensorBlock(Block):
+ def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
+ """
+ x_list contains a list of tensors to nest together and run
+ """
+ assert isinstance(self.attn, MemEffAttention)
+
+ if self.training and self.sample_drop_ratio > 0.0:
+
+ def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
+ return self.attn(self.norm1(x), attn_bias=attn_bias)
+
+ def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
+ return self.mlp(self.norm2(x))
+
+ x_list = drop_add_residual_stochastic_depth_list(
+ x_list,
+ residual_func=attn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
+ )
+ x_list = drop_add_residual_stochastic_depth_list(
+ x_list,
+ residual_func=ffn_residual_func,
+ sample_drop_ratio=self.sample_drop_ratio,
+ scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
+ )
+ return x_list
+ else:
+
+ def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
+ return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
+
+ def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
+ return self.ls2(self.mlp(self.norm2(x)))
+
+ attn_bias, x = get_attn_bias_and_cat(x_list)
+ x = x + attn_residual_func(x, attn_bias=attn_bias)
+ x = x + ffn_residual_func(x)
+ return attn_bias.split(x)
+
+ def forward(self, x_or_x_list):
+ if isinstance(x_or_x_list, Tensor):
+ return super().forward(x_or_x_list)
+ elif isinstance(x_or_x_list, list):
+ if not XFORMERS_AVAILABLE:
+ raise AssertionError("xFormers is required for using nested tensors")
+ return self.forward_nested(x_or_x_list)
+ else:
+ raise AssertionError
diff --git a/cloud_adapter/dino_layers/dino_head.py b/cloud_adapter/dino_layers/dino_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ace8ffd6297a1dd480b19db407b662a6ea0f565
--- /dev/null
+++ b/cloud_adapter/dino_layers/dino_head.py
@@ -0,0 +1,58 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+import torch
+import torch.nn as nn
+from torch.nn.init import trunc_normal_
+from torch.nn.utils import weight_norm
+
+
+class DINOHead(nn.Module):
+ def __init__(
+ self,
+ in_dim,
+ out_dim,
+ use_bn=False,
+ nlayers=3,
+ hidden_dim=2048,
+ bottleneck_dim=256,
+ mlp_bias=True,
+ ):
+ super().__init__()
+ nlayers = max(nlayers, 1)
+ self.mlp = _build_mlp(nlayers, in_dim, bottleneck_dim, hidden_dim=hidden_dim, use_bn=use_bn, bias=mlp_bias)
+ self.apply(self._init_weights)
+ self.last_layer = weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
+ self.last_layer.weight_g.data.fill_(1)
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=0.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+
+ def forward(self, x):
+ x = self.mlp(x)
+ eps = 1e-6 if x.dtype == torch.float16 else 1e-12
+ x = nn.functional.normalize(x, dim=-1, p=2, eps=eps)
+ x = self.last_layer(x)
+ return x
+
+
+def _build_mlp(nlayers, in_dim, bottleneck_dim, hidden_dim=None, use_bn=False, bias=True):
+ if nlayers == 1:
+ return nn.Linear(in_dim, bottleneck_dim, bias=bias)
+ else:
+ layers = [nn.Linear(in_dim, hidden_dim, bias=bias)]
+ if use_bn:
+ layers.append(nn.BatchNorm1d(hidden_dim))
+ layers.append(nn.GELU())
+ for _ in range(nlayers - 2):
+ layers.append(nn.Linear(hidden_dim, hidden_dim, bias=bias))
+ if use_bn:
+ layers.append(nn.BatchNorm1d(hidden_dim))
+ layers.append(nn.GELU())
+ layers.append(nn.Linear(hidden_dim, bottleneck_dim, bias=bias))
+ return nn.Sequential(*layers)
diff --git a/cloud_adapter/dino_layers/drop_path.py b/cloud_adapter/dino_layers/drop_path.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d640e0b969b8dcba96260243473700b4e5b24b5
--- /dev/null
+++ b/cloud_adapter/dino_layers/drop_path.py
@@ -0,0 +1,34 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py
+
+
+from torch import nn
+
+
+def drop_path(x, drop_prob: float = 0.0, training: bool = False):
+ if drop_prob == 0.0 or not training:
+ return x
+ keep_prob = 1 - drop_prob
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
+ if keep_prob > 0.0:
+ random_tensor.div_(keep_prob)
+ output = x * random_tensor
+ return output
+
+
+class DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob=None):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, x):
+ return drop_path(x, self.drop_prob, self.training)
diff --git a/cloud_adapter/dino_layers/layer_scale.py b/cloud_adapter/dino_layers/layer_scale.py
new file mode 100644
index 0000000000000000000000000000000000000000..51df0d7ce61f2b41fa9e6369f52391dd7fe7d386
--- /dev/null
+++ b/cloud_adapter/dino_layers/layer_scale.py
@@ -0,0 +1,27 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110
+
+from typing import Union
+
+import torch
+from torch import Tensor
+from torch import nn
+
+
+class LayerScale(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ init_values: Union[float, Tensor] = 1e-5,
+ inplace: bool = False,
+ ) -> None:
+ super().__init__()
+ self.inplace = inplace
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+ def forward(self, x: Tensor) -> Tensor:
+ return x.mul_(self.gamma) if self.inplace else x * self.gamma
diff --git a/cloud_adapter/dino_layers/mlp.py b/cloud_adapter/dino_layers/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbf9432aae9258612caeae910a7bde17999e328e
--- /dev/null
+++ b/cloud_adapter/dino_layers/mlp.py
@@ -0,0 +1,40 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py
+
+
+from typing import Callable, Optional
+
+from torch import Tensor, nn
+
+
+class Mlp(nn.Module):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = nn.GELU,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x: Tensor) -> Tensor:
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
diff --git a/cloud_adapter/dino_layers/patch_embed.py b/cloud_adapter/dino_layers/patch_embed.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b7c0804784a42cf80c0297d110dcc68cc85b339
--- /dev/null
+++ b/cloud_adapter/dino_layers/patch_embed.py
@@ -0,0 +1,88 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
+
+from typing import Callable, Optional, Tuple, Union
+
+from torch import Tensor
+import torch.nn as nn
+
+
+def make_2tuple(x):
+ if isinstance(x, tuple):
+ assert len(x) == 2
+ return x
+
+ assert isinstance(x, int)
+ return (x, x)
+
+
+class PatchEmbed(nn.Module):
+ """
+ 2D image to patch embedding: (B,C,H,W) -> (B,N,D)
+
+ Args:
+ img_size: Image size.
+ patch_size: Patch token size.
+ in_chans: Number of input image channels.
+ embed_dim: Number of linear projection output channels.
+ norm_layer: Normalization layer.
+ """
+
+ def __init__(
+ self,
+ img_size: Union[int, Tuple[int, int]] = 224,
+ patch_size: Union[int, Tuple[int, int]] = 16,
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ norm_layer: Optional[Callable] = None,
+ flatten_embedding: bool = True,
+ ) -> None:
+ super().__init__()
+
+ image_HW = make_2tuple(img_size)
+ patch_HW = make_2tuple(patch_size)
+ patch_grid_size = (
+ image_HW[0] // patch_HW[0],
+ image_HW[1] // patch_HW[1],
+ )
+
+ self.img_size = image_HW
+ self.patch_size = patch_HW
+ self.patches_resolution = patch_grid_size
+ self.num_patches = patch_grid_size[0] * patch_grid_size[1]
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.flatten_embedding = flatten_embedding
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
+ self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
+
+ def forward(self, x: Tensor) -> Tensor:
+ _, _, H, W = x.shape
+ patch_H, patch_W = self.patch_size
+
+ assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
+ assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"
+
+ x = self.proj(x) # B C H W
+ H, W = x.size(2), x.size(3)
+ x = x.flatten(2).transpose(1, 2) # B HW C
+ x = self.norm(x)
+ if not self.flatten_embedding:
+ x = x.reshape(-1, H, W, self.embed_dim) # B H W C
+ return x
+
+ def flops(self) -> float:
+ Ho, Wo = self.patches_resolution
+ flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
+ if self.norm is not None:
+ flops += Ho * Wo * self.embed_dim
+ return flops
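+
+
+if __name__ == "__main__":
+    # Minimal smoke test (an assumption, not part of the original DINOv2 code): a
+    # 224x224 image with patch size 16 yields 14 x 14 = 196 patch tokens of size
+    # embed_dim when flatten_embedding is True.
+    import torch
+
+    embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
+    print(embed(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 196, 768])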
diff --git a/cloud_adapter/dino_layers/swiglu_ffn.py b/cloud_adapter/dino_layers/swiglu_ffn.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e9dafa4592a408f6874d54853e8f60db5c41f74
--- /dev/null
+++ b/cloud_adapter/dino_layers/swiglu_ffn.py
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+import os
+from typing import Callable, Optional
+import warnings
+
+from torch import Tensor, nn
+import torch.nn.functional as F
+
+
+class SwiGLUFFN(nn.Module):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = None,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
+ self.w3 = nn.Linear(hidden_features, out_features, bias=bias)
+
+ def forward(self, x: Tensor) -> Tensor:
+ x12 = self.w12(x)
+ x1, x2 = x12.chunk(2, dim=-1)
+ hidden = F.silu(x1) * x2
+ return self.w3(hidden)
+
+
+XFORMERS_ENABLED = os.environ.get("XFORMERS_DISABLED") is None
+try:
+ if XFORMERS_ENABLED:
+ from xformers.ops import SwiGLU
+
+ XFORMERS_AVAILABLE = True
+ warnings.warn("xFormers is available (SwiGLU)")
+ else:
+ warnings.warn("xFormers is disabled (SwiGLU)")
+ raise ImportError
+except ImportError:
+ SwiGLU = SwiGLUFFN
+ XFORMERS_AVAILABLE = False
+
+ warnings.warn("xFormers is not available (SwiGLU)")
+
+
+class SwiGLUFFNFused(SwiGLU):
+ def __init__(
+ self,
+ in_features: int,
+ hidden_features: Optional[int] = None,
+ out_features: Optional[int] = None,
+ act_layer: Callable[..., nn.Module] = None,
+ drop: float = 0.0,
+ bias: bool = True,
+ ) -> None:
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+ super().__init__(
+ in_features=in_features,
+ hidden_features=hidden_features,
+ out_features=out_features,
+ bias=bias,
+ )
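+
+
+if __name__ == "__main__":
+    # Minimal smoke test (an assumption, not part of the original DINOv2 code):
+    # SwiGLU projects to 2 * hidden_features, gates one half with SiLU of the other,
+    # and projects back to out_features.
+    import torch
+
+    ffn = SwiGLUFFN(in_features=16, hidden_features=32)
+    print(ffn(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16])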
diff --git a/cloud_adapter/dino_v2.py b/cloud_adapter/dino_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..59ea19510013a0f9584c9ee372ab9f4e8faf2b96
--- /dev/null
+++ b/cloud_adapter/dino_v2.py
@@ -0,0 +1,353 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+#
+# This source code is licensed under the Apache License, Version 2.0
+# found in the LICENSE file in the root directory of this source tree.
+
+# References:
+# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
+# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
+
+from functools import partial
+import math
+from typing import Sequence, Tuple, Union, Callable
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint
+from mmseg.models.builder import BACKBONES
+from mmengine.model import BaseModule
+import torch.nn.functional as F
+from .dino_layers import (
+ Mlp,
+ PatchEmbed,
+ SwiGLUFFNFused,
+ MemEffAttention,
+ NestedTensorBlock as Block,
+)
+
+
+def named_apply(
+ fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False
+) -> nn.Module:
+ if not depth_first and include_root:
+ fn(module=module, name=name)
+ for child_name, child_module in module.named_children():
+ child_name = ".".join((name, child_name)) if name else child_name
+ named_apply(
+ fn=fn,
+ module=child_module,
+ name=child_name,
+ depth_first=depth_first,
+ include_root=True,
+ )
+ if depth_first and include_root:
+ fn(module=module, name=name)
+ return module
+
+
+class BlockChunk(nn.ModuleList):
+ def forward(self, x):
+ for b in self:
+ x = b(x)
+ return x
+
+
+@BACKBONES.register_module()
+class DinoVisionTransformer(BaseModule):
+ def __init__(
+ self,
+ img_size=224,
+ patch_size=16,
+ in_chans=3,
+ embed_dim=768,
+ depth=12,
+ num_heads=12,
+ mlp_ratio=4.0,
+ qkv_bias=True,
+ ffn_bias=True,
+ proj_bias=True,
+ drop_path_rate=0.0,
+ drop_path_uniform=False,
+ init_values=None, # for layerscale: None or 0 => no layerscale
+ embed_layer=PatchEmbed,
+ act_layer=nn.GELU,
+ block_fn=partial(Block, attn_class=MemEffAttention),
+ ffn_layer="mlp",
+ block_chunks=1,
+ out_indices=[7, 11, 15, 23],
+ init_cfg=None,
+ ):
+ """
+ Args:
+ img_size (int, tuple): input image size
+ patch_size (int, tuple): patch size
+ in_chans (int): number of input channels
+ embed_dim (int): embedding dimension
+ depth (int): depth of transformer
+ num_heads (int): number of attention heads
+ mlp_ratio (int): ratio of mlp hidden dim to embedding dim
+ qkv_bias (bool): enable bias for qkv if True
+ proj_bias (bool): enable bias for proj in attn if True
+ ffn_bias (bool): enable bias for ffn if True
+ drop_path_rate (float): stochastic depth rate
+ drop_path_uniform (bool): apply uniform drop rate across blocks
+ weight_init (str): weight init scheme
+ init_values (float): layer-scale init values
+ embed_layer (nn.Module): patch embedding layer
+ act_layer (nn.Module): MLP activation layer
+ block_fn (nn.Module): transformer block class
+ ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
+ block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
+ """
+ super().__init__(init_cfg)
+ norm_layer = partial(nn.LayerNorm, eps=1e-6)
+ self.out_indices = out_indices
+ self.drop_path_rate = drop_path_rate
+ self.num_features = (
+ self.embed_dim
+ ) = embed_dim # num_features for consistency with other models
+ self.num_tokens = 1
+ self.n_blocks = depth
+ self.num_heads = num_heads
+ self.norm_layer = norm_layer
+ self.patch_size = patch_size
+
+ self.patch_embed = embed_layer(
+ img_size=img_size,
+ patch_size=patch_size,
+ in_chans=in_chans,
+ embed_dim=embed_dim,
+ )
+ num_patches = self.patch_embed.num_patches
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ self.pos_embed = nn.Parameter(
+ torch.zeros(1, num_patches + self.num_tokens, embed_dim)
+ )
+
+ if drop_path_uniform is True:
+ dpr = [drop_path_rate] * depth
+ else:
+ dpr = [
+ x.item() for x in torch.linspace(0, drop_path_rate, depth)
+ ] # stochastic depth decay rule
+
+ if ffn_layer == "mlp":
+ ffn_layer = Mlp
+ elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
+ ffn_layer = SwiGLUFFNFused
+ elif ffn_layer == "identity":
+
+ def f(*args, **kwargs):
+ return nn.Identity()
+
+ ffn_layer = f
+ else:
+ raise NotImplementedError
+
+ blocks_list = [
+ block_fn(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ proj_bias=proj_bias,
+ ffn_bias=ffn_bias,
+ drop_path=dpr[i],
+ norm_layer=norm_layer,
+ act_layer=act_layer,
+ ffn_layer=ffn_layer,
+ init_values=init_values,
+ )
+ for i in range(depth)
+ ]
+ if block_chunks > 0:
+ self.chunked_blocks = True
+ chunked_blocks = []
+ chunksize = depth // block_chunks
+ for i in range(0, depth, chunksize):
+ # this is to keep the block index consistent if we chunk the block list
+ chunked_blocks.append(
+ [nn.Identity()] * i + blocks_list[i : i + chunksize]
+ )
+ self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
+ else:
+ self.chunked_blocks = False
+ self.blocks = nn.ModuleList(blocks_list)
+
+ self.norm = norm_layer(embed_dim)
+ self.head = nn.Identity()
+
+ self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
+
+ def interpolate_pos_encoding(self, x, w, h):
+ previous_dtype = x.dtype
+ npatch = x.shape[1] - 1
+ N = self.pos_embed.shape[1] - 1
+ if npatch == N and w == h:
+ return self.pos_embed
+ pos_embed = self.pos_embed.float()
+ class_pos_embed = pos_embed[:, 0]
+ patch_pos_embed = pos_embed[:, 1:]
+ dim = x.shape[-1]
+ w0 = w // self.patch_size
+ h0 = h // self.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ w0, h0 = w0 + 0.1, h0 + 0.1
+
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.reshape(
+ 1, int(math.sqrt(N)), int(math.sqrt(N)), dim
+ ).permute(0, 3, 1, 2),
+ scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
+ mode="bicubic",
+ )
+
+ assert (
+ int(w0) == patch_pos_embed.shape[-2]
+ and int(h0) == patch_pos_embed.shape[-1]
+ )
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(
+ previous_dtype
+ )
+
+ def prepare_tokens_with_masks(self, x, masks=None):
+ B, nc, w, h = x.shape
+ x = self.patch_embed(x)
+ if masks is not None:
+ x = torch.where(
+ masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x
+ )
+
+ x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
+ x = x + self.interpolate_pos_encoding(x, w, h)
+
+ return x
+
+ def forward_features_list(self, x_list, masks_list):
+ x = [
+ self.prepare_tokens_with_masks(x, masks)
+ for x, masks in zip(x_list, masks_list)
+ ]
+ for blk in self.blocks:
+ x = blk(x)
+
+ all_x = x
+ output = []
+ for x, masks in zip(all_x, masks_list):
+ x_norm = self.norm(x)
+ output.append(
+ {
+ "x_norm_clstoken": x_norm[:, 0],
+ "x_norm_patchtokens": x_norm[:, 1:],
+ "x_prenorm": x,
+ "masks": masks,
+ }
+ )
+ return output
+
+    def forward_features(self, x, masks=None):
+        if isinstance(x, list):
+            return self.forward_features_list(x, masks)
+
+        B, _, h, w = x.shape
+        x = self.prepare_tokens_with_masks(x, masks)
+ outs = []
+ for idx, blk in enumerate(self.blocks):
+ x = blk(x)
+ if idx in self.out_indices:
+ outs.append(
+ x[:, 1:, :]
+ .permute(0, 2, 1)
+ .reshape(B, -1, h // self.patch_size, w // self.patch_size)
+ .contiguous()
+ )
+ return outs
+
+ def _get_intermediate_layers_not_chunked(self, x, n=1):
+ x = self.prepare_tokens_with_masks(x)
+ # If n is an int, take the n last blocks. If it's a list, take them
+ output, total_block_len = [], len(self.blocks)
+ blocks_to_take = (
+ range(total_block_len - n, total_block_len) if isinstance(n, int) else n
+ )
+ for i, blk in enumerate(self.blocks):
+ x = blk(x)
+ if i in blocks_to_take:
+ output.append(x)
+ assert len(output) == len(
+ blocks_to_take
+ ), f"only {len(output)} / {len(blocks_to_take)} blocks found"
+ return output
+
+ def _get_intermediate_layers_chunked(self, x, n=1):
+ x = self.prepare_tokens_with_masks(x)
+ output, i, total_block_len = [], 0, len(self.blocks[-1])
+ # If n is an int, take the n last blocks. If it's a list, take them
+ blocks_to_take = (
+ range(total_block_len - n, total_block_len) if isinstance(n, int) else n
+ )
+ for block_chunk in self.blocks:
+ for blk in block_chunk[i:]: # Passing the nn.Identity()
+ x = blk(x)
+ if i in blocks_to_take:
+ output.append(x)
+ i += 1
+ assert len(output) == len(
+ blocks_to_take
+ ), f"only {len(output)} / {len(blocks_to_take)} blocks found"
+ return output
+
+ def get_intermediate_layers(
+ self,
+ x: torch.Tensor,
+ n: Union[int, Sequence] = 1, # Layers or n last layers to take
+ reshape: bool = False,
+ return_class_token: bool = False,
+ norm=True,
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
+ if self.chunked_blocks:
+ outputs = self._get_intermediate_layers_chunked(x, n)
+ else:
+ outputs = self._get_intermediate_layers_not_chunked(x, n)
+ if norm:
+ outputs = [self.norm(out) for out in outputs]
+ class_tokens = [out[:, 0] for out in outputs]
+ outputs = [out[:, 1:] for out in outputs]
+ if reshape:
+ B, _, w, h = x.shape
+ outputs = [
+ out.reshape(B, w // self.patch_size, h // self.patch_size, -1)
+ .permute(0, 3, 1, 2)
+ .contiguous()
+ for out in outputs
+ ]
+ if return_class_token:
+ return tuple(zip(outputs, class_tokens))
+ return tuple(outputs)
+
+ def forward(self, *args, **kwargs):
+ ret = self.forward_features(*args, **kwargs)
+ if isinstance(ret[0], torch.Tensor):
+ ret[0] = F.interpolate(
+ ret[0], scale_factor=4, mode="bilinear", align_corners=False
+ )
+ ret[1] = F.interpolate(
+ ret[1], scale_factor=2, mode="bilinear", align_corners=False
+ )
+ ret[3] = F.interpolate(
+ ret[3], scale_factor=0.5, mode="bilinear", align_corners=False
+ )
+ else:
+ ret[0][0] = F.interpolate(
+ ret[0][0], scale_factor=4, mode="bilinear", align_corners=False
+ )
+ ret[0][1] = F.interpolate(
+ ret[0][1], scale_factor=2, mode="bilinear", align_corners=False
+ )
+ ret[0][3] = F.interpolate(
+ ret[0][3], scale_factor=0.5, mode="bilinear", align_corners=False
+ )
+ return ret
\ No newline at end of file
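+
+
+# Minimal usage sketch (an assumption, not part of the original file): with a ViT-Base
+# style configuration the backbone returns four feature maps, which forward() rescales
+# to strides 4, 8, 16 and 32 for the decode head; out_indices must stay within
+# range(depth).
+#
+#   backbone = DinoVisionTransformer(img_size=512, patch_size=16, embed_dim=768,
+#                                    depth=12, num_heads=12, out_indices=[2, 5, 8, 11])
+#   feats = backbone(torch.randn(1, 3, 512, 512))  # list of 4 feature maps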
diff --git a/cloud_adapter/hrcloudnet.py b/cloud_adapter/hrcloudnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbb49a3cf808d16f3327ae8912beac955488682c
--- /dev/null
+++ b/cloud_adapter/hrcloudnet.py
@@ -0,0 +1,751 @@
+# Paper: https://arxiv.org/abs/2407.07365
+#
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import logging
+import os
+
+import numpy as np
+import torch
+import torch._utils
+import torch.nn as nn
+import torch.nn.functional as F
+
+BatchNorm2d = nn.BatchNorm2d
+# BN_MOMENTUM = 0.01
+relu_inplace = True
+BN_MOMENTUM = 0.1
+ALIGN_CORNERS = True
+
+logger = logging.getLogger(__name__)
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+ """3x3 convolution with padding"""
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+
+
+from yacs.config import CfgNode as CN
+import math
+from einops import rearrange
+
+# configs for HRNet48
+HRNET_48 = CN()
+HRNET_48.FINAL_CONV_KERNEL = 1
+
+HRNET_48.STAGE1 = CN()
+HRNET_48.STAGE1.NUM_MODULES = 1
+HRNET_48.STAGE1.NUM_BRANCHES = 1
+HRNET_48.STAGE1.NUM_BLOCKS = [4]
+HRNET_48.STAGE1.NUM_CHANNELS = [64]
+HRNET_48.STAGE1.BLOCK = 'BOTTLENECK'
+HRNET_48.STAGE1.FUSE_METHOD = 'SUM'
+
+HRNET_48.STAGE2 = CN()
+HRNET_48.STAGE2.NUM_MODULES = 1
+HRNET_48.STAGE2.NUM_BRANCHES = 2
+HRNET_48.STAGE2.NUM_BLOCKS = [4, 4]
+HRNET_48.STAGE2.NUM_CHANNELS = [48, 96]
+HRNET_48.STAGE2.BLOCK = 'BASIC'
+HRNET_48.STAGE2.FUSE_METHOD = 'SUM'
+
+HRNET_48.STAGE3 = CN()
+HRNET_48.STAGE3.NUM_MODULES = 4
+HRNET_48.STAGE3.NUM_BRANCHES = 3
+HRNET_48.STAGE3.NUM_BLOCKS = [4, 4, 4]
+HRNET_48.STAGE3.NUM_CHANNELS = [48, 96, 192]
+HRNET_48.STAGE3.BLOCK = 'BASIC'
+HRNET_48.STAGE3.FUSE_METHOD = 'SUM'
+
+HRNET_48.STAGE4 = CN()
+HRNET_48.STAGE4.NUM_MODULES = 3
+HRNET_48.STAGE4.NUM_BRANCHES = 4
+HRNET_48.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
+HRNET_48.STAGE4.NUM_CHANNELS = [48, 96, 192, 384]
+HRNET_48.STAGE4.BLOCK = 'BASIC'
+HRNET_48.STAGE4.FUSE_METHOD = 'SUM'
+
+HRNET_32 = CN()
+HRNET_32.FINAL_CONV_KERNEL = 1
+
+HRNET_32.STAGE1 = CN()
+HRNET_32.STAGE1.NUM_MODULES = 1
+HRNET_32.STAGE1.NUM_BRANCHES = 1
+HRNET_32.STAGE1.NUM_BLOCKS = [4]
+HRNET_32.STAGE1.NUM_CHANNELS = [64]
+HRNET_32.STAGE1.BLOCK = 'BOTTLENECK'
+HRNET_32.STAGE1.FUSE_METHOD = 'SUM'
+
+HRNET_32.STAGE2 = CN()
+HRNET_32.STAGE2.NUM_MODULES = 1
+HRNET_32.STAGE2.NUM_BRANCHES = 2
+HRNET_32.STAGE2.NUM_BLOCKS = [4, 4]
+HRNET_32.STAGE2.NUM_CHANNELS = [32, 64]
+HRNET_32.STAGE2.BLOCK = 'BASIC'
+HRNET_32.STAGE2.FUSE_METHOD = 'SUM'
+
+HRNET_32.STAGE3 = CN()
+HRNET_32.STAGE3.NUM_MODULES = 4
+HRNET_32.STAGE3.NUM_BRANCHES = 3
+HRNET_32.STAGE3.NUM_BLOCKS = [4, 4, 4]
+HRNET_32.STAGE3.NUM_CHANNELS = [32, 64, 128]
+HRNET_32.STAGE3.BLOCK = 'BASIC'
+HRNET_32.STAGE3.FUSE_METHOD = 'SUM'
+
+HRNET_32.STAGE4 = CN()
+HRNET_32.STAGE4.NUM_MODULES = 3
+HRNET_32.STAGE4.NUM_BRANCHES = 4
+HRNET_32.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
+HRNET_32.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
+HRNET_32.STAGE4.BLOCK = 'BASIC'
+HRNET_32.STAGE4.FUSE_METHOD = 'SUM'
+
+HRNET_18 = CN()
+HRNET_18.FINAL_CONV_KERNEL = 1
+
+HRNET_18.STAGE1 = CN()
+HRNET_18.STAGE1.NUM_MODULES = 1
+HRNET_18.STAGE1.NUM_BRANCHES = 1
+HRNET_18.STAGE1.NUM_BLOCKS = [4]
+HRNET_18.STAGE1.NUM_CHANNELS = [64]
+HRNET_18.STAGE1.BLOCK = 'BOTTLENECK'
+HRNET_18.STAGE1.FUSE_METHOD = 'SUM'
+
+HRNET_18.STAGE2 = CN()
+HRNET_18.STAGE2.NUM_MODULES = 1
+HRNET_18.STAGE2.NUM_BRANCHES = 2
+HRNET_18.STAGE2.NUM_BLOCKS = [4, 4]
+HRNET_18.STAGE2.NUM_CHANNELS = [18, 36]
+HRNET_18.STAGE2.BLOCK = 'BASIC'
+HRNET_18.STAGE2.FUSE_METHOD = 'SUM'
+
+HRNET_18.STAGE3 = CN()
+HRNET_18.STAGE3.NUM_MODULES = 4
+HRNET_18.STAGE3.NUM_BRANCHES = 3
+HRNET_18.STAGE3.NUM_BLOCKS = [4, 4, 4]
+HRNET_18.STAGE3.NUM_CHANNELS = [18, 36, 72]
+HRNET_18.STAGE3.BLOCK = 'BASIC'
+HRNET_18.STAGE3.FUSE_METHOD = 'SUM'
+
+HRNET_18.STAGE4 = CN()
+HRNET_18.STAGE4.NUM_MODULES = 3
+HRNET_18.STAGE4.NUM_BRANCHES = 4
+HRNET_18.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
+HRNET_18.STAGE4.NUM_CHANNELS = [18, 36, 72, 144]
+HRNET_18.STAGE4.BLOCK = 'BASIC'
+HRNET_18.STAGE4.FUSE_METHOD = 'SUM'
+
+
+class PPM(nn.Module):
+ def __init__(self, in_dim, reduction_dim, bins):
+ super(PPM, self).__init__()
+ self.features = []
+ for bin in bins:
+ self.features.append(nn.Sequential(
+ nn.AdaptiveAvgPool2d(bin),
+ nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
+ nn.BatchNorm2d(reduction_dim),
+ nn.ReLU(inplace=True)
+ ))
+ self.features = nn.ModuleList(self.features)
+
+ def forward(self, x):
+ x_size = x.size()
+ out = [x]
+ for f in self.features:
+ out.append(F.interpolate(f(x), x_size[2:], mode='bilinear', align_corners=True))
+ return torch.cat(out, 1)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(BasicBlock, self).__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=relu_inplace)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+ out = out + residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+ self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
+ padding=1, bias=False)
+ self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
+ bias=False)
+ self.bn3 = BatchNorm2d(planes * self.expansion,
+ momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=relu_inplace)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+ # att = self.downsample(att)
+ out = out + residual
+ out = self.relu(out)
+
+ return out
+
+
+class HighResolutionModule(nn.Module):
+ def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
+ num_channels, fuse_method, multi_scale_output=True):
+ super(HighResolutionModule, self).__init__()
+ self._check_branches(
+ num_branches, blocks, num_blocks, num_inchannels, num_channels)
+
+ self.num_inchannels = num_inchannels
+ self.fuse_method = fuse_method
+ self.num_branches = num_branches
+
+ self.multi_scale_output = multi_scale_output
+
+ self.branches = self._make_branches(
+ num_branches, blocks, num_blocks, num_channels)
+ self.fuse_layers = self._make_fuse_layers()
+ self.relu = nn.ReLU(inplace=relu_inplace)
+
+ def _check_branches(self, num_branches, blocks, num_blocks,
+ num_inchannels, num_channels):
+ if num_branches != len(num_blocks):
+ error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
+ num_branches, len(num_blocks))
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_channels):
+ error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
+ num_branches, len(num_channels))
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_inchannels):
+ error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
+ num_branches, len(num_inchannels))
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
+ stride=1):
+ downsample = None
+ if stride != 1 or \
+ self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.num_inchannels[branch_index],
+ num_channels[branch_index] * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ BatchNorm2d(num_channels[branch_index] * block.expansion,
+ momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(block(self.num_inchannels[branch_index],
+ num_channels[branch_index], stride, downsample))
+ self.num_inchannels[branch_index] = \
+ num_channels[branch_index] * block.expansion
+ for i in range(1, num_blocks[branch_index]):
+ layers.append(block(self.num_inchannels[branch_index],
+ num_channels[branch_index]))
+
+ return nn.Sequential(*layers)
+
+    # build the parallel branches
+ def _make_branches(self, num_branches, block, num_blocks, num_channels):
+ branches = []
+
+ for i in range(num_branches):
+ branches.append(
+ self._make_one_branch(i, block, num_blocks, num_channels))
+
+ return nn.ModuleList(branches)
+
+ def _make_fuse_layers(self):
+ if self.num_branches == 1:
+ return None
+ num_branches = self.num_branches # 3
+ num_inchannels = self.num_inchannels # [48, 96, 192]
+ fuse_layers = []
+ for i in range(num_branches if self.multi_scale_output else 1):
+ fuse_layer = []
+ for j in range(num_branches):
+ if j > i:
+ fuse_layer.append(nn.Sequential(
+ nn.Conv2d(num_inchannels[j],
+ num_inchannels[i],
+ 1,
+ 1,
+ 0,
+ bias=False),
+ BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
+ elif j == i:
+ fuse_layer.append(None)
+ else:
+ conv3x3s = []
+ for k in range(i - j):
+ if k == i - j - 1:
+ num_outchannels_conv3x3 = num_inchannels[i]
+ conv3x3s.append(nn.Sequential(
+ nn.Conv2d(num_inchannels[j],
+ num_outchannels_conv3x3,
+ 3, 2, 1, bias=False),
+ BatchNorm2d(num_outchannels_conv3x3,
+ momentum=BN_MOMENTUM)))
+ else:
+ num_outchannels_conv3x3 = num_inchannels[j]
+ conv3x3s.append(nn.Sequential(
+ nn.Conv2d(num_inchannels[j],
+ num_outchannels_conv3x3,
+ 3, 2, 1, bias=False),
+ BatchNorm2d(num_outchannels_conv3x3,
+ momentum=BN_MOMENTUM),
+ nn.ReLU(inplace=relu_inplace)))
+ fuse_layer.append(nn.Sequential(*conv3x3s))
+ fuse_layers.append(nn.ModuleList(fuse_layer))
+
+ return nn.ModuleList(fuse_layers)
+
+ def get_num_inchannels(self):
+ return self.num_inchannels
+
+ def forward(self, x):
+ if self.num_branches == 1:
+ return [self.branches[0](x[0])]
+
+ for i in range(self.num_branches):
+ x[i] = self.branches[i](x[i])
+
+ x_fuse = []
+ for i in range(len(self.fuse_layers)):
+ y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
+ for j in range(1, self.num_branches):
+ if i == j:
+ y = y + x[j]
+ elif j > i:
+ width_output = x[i].shape[-1]
+ height_output = x[i].shape[-2]
+ y = y + F.interpolate(
+ self.fuse_layers[i][j](x[j]),
+ size=[height_output, width_output],
+ mode='bilinear', align_corners=ALIGN_CORNERS)
+ else:
+ y = y + self.fuse_layers[i][j](x[j])
+ x_fuse.append(self.relu(y))
+
+ return x_fuse
+
+
+blocks_dict = {
+ 'BASIC': BasicBlock,
+ 'BOTTLENECK': Bottleneck
+}
+
+
+class HRCloudNet(nn.Module):
+
+    def __init__(self, in_channels=3, num_classes=2, base_c=48, **kwargs):
+ global ALIGN_CORNERS
+ extra = HRNET_48
+ super(HRCloudNet, self).__init__()
+ ALIGN_CORNERS = True
+ # ALIGN_CORNERS = config.MODEL.ALIGN_CORNERS
+ self.num_classes = num_classes
+ # stem net
+ self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=2, padding=1,
+ bias=False)
+ self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
+ self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
+ bias=False)
+ self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=relu_inplace)
+
+ self.stage1_cfg = extra['STAGE1']
+ num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
+ block = blocks_dict[self.stage1_cfg['BLOCK']]
+ num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
+ self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
+ stage1_out_channel = block.expansion * num_channels
+
+ self.stage2_cfg = extra['STAGE2']
+ num_channels = self.stage2_cfg['NUM_CHANNELS']
+ block = blocks_dict[self.stage2_cfg['BLOCK']]
+ num_channels = [
+ num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition1 = self._make_transition_layer(
+ [stage1_out_channel], num_channels)
+ self.stage2, pre_stage_channels = self._make_stage(
+ self.stage2_cfg, num_channels)
+
+ self.stage3_cfg = extra['STAGE3']
+ num_channels = self.stage3_cfg['NUM_CHANNELS']
+ block = blocks_dict[self.stage3_cfg['BLOCK']]
+ num_channels = [
+ num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition2 = self._make_transition_layer(
+            pre_stage_channels, num_channels)  # downsample only between pre[-1] and cur[-1]?
+ self.stage3, pre_stage_channels = self._make_stage(
+ self.stage3_cfg, num_channels)
+
+ self.stage4_cfg = extra['STAGE4']
+ num_channels = self.stage4_cfg['NUM_CHANNELS']
+ block = blocks_dict[self.stage4_cfg['BLOCK']]
+ num_channels = [
+ num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition3 = self._make_transition_layer(
+ pre_stage_channels, num_channels)
+ self.stage4, pre_stage_channels = self._make_stage(
+ self.stage4_cfg, num_channels, multi_scale_output=True)
+ self.out_conv = OutConv(base_c, num_classes)
+ last_inp_channels = int(np.sum(pre_stage_channels))
+
+ self.corr = Corr(nclass=2)
+ self.proj = nn.Sequential(
+ # 512 32
+ nn.Conv2d(720, 48, kernel_size=3, stride=1, padding=1, bias=True),
+ nn.BatchNorm2d(48),
+ nn.ReLU(inplace=True),
+ nn.Dropout2d(0.1),
+ )
+ # self.up1 = Up(base_c * 16, base_c * 8 // factor, bilinear)
+ self.up2 = Up(base_c * 8, base_c * 4, True)
+ self.up3 = Up(base_c * 4, base_c * 2, True)
+ self.up4 = Up(base_c * 2, base_c, True)
+ fea_dim = 720
+ bins = (1, 2, 3, 6)
+ self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins)
+ fea_dim *= 2
+ self.cls = nn.Sequential(
+ nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
+ nn.BatchNorm2d(512),
+ nn.ReLU(inplace=True),
+ nn.Dropout2d(p=0.1),
+ nn.Conv2d(512, num_classes, kernel_size=1)
+ )
+
+    '''
+    The transition layers serve two purposes:
+
+    When a branch already exists in the previous stage, only its channel count is
+    adjusted (if needed).
+    When the current stage has more branches than the previous one, new layers are
+    created that downsample the extra branches and change their channel counts to fit
+    the following stage.
+    The resulting layers are collected into an nn.ModuleList and used while building
+    the network, so that the channel counts of all branches match between stages and
+    the features can be fused and concatenated correctly.
+    '''
+
+ def _make_transition_layer(
+ self, num_channels_pre_layer, num_channels_cur_layer):
+        # number of branches in the current stage
+        num_branches_cur = len(num_channels_cur_layer)  # 3
+        # number of branches in the previous stage
+        num_branches_pre = len(num_channels_pre_layer)  # 2
+
+ transition_layers = []
+ for i in range(num_branches_cur):
+            # the branch already exists in the previous stage (only channel adjustment may be needed)
+ if i < num_branches_pre:
+                # if the channel counts of the corresponding layers differ, add a conversion layer
+ if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+ transition_layers.append(nn.Sequential(
+
+ nn.Conv2d(num_channels_pre_layer[i],
+ num_channels_cur_layer[i],
+ 3,
+ 1,
+ 1,
+ bias=False),
+ BatchNorm2d(
+ num_channels_cur_layer[i], momentum=BN_MOMENTUM),
+ nn.ReLU(inplace=relu_inplace)))
+ else:
+ transition_layers.append(None)
+            else:  # new branch: downsample and change the channel count
+ conv3x3s = []
+ for j in range(i + 1 - num_branches_pre): # 3
+ inchannels = num_channels_pre_layer[-1]
+ outchannels = num_channels_cur_layer[i] \
+ if j == i - num_branches_pre else inchannels
+ conv3x3s.append(nn.Sequential(
+ nn.Conv2d(
+ inchannels, outchannels, 3, 2, 1, bias=False),
+ BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
+ nn.ReLU(inplace=relu_inplace)))
+ transition_layers.append(nn.Sequential(*conv3x3s))
+
+ return nn.ModuleList(transition_layers)
+
+    '''
+    _make_layer builds a layer consisting of several residual blocks of the same type.
+    '''
+
+ def _make_layer(self, block, inplanes, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(inplanes, planes * block.expansion,
+ kernel_size=1, stride=stride, bias=False),
+ BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(block(inplanes, planes, stride, downsample))
+ inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+    # multi-scale fusion
+ def _make_stage(self, layer_config, num_inchannels,
+ multi_scale_output=True):
+ num_modules = layer_config['NUM_MODULES']
+ num_branches = layer_config['NUM_BRANCHES']
+ num_blocks = layer_config['NUM_BLOCKS']
+ num_channels = layer_config['NUM_CHANNELS']
+ block = blocks_dict[layer_config['BLOCK']]
+ fuse_method = layer_config['FUSE_METHOD']
+
+ modules = []
+        for i in range(num_modules):  # repeated num_modules times
+            # multi_scale_output is only used in the last module
+ if not multi_scale_output and i == num_modules - 1:
+ reset_multi_scale_output = False
+ else:
+ reset_multi_scale_output = True
+ modules.append(
+ HighResolutionModule(num_branches,
+ block,
+ num_blocks,
+ num_inchannels,
+ num_channels,
+ fuse_method,
+ reset_multi_scale_output)
+ )
+ num_inchannels = modules[-1].get_num_inchannels()
+
+ return nn.Sequential(*modules), num_inchannels
+
+ def forward(self, input, need_fp=True, use_corr=True):
+ x = self.conv1(input)
+ x = self.bn1(x)
+ x = self.relu(x)
+ # x_176 = x
+ x = self.conv2(x)
+ x = self.bn2(x)
+ x = self.relu(x)
+ x = self.layer1(x)
+
+ x_list = []
+ for i in range(self.stage2_cfg['NUM_BRANCHES']): # 2
+ if self.transition1[i] is not None:
+ x_list.append(self.transition1[i](x))
+ else:
+ x_list.append(x)
+ y_list = self.stage2(x_list)
+ # Y1
+ x_list = []
+ for i in range(self.stage3_cfg['NUM_BRANCHES']):
+ if self.transition2[i] is not None:
+ if i < self.stage2_cfg['NUM_BRANCHES']:
+ x_list.append(self.transition2[i](y_list[i]))
+ else:
+ x_list.append(self.transition2[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage3(x_list)
+
+ x_list = []
+ for i in range(self.stage4_cfg['NUM_BRANCHES']):
+ if self.transition3[i] is not None:
+ if i < self.stage3_cfg['NUM_BRANCHES']:
+ x_list.append(self.transition3[i](y_list[i]))
+ else:
+ x_list.append(self.transition3[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ x = self.stage4(x_list)
+ dict_return = {}
+ # Upsampling
+ x0_h, x0_w = x[0].size(2), x[0].size(3)
+
+ x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
+ # x = self.stage3_(x)
+ x[2] = self.up2(x[3], x[2])
+ x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
+ # x = self.stage2_(x)
+ x[1] = self.up3(x[2], x[1])
+ x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=ALIGN_CORNERS)
+ x[0] = self.up4(x[1], x[0])
+ xk = torch.cat([x[0], x1, x2, x3], 1)
+ # PPM
+ feat = self.ppm(xk)
+ x = self.cls(feat)
+        # fp branch
+ if need_fp:
+ logits = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
+ # logits = self.out_conv(torch.cat((x, nn.Dropout2d(0.5)(x))))
+ out = logits
+ out_fp = logits
+ if use_corr:
+ proj_feats = self.proj(xk)
+ corr_out = self.corr(proj_feats, out)
+ corr_out = F.interpolate(corr_out, size=(352, 352), mode="bilinear", align_corners=True)
+ dict_return['corr_out'] = corr_out
+ dict_return['out'] = out
+ dict_return['out_fp'] = out_fp
+
+ return dict_return['out']
+
+ out = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)
+ if use_corr: # True
+ proj_feats = self.proj(xk)
+            # compute the correlation output
+ corr_out = self.corr(proj_feats, out)
+ corr_out = F.interpolate(corr_out, size=(352, 352), mode="bilinear", align_corners=True)
+ dict_return['corr_out'] = corr_out
+ dict_return['out'] = out
+ return dict_return['out']
+ # return x
+
+    def init_weights(self, pretrained=''):
+ logger.info('=> init weights from normal distribution')
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.normal_(m.weight, std=0.001)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+ if os.path.isfile(pretrained):
+ pretrained_dict = torch.load(pretrained)
+ logger.info('=> loading pretrained model {}'.format(pretrained))
+ model_dict = self.state_dict()
+ pretrained_dict = {k: v for k, v in pretrained_dict.items()
+ if k in model_dict.keys()}
+ for k, _ in pretrained_dict.items():
+ logger.info(
+ '=> loading {} pretrained model {}'.format(k, pretrained))
+ model_dict.update(pretrained_dict)
+ self.load_state_dict(model_dict)
+
+
+class OutConv(nn.Sequential):
+ def __init__(self, in_channels, num_classes):
+ super(OutConv, self).__init__(
+ nn.Conv2d(720, num_classes, kernel_size=1)
+ )
+
+
+class DoubleConv(nn.Sequential):
+ def __init__(self, in_channels, out_channels, mid_channels=None):
+ if mid_channels is None:
+ mid_channels = out_channels
+ super(DoubleConv, self).__init__(
+ nn.Conv2d(in_channels + out_channels, mid_channels, kernel_size=3, padding=1, bias=False),
+ nn.BatchNorm2d(mid_channels),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
+ nn.BatchNorm2d(out_channels),
+ nn.ReLU(inplace=True)
+ )
+
+
+class Up(nn.Module):
+ def __init__(self, in_channels, out_channels, bilinear=True):
+ super(Up, self).__init__()
+ if bilinear:
+ self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+ self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
+ else:
+ self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
+ self.conv = DoubleConv(in_channels, out_channels)
+
+ def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
+ x1 = self.up(x1)
+ # [N, C, H, W]
+ diff_y = x2.size()[2] - x1.size()[2]
+ diff_x = x2.size()[3] - x1.size()[3]
+
+ # padding_left, padding_right, padding_top, padding_bottom
+ x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
+ diff_y // 2, diff_y - diff_y // 2])
+
+ x = torch.cat([x2, x1], dim=1)
+ x = self.conv(x)
+ return x
+
+
+class Corr(nn.Module):
+ def __init__(self, nclass=2):
+ super(Corr, self).__init__()
+ self.nclass = nclass
+ self.conv1 = nn.Conv2d(48, self.nclass, kernel_size=1, stride=1, padding=0, bias=True)
+ self.conv2 = nn.Conv2d(48, self.nclass, kernel_size=1, stride=1, padding=0, bias=True)
+
+ def forward(self, feature_in, out):
+ # in torch.Size([4, 32, 22, 22])
+ # out = [4 2 352 352]
+ h_in, w_in = math.ceil(feature_in.shape[2] / (1)), math.ceil(feature_in.shape[3] / (1))
+ out = F.interpolate(out.detach(), (h_in, w_in), mode='bilinear', align_corners=True)
+ feature = F.interpolate(feature_in, (h_in, w_in), mode='bilinear', align_corners=True)
+ f1 = rearrange(self.conv1(feature), 'n c h w -> n c (h w)')
+ f2 = rearrange(self.conv2(feature), 'n c h w -> n c (h w)')
+ out_temp = rearrange(out, 'n c h w -> n c (h w)')
+ corr_map = torch.matmul(f1.transpose(1, 2), f2) / torch.sqrt(torch.tensor(f1.shape[1]).float())
+ corr_map = F.softmax(corr_map, dim=-1)
+ # out_temp 2 2 484
+ # corr_map 4 484 484
+ out = rearrange(torch.matmul(out_temp, corr_map), 'n c (h w) -> n c h w', h=h_in, w=w_in)
+ # out torch.Size([4, 2, 22, 22])
+ return out
+
+
+if __name__ == '__main__':
+ input = torch.randn(4, 3, 352, 352)
+ cloud = HRCloudNet(num_classes=2)
+ output = cloud(input)
+ print(output.shape)
+    # torch.Size([4, 2, 352, 352])
\ No newline at end of file
diff --git a/cloud_adapter/kappamask.py b/cloud_adapter/kappamask.py
new file mode 100644
index 0000000000000000000000000000000000000000..57072c589abd757c0e53b4237e7531da4b3f37c3
--- /dev/null
+++ b/cloud_adapter/kappamask.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# @Time : 2024/8/7 3:51 PM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : kappamask.py
+# @Software: PyCharm
+
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+
+class KappaMask(nn.Module):
+ def __init__(self, num_classes=2, in_channels=3):
+ super().__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_channels, 64, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(64, 64, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(64, 128, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(128, 128, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.conv3 = nn.Sequential(
+ nn.Conv2d(128, 256, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(256, 256, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+
+ self.conv4 = nn.Sequential(
+ nn.Conv2d(256, 512, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(512, 512, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.drop4 = nn.Dropout(0.5)
+
+ self.conv5 = nn.Sequential(
+ nn.Conv2d(512, 1024, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(1024, 1024, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.drop5 = nn.Dropout(0.5)
+
+ self.up6 = nn.Sequential(
+ nn.Upsample(scale_factor=2),
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(1024, 512, 2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv6 = nn.Sequential(
+ nn.Conv2d(1024, 512, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(512, 512, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.up7 = nn.Sequential(
+ nn.Upsample(scale_factor=2),
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(512, 256, 2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv7 = nn.Sequential(
+ nn.Conv2d(512, 256, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(256, 256, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+
+ self.up8 = nn.Sequential(
+ nn.Upsample(scale_factor=2),
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(256, 128, 2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv8 = nn.Sequential(
+ nn.Conv2d(256, 128, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(128, 128, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+
+ self.up9 = nn.Sequential(
+ nn.Upsample(scale_factor=2),
+ nn.ZeroPad2d((0, 1, 0, 1)),
+ nn.Conv2d(128, 64, 2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv9 = nn.Sequential(
+ nn.Conv2d(128, 64, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(64, 64, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(64, 2, 3, 1, 1),
+ nn.ReLU(inplace=True),
+ )
+ self.conv10 = nn.Conv2d(2, num_classes, 1)
+ self.__init_weights()
+
+ def __init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+
+ def forward(self, x):
+ conv1 = self.conv1(x)
+ pool1 = F.max_pool2d(conv1, 2, 2)
+
+ conv2 = self.conv2(pool1)
+ pool2 = F.max_pool2d(conv2, 2, 2)
+
+ conv3 = self.conv3(pool2)
+ pool3 = F.max_pool2d(conv3, 2, 2)
+
+ conv4 = self.conv4(pool3)
+ drop4 = self.drop4(conv4)
+ pool4 = F.max_pool2d(drop4, 2, 2)
+
+ conv5 = self.conv5(pool4)
+ drop5 = self.drop5(conv5)
+
+ up6 = self.up6(drop5)
+ merge6 = torch.cat((drop4, up6), dim=1)
+ conv6 = self.conv6(merge6)
+
+ up7 = self.up7(conv6)
+ merge7 = torch.cat((conv3, up7), dim=1)
+ conv7 = self.conv7(merge7)
+
+ up8 = self.up8(conv7)
+ merge8 = torch.cat((conv2, up8), dim=1)
+ conv8 = self.conv8(merge8)
+
+ up9 = self.up9(conv8)
+ merge9 = torch.cat((conv1, up9), dim=1)
+ conv9 = self.conv9(merge9)
+
+ output = self.conv10(conv9)
+ return output
+
+
+if __name__ == '__main__':
+ model = KappaMask(num_classes=2, in_channels=3)
+ fake_data = torch.rand(2, 3, 256, 256)
+ output = model(fake_data)
+ print(output.shape)
\ No newline at end of file
diff --git a/cloud_adapter/mcdnet.py b/cloud_adapter/mcdnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..93a1e0f8f944377e917da793ca7e73a5799ba704
--- /dev/null
+++ b/cloud_adapter/mcdnet.py
@@ -0,0 +1,435 @@
+# -*- coding: utf-8 -*-
+# @Time : 2024/7/21 3:51 PM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : mcdnet.py
+# @Software: PyCharm
+import image_dehazer
+import numpy as np
+# Paper: https://www.sciencedirect.com/science/article/pii/S1569843224001742?via%3Dihub
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class _DPFF(nn.Module):
+ def __init__(self, in_channels) -> None:
+ super(_DPFF, self).__init__()
+ self.cbr1 = nn.Conv2d(in_channels * 2, in_channels, 1, 1, bias=False)
+ self.cbr2 = nn.Conv2d(in_channels * 2, in_channels, 1, 1, bias=False)
+ # self.sigmoid = nn.Sigmoid()
+ self.cbr3 = nn.Conv2d(in_channels, in_channels, 1, 1, bias=False)
+ self.cbr4 = nn.Conv2d(in_channels * 2, in_channels, 1, 1, bias=False)
+
+ def forward(self, feature1, feature2):
+ d1 = torch.abs(feature1 - feature2)
+ d2 = self.cbr1(torch.cat([feature1, feature2], dim=1))
+ d = torch.cat([d1, d2], dim=1)
+ d = self.cbr2(d)
+ # d = self.sigmoid(d)
+
+ v1, v2 = self.cbr3(feature1), self.cbr3(feature2)
+ v1, v2 = v1 * d, v2 * d
+ features = torch.cat([v1, v2], dim=1)
+ features = self.cbr4(features)
+
+ return features
+
+
+class DPFF(nn.Module):
+ def __init__(self, layer_channels) -> None:
+ super(DPFF, self).__init__()
+ self.cfes = nn.ModuleList()
+ for layer_channel in layer_channels:
+ self.cfes.append(_DPFF(layer_channel))
+
+ def forward(self, features1, features2):
+ outputs = []
+ for feature1, feature2, cfe in zip(features1, features2, self.cfes):
+ outputs.append(cfe(feature1, feature2))
+ return outputs
+
+
+class DirectDPFF(nn.Module):
+ def __init__(self, layer_channels) -> None:
+ super(DirectDPFF, self).__init__()
+ self.fusions = nn.ModuleList(
+ [nn.Conv2d(layer_channel * 2, layer_channel, 1, 1) for layer_channel in layer_channels]
+ )
+
+ def forward(self, features1, features2):
+ outputs = []
+ for feature1, feature2, fusion in zip(features1, features2, self.fusions):
+ feature = torch.cat([feature1, feature2], dim=1)
+ outputs.append(fusion(feature))
+ return outputs
+
+
+class ConvBlock(nn.Module):
+ def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True,
+ bn=False, activation=True, maxpool=True):
+ super(ConvBlock, self).__init__()
+ self.module = []
+ if maxpool:
+ down = nn.Sequential(
+ *[
+ nn.MaxPool2d(2),
+ nn.Conv2d(input_size, output_size, 1, 1, 0, bias=bias)
+ ]
+ )
+ else:
+ down = nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
+ self.module.append(down)
+ if bn:
+ self.module.append(nn.BatchNorm2d(output_size))
+ if activation:
+ self.module.append(nn.PReLU())
+ self.module = nn.Sequential(*self.module)
+
+ def forward(self, x):
+ out = self.module(x)
+
+ return out
+
+
+class DeconvBlock(nn.Module):
+ def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True,
+ bn=False, activation=True, bilinear=True):
+ super(DeconvBlock, self).__init__()
+ self.module = []
+ if bilinear:
+ deconv = nn.Sequential(
+ *[
+ nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
+ nn.Conv2d(input_size, output_size, 1, 1, 0, bias=bias)
+ ]
+ )
+ else:
+ deconv = nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
+ self.module.append(deconv)
+ if bn:
+ self.module.append(nn.BatchNorm2d(output_size))
+ if activation:
+ self.module.append(nn.PReLU())
+ self.module = nn.Sequential(*self.module)
+
+ def forward(self, x):
+ out = self.module(x)
+
+ return out
+
+
+class FusionBlock(torch.nn.Module):
+ def __init__(self, num_filter, num_ft, kernel_size=4, stride=2, padding=1, bias=True, maxpool=False,
+ bilinear=False):
+ super(FusionBlock, self).__init__()
+ self.num_ft = num_ft
+ self.up_convs = nn.ModuleList()
+ self.down_convs = nn.ModuleList()
+ for i in range(self.num_ft):
+ self.up_convs.append(
+ DeconvBlock(num_filter // (2 ** i), num_filter // (2 ** (i + 1)), kernel_size, stride, padding,
+ bias=bias, bilinear=bilinear)
+ )
+ self.down_convs.append(
+ ConvBlock(num_filter // (2 ** (i + 1)), num_filter // (2 ** i), kernel_size, stride, padding, bias=bias,
+ maxpool=maxpool)
+ )
+
+ def forward(self, ft_l, ft_h_list):
+ ft_fusion = ft_l
+ for i in range(len(ft_h_list)):
+ ft = ft_fusion
+ for j in range(self.num_ft - i):
+ ft = self.up_convs[j](ft)
+ ft = ft - ft_h_list[i]
+ for j in range(self.num_ft - i):
+ ft = self.down_convs[self.num_ft - i - j - 1](ft)
+ ft_fusion = ft_fusion + ft
+
+ return ft_fusion
+
+
+class ConvLayer(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True):
+ super(ConvLayer, self).__init__()
+ reflection_padding = kernel_size // 2
+ self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
+ self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
+
+ def forward(self, x):
+ out = self.reflection_pad(x)
+ out = self.conv2d(out)
+ return out
+
+
+class UpsampleConvLayer(torch.nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size, stride):
+ super(UpsampleConvLayer, self).__init__()
+ self.conv2d = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride)
+
+ def forward(self, x):
+ out = self.conv2d(x)
+ return out
+
+
+class AddRelu(nn.Module):
+ """It is for adding two feed forwards to the output of the two following conv layers in expanding path
+ """
+
+ def __init__(self) -> None:
+ super(AddRelu, self).__init__()
+ self.relu = nn.PReLU()
+
+ def forward(self, input_tensor1, input_tensor2, input_tensor3):
+ x = input_tensor1 + input_tensor2 + input_tensor3
+ return self.relu(x)
+
+
+class BasicBlock(nn.Module):
+ def __init__(self, in_channels, out_channels, mid_channels=None):
+ super(BasicBlock, self).__init__()
+ if not mid_channels:
+ mid_channels = out_channels
+ self.conv1 = ConvLayer(in_channels, mid_channels, kernel_size=3, stride=1)
+ self.bn1 = nn.BatchNorm2d(mid_channels, momentum=0.1)
+ self.relu = nn.PReLU()
+
+ self.conv2 = ConvLayer(mid_channels, out_channels, kernel_size=3, stride=1)
+ self.bn2 = nn.BatchNorm2d(out_channels, momentum=0.1)
+
+ self.conv3 = ConvLayer(in_channels, out_channels, kernel_size=1, stride=1)
+
+ def forward(self, x):
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ residual = self.conv3(x)
+
+ out = out + residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ def __init__(self, in_channels, out_channels):
+ super(Bottleneck, self).__init__()
+ self.conv1 = ConvLayer(in_channels, out_channels, kernel_size=3, stride=1)
+ self.bn1 = nn.BatchNorm2d(out_channels, momentum=0.1)
+
+ self.conv2 = ConvLayer(out_channels, out_channels, kernel_size=3, stride=1)
+ self.bn2 = nn.BatchNorm2d(out_channels, momentum=0.1)
+
+ self.conv3 = ConvLayer(out_channels, out_channels, kernel_size=3, stride=1)
+ self.bn3 = nn.BatchNorm2d(out_channels, momentum=0.1)
+
+ self.conv4 = ConvLayer(in_channels, out_channels, kernel_size=1, stride=1)
+
+ self.relu = nn.PReLU()
+
+ def forward(self, x):
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ residual = self.conv4(x)
+
+ out = out + residual
+ out = self.relu(out)
+
+ return out
+
+
+class PPM(nn.Module):
+ def __init__(self, in_channels, out_channels):
+ super(PPM, self).__init__()
+
+ self.pool_sizes = [1, 2, 3, 6] # subregion size in each level
+ self.num_levels = len(self.pool_sizes) # number of pyramid levels
+
+ self.conv_layers = nn.ModuleList()
+ for i in range(self.num_levels):
+ self.conv_layers.append(nn.Sequential(
+ nn.AdaptiveAvgPool2d(output_size=self.pool_sizes[i]),
+ nn.Conv2d(in_channels, in_channels // self.num_levels, kernel_size=1),
+ nn.BatchNorm2d(in_channels // self.num_levels),
+ nn.ReLU(inplace=True)
+ ))
+ self.out_conv = nn.Conv2d(in_channels * 2, out_channels, kernel_size=1, stride=1)
+
+ def forward(self, x):
+ input_size = x.size()[2:] # get input size
+ output = [x]
+
+ # pyramid pooling
+ for i in range(self.num_levels):
+ out = self.conv_layers[i](x)
+ out = F.interpolate(out, size=input_size, mode='bilinear', align_corners=True)
+ output.append(out)
+
+ # concatenate features from different levels
+ output = torch.cat(output, dim=1)
+ output = self.out_conv(output)
+
+ return output
+
+
+class MCDNet(nn.Module):
+ def __init__(self, in_channels=4, num_classes=4, maxpool=False, bilinear=False) -> None:
+ super().__init__()
+ level = 1
+ # encoder
+ self.conv_input = ConvLayer(in_channels, 32 * level, kernel_size=3, stride=2)
+
+ self.dense0 = BasicBlock(32 * level, 32 * level)
+ self.conv2x = ConvLayer(32 * level, 64 * level, kernel_size=3, stride=2)
+
+ self.dense1 = BasicBlock(64 * level, 64 * level)
+ self.conv4x = ConvLayer(64 * level, 128 * level, kernel_size=3, stride=2)
+
+ self.dense2 = BasicBlock(128 * level, 128 * level)
+ self.conv8x = ConvLayer(128 * level, 256 * level, kernel_size=3, stride=2)
+
+ self.dense3 = BasicBlock(256 * level, 256 * level)
+ self.conv16x = ConvLayer(256 * level, 512 * level, kernel_size=3, stride=2)
+
+ self.dense4 = PPM(512 * level, 512 * level)
+
+ # dpff
+ self.dpffm = DPFF([32, 64, 128, 256, 512])
+
+ # decoder
+ self.convd16x = UpsampleConvLayer(512 * level, 256 * level, kernel_size=3, stride=2)
+ self.fusion4 = FusionBlock(256 * level, 3, maxpool=maxpool, bilinear=bilinear)
+ self.dense_4 = Bottleneck(512 * level, 256 * level)
+ self.add_block4 = AddRelu()
+
+ self.convd8x = UpsampleConvLayer(256 * level, 128 * level, kernel_size=3, stride=2)
+ self.fusion3 = FusionBlock(128 * level, 2, maxpool=maxpool, bilinear=bilinear)
+ self.dense_3 = Bottleneck(256 * level, 128 * level)
+ self.add_block3 = AddRelu()
+
+ self.convd4x = UpsampleConvLayer(128 * level, 64 * level, kernel_size=3, stride=2)
+ self.fusion2 = FusionBlock(64 * level, 1, maxpool=maxpool, bilinear=bilinear)
+ self.dense_2 = Bottleneck(128 * level, 64 * level)
+ self.add_block2 = AddRelu()
+
+ self.convd2x = UpsampleConvLayer(64 * level, 32 * level, kernel_size=3, stride=2)
+ self.dense_1 = Bottleneck(64 * level, 32 * level)
+ self.add_block1 = AddRelu()
+
+ self.head = UpsampleConvLayer(32 * level, num_classes, kernel_size=3, stride=2)
+ self.apply(self._weights_init)
+
+ @torch.no_grad()
+ def get_lr_data(self, x: torch.Tensor) -> torch.Tensor:
+ images = x.cpu().permute(0, 2, 3, 1).numpy() # b, h, w, c
+ batch_size = images.shape[0]
+ lr = []
+ for i in range(batch_size):
+ lr_image = image_dehazer.remove_haze((images[i]*255).astype(np.uint8), showHazeTransmissionMap=False)[0] # h, w, c, numpy.array
+ lr_tensor = torch.from_numpy(lr_image).permute(2, 0, 1)/255. # c, h, w
+ lr.append(lr_tensor)
+ return torch.stack(lr, dim=0).to(x.device) # b, c, h, w
+
+ def _weights_init(self, m):
+ if isinstance(m, nn.Linear):
+ nn.init.xavier_normal_(m.weight)
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+
+ def forward(self, x1):
+ x2 = self.get_lr_data(x1)
+ # encoder1
+ res1x_1 = self.conv_input(x1)
+ res1x_1 = self.dense0(res1x_1)
+
+ res2x_1 = self.conv2x(res1x_1)
+ res2x_1 = self.dense1(res2x_1)
+
+ res4x_1 = self.conv4x(res2x_1)
+ res4x_1 = self.dense2(res4x_1)
+
+ res8x_1 = self.conv8x(res4x_1)
+ res8x_1 = self.dense3(res8x_1)
+
+ res16x_1 = self.conv16x(res8x_1)
+ res16x_1 = self.dense4(res16x_1)
+
+ # encoder2
+ res1x_2 = self.conv_input(x2)
+ res1x_2 = self.dense0(res1x_2)
+
+ res2x_2 = self.conv2x(res1x_2)
+ res2x_2 = self.dense1(res2x_2)
+
+ res4x_2 = self.conv4x(res2x_2)
+ res4x_2 = self.dense2(res4x_2)
+
+ res8x_2 = self.conv8x(res4x_2)
+ res8x_2 = self.dense3(res8x_2)
+
+ res16x_2 = self.conv16x(res8x_2)
+ res16x_2 = self.dense4(res16x_2)
+
+ # dual-perspective feature fusion
+ res1x, res2x, res4x, res8x, res16x = self.dpffm(
+ [res1x_1, res2x_1, res4x_1, res8x_1, res16x_1],
+ [res1x_2, res2x_2, res4x_2, res8x_2, res16x_2]
+ )
+
+ # decoder
+ res8x1 = self.convd16x(res16x)
+ res8x1 = F.interpolate(res8x1, res8x.size()[2:], mode='bilinear')
+ res8x2 = self.fusion4(res8x, [res1x, res2x, res4x])
+ res8x2 = torch.cat([res8x1, res8x2], dim=1)
+ res8x2 = self.dense_4(res8x2)
+ res8x2 = self.add_block4(res8x1, res8x, res8x2)
+
+ res4x1 = self.convd8x(res8x2)
+ res4x1 = F.interpolate(res4x1, res4x.size()[2:], mode='bilinear')
+ res4x2 = self.fusion3(res4x, [res1x, res2x])
+ res4x2 = torch.cat([res4x1, res4x2], dim=1)
+ res4x2 = self.dense_3(res4x2)
+ res4x2 = self.add_block3(res4x1, res4x, res4x2)
+
+ res2x1 = self.convd4x(res4x2)
+ res2x1 = F.interpolate(res2x1, res2x.size()[2:], mode='bilinear')
+ res2x2 = self.fusion2(res2x, [res1x])
+ res2x2 = torch.cat([res2x1, res2x2], dim=1)
+ res2x2 = self.dense_2(res2x2)
+ res2x2 = self.add_block2(res2x1, res2x, res2x2)
+
+ res1x1 = self.convd2x(res2x2)
+ res1x1 = F.interpolate(res1x1, res1x.size()[2:], mode='bilinear')
+ res1x2 = torch.cat([res1x1, res1x], dim=1)
+ res1x2 = self.dense_1(res1x2)
+ res1x2 = self.add_block1(res1x1, res1x, res1x2)
+
+ out = self.head(res1x2)
+ out = F.interpolate(out, x1.size()[2:], mode='bilinear')
+
+ return out
+
+
+if __name__ == "__main__":
+    model = MCDNet(in_channels=3, num_classes=2)
+    fake_image = torch.randn(size=(2, 3, 256, 256))
+    output = model(fake_image)
+    print(output.shape)  # torch.Size([2, 2, 256, 256])
\ No newline at end of file
diff --git a/cloud_adapter/scnn.py b/cloud_adapter/scnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5bd5d6583196717241440e434415060d1ba8e12
--- /dev/null
+++ b/cloud_adapter/scnn.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# @Time : 2024/7/21 5:11 PM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : scnn.py
+# @Software: PyCharm
+
+# Paper: https://www.sciencedirect.com/science/article/abs/pii/S0924271624000352?via%3Dihub#fn1
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class SCNN(nn.Module):
+ def __init__(self, in_channels=3, num_classes=2, dropout_p=0.5):
+ super().__init__()
+ self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=1)
+ self.conv2 = nn.Conv2d(64, num_classes, kernel_size=1)
+ self.conv3 = nn.Conv2d(num_classes, num_classes, kernel_size=3, padding=1)
+ self.dropout = nn.Dropout2d(p=dropout_p)
+
+ def forward(self, x):
+ x = F.relu(self.conv1(x))
+ x = self.dropout(x)
+ x = self.conv2(x)
+ x = self.conv3(x)
+ return x
+
+
+if __name__ == '__main__':
+ model = SCNN(num_classes=7)
+ fake_img = torch.randn((2, 3, 224, 224))
+ out = model(fake_img)
+ print(out.shape)
+ # torch.Size([2, 7, 224, 224])
\ No newline at end of file
diff --git a/cloud_adapter/unetmobv2.py b/cloud_adapter/unetmobv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ae78246b4c7ceed76e2eb2c7c013e5300666fe0
--- /dev/null
+++ b/cloud_adapter/unetmobv2.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# @Time    : 2024/8/6 3:44 PM
+# @Author : xiaoshun
+# @Email : 3038523973@qq.com
+# @File : unetmobv2.py
+# @Software: PyCharm
+import segmentation_models_pytorch as smp
+import torch
+from torch import nn as nn
+
+
+class UNetMobV2(nn.Module):
+    def __init__(self, num_classes, in_channels=3):
+ super().__init__()
+ self.backbone = smp.Unet(
+ encoder_name='mobilenet_v2',
+ encoder_weights=None,
+ in_channels=in_channels,
+ classes=num_classes,
+ )
+
+ def forward(self, x):
+ x = self.backbone(x)
+ return x
+
+
+if __name__ == '__main__':
+ fake_image = torch.rand(1, 3, 224, 224)
+ model = UNetMobV2(num_classes=2)
+ output = model(fake_image)
+ print(output.size())
\ No newline at end of file
diff --git a/cloud_adapter/utils.py b/cloud_adapter/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..036733167b0166ed6e42d67477771a517d5117c3
--- /dev/null
+++ b/cloud_adapter/utils.py
@@ -0,0 +1,58 @@
+import torch.nn as nn
+from typing import List
+from mmengine.logging import MMLogger
+
+first_set_requires_grad = True
+first_set_train = True
+
+
+def set_requires_grad(model: nn.Module, keywords: List[str]):
+    """
+    Enable gradients only for parameters whose name contains one of the keywords (substring match); freeze the rest.
+    """
+ requires_grad_names = []
+ num_params = 0
+ num_trainable = 0
+ for name, param in model.named_parameters():
+ num_params += param.numel()
+ if any(key in name for key in keywords):
+ param.requires_grad = True
+ requires_grad_names.append(name)
+ num_trainable += param.numel()
+ else:
+ param.requires_grad = False
+ global first_set_requires_grad
+ if first_set_requires_grad:
+ logger = MMLogger.get_current_instance()
+ for name in requires_grad_names:
+ logger.info(f"set_requires_grad----{name}")
+ logger.info(
+ f"Total trainable params--{num_trainable}, All params--{num_params}, Ratio--{num_trainable*100/num_params:.1f}%"
+ )
+ first_set_requires_grad = False
+
+
+def _set_train(model: nn.Module, keywords: List[str], prefix: str = ""):
+ train_names = []
+ for name, child in model.named_children():
+        fullname = f"{prefix}.{name}" if prefix else name  # avoid a leading "." when prefix is empty
+ if any(name.startswith(key) for key in keywords):
+ train_names.append(fullname)
+ child.train()
+ else:
+ train_names += _set_train(child, keywords, prefix=fullname)
+ return train_names
+
+
+def set_train(model: nn.Module, keywords: List[str]):
+    """
+    Put only sub-modules whose name starts with one of the keywords into train mode; the rest stay in eval mode.
+    """
+ model.train(False)
+ train_names = _set_train(model, keywords)
+ global first_set_train
+ if first_set_train:
+ logger = MMLogger.get_current_instance()
+ for train_name in train_names:
+ logger.info(f"set_train----{train_name}")
+ first_set_train = False
\ No newline at end of file
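+
+
+if __name__ == "__main__":
+    # Illustrative sketch only (not part of the original pipeline): freeze everything
+    # except parameters whose name contains "adapter", then keep only sub-modules whose
+    # name starts with "adapter" in train mode. The module and keyword names below are
+    # made-up examples.
+    demo = nn.Module()
+    demo.backbone = nn.Linear(8, 8)
+    demo.adapter = nn.Linear(8, 2)
+    set_requires_grad(demo, keywords=["adapter"])
+    set_train(demo, keywords=["adapter"])
+    print([n for n, p in demo.named_parameters() if p.requires_grad])
+    # expected: ['adapter.weight', 'adapter.bias']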
diff --git a/example_inputs/gf1/11.png b/example_inputs/gf1/11.png
new file mode 100644
index 0000000000000000000000000000000000000000..f140af36561e196074a0bf4adf84f95de8231d5d
Binary files /dev/null and b/example_inputs/gf1/11.png differ
diff --git a/example_inputs/gf1/48.png b/example_inputs/gf1/48.png
new file mode 100644
index 0000000000000000000000000000000000000000..1a2ef6daa29f194bd961a07b4ab0709d2ee3ce47
Binary files /dev/null and b/example_inputs/gf1/48.png differ
diff --git a/example_inputs/gf1/9.png b/example_inputs/gf1/9.png
new file mode 100644
index 0000000000000000000000000000000000000000..a206feaeb4af1132deeacc565ab0a78b6ea39d23
Binary files /dev/null and b/example_inputs/gf1/9.png differ
diff --git a/example_inputs/gf2/160.png b/example_inputs/gf2/160.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d5df50c6e3ab430549c781fd2bccca9b41dec23
Binary files /dev/null and b/example_inputs/gf2/160.png differ
diff --git a/example_inputs/gf2/2.png b/example_inputs/gf2/2.png
new file mode 100644
index 0000000000000000000000000000000000000000..b25bcb2b91ef66195e7c71a479b11237d80c6b5d
Binary files /dev/null and b/example_inputs/gf2/2.png differ
diff --git a/example_inputs/gf2/63.png b/example_inputs/gf2/63.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad831a8f2ec6af36edb7790d6a8f8c15126af61e
Binary files /dev/null and b/example_inputs/gf2/63.png differ
diff --git a/example_inputs/hrc_whu/barren_7.png b/example_inputs/hrc_whu/barren_7.png
new file mode 100644
index 0000000000000000000000000000000000000000..d0fb526613808c2b0cc68a3b54c7fe7bf58fc8e9
Binary files /dev/null and b/example_inputs/hrc_whu/barren_7.png differ
diff --git a/example_inputs/hrc_whu/snow_10.png b/example_inputs/hrc_whu/snow_10.png
new file mode 100644
index 0000000000000000000000000000000000000000..af123dbc6ccfb9b135f8d94bd9750adbf76aec11
Binary files /dev/null and b/example_inputs/hrc_whu/snow_10.png differ
diff --git a/example_inputs/hrc_whu/vegetation_21.png b/example_inputs/hrc_whu/vegetation_21.png
new file mode 100644
index 0000000000000000000000000000000000000000..6ea92b4d80bde86c01d93d43d64cfad87d2e4a3c
Binary files /dev/null and b/example_inputs/hrc_whu/vegetation_21.png differ
diff --git a/example_inputs/l1c/1.png b/example_inputs/l1c/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..f36ecf60322837525f33ac5e4a54a051333b3b48
Binary files /dev/null and b/example_inputs/l1c/1.png differ
diff --git a/example_inputs/l1c/27.png b/example_inputs/l1c/27.png
new file mode 100644
index 0000000000000000000000000000000000000000..96a3bc291f8cc3c678ceea8b339807b8da9d2f31
Binary files /dev/null and b/example_inputs/l1c/27.png differ
diff --git a/example_inputs/l1c/76.png b/example_inputs/l1c/76.png
new file mode 100644
index 0000000000000000000000000000000000000000..c06811d59483a905ec5e179d0b2a0bb7fe6360fa
Binary files /dev/null and b/example_inputs/l1c/76.png differ
diff --git a/example_inputs/l2a/121.png b/example_inputs/l2a/121.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d9d4cbb45f6205c1684009133d0dd807c5dbcd3
Binary files /dev/null and b/example_inputs/l2a/121.png differ
diff --git a/example_inputs/l2a/18.png b/example_inputs/l2a/18.png
new file mode 100644
index 0000000000000000000000000000000000000000..903d2f11a8501566d2024c73ddfe39fe6d5cc08d
Binary files /dev/null and b/example_inputs/l2a/18.png differ
diff --git a/example_inputs/l2a/35.png b/example_inputs/l2a/35.png
new file mode 100644
index 0000000000000000000000000000000000000000..9fa0521173620ef89f0d6aead16ec7d64348ebcf
Binary files /dev/null and b/example_inputs/l2a/35.png differ
diff --git a/example_inputs/l8/barren_LC81390292014135LGN00_patch_5632_512.png b/example_inputs/l8/barren_LC81390292014135LGN00_patch_5632_512.png
new file mode 100644
index 0000000000000000000000000000000000000000..278dafd53a3fdf8bfc70313d14c4da4aded8d43d
Binary files /dev/null and b/example_inputs/l8/barren_LC81390292014135LGN00_patch_5632_512.png differ
diff --git a/example_inputs/l8/forest_LC80160502014041LGN00_patch_4608_4608.png b/example_inputs/l8/forest_LC80160502014041LGN00_patch_4608_4608.png
new file mode 100644
index 0000000000000000000000000000000000000000..4ba447143ab2270d10e2357c564eb3219142cd72
Binary files /dev/null and b/example_inputs/l8/forest_LC80160502014041LGN00_patch_4608_4608.png differ
diff --git a/example_inputs/l8/shrubland_LC81020802014100LGN00_patch_1024_3584.png b/example_inputs/l8/shrubland_LC81020802014100LGN00_patch_1024_3584.png
new file mode 100644
index 0000000000000000000000000000000000000000..2d6043502426af800c994106fe27afc31885e77c
Binary files /dev/null and b/example_inputs/l8/shrubland_LC81020802014100LGN00_patch_1024_3584.png differ