_base_ = [
    './datasets/hsi_detection4x.py', '../_base_/default_runtime.py'
]

in_channels = 30

model = dict(
    type='DETR',
    num_queries=100,
    data_preprocessor=dict(
        type='HSIDetDataPreprocessor', pad_size_divisor=1),
    backbone=dict(
        type='ResNet',
        in_channels=in_channels,
        depth=50,
        num_stages=4,
        out_indices=(3,),
        frozen_stages=-1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=None,
        num_outs=1),
    encoder=dict(  # DetrTransformerEncoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerEncoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True)))),
    decoder=dict(  # DetrTransformerDecoder
        num_layers=6,
        layer_cfg=dict(  # DetrTransformerDecoderLayer
            self_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            cross_attn_cfg=dict(  # MultiheadAttention
                embed_dims=256,
                num_heads=8,
                dropout=0.1,
                batch_first=True),
            ffn_cfg=dict(
                embed_dims=256,
                feedforward_channels=2048,
                num_fcs=2,
                ffn_drop=0.1,
                act_cfg=dict(type='ReLU', inplace=True))),
        return_intermediate=True),
    positional_encoding=dict(num_feats=128, normalize=True),
    bbox_head=dict(
        type='DETRHead',
        num_classes=8,
        embed_dims=256,
        loss_cls=dict(
            type='CrossEntropyLoss',
            bg_cls_weight=0.1,
            use_sigmoid=False,
            loss_weight=1.0,
            class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            match_costs=[
                dict(type='ClassificationCost', weight=1.),
                dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
                dict(type='IoUCost', iou_mode='giou', weight=2.0)
            ])),
    test_cfg=dict(max_per_img=100))

# train_pipeline: NOTE the img_scale and the Pad's size_divisor are different
# from the default setting in mmdet.

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
    clip_grad=dict(max_norm=0.1, norm_type=2),
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))

# learning policy
max_epochs = 100
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=20)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[90],
        gamma=0.1)
]

# default_hooks = dict(
#     checkpoint=dict(
#         type='CheckpointHook', interval=-1, by_epoch=False, save_best='auto'))

# NOTE: `auto_scale_lr` is for automatically scaling LR;
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = 4 (the total batch size this base LR assumes)
auto_scale_lr = dict(base_batch_size=4)
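# Minimal launch sketch, kept as comments so the config remains importable.
# Assumptions (not part of this config): the standard MMDetection 3.x repo
# layout with tools/train.py and tools/dist_train.sh, and that this file is
# saved at a hypothetical path such as configs/hsi/detr_r50_hsi4x.py.
#
#   # single-GPU training
#   python tools/train.py configs/hsi/detr_r50_hsi4x.py
#
#   # distributed training, e.g. on 2 GPUs (so 2 GPUs x 2 samples/GPU matches
#   # the base_batch_size of 4 above)
#   bash tools/dist_train.sh configs/hsi/detr_r50_hsi4x.py 2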