from yacs.config import CfgNode as CN

_CN = CN()

############## Model ##############
_CN.MODEL = None    # options: ['MicKey']
_CN.DEBUG = False

# MicKey configuration
_CN.MICKEY = CN()
_CN.MICKEY.DINOV2 = CN()
_CN.MICKEY.DINOV2.DOWN_FACTOR = None
_CN.MICKEY.DINOV2.CHANNEL_DIM = None
_CN.MICKEY.DINOV2.FLOAT16 = None

_CN.MICKEY.KP_HEADS = CN()
_CN.MICKEY.KP_HEADS.BLOCKS_DIM = None
_CN.MICKEY.KP_HEADS.BN = None
_CN.MICKEY.KP_HEADS.USE_SOFTMAX = None
_CN.MICKEY.KP_HEADS.USE_DEPTHSIGMOID = None
_CN.MICKEY.KP_HEADS.MAX_DEPTH = None
_CN.MICKEY.KP_HEADS.POS_ENCODING = None

_CN.MICKEY.DSC_HEAD = CN()
_CN.MICKEY.DSC_HEAD.LAST_DIM = None
_CN.MICKEY.DSC_HEAD.BLOCKS_DIM = None
_CN.MICKEY.DSC_HEAD.BN = None
_CN.MICKEY.DSC_HEAD.NORM_DSC = None
_CN.MICKEY.DSC_HEAD.POS_ENCODING = None

_CN.FEATURE_MATCHER = CN()
_CN.FEATURE_MATCHER.TYPE = None
_CN.FEATURE_MATCHER.DUAL_SOFTMAX = CN()
_CN.FEATURE_MATCHER.DUAL_SOFTMAX.TEMPERATURE = None
_CN.FEATURE_MATCHER.DUAL_SOFTMAX.USE_DUSTBIN = None
_CN.FEATURE_MATCHER.SINKHORN = CN()
_CN.FEATURE_MATCHER.SINKHORN.NUM_IT = None
_CN.FEATURE_MATCHER.SINKHORN.DUSTBIN_SCORE_INIT = None
_CN.FEATURE_MATCHER.USE_TRANSFORMER = None
_CN.FEATURE_MATCHER.TOP_KEYPOINTS = False

# LOSS_CLASS
_CN.LOSS_CLASS = CN()
_CN.LOSS_CLASS.LOSS_FUNCTION = None
_CN.LOSS_CLASS.SOFT_CLIPPING = None
_CN.LOSS_CLASS.POSE_ERR = CN()
_CN.LOSS_CLASS.POSE_ERR.MAX_LOSS_VALUE = None
_CN.LOSS_CLASS.POSE_ERR.MAX_LOSS_SOFTVALUE = None
_CN.LOSS_CLASS.VCRE = CN()
_CN.LOSS_CLASS.VCRE.MAX_LOSS_VALUE = None
_CN.LOSS_CLASS.VCRE.MAX_LOSS_SOFTVALUE = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES = CN()
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.SCORE_TEMPERATURE = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.IT_MATCHES = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.IT_RANSAC = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.INLIER_3D_TH = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.INLIER_REF_TH = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.NUM_REF_STEPS = None
_CN.LOSS_CLASS.GENERATE_HYPOTHESES.NUM_CORR_3d3d = None
_CN.LOSS_CLASS.CURRICULUM_LEARNING = CN()
_CN.LOSS_CLASS.CURRICULUM_LEARNING.TRAIN_CURRICULUM = None
_CN.LOSS_CLASS.CURRICULUM_LEARNING.TRAIN_WITH_TOPK = None
_CN.LOSS_CLASS.CURRICULUM_LEARNING.TOPK_INIT = None
_CN.LOSS_CLASS.CURRICULUM_LEARNING.TOPK = None
_CN.LOSS_CLASS.NULL_HYPOTHESIS = CN()
_CN.LOSS_CLASS.NULL_HYPOTHESIS.ADD_NULL_HYPOTHESIS = None
_CN.LOSS_CLASS.NULL_HYPOTHESIS.TH_OUTLIERS = None
_CN.LOSS_CLASS.SAMPLER = CN()
_CN.LOSS_CLASS.SAMPLER.NUM_SAMPLES_MATCHES = None

# Procrustes RANSAC options
_CN.PROCRUSTES = CN()
_CN.PROCRUSTES.IT_MATCHES = None
_CN.PROCRUSTES.IT_RANSAC = None
_CN.PROCRUSTES.NUM_SAMPLED_MATCHES = None
_CN.PROCRUSTES.NUM_CORR_3D_3D = None
_CN.PROCRUSTES.NUM_REFINEMENTS = None
_CN.PROCRUSTES.TH_INLIER = None
_CN.PROCRUSTES.TH_SOFT_INLIER = None

# Training Procrustes RANSAC options
_CN.PROCRUSTES_TRAINING = CN()
_CN.PROCRUSTES_TRAINING.MAX_CORR_DIST = None
_CN.PROCRUSTES_TRAINING.REFINE = False    # refine pose with ICP

############## Dataset ##############
_CN.DATASET = CN()
# 1. data config
_CN.DATASET.DATA_SOURCE = None    # options: ['ScanNet', '7Scenes', 'MapFree']
_CN.DATASET.SCENES = None         # scenes to use (for 7Scenes/MapFree); should be a list []; if None, use all scenes
_CN.DATASET.DATA_ROOT = None             # path to dataset folder
_CN.DATASET.SEED = None                  # seed for dataset generation
_CN.DATASET.NPZ_ROOT = None              # path to npz files containing pairs of frame indices per sample
_CN.DATASET.MIN_OVERLAP_SCORE = None     # discard data with overlap_score < min_overlap_score
_CN.DATASET.MAX_OVERLAP_SCORE = None     # discard data with overlap_score > max_overlap_score
_CN.DATASET.CONSECUTIVE_PAIRS = None
_CN.DATASET.FRAME_RATE = None
_CN.DATASET.AUGMENTATION_TYPE = None     # options: [None, 'colorjitter']
_CN.DATASET.BLACK_WHITE = False          # if True, transform images to black & white
_CN.DATASET.PAIRS_TXT = CN()             # paths to the text files defining the train/val/test pairs (7Scenes)
_CN.DATASET.PAIRS_TXT.TRAIN = None
_CN.DATASET.PAIRS_TXT.VAL = None
_CN.DATASET.PAIRS_TXT.TEST = None
_CN.DATASET.PAIRS_TXT.ONE_NN = False     # if True, keep only the reference image with the highest similarity to each query
_CN.DATASET.HEIGHT = None
_CN.DATASET.WIDTH = None

############# TRAINING #############
_CN.TRAINING = CN()
# Data loader settings
_CN.TRAINING.BATCH_SIZE = None
_CN.TRAINING.NUM_WORKERS = None
_CN.TRAINING.NUM_GPUS = None
_CN.TRAINING.SAMPLER = None                  # options: ['random', 'scene_balance']
_CN.TRAINING.N_SAMPLES_SCENE = None          # if 'scene_balance' sampler, number of samples per scene
_CN.TRAINING.SAMPLE_WITH_REPLACEMENT = None  # if 'scene_balance' sampler, whether to sample with replacement
# Training settings
_CN.TRAINING.LR = None
_CN.TRAINING.LR_STEP_INTERVAL = None
_CN.TRAINING.LR_STEP_GAMMA = None    # multiplicative LR decay factor applied every LR_STEP_INTERVAL
_CN.TRAINING.VAL_INTERVAL = None
_CN.TRAINING.VAL_BATCHES = None
_CN.TRAINING.LOG_INTERVAL = None
_CN.TRAINING.EPOCHS = None
_CN.TRAINING.GRAD_CLIP = 0.          # L2 norm at which to clip the gradient; disabled if 0

cfg = _CN
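
# Minimal usage sketch (an assumption, not part of the original config): since every
# entry above defaults to None/False, a typical yacs workflow is to clone these
# defaults, merge experiment-specific overrides from a YAML file and/or a key-value
# list, and freeze the result. The YAML path and the override values below are
# placeholders, not values shipped with this repository.
if __name__ == "__main__":
    cfg_example = cfg.clone()    # keep the module-level defaults untouched
    # cfg_example.merge_from_file("config/my_experiment.yaml")  # hypothetical override file
    cfg_example.merge_from_list([
        "TRAINING.BATCH_SIZE", 8,    # example overrides only
        "TRAINING.LR", 1e-4,
    ])
    cfg_example.freeze()         # make the config read-only from here on
    print(cfg_example.TRAINING)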