diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..5045717190f487fc0ef3792d1ccee60d016d36a9 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/drone_video.mp4 filter=lfs diff=lfs merge=lfs -text
+examples/IMG_9730.mov filter=lfs diff=lfs merge=lfs -text
+examples/IMG_9731.mov filter=lfs diff=lfs merge=lfs -text
+examples/IMG_9732.mov filter=lfs diff=lfs merge=lfs -text
diff --git a/checkpoints/dpvo.pth b/checkpoints/dpvo.pth
new file mode 100644
index 0000000000000000000000000000000000000000..25b16864668c8625f38021d17cc534258ff6297f
--- /dev/null
+++ b/checkpoints/dpvo.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30d02dc2b88a321cf99aad8e4ea1152a44d791b5b65bf95ad036922819c0ff12
+size 14167743
diff --git a/checkpoints/wham_vit_bedlam_w_3dpw.pth.tar b/checkpoints/wham_vit_bedlam_w_3dpw.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..8ad730690f89ceb46bb3415e675688beb0c8998d
--- /dev/null
+++ b/checkpoints/wham_vit_bedlam_w_3dpw.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d250d2d298b00f200aa39df36253b55ca434188c2934d8e91e5e0777fb67fd
+size 527307587
diff --git a/checkpoints/wham_vit_w_3dpw.pth.tar b/checkpoints/wham_vit_w_3dpw.pth.tar
new file mode 100644
index 0000000000000000000000000000000000000000..5bc1ba919994f301d0c4df9b5c88d4cbfb871321
--- /dev/null
+++ b/checkpoints/wham_vit_w_3dpw.pth.tar
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9835bcbc952221ad72fa72e768e1f4620e96788b12cecd676a3b1dbee057dd66
+size 527307587
diff --git a/checkpoints/yolov8x.pt b/checkpoints/yolov8x.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a0510bf3bb96a465f97b81dab2dd2f437e2cccbe
--- /dev/null
+++ b/checkpoints/yolov8x.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4d5a3f000d771762f03fc8b57ebd0aae324aeaefdd6e68492a9c4470f2d1e8b
+size 136867539
diff --git a/configs/__pycache__/config.cpython-39.pyc b/configs/__pycache__/config.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d54c1c5e7237aaa07a716b7513de4a26bfdbac2c
Binary files /dev/null and b/configs/__pycache__/config.cpython-39.pyc differ
diff --git a/configs/__pycache__/constants.cpython-39.pyc b/configs/__pycache__/constants.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5159979482d30d508ad35f1370115d7154714364
Binary files /dev/null and b/configs/__pycache__/constants.cpython-39.pyc differ
diff --git a/configs/config.py b/configs/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d58747ff089106fb22345a461345e833d711f6a
--- /dev/null
+++ b/configs/config.py
@@ -0,0 +1,111 @@
+import argparse
+from yacs.config import CfgNode as CN
+
+# Configuration variables
+cfg = CN()
+
+cfg.TITLE = 'default'
+cfg.OUTPUT_DIR = 'results'
+cfg.EXP_NAME = 'default'
+cfg.DEVICE = 'cuda'
+cfg.DEBUG = False
+cfg.EVAL = False
+cfg.RESUME = False
+cfg.LOGDIR = ''
+cfg.NUM_WORKERS = 5
+cfg.SEED_VALUE = -1
+cfg.SUMMARY_ITER = 50
+cfg.MODEL_CONFIG = ''
+cfg.FLIP_EVAL = False
+
+cfg.TRAIN = CN()
+cfg.TRAIN.STAGE = 'stage1'
+cfg.TRAIN.DATASET_EVAL = '3dpw'
+cfg.TRAIN.CHECKPOINT = ''
+cfg.TRAIN.BATCH_SIZE = 64
+cfg.TRAIN.START_EPOCH = 0
+cfg.TRAIN.END_EPOCH = 999
+cfg.TRAIN.OPTIM = 'Adam'
+cfg.TRAIN.LR = 3e-4
+cfg.TRAIN.LR_FINETUNE = 5e-5
+cfg.TRAIN.LR_PATIENCE = 5
+cfg.TRAIN.LR_DECAY_RATIO = 0.1
+cfg.TRAIN.WD = 0.0
+cfg.TRAIN.MOMENTUM = 0.9
+cfg.TRAIN.MILESTONES = [50, 70]
+
+cfg.DATASET = CN()
+cfg.DATASET.SEQLEN = 81
+cfg.DATASET.RATIO = [1.0, 0, 0, 0, 0]
+
+cfg.MODEL = CN()
+cfg.MODEL.BACKBONE = 'vit'
+
+cfg.LOSS = CN()
+cfg.LOSS.SHAPE_LOSS_WEIGHT = 0.001
+cfg.LOSS.JOINT2D_LOSS_WEIGHT = 5.
+cfg.LOSS.JOINT3D_LOSS_WEIGHT = 5.
+cfg.LOSS.VERTS3D_LOSS_WEIGHT = 1.
+cfg.LOSS.POSE_LOSS_WEIGHT = 1.
+cfg.LOSS.CASCADED_LOSS_WEIGHT = 0.0
+cfg.LOSS.CONTACT_LOSS_WEIGHT = 0.04
+cfg.LOSS.ROOT_VEL_LOSS_WEIGHT = 0.001
+cfg.LOSS.ROOT_POSE_LOSS_WEIGHT = 0.4
+cfg.LOSS.SLIDING_LOSS_WEIGHT = 0.5
+cfg.LOSS.CAMERA_LOSS_WEIGHT = 0.04
+cfg.LOSS.LOSS_WEIGHT = 60.
+cfg.LOSS.CAMERA_LOSS_SKIP_EPOCH = 5
+
+
+def get_cfg_defaults():
+    """Get a yacs CfgNode object with default values for this project."""
+    # Return a clone so that the defaults will not be altered
+    # This is for the "local variable" use pattern
+    return cfg.clone()
+
+
+def get_cfg(args, test):
+    """
+    Define configuration.
+    """
+    import os
+
+    cfg = get_cfg_defaults()
+    if os.path.exists(args.cfg):
+        cfg.merge_from_file(args.cfg)
+
+    cfg.merge_from_list(args.opts)
+    if test:
+        cfg.merge_from_list(['EVAL', True])
+
+    return cfg.clone()
+
+
+def bool_arg(value):
+    if value.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif value.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    # Fail loudly instead of silently returning None on invalid input
+    raise argparse.ArgumentTypeError(f'Invalid boolean value: {value}')
+
+
+def parse_args(test=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-c', '--cfg', type=str, default='./configs/debug.yaml', help='cfg file path')
+    parser.add_argument(
+        "--eval-set", type=str, default='3dpw', help="Evaluation dataset")
+    parser.add_argument(
+        "--eval-split", type=str, default='test', help="Evaluation data split")
+    parser.add_argument('--render', default=False, type=bool_arg,
+                        help='Render SMPL meshes after the evaluation')
+    parser.add_argument('--save-results', default=False, type=bool_arg,
+                        help='Save SMPL parameters after the evaluation')
+    parser.add_argument(
+        "opts", default=None, nargs=argparse.REMAINDER,
+        help="Modify config options using the command-line")
+
+    args = parser.parse_args()
+    print(args, end='\n\n')
+    cfg_file = args.cfg
+    cfg = get_cfg(args, test)
+
+    return cfg, cfg_file, args
\ No newline at end of file
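Editor's note on how this YACS setup is typically driven (an illustrative sketch, not part of the commit): `parse_args` reads a YAML file via `--cfg`, `get_cfg` merges it over the defaults above, and any trailing `KEY VALUE` pairs on the command line override both. The driver script name below is hypothetical.

```python
# Hypothetical driver illustrating the precedence: defaults < YAML < CLI opts.
from configs.config import parse_args

# e.g. invoked as:
#   python my_train.py --cfg configs/yamls/stage1.yaml TRAIN.BATCH_SIZE 32
cfg, cfg_file, args = parse_args()
print(cfg.TRAIN.BATCH_SIZE)  # 32 -- the trailing CLI pair overrides stage1.yaml's 64
```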
diff --git a/configs/constants.py b/configs/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..ded9d2241a5ecfa192c09af4f27cfd969d1e11fa
--- /dev/null
+++ b/configs/constants.py
@@ -0,0 +1,59 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import torch
+
+IMG_FEAT_DIM = {
+    'resnet': 2048,
+    'vit': 1024
+}
+
+N_JOINTS = 17
+root = 'dataset'
+class PATHS:
+    # Raw data folders
+    PARSED_DATA = f'{root}/parsed_data'
+    AMASS_PTH = f'{root}/AMASS'
+    THREEDPW_PTH = f'{root}/3DPW'
+    HUMAN36M_PTH = f'{root}/Human36M'
+    RICH_PTH = f'{root}/RICH'
+    EMDB_PTH = f'{root}/EMDB'
+
+    # Processed labels
+    AMASS_LABEL = f'{root}/parsed_data/amass.pth'
+    THREEDPW_LABEL = f'{root}/parsed_data/3dpw_dset_backbone.pth'
+    MPII3D_LABEL = f'{root}/parsed_data/mpii3d_dset_backbone.pth'
+    HUMAN36M_LABEL = f'{root}/parsed_data/human36m_dset_backbone.pth'
+    INSTA_LABEL = f'{root}/parsed_data/insta_dset_backbone.pth'
+    BEDLAM_LABEL = f'{root}/parsed_data/bedlam_train_backbone.pth'
+
+class KEYPOINTS:
+    NUM_JOINTS = N_JOINTS
+    H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
+    H36M_TO_J14 = H36M_TO_J17[:14]
+    J17_TO_H36M = [14, 3, 4, 5, 2, 1, 0, 15, 12, 16, 13, 9, 10, 11, 8, 7, 6]
+    COCO_AUG_DICT = f'{root}/body_models/coco_aug_dict.pth'
+    TREE = [[5, 6], 0, 0, 1, 2, -1, -1, 5, 6, 7, 8, -1, -1, 11, 12, 13, 14, 15, 15, 15, 16, 16, 16]
+
+    # STD scale for video noise
+    S_BIAS = 1e-1
+    S_JITTERING = 5e-2
+    S_PEAK = 3e-1
+    S_PEAK_MASK = 5e-3
+    S_MASK = 0.03
+
+
+class BMODEL:
+    MAIN_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]  # reduced_joints
+
+    FLDR = f'{root}/body_models/smpl/'
+    SMPLX2SMPL = f'{root}/body_models/smplx2smpl.pkl'
+    FACES = f'{root}/body_models/smpl_faces.npy'
+    MEAN_PARAMS = f'{root}/body_models/smpl_mean_params.npz'
+    JOINTS_REGRESSOR_WHAM = f'{root}/body_models/J_regressor_wham.npy'
+    JOINTS_REGRESSOR_H36M = f'{root}/body_models/J_regressor_h36m.npy'
+    JOINTS_REGRESSOR_EXTRA = f'{root}/body_models/J_regressor_extra.npy'
+    JOINTS_REGRESSOR_FEET = f'{root}/body_models/J_regressor_feet.npy'
+    PARENTS = torch.tensor([
+        -1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21])
\ No newline at end of file
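Editor's note: the `H36M_TO_J17`/`H36M_TO_J14` lists above are index maps for reordering Human3.6M-style 17-joint arrays into the J17/J14 conventions. A small illustrative sketch (not part of the commit, dummy data):

```python
import numpy as np
from configs.constants import KEYPOINTS

h36m_joints = np.zeros((17, 3))               # dummy (17, 3) H36M-ordered joints
j17 = h36m_joints[KEYPOINTS.H36M_TO_J17]      # reorder H36M -> J17
j14 = h36m_joints[KEYPOINTS.H36M_TO_J14]      # first 14 entries: the J14 subset
assert j17.shape == (17, 3) and j14.shape == (14, 3)
```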
diff --git a/configs/yamls/demo.yaml b/configs/yamls/demo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed67a32e22ee65ee9c0c67cf4e98bd3a8f577af6
--- /dev/null
+++ b/configs/yamls/demo.yaml
@@ -0,0 +1,14 @@
+LOGDIR: ''
+DEVICE: 'cuda'
+EXP_NAME: 'demo'
+OUTPUT_DIR: 'experiments/'
+NUM_WORKERS: 0
+MODEL_CONFIG: 'configs/yamls/model_base.yaml'
+FLIP_EVAL: True
+
+TRAIN:
+  STAGE: 'stage2'
+  CHECKPOINT: 'checkpoints/wham_vit_bedlam_w_3dpw.pth.tar'
+
+MODEL:
+  BACKBONE: 'vit'
\ No newline at end of file
diff --git a/configs/yamls/model_base.yaml b/configs/yamls/model_base.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89a1bd8c7392a973cc269601db24960e9924d42e
--- /dev/null
+++ b/configs/yamls/model_base.yaml
@@ -0,0 +1,7 @@
+architecture: 'RNN'
+in_dim: 49
+n_iters: 1
+pose_dr: 0.15
+d_embed: 512
+n_layers: 3
+layer: 'LSTM'
\ No newline at end of file
diff --git a/configs/yamls/stage1.yaml b/configs/yamls/stage1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a25fd4bd219cc0842a7f81dcb226048c0793a12c
--- /dev/null
+++ b/configs/yamls/stage1.yaml
@@ -0,0 +1,28 @@
+LOGDIR: ''
+DEVICE: 'cuda'
+EXP_NAME: 'train_stage1'
+OUTPUT_DIR: 'experiments/'
+NUM_WORKERS: 8
+MODEL_CONFIG: 'configs/yamls/model_base.yaml'
+FLIP_EVAL: True
+SEED_VALUE: 42
+
+TRAIN:
+  LR: 5e-4
+  BATCH_SIZE: 64
+  END_EPOCH: 100
+  STAGE: 'stage1'
+  CHECKPOINT: ''
+  MILESTONES: [60, 80]
+
+LOSS:
+  SHAPE_LOSS_WEIGHT: 0.004
+  JOINT3D_LOSS_WEIGHT: 0.4
+  JOINT2D_LOSS_WEIGHT: 0.1
+  POSE_LOSS_WEIGHT: 8.0
+  CASCADED_LOSS_WEIGHT: 0.0
+  SLIDING_LOSS_WEIGHT: 0.5
+  CAMERA_LOSS_WEIGHT: 0.04
+  ROOT_VEL_LOSS_WEIGHT: 0.001
+  LOSS_WEIGHT: 50.0
+  CAMERA_LOSS_SKIP_EPOCH: 5
\ No newline at end of file
diff --git a/configs/yamls/stage2.yaml b/configs/yamls/stage2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57e69e0a3bc740daa073e829e01e9053e6dae885
--- /dev/null
+++ b/configs/yamls/stage2.yaml
@@ -0,0 +1,37 @@
+LOGDIR: ''
+DEVICE: 'cuda'
+EXP_NAME: 'train_stage2'
+OUTPUT_DIR: 'experiments'
+NUM_WORKERS: 8
+MODEL_CONFIG: 'configs/yamls/model_base.yaml'
+FLIP_EVAL: True
+SEED_VALUE: 42
+
+TRAIN:
+  LR: 1e-4
+  LR_FINETUNE: 1e-5
+  STAGE: 'stage2'
+  CHECKPOINT: 'checkpoints/wham_stage1.pth.tar'
+  BATCH_SIZE: 64
+  END_EPOCH: 40
+  MILESTONES: [20, 30]
+  LR_DECAY_RATIO: 0.2
+
+MODEL:
+  BACKBONE: 'vit'
+
+LOSS:
+  SHAPE_LOSS_WEIGHT: 0.0
+  JOINT2D_LOSS_WEIGHT: 3.0
+  JOINT3D_LOSS_WEIGHT: 6.0
+  POSE_LOSS_WEIGHT: 1.0
+  CASCADED_LOSS_WEIGHT: 0.05
+  SLIDING_LOSS_WEIGHT: 0.5
+  CAMERA_LOSS_WEIGHT: 0.01
+  ROOT_VEL_LOSS_WEIGHT: 0.001
+  LOSS_WEIGHT: 60.0
+  CAMERA_LOSS_SKIP_EPOCH: 0
+
+DATASET:
+  SEQLEN: 81
+  RATIO: [0.2, 0.2, 0.2, 0.2, 0.2]
\ No newline at end of file
diff --git a/configs/yamls/stage2_b.yaml b/configs/yamls/stage2_b.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9dfa7524d95f943edcbaaf72918c565535773e35
--- /dev/null
+++ b/configs/yamls/stage2_b.yaml
@@ -0,0 +1,38 @@
+LOGDIR: ''
+DEVICE: 'cuda'
+EXP_NAME: 'train_stage2_b'
+OUTPUT_DIR: 'experiments'
+NUM_WORKERS: 8
+MODEL_CONFIG: 'configs/yamls/model_base.yaml'
+FLIP_EVAL: True
+SEED_VALUE: 42
+
+TRAIN:
+  LR: 1e-4
+  LR_FINETUNE: 1e-5
+  STAGE: 'stage2'
+  CHECKPOINT: 'checkpoints/wham_stage1.pth.tar'
+  BATCH_SIZE: 64
+  END_EPOCH: 80
+  MILESTONES: [40, 50, 70]
+  LR_DECAY_RATIO: 0.2
+
+MODEL:
+  BACKBONE: 'vit'
+
+LOSS:
+  SHAPE_LOSS_WEIGHT: 0.0
+  JOINT2D_LOSS_WEIGHT: 5.0
+  JOINT3D_LOSS_WEIGHT: 5.0
+  VERTS3D_LOSS_WEIGHT: 1.0
+  POSE_LOSS_WEIGHT: 3.0
+  CASCADED_LOSS_WEIGHT: 0.05
+  SLIDING_LOSS_WEIGHT: 0.5
+  CAMERA_LOSS_WEIGHT: 0.01
+  ROOT_VEL_LOSS_WEIGHT: 0.001
+  LOSS_WEIGHT: 60.0
+  CAMERA_LOSS_SKIP_EPOCH: 0
+
+DATASET:
+  SEQLEN: 81
+  RATIO: [0.2, 0.2, 0.2, 0.2, 0.0, 0.2]
\ No newline at end of file
diff --git a/dataset/body_models/J_regressor_coco.npy b/dataset/body_models/J_regressor_coco.npy
new file mode 100644
index 0000000000000000000000000000000000000000..3eed75e4c494ded1e239dc939e50182491b2c9f3
--- /dev/null
+++ b/dataset/body_models/J_regressor_coco.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cd49241810715e752aa7384363b7bc09fb96b386ca99aa1c3eb2c0d15d6b8b9
+size 468648
diff --git a/dataset/body_models/J_regressor_feet.npy b/dataset/body_models/J_regressor_feet.npy
new file mode 100644
index 0000000000000000000000000000000000000000..8731b49f3a6632f26910d77be25b132ca4f041a7
--- /dev/null
+++ b/dataset/body_models/J_regressor_feet.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ef9e6d64796f2f342983a9fde6a6d9f8e3544f1239e7f86aa4f6b7aa82f4cf6
+size 220608
diff --git a/dataset/body_models/J_regressor_h36m.npy b/dataset/body_models/J_regressor_h36m.npy
new file mode 100644
index 0000000000000000000000000000000000000000..d8ea80f7f2fa4c3fde21c543d28376b84e22d77a
--- /dev/null
+++ b/dataset/body_models/J_regressor_h36m.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c655cd7013d7829eb9acbebf0e43f952a3fa0305a53c35880e39192bfb6444a0
+size 937168
diff --git a/dataset/body_models/J_regressor_wham.npy b/dataset/body_models/J_regressor_wham.npy
new file mode 100644
index 0000000000000000000000000000000000000000..0befeb8ff8ec0882510cabf925d7ab96d73c7efe
--- /dev/null
+++ b/dataset/body_models/J_regressor_wham.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f938dcfd5cd88d0b19ee34e442d49f1dc370d3d8c4f5aef57a93d0cf2e267c4c
+size 854488
diff --git a/dataset/body_models/smpl/SMPL_FEMALE.pkl b/dataset/body_models/smpl/SMPL_FEMALE.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..92a201f4839bd95c1c1986437c7c6a02d7d1ae99
--- /dev/null
+++ b/dataset/body_models/smpl/SMPL_FEMALE.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a583c1b98e4afc19042641f1bae5cd8a1f712a6724886291a7627ec07acd408d
+size 39056454
diff --git a/dataset/body_models/smpl/SMPL_MALE.pkl b/dataset/body_models/smpl/SMPL_MALE.pkl
new file
mode 100644
index 0000000000000000000000000000000000000000..43dfecc57d9b7aa99cd2398df818ba252be7f605
--- /dev/null
+++ b/dataset/body_models/smpl/SMPL_MALE.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e8c0bbbbc635dcb166ed29c303fb4bef16ea5f623e5a89263495a9e403575bd
+size 39056404
diff --git a/dataset/body_models/smpl/SMPL_NEUTRAL.pkl b/dataset/body_models/smpl/SMPL_NEUTRAL.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..26574fd104c4b69467f3c7c3516a8508d8a1a36e
--- /dev/null
+++ b/dataset/body_models/smpl/SMPL_NEUTRAL.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98e65c74ad9b998783132f00880d1025a8d64b158e040e6ef13a557e5098bc42
+size 39001280
diff --git a/dataset/body_models/smpl/__MACOSX/._smpl b/dataset/body_models/smpl/__MACOSX/._smpl
new file mode 100644
index 0000000000000000000000000000000000000000..ecd992ce89eb63ad13ac00ecb1840eb08669d78e
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/._smpl differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/._.DS_Store b/dataset/body_models/smpl/__MACOSX/smpl/._.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..09fa6bdda3a49951cf3fb7aa68796ee7d5c71310
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/._.DS_Store differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/.___init__.py b/dataset/body_models/smpl/__MACOSX/smpl/.___init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..198315090137148619e28344fa871854f05f2afd
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/.___init__.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/._models b/dataset/body_models/smpl/__MACOSX/smpl/._models
new file mode 100644
index 0000000000000000000000000000000000000000..33583c02c45f5acd6d0a92c24ffdfc98ffc99594
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/._models differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/._smpl_webuser b/dataset/body_models/smpl/__MACOSX/smpl/._smpl_webuser
new file mode 100644
index 0000000000000000000000000000000000000000..ffe2ae843d4c972cdad070513b7a1f0702998da8
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/._smpl_webuser differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl b/dataset/body_models/smpl/__MACOSX/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..92a201f4839bd95c1c1986437c7c6a02d7d1ae99
--- /dev/null
+++ b/dataset/body_models/smpl/__MACOSX/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a583c1b98e4afc19042641f1bae5cd8a1f712a6724886291a7627ec07acd408d
+size 39056454
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl b/dataset/body_models/smpl/__MACOSX/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..43dfecc57d9b7aa99cd2398df818ba252be7f605
--- /dev/null
+++ b/dataset/body_models/smpl/__MACOSX/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e8c0bbbbc635dcb166ed29c303fb4bef16ea5f623e5a89263495a9e403575bd
+size 39056404
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._LICENSE.txt b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6df69f4fe6a82caa314bf48708774f6c577cade3
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._LICENSE.txt differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._README.txt b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a3c0861d4d29455621650e98336cb97c09b3e124
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._README.txt differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/.___init__.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/.___init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d3d005dbdb334ed5c90e8f2a05eafb71307b3e5
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/.___init__.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._hello_world b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._hello_world
new file mode 100644
index 0000000000000000000000000000000000000000..815dfc0483eb0314c48268ed68102a6442e97982
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._hello_world differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._lbs.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._lbs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c141f0b71b678ee836ef1b58733749b8aea579c9
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._lbs.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._posemapper.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._posemapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4a067064fdd6a84bd9f7a6042579d744c501927
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._posemapper.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._serialization.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..5349c7ffefc22416559dcb7ef5cba17164de4391
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._serialization.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._verts.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._verts.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3dbe30d07990f310b3c5bd767953c0715247e20
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/._verts.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._hello_smpl.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._hello_smpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..660954a98795d0a7bf3e7f431936d531229fd661
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._hello_smpl.py differ
diff --git a/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._render_smpl.py b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._render_smpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b8b3bece60fad5e282a73384e6e06fa8d04737d
Binary files /dev/null and b/dataset/body_models/smpl/__MACOSX/smpl/smpl_webuser/hello_world/._render_smpl.py differ
diff --git a/dataset/body_models/smpl_faces.npy b/dataset/body_models/smpl_faces.npy
new file mode 100644
index 0000000000000000000000000000000000000000..4b0c3c149ef8a1899182c056ed2cb24746ae7199
--- /dev/null
+++ b/dataset/body_models/smpl_faces.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51fc11ebadb0487d74bef220c4eea43f014609249f0121413c1fc629d859fecb
+size 165392
diff --git a/dataset/body_models/smpl_mean_params.npz b/dataset/body_models/smpl_mean_params.npz
new file mode 100644
index 0000000000000000000000000000000000000000..c6f60a76976b877cbc08345b2977c6ddd83ced87
--- /dev/null
+++ b/dataset/body_models/smpl_mean_params.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fd6dd687800da946d0a0492383f973b92ec20f166a0b829775882868c35fcdd
+size 1310
diff --git a/dataset/body_models/smplx2smpl.pkl b/dataset/body_models/smplx2smpl.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..0f25e10571181989524020c803280607b7ee9a85
--- /dev/null
+++ b/dataset/body_models/smplx2smpl.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1d912d121ad98132e4492d8e7a0f1a8cf4412811e14a7ef8cb337bb48eef99e
+size 578019251
diff --git a/demo.py b/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..178255379cec258316cfe7e642d9688219c39faf
--- /dev/null
+++ b/demo.py
@@ -0,0 +1,234 @@
+import os
+import argparse
+import os.path as osp
+from glob import glob
+from collections import defaultdict
+
+import cv2
+import torch
+import joblib
+import numpy as np
+from loguru import logger
+from progress.bar import Bar
+
+from configs.config import get_cfg_defaults
+from lib.data.datasets import CustomDataset
+from lib.utils.imutils import avg_preds
+from lib.utils.transforms import matrix_to_axis_angle
+from lib.models import build_network, build_body_model
+from lib.models.preproc.detector import DetectionModel
+from lib.models.preproc.extractor import FeatureExtractor
+from lib.models.smplify import TemporalSMPLify
+
+try:
+    from lib.models.preproc.slam import SLAMModel
+    _run_global = True
+except:
+    logger.info('DPVO is not properly installed. Only estimating in local coordinates!')
+    _run_global = False
+
+def run(cfg,
+        video,
+        output_pth,
+        network,
+        calib=None,
+        run_global=True,
+        save_pkl=False,
+        visualize=False):
+
+    cap = cv2.VideoCapture(video)
+    assert cap.isOpened(), f'Failed to load video file {video}'
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    width, height = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+
+    # Whether to estimate motion in global coordinates
+    run_global = run_global and _run_global
+
+    # Preprocess
+    with torch.no_grad():
+        if not (osp.exists(osp.join(output_pth, 'tracking_results.pth')) and
+                osp.exists(osp.join(output_pth, 'slam_results.pth'))):
+
+            detector = DetectionModel(cfg.DEVICE.lower())
+            extractor = FeatureExtractor(cfg.DEVICE.lower(), cfg.FLIP_EVAL)
+
+            if run_global: slam = SLAMModel(video, output_pth, width, height, calib)
+            else: slam = None
+
+            bar = Bar('Preprocess: 2D detection and SLAM', fill='#', max=length)
+            while (cap.isOpened()):
+                flag, img = cap.read()
+                if not flag: break
+
+                # 2D detection and tracking
+                detector.track(img, fps, length)
+
+                # SLAM
+                if slam is not None:
+                    slam.track()
+
+                bar.next()
+
+            tracking_results = detector.process(fps)
+
+            if slam is not None:
+                slam_results = slam.process()
+            else:
+                slam_results = np.zeros((length, 7))
+                slam_results[:, 3] = 1.0    # Unit quaternion
+
+            # Extract image features
+            # TODO: Merge this into the previous while loop with an online bbox smoothing.
+            tracking_results = extractor.run(video, tracking_results)
+            logger.info('Completed data preprocessing!')
+
+            # Save the processed data
+            joblib.dump(tracking_results, osp.join(output_pth, 'tracking_results.pth'))
+            joblib.dump(slam_results, osp.join(output_pth, 'slam_results.pth'))
+            logger.info(f'Saved processed data to {output_pth}')
+
+        # If the processed data already exists, load it
+        else:
+            tracking_results = joblib.load(osp.join(output_pth, 'tracking_results.pth'))
+            slam_results = joblib.load(osp.join(output_pth, 'slam_results.pth'))
+            logger.info(f'Processed data already exists at {output_pth}. Loading it.')
+
+    # Build dataset
+    dataset = CustomDataset(cfg, tracking_results, slam_results, width, height, fps)
+
+    # Run WHAM
+    results = defaultdict(dict)
+
+    n_subjs = len(dataset)
+    for subj in range(n_subjs):
+
+        with torch.no_grad():
+            if cfg.FLIP_EVAL:
+                # Forward pass with flipped input
+                flipped_batch = dataset.load_data(subj, True)
+                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = flipped_batch
+                flipped_pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, return_y_up=True, **kwargs)
+
+                # Forward pass with normal input
+                batch = dataset.load_data(subj)
+                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch
+                pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, return_y_up=True, **kwargs)
+
+                # Merge the two predictions
+                flipped_pose, flipped_shape = flipped_pred['pose'].squeeze(0), flipped_pred['betas'].squeeze(0)
+                pose, shape = pred['pose'].squeeze(0), pred['betas'].squeeze(0)
+                flipped_pose, pose = flipped_pose.reshape(-1, 24, 6), pose.reshape(-1, 24, 6)
+                avg_pose, avg_shape = avg_preds(pose, shape, flipped_pose, flipped_shape)
+                avg_pose = avg_pose.reshape(-1, 144)
+                avg_contact = (flipped_pred['contact'][..., [2, 3, 0, 1]] + pred['contact']) / 2
+
+                # Refine trajectory with merged prediction
+                network.pred_pose = avg_pose.view_as(network.pred_pose)
+                network.pred_shape = avg_shape.view_as(network.pred_shape)
+                network.pred_contact = avg_contact.view_as(network.pred_contact)
+                output = network.forward_smpl(**kwargs)
+                pred = network.refine_trajectory(output, cam_angvel, return_y_up=True)
+
+            else:
+                # Load data
+                batch = dataset.load_data(subj)
+                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch
+
+                # Inference
+                pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, return_y_up=True, **kwargs)
+
+        # NOTE: `args` and `smpl` are module-level names defined under __main__ below.
+        if args.run_smplify:
+            smplify = TemporalSMPLify(smpl, img_w=width, img_h=height, device=cfg.DEVICE)
+            input_keypoints = dataset.tracking_results[_id]['keypoints']
+            pred = smplify.fit(pred, input_keypoints, **kwargs)
+
+            with torch.no_grad():
+                network.pred_pose = pred['pose']
+                network.pred_shape = pred['betas']
+                network.pred_cam = pred['cam']
+                output = network.forward_smpl(**kwargs)
+                pred = network.refine_trajectory(output, cam_angvel, return_y_up=True)
+
+        # ========= Store results ========= #
+        pred_body_pose = matrix_to_axis_angle(pred['poses_body']).cpu().numpy().reshape(-1, 69)
+        pred_root = matrix_to_axis_angle(pred['poses_root_cam']).cpu().numpy().reshape(-1, 3)
+        pred_root_world = matrix_to_axis_angle(pred['poses_root_world']).cpu().numpy().reshape(-1, 3)
+        pred_pose = np.concatenate((pred_root, pred_body_pose), axis=-1)
+        pred_pose_world = np.concatenate((pred_root_world, pred_body_pose), axis=-1)
+        pred_trans = (pred['trans_cam'] - network.output.offset).cpu().numpy()
+
+        results[_id]['pose'] = pred_pose
+        results[_id]['trans'] = pred_trans
+        results[_id]['pose_world'] = pred_pose_world
+        results[_id]['trans_world'] = pred['trans_world'].cpu().squeeze(0).numpy()
+        results[_id]['betas'] = pred['betas'].cpu().squeeze(0).numpy()
+        results[_id]['verts'] = (pred['verts_cam'] + pred['trans_cam'].unsqueeze(1)).cpu().numpy()
+        results[_id]['frame_ids'] = frame_id
+
+    if save_pkl:
+        joblib.dump(results, osp.join(output_pth, "wham_output.pkl"))
+
+    # Visualize
+    if visualize:
+        from lib.vis.run_vis import run_vis_on_demo
+        with torch.no_grad():
+            run_vis_on_demo(cfg, video, results, output_pth, network.smpl, vis_global=run_global)
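+
+# --- Editor's note: the helper below is an illustrative addition, not part of the
+# original commit. It sketches how the `wham_output.pkl` written above can be consumed
+# downstream; key names and shapes mirror the `results` dict populated in `run()`
+# (T = number of frames in which a subject is tracked).
+def load_wham_results(pkl_path):
+    """Load saved WHAM outputs and print a per-subject summary."""
+    results = joblib.load(pkl_path)
+    for subj_id, subj in results.items():
+        pose = subj['pose']                # (T, 72) axis-angle SMPL pose, camera coords
+        pose_world = subj['pose_world']    # (T, 72) axis-angle SMPL pose, world coords
+        trans = subj['trans']              # (T, 3) root translation, camera coords
+        verts = subj['verts']              # (T, 6890, 3) posed SMPL vertices, camera frame
+        frame_ids = subj['frame_ids']      # indices of the video frames for this subject
+        print(f'Subject {subj_id}: {pose.shape[0]} frames, starting at frame {frame_ids[0]}')
+    return results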
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--video', type=str,
+                        default='examples/demo_video.mp4',
+                        help='input video path or YouTube link')
+
+    parser.add_argument('--output_pth', type=str, default='output/demo',
+                        help='output folder to write results')
+
+    parser.add_argument('--calib', type=str, default=None,
+                        help='Camera calibration file path')
+
+    parser.add_argument('--estimate_local_only', action='store_true',
+                        help='Only estimate motion in camera coordinates if True')
+
+    parser.add_argument('--visualize', action='store_true',
+                        help='Visualize the output mesh if True')
+
+    parser.add_argument('--save_pkl', action='store_true',
+                        help='Save output as pkl file')
+
+    parser.add_argument('--run_smplify', action='store_true',
+                        help='Run Temporal SMPLify for post processing')
+
+    args = parser.parse_args()
+
+    cfg = get_cfg_defaults()
+    cfg.merge_from_file('configs/yamls/demo.yaml')
+
+    logger.info(f'GPU name -> {torch.cuda.get_device_name()}')
+    logger.info(f'GPU feat -> {torch.cuda.get_device_properties("cuda")}')
+
+    # ========= Load WHAM ========= #
+    smpl_batch_size = cfg.TRAIN.BATCH_SIZE * cfg.DATASET.SEQLEN
+    smpl = build_body_model(cfg.DEVICE, smpl_batch_size)
+    network = build_network(cfg, smpl)
+    network.eval()
+
+    # Output folder
+    sequence = '.'.join(args.video.split('/')[-1].split('.')[:-1])
+    output_pth = osp.join(args.output_pth, sequence)
+    os.makedirs(output_pth, exist_ok=True)
+
+    run(cfg,
+        args.video,
+        output_pth,
+        network,
+        args.calib,
+        run_global=not args.estimate_local_only,
+        save_pkl=args.save_pkl,
+        visualize=args.visualize)
+
+    print()
+    logger.info('Done!')
\ No newline at end of file
diff --git a/docs/API.md b/docs/API.md
new file mode 100644
index 0000000000000000000000000000000000000000..511ac3a0374218d567a97a7e36562d2140d6d074
--- /dev/null
+++ b/docs/API.md
@@ -0,0 +1,18 @@
+## Python API
+
+To use the Python API of WHAM, please complete the basic installation first ([Installation](INSTALL.md) or [Docker](DOCKER.md)).
+
+If you use the Docker environment, run:
+
+```bash
+cd /path/to/WHAM
+docker run -it -v .:/code/ --rm yusun9/wham-vitpose-dpvo-cuda11.3-python3.9 python
+```
+
+Then you can run WHAM from Python like this:
+```python
+from wham_api import WHAM_API
+wham_model = WHAM_API()
+input_video_path = 'examples/IMG_9732.mov'
+results, tracking_results, slam_results = wham_model(input_video_path)
+```
\ No newline at end of file
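Editor's note: a sketch of inspecting the three returned values, assuming the API returns the same structures `demo.py` saves (`results` keyed by tracked subject id; `slam_results` a per-frame camera trajectory):

```python
from wham_api import WHAM_API

wham_model = WHAM_API()
results, tracking_results, slam_results = wham_model('examples/IMG_9732.mov')

# Per-subject SMPL predictions, keyed by tracker id (see demo.py's `results` dict)
for subj_id, subj in results.items():
    print(subj_id, subj['pose'].shape)   # (T, 72) axis-angle SMPL pose per frame

# Camera trajectory from SLAM: one 7-vector (translation + orientation quaternion) per frame
print(len(slam_results))
```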
diff --git a/docs/DATASET.md b/docs/DATASET.md
new file mode 100644
index 0000000000000000000000000000000000000000..b47a4def1b56f8c39f56052b556edd6602ce8ab0
--- /dev/null
+++ b/docs/DATASET.md
@@ -0,0 +1,42 @@
+# Dataset
+
+## Training Data
+We use the [AMASS](https://amass.is.tue.mpg.de/), [InstaVariety](https://github.com/akanazawa/human_dynamics/blob/master/doc/insta_variety.md), [MPI-INF-3DHP](https://vcai.mpi-inf.mpg.de/3dhp-dataset/), [Human3.6M](http://vision.imar.ro/human3.6m/description.php), and [3DPW](https://virtualhumans.mpi-inf.mpg.de/3DPW/) datasets for training. Please register on their websites to download and process the data. You can download the parsed ViT versions of the InstaVariety, MPI-INF-3DHP, Human3.6M, and 3DPW data from [Google Drive](https://drive.google.com/drive/folders/13T2ghVvrw_fEk3X-8L0e6DVSYx_Og8o3?usp=sharing). Save the data under the `dataset/parsed_data` folder.
+
+### Process AMASS dataset
+After downloading the AMASS dataset, you can process it by running:
+```bash
+python -m lib.data_utils.amass_utils
+```
+The processed data will be stored at `dataset/parsed_data/amass.pth`.
+
+### Process 3DPW, MPII3D, Human3.6M, and InstaVariety datasets
+First, visit [TCMR](https://github.com/hongsukchoi/TCMR_RELEASE) and download the preprocessed data to `dataset/parsed_data/TCMR_preproc/`.
+
+Next, run 2D keypoint detection using [ViTPose](https://github.com/ViTAE-Transformer/ViTPose) and store the results at `dataset/detection_results/<DATASET>/<SEQUENCE>`. You may need to download all images to prepare the detection results.
+
+For the Human3.6M, MPII3D, and InstaVariety datasets, you also need to download the [NeuralAnnot](https://github.com/mks0601/NeuralAnnot_RELEASE) pseudo-ground-truth SMPL labels. As mentioned in our paper, we do not supervise WHAM on these labels but use them for the neural initialization step.
+
+Finally, run the following commands to preprocess all training data.
+```bash
+python -m lib.data_utils.threedpw_train_utils # 3DPW dataset
+# [Coming] python -m lib.data_utils.human36m_train_utils # Human3.6M dataset
+# [Coming] python -m lib.data_utils.mpii3d_train_utils # MPI-INF-3DHP dataset
+# [Coming] python -m lib.data_utils.insta_train_utils # InstaVariety dataset
+```
+
+### Process BEDLAM dataset
+Will be updated.
+
+## Evaluation Data
+We use [3DPW](https://virtualhumans.mpi-inf.mpg.de/3DPW/), [RICH](https://rich.is.tue.mpg.de/), and [EMDB](https://eth-ait.github.io/emdb/) for the evaluation. We provide the parsed data for the evaluation. Please download the data from [Google Drive](https://drive.google.com/drive/folders/13T2ghVvrw_fEk3X-8L0e6DVSYx_Og8o3?usp=sharing) and place it at `dataset/parsed_data/`.
+
+To process the data at your end, please:
+1) Download the parsed 3DPW data from [TCMR](https://github.com/hongsukchoi/TCMR_RELEASE) and store it at `dataset/parsed_data/TCMR_preproc/`.
+2) Run [ViTPose](https://github.com/ViTAE-Transformer/ViTPose) on all test data and store the results at `dataset/detection_results/<SEQUENCE>`.
+3) Run the following commands.
+```bash
+python -m lib.data_utils.threedpw_eval_utils --split <"val" or "test"> # 3DPW dataset
+python -m lib.data_utils.emdb_eval_utils --split <"1" or "2"> # EMDB dataset
+python -m lib.data_utils.rich_eval_utils # RICH dataset
+```
\ No newline at end of file
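Editor's note: as a quick sanity check after preprocessing, the sketch below loads one parsed label file and inspects it. It assumes the parsed `.pth` labels are joblib-serialized like the demo's intermediate results; adjust the loader if they are `torch.save` files.

```python
import joblib
from configs import constants as _C

# Load one parsed label file and inspect its top-level structure.
labels = joblib.load(_C.PATHS.AMASS_LABEL)   # dataset/parsed_data/amass.pth
print(type(labels))
if isinstance(labels, dict):
    print(list(labels.keys())[:10])
```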
diff --git a/docs/DOCKER.md b/docs/DOCKER.md
new file mode 100644
index 0000000000000000000000000000000000000000..23f45a20ed665442674f3398ba18b363bb5f7508
--- /dev/null
+++ b/docs/DOCKER.md
@@ -0,0 +1,23 @@
+## Installation
+
+### Prerequisites
+1. Make sure you have properly installed [Docker](https://www.docker.com/) and the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) before proceeding.
+
+2. Prepare the essential data for inference:
+To download the SMPL body models (Neutral, Female, and Male), you need to register at [SMPL](https://smpl.is.tue.mpg.de/) and [SMPLify](https://smplify.is.tue.mpg.de/). The username and password for both sites will be used while fetching the demo data.
+Next, run the following script to fetch the demo data. It downloads all required dependencies, including trained models and demo videos.
+```bash
+bash fetch_demo_data.sh
+```
+
+### Usage
+1. Pull the Docker image from Docker Hub:
+```bash
+docker pull yusun9/wham-vitpose-dpvo-cuda11.3-python3.9:latest
+```
+
+2. Run the code inside the Docker environment:
+```bash
+cd /path/to/WHAM
+docker run -v .:/code/ --rm yusun9/wham-vitpose-dpvo-cuda11.3-python3.9 python demo.py --video examples/IMG_9732.mov
+```
\ No newline at end of file
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
new file mode 100644
index 0000000000000000000000000000000000000000..9f9038009109d1d375b88fd5533d22750a0c53a3
--- /dev/null
+++ b/docs/INSTALL.md
@@ -0,0 +1,38 @@
+# Installation
+
+WHAM has been implemented and tested on Ubuntu 20.04 and 22.04 with Python 3.9. We provide an [anaconda](https://www.anaconda.com/) environment to run WHAM, as below.
+
+```bash
+# Clone the repo
+git clone https://github.com/yohanshin/WHAM.git --recursive
+cd WHAM/
+
+# Create Conda environment
+conda create -n wham python=3.9
+conda activate wham
+
+# Install PyTorch libraries
+conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=11.3 -c pytorch
+
+# Install PyTorch3D (optional) for visualization
+conda install -c fvcore -c iopath -c conda-forge fvcore iopath
+pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py39_cu113_pyt1110/download.html
+
+# Install WHAM dependencies
+pip install -r requirements.txt
+
+# Install ViTPose
+pip install -v -e third-party/ViTPose
+
+# Install DPVO
+cd third-party/DPVO
+wget https://gitlab.com/libeigen/eigen/-/archive/3.4.0/eigen-3.4.0.zip
+unzip eigen-3.4.0.zip -d thirdparty && rm -rf eigen-3.4.0.zip
+conda install pytorch-scatter=2.0.9 -c rusty1s
+conda install cudatoolkit-dev=11.3.1 -c conda-forge
+
+# ONLY IF your GCC version is greater than 10
+conda install -c conda-forge gxx=9.5
+
+pip install .
+```
diff --git a/examples/IMG_9730.mov b/examples/IMG_9730.mov
new file mode 100644
index 0000000000000000000000000000000000000000..96e46dcccd85d1561f436db14563ae10103364b8
--- /dev/null
+++ b/examples/IMG_9730.mov
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3739b87ba0c64d047df3d8f5479c530377788fdab4c2283925477894a1d252f9
+size 21526220
diff --git a/examples/IMG_9731.mov b/examples/IMG_9731.mov
new file mode 100644
index 0000000000000000000000000000000000000000..4d409b0a40c04ffeccf55ee215737ca7cb2c14a3
--- /dev/null
+++ b/examples/IMG_9731.mov
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:116ad3f95743524283a234fd9e7a1152b28a04536ab5975f4e4e71c547d9e1a6
+size 22633328
diff --git a/examples/IMG_9732.mov b/examples/IMG_9732.mov
new file mode 100644
index 0000000000000000000000000000000000000000..7ba45a3d48213e3c52c46569131485c93c44429d
--- /dev/null
+++ b/examples/IMG_9732.mov
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:168773c92e0112361dcd1da4154c915983490e58ff89102c1a65edb28d505813
+size 23960355
diff --git a/examples/drone_calib.txt b/examples/drone_calib.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00052336e94e9f785a3ce455700a4bd888d213ce
--- /dev/null
+++ b/examples/drone_calib.txt
@@ -0,0 +1 @@
+1321.0 1321.0 960.0 540.0
\ No newline at end of file
diff --git a/examples/drone_video.mp4 b/examples/drone_video.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..bdcf1548b1ccff376f1d7f358ddd3ff9184ed7af
--- /dev/null
+++ b/examples/drone_video.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0da55210a305c3c75caa732c46b7330bb3d4e39ebeb9bc3af1e2b100dd8990c1
+size 20601030
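Editor's note on `examples/drone_calib.txt`: the single line holds the pinhole intrinsics passed to the SLAM module via `demo.py --calib`, presumably `fx fy cx cy` in pixels (here a 1920x1080 video with the principal point at the image center). A minimal sketch for writing such a file for your own camera; the helper name, file name, and values below are placeholders:

```python
# Hypothetical helper: write a WHAM/DPVO-style calibration file.
# Assumed format (one line): fx fy cx cy, all in pixels.
def write_calib(path, fx, fy, cx, cy):
    with open(path, 'w') as f:
        f.write(f'{fx} {fy} {cx} {cy}')

write_calib('examples/my_calib.txt', 1321.0, 1321.0, 960.0, 540.0)
# Then: python demo.py --video examples/drone_video.mp4 --calib examples/my_calib.txt
```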