@inproceedings{AMASS,
title = {{AMASS}: Archive of Motion Capture as Surface Shapes},
author = {Mahmood, Naureen and Ghorbani, Nima and Troje, Nikolaus F. and Pons-Moll, Gerard and Black, Michael J.},
booktitle = {International Conference on Computer Vision},
pages = {5442--5451},
month = oct,
year = {2019}
}
@misc{AMASS_ACCAD,
title = {{ACCAD MoCap Dataset}},
author = {{Advanced Computing Center for the Arts and Design}},
url = {https://accad.osu.edu/research/motion-lab/mocap-system-and-data}
}
@inproceedings{AMASS_BMLhandball,
author = {Helm, Fabian and Troje, Nikolaus and Reiser, Mathias and Munzert, Jörn},
title = {Bewegungsanalyse getäuschter und nicht-getäuschter 7m-Würfe im Handball [Movement Analysis of Deceptive and Non-Deceptive 7m Throws in Handball]},
booktitle = {47. Jahrestagung der Arbeitsgemeinschaft für Sportpsychologie, Freiburg},
month = jan,
year = {2015}
}
@article{AMASS_BMLmovi,
title = {{MoVi}: A Large Multipurpose Motion and Video Dataset},
author = {Saeed Ghorbani and Kimia Mahdaviani and Anne Thaler and Konrad Kording and Douglas James Cook and Gunnar Blohm and Nikolaus F. Troje},
year = {2020},
journal = {arXiv preprint arXiv:2003.01888}
}
@article{AMASS_BMLrub,
title = {Decomposing Biological Motion: {A} Framework for Analysis and Synthesis of Human Gait Patterns},
author = {Troje, Nikolaus F.},
year = {2002},
month = sep,
journal = {Journal of Vision},
volume = {2},
number = {5},
pages = {2--2},
doi = {10.1167/2.5.2}
}
@misc{AMASS_CMU,
title = {{CMU MoCap Dataset}},
author = {{Carnegie Mellon University}},
url = {http://mocap.cs.cmu.edu}
}
@article{AMASS_DanceDB,
author = {Aristidou, Andreas and Shamir, Ariel and Chrysanthou, Yiorgos},
title = {Digital Dance Ethnography: {O}rganizing Large Dance Collections},
journal = {J. Comput. Cult. Herit.},
issue_date = {January 2020},
volume = {12},
number = {4},
month = nov,
year = {2019},
issn = {1556-4673},
articleno = {29},
numpages = {27},
url = {https://doi.org/10.1145/3344383},
doi = {10.1145/3344383},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA}
}
@inproceedings{AMASS_DFaust,
title = {Dynamic {FAUST}: {R}egistering Human Bodies in Motion},
author = {Bogo, Federica and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.},
booktitle = {IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
month = jul,
year = {2017}
}
@misc{AMASS_EyesJapanDataset,
title = {{Eyes Japan MoCap Dataset}},
author = {{Eyes JAPAN Co. Ltd.}},
url = {http://mocapdata.com}
}
@inproceedings{AMASS_GRAB,
title = {{GRAB}: A Dataset of Whole-Body Human Grasping of Objects},
author = {Taheri, Omid and Ghorbani, Nima and Black, Michael J. and Tzionas, Dimitrios},
booktitle = {European Conference on Computer Vision (ECCV)},
year = {2020},
url = {https://grab.is.tue.mpg.de}
}
@inproceedings{AMASS_GRAB-2,
title = {{ContactDB}: Analyzing and Predicting Grasp Contact via Thermal Imaging},
author = {Brahmbhatt, Samarth and Ham, Cusuh and Kemp, Charles C. and Hays, James},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2019},
url = {https://contactdb.cc.gatech.edu}
}
@techreport{AMASS_HDM05,
author = {M. M\"{u}ller and T. R\"{o}der and M. Clausen and B. Eberhardt and B. Kr\"{u}ger and A. Weber},
title = {Documentation Mocap Database HDM05},
number = {CG-2007-2},
year = {2007},
month = jun,
institution = {Universit\"{a}t Bonn},
issn = {1610-8892}
}
@article{AMASS_HUMAN4D,
title = {{HUMAN4D}: A Human-Centric Multimodal Dataset for Motions and Immersive Media},
author = {Chatzitofis, Anargyros and Saroglou, Leonidas and Boutis, Prodromos and Drakoulis, Petros and Zioulis, Nikolaos and Subramanyam, Shishir and Kevelham, Bart and Charbonnier, Caecilia and Cesar, Pablo and Zarpalas, Dimitrios and others},
journal = {IEEE Access},
volume = {8},
pages = {176241--176262},
year = {2020},
publisher = {IEEE}
}
@article{AMASS_HumanEva,
title = {{HumanEva}: Synchronized video and motion capture dataset and baseline algorithm for evaluation of articulated human motion},
author = {Sigal, L. and Balan, A. and Black, M. J.},
journal = {International Journal of Computer Vision},
volume = {87},
number = {1},
pages = {4--27},
publisher = {Springer Netherlands},
month = mar,
year = {2010}
}
@inproceedings{AMASS_KIT-CNRS-EKUT-WEIZMANN,
author = {Christian Mandery and \"Omer Terlemez and Martin Do and Nikolaus Vahrenkamp and Tamim Asfour},
title = {The {KIT} Whole-Body Human Motion Database},
booktitle = {International Conference on Advanced Robotics (ICAR)},
pages = {329--336},
year = {2015}
}
@article{AMASS_KIT-CNRS-EKUT-WEIZMANN-2,
author = {Christian Mandery and \"Omer Terlemez and Martin Do and Nikolaus Vahrenkamp and Tamim Asfour},
title = {Unifying Representations and Large-Scale Whole-Body Motion Databases for Studying Human Motion},
pages = {796--809},
volume = {32},
number = {4},
journal = {IEEE Transactions on Robotics},
year = {2016}
}
@inproceedings{AMASS_KIT-CNRS-EKUT-WEIZMANN-3,
author = {Franziska Krebs and Andre Meixner and Isabel Patzer and Tamim Asfour},
title = {The {KIT} Bimanual Manipulation Dataset},
booktitle = {IEEE/RAS International Conference on Humanoid Robots (Humanoids)},
pages = {499--506},
year = {2021}
}
@inproceedings{AMASS_MOYO,
title = {{3D} Human Pose Estimation via Intuitive Physics},
author = {Tripathi, Shashank and M{\"u}ller, Lea and Huang, Chun-Hao P. and Taheri, Omid and Black, Michael J. and Tzionas, Dimitrios},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = jun,
year = {2023}
}
@article{AMASS_MoSh,
title = {{MoSh}: Motion and Shape Capture from Sparse Markers},
author = {Loper, Matthew M. and Mahmood, Naureen and Black, Michael J.},
address = {New York, NY, USA},
publisher = {ACM},
month = nov,
number = {6},
volume = {33},
pages = {220:1--220:13},
abstract = {Marker-based motion capture (mocap) is widely criticized as producing lifeless animations. We argue that important information about body surface motion is present in standard marker sets but is lost in extracting a skeleton. We demonstrate a new approach called MoSh (Motion and Shape capture), that automatically extracts this detail from mocap data. MoSh estimates body shape and pose together using sparse marker data by exploiting a parametric model of the human body. In contrast to previous work, MoSh solves for the marker locations relative to the body and estimates accurate body shape directly from the markers without the use of 3D scans; this effectively turns a mocap system into an approximate body scanner. MoSh is able to capture soft tissue motions directly from markers, by allowing body shape to vary over time. We evaluate the effect of different marker sets on pose and shape accuracy and propose a new sparse marker set for capturing soft-tissue motion. We illustrate MoSh by recovering body shape, pose, and soft-tissue motion from archival mocap data and using this to produce animations with subtlety and realism. We also show soft-tissue motion retargeting to new characters and show how to magnify the 3D deformations of soft tissue to create animations with appealing exaggerations.},
journal = {ACM Transactions on Graphics (Proc. SIGGRAPH Asia)},
url = {http://doi.acm.org/10.1145/2661229.2661273},
year = {2014},
doi = {10.1145/2661229.2661273}
}
@inproceedings{AMASS_PosePrior,
title = {Pose-Conditioned Joint Angle Limits for {3D} Human Pose Reconstruction},
author = {Akhter, Ijaz and Black, Michael J.},
booktitle = {IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
month = jun,
abstract = {The estimation of 3D human pose from 2D joint locations is central to many vision problems involving the analysis of people in images and video. To address the fact that the problem is inherently ill posed, many methods impose a prior over human poses. Unfortunately these priors admit invalid poses because they do not model how joint-limits vary with pose. Here we make two key contributions. First, we collected a motion capture dataset that explores a wide range of human poses. From this we learn a pose-dependent model of joint limits that forms our prior. The dataset and the prior will be made publicly available. Second, we define a general parameterization of body pose and a new, multistage, method to estimate 3D pose from 2D joint locations that uses an over-complete dictionary of human poses. Our method shows good generalization while avoiding impossible poses. We quantitatively compare our method with recent work and show state-of-the-art results on 2D to 3D pose estimation using the CMU mocap dataset. We also show superior results on manual annotations on real images and automatic part-based detections on the Leeds sports pose dataset.},
year = {2015}
}
@misc{AMASS_SFU,
title = {{SFU Motion Capture Database}},
author = {{Simon Fraser University} and {National University of Singapore}},
url = {http://mocap.cs.sfu.ca/}
}
@inproceedings{AMASS_SOMA,
title = {{SOMA}: Solving Optical Marker-Based MoCap Automatically},
author = {Ghorbani, Nima and Black, Michael J.},
booktitle = {Proc. International Conference on Computer Vision (ICCV)},
pages = {11117--11126},
month = oct,
year = {2021}
}
@inproceedings{AMASS_TCDHands,
author = {Ludovic Hoyet and Kenneth Ryall and Rachel McDonnell and Carol O'Sullivan},
title = {Sleight of Hand: Perception of Finger Motion from Reduced Marker Sets},
booktitle = {Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games},
year = {2012},
pages = {79--86},
doi = {10.1145/2159616.2159629}
}
@inproceedings{AMASS_TotalCapture,
author = {Trumble, Matt and Gilbert, Andrew and Malleson, Charles and Hilton, Adrian and Collomosse, John},
title = {{Total Capture}: 3D Human Pose Estimation Fusing Video and Inertial Sensors},
booktitle = {British Machine Vision Conference (BMVC)},
year = {2017}
}
@inproceedings{AMASS_WheelPoser,
title = {{WheelPoser}: Sparse-{IMU} Based Body Pose Estimation for Wheelchair Users},
author = {Li, Yunzhi and Mollyn, Vimal and Yuan, Kuang and Carrington, Patrick},
booktitle = {Proceedings of the 26th International ACM SIGACCESS Conference on Computers and Accessibility},
pages = {1--17},
year = {2024}
}