diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b9e4db9a67ad69c0499fe1ceb571dfd7c899081c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,18 @@
+data/*/*
+data/thuman*
+!data/tbfo.ttf
+__pycache__
+debug/
+log/
+.vscode
+!.gitignore
+force_push.sh
+.idea
+human_det/
+kaolin/
+neural_voxelization_layer/
+pytorch3d/
+results/
+gradio_cached_examples/
+gradio_queue.db
\ No newline at end of file
diff --git a/assets/garment_teaser.png b/assets/garment_teaser.png
new file mode 100644
index 0000000000000000000000000000000000000000..15f249d06f6ae0f8f69badad21189d3221122ae5
--- /dev/null
+++ b/assets/garment_teaser.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bf1fde8dcec40a5b50a5eb3ba6cdeefad344348271b9b087d9f327efc5db845
+size 593988
diff --git a/assets/intermediate_results.png b/assets/intermediate_results.png
new file mode 100644
index 0000000000000000000000000000000000000000..a10ee888465f36e04bf3f54ef2a99ab5056f54c0
--- /dev/null
+++ b/assets/intermediate_results.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2daa92446130e9bf410ba55889740537c68f9c51f1799f89f2575581870c0d80
+size 301248
diff --git a/assets/teaser.gif b/assets/teaser.gif
new file mode 100644
index 0000000000000000000000000000000000000000..e7d00f4bd21c4bde181111b863231c6e8e32963d
--- /dev/null
+++ b/assets/teaser.gif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0955111cbe83559ee8065b15dfed9f52da9e8190297c715d74d1a30cdee7cad5
+size 382289
diff --git a/assets/thumbnail.png b/assets/thumbnail.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7db2a9dac3b1bf4d2e0e57e30faea862df6916f
--- /dev/null
+++ b/assets/thumbnail.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5259d6e413242c63afe88027122eed783612ff9a9e48b9a9c51313f6bf66fb94
+size 51470
diff --git a/examples/22097467bffc92d4a5c4246f7d4edb75.png b/examples/22097467bffc92d4a5c4246f7d4edb75.png
new file mode 100644
index 0000000000000000000000000000000000000000..2664dc2840b0019c5e3d0a5a002448821efe3600
--- /dev/null
+++ b/examples/22097467bffc92d4a5c4246f7d4edb75.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f37625631d1cea79fca0c77d6a809e827f86d2ddc51515abaade0801b9ef1a57
+size 447838
diff --git a/examples/44c0f84c957b6b9bdf77662af5bb7078.png b/examples/44c0f84c957b6b9bdf77662af5bb7078.png
new file mode 100644
index 0000000000000000000000000000000000000000..10a7155d5ceeaf50ca94cadfc3fb87f6bf78f72e
--- /dev/null
+++ b/examples/44c0f84c957b6b9bdf77662af5bb7078.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5ccc3ff6e99b32fed04bdd8f72873e7d987e088e83bbb235152db0500fdc6dc
+size 181743
diff --git a/examples/5a6a25963db2f667441d5076972c207c.png b/examples/5a6a25963db2f667441d5076972c207c.png
new file mode 100644
index 0000000000000000000000000000000000000000..f1a9a17e3e508d31f3839c0142ae925dc93a2ee2
--- /dev/null
+++ b/examples/5a6a25963db2f667441d5076972c207c.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4e0773d094b45a7c496292e5352166d6f47e469c2c6101ffa9536e44007a4e3
+size 523414
diff --git a/examples/8da7ceb94669c2f65cbd28022e1f9876.png b/examples/8da7ceb94669c2f65cbd28022e1f9876.png
new file mode 100644
index 0000000000000000000000000000000000000000..33af4a270f4a1a61b9173aa941b7fb77b073e134
--- /dev/null
+++ b/examples/8da7ceb94669c2f65cbd28022e1f9876.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7be8a036e6f3d11db05f0c6a93de165dae4c2afc052d09f6660c43a0a0484e99
+size 286010
diff --git a/examples/923d65f767c85a42212cae13fba3750b.png b/examples/923d65f767c85a42212cae13fba3750b.png
new file mode 100644
index 0000000000000000000000000000000000000000..48ee4bcbe3eea59fdaf4728feac55adb16e33493
--- /dev/null
+++ b/examples/923d65f767c85a42212cae13fba3750b.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86f4eff6d64d036a91d193e2373a76fd6698b8a3cd8be01e65b96a742907838d
+size 772835
diff --git a/examples/c9856a2bc31846d684cbb965457fad59.png b/examples/c9856a2bc31846d684cbb965457fad59.png
new file mode 100644
index 0000000000000000000000000000000000000000..a78d43f3dd2a81f4d915d9292f1480ddd7dfc18e
--- /dev/null
+++ b/examples/c9856a2bc31846d684cbb965457fad59.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b97743cb85d8b2db10f86b5216a67f0df0ff84b71665d2be451dcd517c557fb6
+size 157434
diff --git a/examples/e1e7622af7074a022f5d96dc16672517.png b/examples/e1e7622af7074a022f5d96dc16672517.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e351d7818f5e7313827b6b88f10687a79010ad7
--- /dev/null
+++ b/examples/e1e7622af7074a022f5d96dc16672517.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:badb5a8c2d9591aa4c71915795cb3d229678cad3612f2ee36d399174de32004e
+size 651690
diff --git a/examples/fb9d20fdb93750584390599478ecf86e.png b/examples/fb9d20fdb93750584390599478ecf86e.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6771e483af896854f0e39f450026c3e966b7f9b
--- /dev/null
+++ b/examples/fb9d20fdb93750584390599478ecf86e.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae80334944bb3c9496565dbe28e0ec30d2150344b600b6aac5c917c8c6ef4f1f
+size 623131
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..7b897fb03e86f1e1f612aa787e7089fab523244c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,37 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.obj filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.glb filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f818dd424b6391845aaf5d0563c8b5726d056abd
--- /dev/null
+++ b/README.md
@@ -0,0 +1,14 @@
+---
+title: YoonaAvatar
+sdk: gradio
+emoji: đ„
+colorFrom: red
+colorTo: purple
+sdk_version: 3.2
+app_file: app.py
+pinned: false
+python_version: 3.8.13
+duplicated_from: YoonaAI/yoonaAvatarSpace
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd7a5d58975062a31af2b6eb2f95bc86fcb3a5df
--- /dev/null
+++ b/app.py
@@ -0,0 +1,144 @@
+# install
+
+
+import glob
+import gradio as gr
+import os
+import numpy as np
+
+import subprocess
+
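+# When this app runs on Hugging Face Spaces, the heavy CUDA dependencies (pyembree,
+# torch/cu113, kaolin, pytorch3d, neural_voxelization_layer) are installed at runtime below.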
+if os.getenv('SYSTEM') == 'spaces':
+ subprocess.run('pip install pyembree'.split())
+ subprocess.run(
+ 'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
+ subprocess.run(
+ 'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
+ subprocess.run(
+ 'pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl'.split())
+ subprocess.run(
+ 'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())
+
+from apps.infer import generate_model
+
+# running
+
+description = '''
+# ICON Clothed Human Digitization
+### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
+
+ The reconstruction + refinement + video rendering takes about 200 seconds per image. If an ERROR occurs, try "Submit Image" again.
+
+More
+#### Citation
+```
+@inproceedings{xiu2022icon,
+ title = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
+ author = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
+ booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+ month = {June},
+ year = {2022},
+ pages = {13296-13306}
+}
+```
+#### Acknowledgments:
+- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
+- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
+- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)
+#### Image Credits
+* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)
+#### Related works
+* [ICON @ MPI](https://icon.is.tue.mpg.de/)
+* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
+* [Phorhum @ Google](https://phorhum.github.io/)
+* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
+* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)
+
+'''
+
+
+def generate_image(seed, psi):
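+ # query the hosted StyleGAN-Human Space to synthesize a full-body image from (seed, psi)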
+ iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
+ img = iface(seed, psi)
+ return img
+
+
+model_types = ['ICON', 'PIFu', 'PaMIR']
+examples_names = glob.glob('examples/*.png')
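+# pair each example image with a randomly chosen reconstruction method (ICON weighted at 0.6)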
+examples_types = np.random.choice(
+ model_types, len(examples_names), p=[0.6, 0.2, 0.2])
+
+examples = [list(item) for item in zip(examples_names, examples_types)]
+
+with gr.Blocks() as demo:
+ gr.Markdown(description)
+
+ out_lst = []
+ with gr.Row():
+ with gr.Column():
+ with gr.Row():
+ with gr.Column():
+ seed = gr.inputs.Slider(
+ 0, 1000, step=1, default=0, label='Seed (For Image Generation)')
+ psi = gr.inputs.Slider(
+ 0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)')
+ radio_choice = gr.Radio(
+ model_types, label='Method (For Reconstruction)', value='ICON')
+ inp = gr.Image(type="filepath", label="Input Image")
+ with gr.Row():
+ btn_sample = gr.Button("Generate Image")
+ btn_submit = gr.Button("Submit Image")
+
+ gr.Examples(examples=examples,
+ inputs=[inp, radio_choice],
+ cache_examples=False,
+ fn=generate_model,
+ outputs=out_lst)
+
+ out_vid = gr.Video(
+ label="Image + Normal + SMPL Body + Clothed Human")
+ out_vid_download = gr.File(
+ label="Download Video, welcome share on Twitter with #ICON")
+
+ with gr.Column():
+ overlap_inp = gr.Image(
+ type="filepath", label="Image Normal Overlap")
+ out_final = gr.Model3D(
+ clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human")
+ out_final_download = gr.File(
+ label="Download clothed human mesh")
+ out_smpl = gr.Model3D(
+ clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body")
+ out_smpl_download = gr.File(label="Download SMPL body mesh")
+ out_smpl_npy_download = gr.File(label="Download SMPL params")
+
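+ # the order of out_lst must match the list returned by generate_model() in apps/infer.py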
+ out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download,
+ out_final, out_final_download, out_vid, out_vid_download, overlap_inp]
+
+ btn_submit.click(fn=generate_model, inputs=[
+ inp, radio_choice], outputs=out_lst)
+ btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)
+
+if __name__ == "__main__":
+
+ # demo.launch(debug=False, enable_queue=False,
+ # auth=(os.environ['USER'], os.environ['PASSWORD']),
+ # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
+
+ demo.launch(debug=True, enable_queue=True)
\ No newline at end of file
diff --git a/apps/ICON.py b/apps/ICON.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a88cfb1ebc9c7e59b40b69cf6662f4698e6b988
--- /dev/null
+++ b/apps/ICON.py
@@ -0,0 +1,735 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from lib.common.seg3d_lossless import Seg3dLossless
+from lib.dataset.Evaluator import Evaluator
+from lib.net import HGPIFuNet
+from lib.common.train_util import *
+from lib.common.render import Render
+from lib.dataset.mesh_util import SMPLX, update_mesh_shape_prior_losses, get_visibility
+import torch
+import lib.smplx as smplx
+import numpy as np
+from torch import nn
+from skimage.transform import resize
+import pytorch_lightning as pl
+
+torch.backends.cudnn.benchmark = True
+
+
+class ICON(pl.LightningModule):
+
+ def __init__(self, cfg):
+ super(ICON, self).__init__()
+
+ self.cfg = cfg
+ self.batch_size = self.cfg.batch_size
+ self.lr_G = self.cfg.lr_G
+
+ self.use_sdf = cfg.sdf
+ self.prior_type = cfg.net.prior_type
+ self.mcube_res = cfg.mcube_res
+ self.clean_mesh_flag = cfg.clean_mesh
+
+ self.netG = HGPIFuNet(
+ self.cfg,
+ self.cfg.projection_mode,
+ error_term=nn.SmoothL1Loss() if self.use_sdf else nn.MSELoss(),
+ )
+
+ self.evaluator = Evaluator(
+ device=torch.device(f"cuda:{self.cfg.gpus[0]}"))
+
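+ # coarse-to-fine grid resolutions (2^k + 1), e.g. 33, 65, 129, 257 for mcube_res = 256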
+ self.resolutions = (np.logspace(
+ start=5,
+ stop=np.log2(self.mcube_res),
+ base=2,
+ num=int(np.log2(self.mcube_res) - 4),
+ endpoint=True,
+ ) + 1.0)
+ self.resolutions = self.resolutions.astype(np.int16).tolist()
+
+ self.base_keys = ["smpl_verts", "smpl_faces"]
+ self.feat_names = self.cfg.net.smpl_feats
+
+ self.icon_keys = self.base_keys + [
+ f"smpl_{feat_name}" for feat_name in self.feat_names
+ ]
+ self.keypoint_keys = self.base_keys + [
+ f"smpl_{feat_name}" for feat_name in self.feat_names
+ ]
+ self.pamir_keys = [
+ "voxel_verts", "voxel_faces", "pad_v_num", "pad_f_num"
+ ]
+ self.pifu_keys = []
+
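+ # Seg3dLossless evaluates the occupancy field coarse-to-fine over the resolution list
+ # above instead of densely querying the full mcube_res^3 grid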
+ self.reconEngine = Seg3dLossless(
+ query_func=query_func,
+ b_min=[[-1.0, 1.0, -1.0]],
+ b_max=[[1.0, -1.0, 1.0]],
+ resolutions=self.resolutions,
+ align_corners=True,
+ balance_value=0.50,
+ device=torch.device(f"cuda:{self.cfg.test_gpus[0]}"),
+ visualize=False,
+ debug=False,
+ use_cuda_impl=False,
+ faster=True,
+ )
+
+ self.render = Render(
+ size=512, device=torch.device(f"cuda:{self.cfg.test_gpus[0]}"))
+ self.smpl_data = SMPLX()
+
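+ # helper that builds an SMPL(-X) body model on demand for a given type/gender/age/template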
+ self.get_smpl_model = lambda smpl_type, gender, age, v_template: smplx.create(
+ self.smpl_data.model_dir,
+ kid_template_path=osp.join(
+ osp.realpath(self.smpl_data.model_dir),
+ f"{smpl_type}/{smpl_type}_kid_template.npy",
+ ),
+ model_type=smpl_type,
+ gender=gender,
+ age=age,
+ v_template=v_template,
+ use_face_contour=False,
+ ext="pkl",
+ )
+
+ self.in_geo = [item[0] for item in cfg.net.in_geo]
+ self.in_nml = [item[0] for item in cfg.net.in_nml]
+ self.in_geo_dim = [item[1] for item in cfg.net.in_geo]
+ self.in_total = self.in_geo + self.in_nml
+ self.smpl_dim = cfg.net.smpl_dim
+
+ self.export_dir = None
+ self.result_eval = {}
+
+ def get_progress_bar_dict(self):
+ tqdm_dict = super().get_progress_bar_dict()
+ if "v_num" in tqdm_dict:
+ del tqdm_dict["v_num"]
+ return tqdm_dict
+
+ # Training related
+ def configure_optimizers(self):
+
+ # set optimizer
+ weight_decay = self.cfg.weight_decay
+ momentum = self.cfg.momentum
+
+ optim_params_G = [{
+ "params": self.netG.if_regressor.parameters(),
+ "lr": self.lr_G
+ }]
+
+ if self.cfg.net.use_filter:
+ optim_params_G.append({
+ "params": self.netG.F_filter.parameters(),
+ "lr": self.lr_G
+ })
+
+ if self.cfg.net.prior_type == "pamir":
+ optim_params_G.append({
+ "params": self.netG.ve.parameters(),
+ "lr": self.lr_G
+ })
+
+ if self.cfg.optim == "Adadelta":
+
+ optimizer_G = torch.optim.Adadelta(optim_params_G,
+ lr=self.lr_G,
+ weight_decay=weight_decay)
+
+ elif self.cfg.optim == "Adam":
+
+ optimizer_G = torch.optim.Adam(optim_params_G,
+ lr=self.lr_G,
+ weight_decay=weight_decay)
+
+ elif self.cfg.optim == "RMSprop":
+
+ optimizer_G = torch.optim.RMSprop(
+ optim_params_G,
+ lr=self.lr_G,
+ weight_decay=weight_decay,
+ momentum=momentum,
+ )
+
+ else:
+ raise NotImplementedError
+
+ # set scheduler
+ scheduler_G = torch.optim.lr_scheduler.MultiStepLR(
+ optimizer_G, milestones=self.cfg.schedule, gamma=self.cfg.gamma)
+
+ return [optimizer_G], [scheduler_G]
+
+ def training_step(self, batch, batch_idx):
+
+ if not self.cfg.fast_dev:
+ export_cfg(self.logger, self.cfg)
+
+ self.netG.train()
+
+ in_tensor_dict = {
+ "sample": batch["samples_geo"].permute(0, 2, 1),
+ "calib": batch["calib"],
+ "label": batch["labels_geo"].unsqueeze(1),
+ }
+
+ for name in self.in_total:
+ in_tensor_dict.update({name: batch[name]})
+
+ in_tensor_dict.update({
+ k: batch[k] if k in batch.keys() else None
+ for k in getattr(self, f"{self.prior_type}_keys")
+ })
+
+ preds_G, error_G = self.netG(in_tensor_dict)
+
+ acc, iou, prec, recall = self.evaluator.calc_acc(
+ preds_G.flatten(),
+ in_tensor_dict["label"].flatten(),
+ 0.5,
+ use_sdf=self.cfg.sdf,
+ )
+
+ # metrics processing
+ metrics_log = {
+ "train_loss": error_G.item(),
+ "train_acc": acc.item(),
+ "train_iou": iou.item(),
+ "train_prec": prec.item(),
+ "train_recall": recall.item(),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+ bar_log = bar_log_convert(metrics_log)
+
+ if batch_idx % int(self.cfg.freq_show_train) == 0:
+
+ with torch.no_grad():
+ self.render_func(in_tensor_dict, dataset="train")
+
+ metrics_return = {
+ k.replace("train_", ""): torch.tensor(v)
+ for k, v in metrics_log.items()
+ }
+
+ metrics_return.update({
+ "loss": error_G,
+ "log": tf_log,
+ "progress_bar": bar_log
+ })
+
+ return metrics_return
+
+ def training_epoch_end(self, outputs):
+
+ if [] in outputs:
+ outputs = outputs[0]
+
+ # metrics processing
+ metrics_log = {
+ "train_avgloss": batch_mean(outputs, "loss"),
+ "train_avgiou": batch_mean(outputs, "iou"),
+ "train_avgprec": batch_mean(outputs, "prec"),
+ "train_avgrecall": batch_mean(outputs, "recall"),
+ "train_avgacc": batch_mean(outputs, "acc"),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+
+ return {"log": tf_log}
+
+ def validation_step(self, batch, batch_idx):
+
+ self.netG.eval()
+ self.netG.training = False
+
+ in_tensor_dict = {
+ "sample": batch["samples_geo"].permute(0, 2, 1),
+ "calib": batch["calib"],
+ "label": batch["labels_geo"].unsqueeze(1),
+ }
+
+ for name in self.in_total:
+ in_tensor_dict.update({name: batch[name]})
+
+ in_tensor_dict.update({
+ k: batch[k] if k in batch.keys() else None
+ for k in getattr(self, f"{self.prior_type}_keys")
+ })
+
+ preds_G, error_G = self.netG(in_tensor_dict)
+
+ acc, iou, prec, recall = self.evaluator.calc_acc(
+ preds_G.flatten(),
+ in_tensor_dict["label"].flatten(),
+ 0.5,
+ use_sdf=self.cfg.sdf,
+ )
+
+ if batch_idx % int(self.cfg.freq_show_val) == 0:
+ with torch.no_grad():
+ self.render_func(in_tensor_dict, dataset="val", idx=batch_idx)
+
+ metrics_return = {
+ "val_loss": error_G,
+ "val_acc": acc,
+ "val_iou": iou,
+ "val_prec": prec,
+ "val_recall": recall,
+ }
+
+ return metrics_return
+
+ def validation_epoch_end(self, outputs):
+
+ # metrics processing
+ metrics_log = {
+ "val_avgloss": batch_mean(outputs, "val_loss"),
+ "val_avgacc": batch_mean(outputs, "val_acc"),
+ "val_avgiou": batch_mean(outputs, "val_iou"),
+ "val_avgprec": batch_mean(outputs, "val_prec"),
+ "val_avgrecall": batch_mean(outputs, "val_recall"),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+
+ return {"log": tf_log}
+
+ def compute_vis_cmap(self, smpl_type, smpl_verts, smpl_faces):
+
+ (xy, z) = torch.as_tensor(smpl_verts).split([2, 1], dim=1)
+ smpl_vis = get_visibility(xy, -z, torch.as_tensor(smpl_faces).long())
+ smpl_cmap = self.smpl_data.cmap_smpl_vids(smpl_type)
+
+ return {
+ "smpl_vis": smpl_vis.unsqueeze(0).to(self.device),
+ "smpl_cmap": smpl_cmap.unsqueeze(0).to(self.device),
+ "smpl_verts": smpl_verts.unsqueeze(0),
+ }
+
+ @torch.enable_grad()
+ def optim_body(self, in_tensor_dict, batch):
+
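+ # refine SMPL pose/shape/translation so that the rendered body normals and silhouette
+ # match the normal maps predicted from the input image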
+ smpl_model = self.get_smpl_model(batch["type"][0], batch["gender"][0],
+ batch["age"][0], None).to(self.device)
+ in_tensor_dict["smpl_faces"] = (torch.tensor(
+ smpl_model.faces.astype(np.int)).long().unsqueeze(0).to(
+ self.device))
+
+ # The optimizer and variables
+ optimed_pose = torch.tensor(batch["body_pose"][0],
+ device=self.device,
+ requires_grad=True) # [1,23,3,3]
+ optimed_trans = torch.tensor(batch["transl"][0],
+ device=self.device,
+ requires_grad=True) # [3]
+ optimed_betas = torch.tensor(batch["betas"][0],
+ device=self.device,
+ requires_grad=True) # [1,10]
+ optimed_orient = torch.tensor(batch["global_orient"][0],
+ device=self.device,
+ requires_grad=True) # [1,1,3,3]
+
+ optimizer_smpl = torch.optim.SGD(
+ [optimed_pose, optimed_trans, optimed_betas, optimed_orient],
+ lr=1e-3,
+ momentum=0.9,
+ )
+ scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau(
+ optimizer_smpl,
+ mode="min",
+ factor=0.5,
+ verbose=0,
+ min_lr=1e-5,
+ patience=5)
+ loop_smpl = range(50)
+ for i in loop_smpl:
+
+ optimizer_smpl.zero_grad()
+
+ # prior_loss, optimed_pose = dataset.vposer_prior(optimed_pose)
+ smpl_out = smpl_model(
+ betas=optimed_betas,
+ body_pose=optimed_pose,
+ global_orient=optimed_orient,
+ transl=optimed_trans,
+ return_verts=True,
+ )
+
+ smpl_verts = smpl_out.vertices[0] * 100.0
+ smpl_verts = projection(smpl_verts,
+ batch["calib"][0],
+ format="tensor")
+ smpl_verts[:, 1] *= -1
+ # render optimized mesh (normal, T_normal, image [-1,1])
+ self.render.load_meshes(smpl_verts, in_tensor_dict["smpl_faces"])
+ (
+ in_tensor_dict["T_normal_F"],
+ in_tensor_dict["T_normal_B"],
+ ) = self.render.get_rgb_image()
+
+ T_mask_F, T_mask_B = self.render.get_silhouette_image()
+
+ with torch.no_grad():
+ (
+ in_tensor_dict["normal_F"],
+ in_tensor_dict["normal_B"],
+ ) = self.netG.normal_filter(in_tensor_dict)
+
+ # mask = torch.abs(in_tensor['T_normal_F']).sum(dim=0, keepdims=True) > 0.0
+ diff_F_smpl = torch.abs(in_tensor_dict["T_normal_F"] -
+ in_tensor_dict["normal_F"])
+ diff_B_smpl = torch.abs(in_tensor_dict["T_normal_B"] -
+ in_tensor_dict["normal_B"])
+ loss = (diff_F_smpl + diff_B_smpl).mean()
+
+ # silhouette loss
+ smpl_arr = torch.cat([T_mask_F, T_mask_B], dim=-1)[0]
+ gt_arr = torch.cat(
+ [in_tensor_dict["normal_F"][0], in_tensor_dict["normal_B"][0]],
+ dim=2).permute(1, 2, 0)
+ gt_arr = ((gt_arr + 1.0) * 0.5).to(self.device)
+ bg_color = (torch.Tensor(
+ [0.5, 0.5, 0.5]).unsqueeze(0).unsqueeze(0).to(self.device))
+ gt_arr = ((gt_arr - bg_color).sum(dim=-1) != 0.0).float()
+ loss += torch.abs(smpl_arr - gt_arr).mean()
+
+ # Image.fromarray(((in_tensor_dict['T_normal_F'][0].permute(1,2,0)+1.0)*0.5*255.0).detach().cpu().numpy().astype(np.uint8)).show()
+
+ # loop_smpl.set_description(f"smpl = {loss:.3f}")
+
+ loss.backward(retain_graph=True)
+ optimizer_smpl.step()
+ scheduler_smpl.step(loss)
+ in_tensor_dict["smpl_verts"] = smpl_verts.unsqueeze(0)
+
+ in_tensor_dict.update(
+ self.compute_vis_cmap(
+ batch["type"][0],
+ in_tensor_dict["smpl_verts"][0],
+ in_tensor_dict["smpl_faces"][0],
+ ))
+
+ features, inter = self.netG.filter(in_tensor_dict, return_inter=True)
+
+ return features, inter, in_tensor_dict
+
+ @torch.enable_grad()
+ def optim_cloth(self, verts_pr, faces_pr, inter):
+
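+ # refine the reconstructed mesh: optimize per-vertex displacements so that rendered
+ # normals match the predicted normal maps, regularized by edge/normal/laplacian terms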
+ # convert from GT to SDF
+ verts_pr -= (self.resolutions[-1] - 1) / 2.0
+ verts_pr /= (self.resolutions[-1] - 1) / 2.0
+
+ losses = {
+ "cloth": {
+ "weight": 5.0,
+ "value": 0.0
+ },
+ "edge": {
+ "weight": 100.0,
+ "value": 0.0
+ },
+ "normal": {
+ "weight": 0.2,
+ "value": 0.0
+ },
+ "laplacian": {
+ "weight": 100.0,
+ "value": 0.0
+ },
+ "smpl": {
+ "weight": 1.0,
+ "value": 0.0
+ },
+ "deform": {
+ "weight": 20.0,
+ "value": 0.0
+ },
+ }
+
+ deform_verts = torch.full(verts_pr.shape,
+ 0.0,
+ device=self.device,
+ requires_grad=True)
+ optimizer_cloth = torch.optim.SGD([deform_verts],
+ lr=1e-1,
+ momentum=0.9)
+ scheduler_cloth = torch.optim.lr_scheduler.ReduceLROnPlateau(
+ optimizer_cloth,
+ mode="min",
+ factor=0.1,
+ verbose=0,
+ min_lr=1e-3,
+ patience=5)
+ # cloth optimization
+ loop_cloth = range(100)
+
+ for i in loop_cloth:
+
+ optimizer_cloth.zero_grad()
+
+ self.render.load_meshes(
+ verts_pr.unsqueeze(0).to(self.device),
+ faces_pr.unsqueeze(0).to(self.device).long(),
+ deform_verts,
+ )
+ P_normal_F, P_normal_B = self.render.get_rgb_image()
+
+ update_mesh_shape_prior_losses(self.render.mesh, losses)
+ diff_F_cloth = torch.abs(P_normal_F[0] - inter[:3])
+ diff_B_cloth = torch.abs(P_normal_B[0] - inter[3:])
+ losses["cloth"]["value"] = (diff_F_cloth + diff_B_cloth).mean()
+ losses["deform"]["value"] = torch.topk(
+ torch.abs(deform_verts.flatten()), 30)[0].mean()
+
+ # Weighted sum of the losses
+ cloth_loss = torch.tensor(0.0, device=self.device)
+ pbar_desc = ""
+
+ for k in losses.keys():
+ if k != "smpl":
+ cloth_loss_per_cls = losses[k]["value"] * \
+ losses[k]["weight"]
+ pbar_desc += f"{k}: {cloth_loss_per_cls:.3f} | "
+ cloth_loss += cloth_loss_per_cls
+
+ # loop_cloth.set_description(pbar_desc)
+ cloth_loss.backward(retain_graph=True)
+ optimizer_cloth.step()
+ scheduler_cloth.step(cloth_loss)
+
+ # convert from GT to SDF
+ deform_verts = deform_verts.flatten().detach()
+ deform_verts[torch.topk(torch.abs(deform_verts),
+ 30)[1]] = deform_verts.mean()
+ deform_verts = deform_verts.view(-1, 3).cpu()
+
+ verts_pr += deform_verts
+ verts_pr *= (self.resolutions[-1] - 1) / 2.0
+ verts_pr += (self.resolutions[-1] - 1) / 2.0
+
+ return verts_pr
+
+ def test_step(self, batch, batch_idx):
+
+ self.netG.eval()
+ self.netG.training = False
+ in_tensor_dict = {}
+
+ # export paths
+ mesh_name = batch["subject"][0]
+ mesh_rot = batch["rotation"][0].item()
+
+ self.export_dir = osp.join(self.cfg.results_path, self.cfg.name,
+ "-".join(self.cfg.dataset.types), mesh_name)
+
+ os.makedirs(self.export_dir, exist_ok=True)
+
+ for name in self.in_total:
+ if name in batch.keys():
+ in_tensor_dict.update({name: batch[name]})
+
+ in_tensor_dict.update({
+ k: batch[k] if k in batch.keys() else None
+ for k in getattr(self, f"{self.prior_type}_keys")
+ })
+
+ if "T_normal_F" not in in_tensor_dict.keys(
+ ) or "T_normal_B" not in in_tensor_dict.keys():
+
+ # update the new T_normal_F/B
+ self.render.load_meshes(
+ batch["smpl_verts"] *
+ torch.tensor([1.0, -1.0, 1.0]).to(self.device),
+ batch["smpl_faces"])
+ T_normal_F, T_normal_B = self.render.get_rgb_image()
+ in_tensor_dict.update({
+ 'T_normal_F': T_normal_F,
+ 'T_normal_B': T_normal_B
+ })
+
+ with torch.no_grad():
+ features, inter = self.netG.filter(in_tensor_dict,
+ return_inter=True)
+ sdf = self.reconEngine(opt=self.cfg,
+ netG=self.netG,
+ features=features,
+ proj_matrix=None)
+
+ def tensor2arr(x):
+ return (x[0].permute(1, 2, 0).detach().cpu().numpy() +
+ 1.0) * 0.5 * 255.0
+
+ # save inter results
+ image = tensor2arr(in_tensor_dict["image"])
+ smpl_F = tensor2arr(in_tensor_dict["T_normal_F"])
+ smpl_B = tensor2arr(in_tensor_dict["T_normal_B"])
+ image_inter = np.concatenate(self.tensor2image(512, inter[0]) +
+ [smpl_F, smpl_B, image],
+ axis=1)
+ Image.fromarray((image_inter).astype(np.uint8)).save(
+ osp.join(self.export_dir, f"{mesh_rot}_inter.png"))
+
+ verts_pr, faces_pr = self.reconEngine.export_mesh(sdf)
+
+ if self.clean_mesh_flag:
+ verts_pr, faces_pr = clean_mesh(verts_pr, faces_pr)
+
+ verts_gt = batch["verts"][0]
+ faces_gt = batch["faces"][0]
+
+ self.result_eval.update({
+ "verts_gt": verts_gt,
+ "faces_gt": faces_gt,
+ "verts_pr": verts_pr,
+ "faces_pr": faces_pr,
+ "recon_size": (self.resolutions[-1] - 1.0),
+ "calib": batch["calib"][0],
+ })
+
+ self.evaluator.set_mesh(self.result_eval)
+ chamfer, p2s = self.evaluator.calculate_chamfer_p2s(num_samples=1000)
+ normal_consist = self.evaluator.calculate_normal_consist(
+ osp.join(self.export_dir, f"{mesh_rot}_nc.png"))
+
+ test_log = {"chamfer": chamfer, "p2s": p2s, "NC": normal_consist}
+
+ return test_log
+
+ def test_epoch_end(self, outputs):
+
+ # make_test_gif("/".join(self.export_dir.split("/")[:-2]))
+
+ accu_outputs = accumulate(
+ outputs,
+ rot_num=3,
+ split={
+ "cape-easy": (0, 50),
+ "cape-hard": (50, 100)
+ },
+ )
+
+ print(colored(self.cfg.name, "green"))
+ print(colored(self.cfg.dataset.noise_scale, "green"))
+
+ self.logger.experiment.add_hparams(
+ hparam_dict={
+ "lr_G": self.lr_G,
+ "bsize": self.batch_size
+ },
+ metric_dict=accu_outputs,
+ )
+
+ np.save(
+ osp.join(self.export_dir, "../test_results.npy"),
+ accu_outputs,
+ allow_pickle=True,
+ )
+
+ return accu_outputs
+
+ def tensor2image(self, height, inter):
+
+ all = []
+ for dim in self.in_geo_dim:
+ img = resize(
+ np.tile(
+ ((inter[:dim].cpu().numpy() + 1.0) / 2.0 *
+ 255.0).transpose(1, 2, 0),
+ (1, 1, int(3 / dim)),
+ ),
+ (height, height),
+ anti_aliasing=True,
+ )
+
+ all.append(img)
+ inter = inter[dim:]
+
+ return all
+
+ def render_func(self, in_tensor_dict, dataset="title", idx=0):
+
+ for name in in_tensor_dict.keys():
+ if in_tensor_dict[name] is not None:
+ in_tensor_dict[name] = in_tensor_dict[name][0:1]
+
+ self.netG.eval()
+ features, inter = self.netG.filter(in_tensor_dict, return_inter=True)
+ sdf = self.reconEngine(opt=self.cfg,
+ netG=self.netG,
+ features=features,
+ proj_matrix=None)
+
+ if sdf is not None:
+ render = self.reconEngine.display(sdf)
+
+ image_pred = np.flip(render[:, :, ::-1], axis=0)
+ height = image_pred.shape[0]
+
+ image_gt = resize(
+ ((in_tensor_dict["image"].cpu().numpy()[0] + 1.0) / 2.0 *
+ 255.0).transpose(1, 2, 0),
+ (height, height),
+ anti_aliasing=True,
+ )
+ image_inter = self.tensor2image(height, inter[0])
+ image = np.concatenate([image_pred, image_gt] + image_inter,
+ axis=1)
+
+ step_id = self.global_step if dataset == "train" else self.global_step + idx
+ self.logger.experiment.add_image(
+ tag=f"Occupancy-{dataset}/{step_id}",
+ img_tensor=image.transpose(2, 0, 1),
+ global_step=step_id,
+ )
+
+ def test_single(self, batch):
+
+ self.netG.eval()
+ self.netG.training = False
+ in_tensor_dict = {}
+
+ for name in self.in_total:
+ if name in batch.keys():
+ in_tensor_dict.update({name: batch[name]})
+
+ in_tensor_dict.update({
+ k: batch[k] if k in batch.keys() else None
+ for k in getattr(self, f"{self.prior_type}_keys")
+ })
+
+ with torch.no_grad():
+ features, inter = self.netG.filter(in_tensor_dict,
+ return_inter=True)
+ sdf = self.reconEngine(opt=self.cfg,
+ netG=self.netG,
+ features=features,
+ proj_matrix=None)
+
+ verts_pr, faces_pr = self.reconEngine.export_mesh(sdf)
+
+ if self.clean_mesh_flag:
+ verts_pr, faces_pr = clean_mesh(verts_pr, faces_pr)
+
+ verts_pr -= (self.resolutions[-1] - 1) / 2.0
+ verts_pr /= (self.resolutions[-1] - 1) / 2.0
+
+ return verts_pr, faces_pr, inter
\ No newline at end of file
diff --git a/apps/Normal.py b/apps/Normal.py
new file mode 100644
index 0000000000000000000000000000000000000000..2838f18ce3ec39d3e5d4ffe6cd1e46b3f08e58e1
--- /dev/null
+++ b/apps/Normal.py
@@ -0,0 +1,220 @@
+from lib.net import NormalNet
+from lib.common.train_util import *
+import logging
+import torch
+import numpy as np
+from torch import nn
+from skimage.transform import resize
+import pytorch_lightning as pl
+
+torch.backends.cudnn.benchmark = True
+
+logging.getLogger("lightning").setLevel(logging.ERROR)
+import warnings
+
+warnings.filterwarnings("ignore")
+
+
+class Normal(pl.LightningModule):
+
+ def __init__(self, cfg):
+ super(Normal, self).__init__()
+ self.cfg = cfg
+ self.batch_size = self.cfg.batch_size
+ self.lr_N = self.cfg.lr_N
+
+ self.schedulers = []
+
+ self.netG = NormalNet(self.cfg, error_term=nn.SmoothL1Loss())
+
+ self.in_nml = [item[0] for item in cfg.net.in_nml]
+
+ def get_progress_bar_dict(self):
+ tqdm_dict = super().get_progress_bar_dict()
+ if "v_num" in tqdm_dict:
+ del tqdm_dict["v_num"]
+ return tqdm_dict
+
+ # Training related
+ def configure_optimizers(self):
+
+ # set optimizer
+ weight_decay = self.cfg.weight_decay
+ momentum = self.cfg.momentum
+
+ optim_params_N_F = [{
+ "params": self.netG.netF.parameters(),
+ "lr": self.lr_N
+ }]
+ optim_params_N_B = [{
+ "params": self.netG.netB.parameters(),
+ "lr": self.lr_N
+ }]
+
+ optimizer_N_F = torch.optim.Adam(optim_params_N_F,
+ lr=self.lr_N,
+ weight_decay=weight_decay)
+
+ optimizer_N_B = torch.optim.Adam(optim_params_N_B,
+ lr=self.lr_N,
+ weight_decay=weight_decay)
+
+ scheduler_N_F = torch.optim.lr_scheduler.MultiStepLR(
+ optimizer_N_F, milestones=self.cfg.schedule, gamma=self.cfg.gamma)
+
+ scheduler_N_B = torch.optim.lr_scheduler.MultiStepLR(
+ optimizer_N_B, milestones=self.cfg.schedule, gamma=self.cfg.gamma)
+
+ self.schedulers = [scheduler_N_F, scheduler_N_B]
+ optims = [optimizer_N_F, optimizer_N_B]
+
+ return optims, self.schedulers
+
+ def render_func(self, render_tensor):
+
+ height = render_tensor["image"].shape[2]
+ result_list = []
+
+ for name in render_tensor.keys():
+ result_list.append(
+ resize(
+ ((render_tensor[name].cpu().numpy()[0] + 1.0) /
+ 2.0).transpose(1, 2, 0),
+ (height, height),
+ anti_aliasing=True,
+ ))
+ result_array = np.concatenate(result_list, axis=1)
+
+ return result_array
+
+ def training_step(self, batch, batch_idx, optimizer_idx):
+
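+ # front (netF) and back (netB) normal networks are updated separately via manual backward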
+ export_cfg(self.logger, self.cfg)
+
+ # retrieve the data
+ in_tensor = {}
+ for name in self.in_nml:
+ in_tensor[name] = batch[name]
+
+ FB_tensor = {
+ "normal_F": batch["normal_F"],
+ "normal_B": batch["normal_B"]
+ }
+
+ self.netG.train()
+
+ preds_F, preds_B = self.netG(in_tensor)
+ error_NF, error_NB = self.netG.get_norm_error(preds_F, preds_B,
+ FB_tensor)
+
+ (opt_nf, opt_nb) = self.optimizers()
+
+ opt_nf.zero_grad()
+ opt_nb.zero_grad()
+
+ self.manual_backward(error_NF, opt_nf)
+ self.manual_backward(error_NB, opt_nb)
+
+ opt_nf.step()
+ opt_nb.step()
+
+ if batch_idx > 0 and batch_idx % int(self.cfg.freq_show_train) == 0:
+
+ self.netG.eval()
+ with torch.no_grad():
+ nmlF, nmlB = self.netG(in_tensor)
+ in_tensor.update({"nmlF": nmlF, "nmlB": nmlB})
+ result_array = self.render_func(in_tensor)
+
+ self.logger.experiment.add_image(
+ tag=f"Normal-train/{self.global_step}",
+ img_tensor=result_array.transpose(2, 0, 1),
+ global_step=self.global_step,
+ )
+
+ # metrics processing
+ metrics_log = {
+ "train_loss-NF": error_NF.item(),
+ "train_loss-NB": error_NB.item(),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+ bar_log = bar_log_convert(metrics_log)
+
+ return {
+ "loss": error_NF + error_NB,
+ "loss-NF": error_NF,
+ "loss-NB": error_NB,
+ "log": tf_log,
+ "progress_bar": bar_log,
+ }
+
+ def training_epoch_end(self, outputs):
+
+ if [] in outputs:
+ outputs = outputs[0]
+
+ # metrics processing
+ metrics_log = {
+ "train_avgloss": batch_mean(outputs, "loss"),
+ "train_avgloss-NF": batch_mean(outputs, "loss-NF"),
+ "train_avgloss-NB": batch_mean(outputs, "loss-NB"),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+
+ tf_log["lr-NF"] = self.schedulers[0].get_last_lr()[0]
+ tf_log["lr-NB"] = self.schedulers[1].get_last_lr()[0]
+
+ return {"log": tf_log}
+
+ def validation_step(self, batch, batch_idx):
+
+ # retrieve the data
+ in_tensor = {}
+ for name in self.in_nml:
+ in_tensor[name] = batch[name]
+
+ FB_tensor = {
+ "normal_F": batch["normal_F"],
+ "normal_B": batch["normal_B"]
+ }
+
+ self.netG.train()
+
+ preds_F, preds_B = self.netG(in_tensor)
+ error_NF, error_NB = self.netG.get_norm_error(preds_F, preds_B,
+ FB_tensor)
+
+ if (batch_idx > 0 and batch_idx % int(self.cfg.freq_show_train)
+ == 0) or (batch_idx == 0):
+
+ with torch.no_grad():
+ nmlF, nmlB = self.netG(in_tensor)
+ in_tensor.update({"nmlF": nmlF, "nmlB": nmlB})
+ result_array = self.render_func(in_tensor)
+
+ self.logger.experiment.add_image(
+ tag=f"Normal-val/{self.global_step}",
+ img_tensor=result_array.transpose(2, 0, 1),
+ global_step=self.global_step,
+ )
+
+ return {
+ "val_loss": error_NF + error_NB,
+ "val_loss-NF": error_NF,
+ "val_loss-NB": error_NB,
+ }
+
+ def validation_epoch_end(self, outputs):
+
+ # metrics processing
+ metrics_log = {
+ "val_avgloss": batch_mean(outputs, "val_loss"),
+ "val_avgloss-NF": batch_mean(outputs, "val_loss-NF"),
+ "val_avgloss-NB": batch_mean(outputs, "val_loss-NB"),
+ }
+
+ tf_log = tf_log_convert(metrics_log)
+
+ return {"log": tf_log}
diff --git a/apps/infer.py b/apps/infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7842f2bf67762dcc55dbb5988f9b8e697e70053e
--- /dev/null
+++ b/apps/infer.py
@@ -0,0 +1,492 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os
+import gc
+
+import logging
+from lib.common.config import cfg
+from lib.dataset.mesh_util import (
+ load_checkpoint,
+ update_mesh_shape_prior_losses,
+ blend_rgb_norm,
+ unwrap,
+ remesh,
+ tensor2variable,
+ rot6d_to_rotmat
+)
+
+from lib.dataset.TestDataset import TestDataset
+from lib.common.render import query_color
+from lib.net.local_affine import LocalAffine
+from pytorch3d.structures import Meshes
+from apps.ICON import ICON
+
+from termcolor import colored
+import numpy as np
+from PIL import Image
+import trimesh
+from tqdm import tqdm
+
+import torch
+torch.backends.cudnn.benchmark = True
+
+logging.getLogger("trimesh").setLevel(logging.ERROR)
+
+
+def generate_model(in_path, model_type):
+
+ torch.cuda.empty_cache()
+
+ if model_type == 'ICON':
+ model_type = 'icon-filter'
+ else:
+ model_type = model_type.lower()
+
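+ # inference settings: SMPL fitting and cloth refinement iterations, scheduler patience,
+ # output directory, HPS backbone, and the model config to load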
+ config_dict = {'loop_smpl': 100,
+ 'loop_cloth': 200,
+ 'patience': 5,
+ 'out_dir': './results',
+ 'hps_type': 'pymaf',
+ 'config': f"./configs/{model_type}.yaml"}
+
+ # cfg read and merge
+ cfg.merge_from_file(config_dict['config'])
+ cfg.merge_from_file("./lib/pymaf/configs/pymaf_config.yaml")
+
+ os.makedirs(config_dict['out_dir'], exist_ok=True)
+
+ cfg_show_list = [
+ "test_gpus",
+ [0],
+ "mcube_res",
+ 256,
+ "clean_mesh",
+ True,
+ ]
+
+ cfg.merge_from_list(cfg_show_list)
+ cfg.freeze()
+
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+ device = torch.device(f"cuda:0")
+
+ # load model and dataloader
+ model = ICON(cfg)
+ model = load_checkpoint(model, cfg)
+
+ dataset_param = {
+ 'image_path': in_path,
+ 'seg_dir': None,
+ 'has_det': True, # w/ or w/o detection
+ 'hps_type': 'pymaf' # pymaf/pare/pixie
+ }
+
+ if config_dict['hps_type'] == "pixie" and "pamir" in config_dict['config']:
+ print(colored("PIXIE isn't compatible with PaMIR, thus switch to PyMAF", "red"))
+ dataset_param["hps_type"] = "pymaf"
+
+ dataset = TestDataset(dataset_param, device)
+
+ print(colored(f"Dataset Size: {len(dataset)}", "green"))
+
+ pbar = tqdm(dataset)
+
+ for data in pbar:
+
+ pbar.set_description(f"{data['name']}")
+
+ in_tensor = {"smpl_faces": data["smpl_faces"], "image": data["image"]}
+
+ # The optimizer and variables
+ optimed_pose = torch.tensor(
+ data["body_pose"], device=device, requires_grad=True
+ ) # [1,23,3,3]
+ optimed_trans = torch.tensor(
+ data["trans"], device=device, requires_grad=True
+ ) # [3]
+ optimed_betas = torch.tensor(
+ data["betas"], device=device, requires_grad=True
+ ) # [1,10]
+ optimed_orient = torch.tensor(
+ data["global_orient"], device=device, requires_grad=True
+ ) # [1,1,3,3]
+
+ optimizer_smpl = torch.optim.Adam(
+ [optimed_pose, optimed_trans, optimed_betas, optimed_orient],
+ lr=1e-3,
+ amsgrad=True,
+ )
+ scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau(
+ optimizer_smpl,
+ mode="min",
+ factor=0.5,
+ verbose=0,
+ min_lr=1e-5,
+ patience=config_dict['patience'],
+ )
+
+ losses = {
+ # Cloth: Normal_recon - Normal_pred
+ "cloth": {"weight": 1e1, "value": 0.0},
+ # Cloth: [RT]_v1 - [RT]_v2 (v1-edge-v2)
+ "stiffness": {"weight": 1e5, "value": 0.0},
+ # Cloth: det(R) = 1
+ "rigid": {"weight": 1e5, "value": 0.0},
+ # Cloth: edge length
+ "edge": {"weight": 0, "value": 0.0},
+ # Cloth: normal consistency
+ "nc": {"weight": 0, "value": 0.0},
+ # Cloth: laplacian smoothness
+ "laplacian": {"weight": 1e2, "value": 0.0},
+ # Body: Normal_pred - Normal_smpl
+ "normal": {"weight": 1e0, "value": 0.0},
+ # Body: Silhouette_pred - Silhouette_smpl
+ "silhouette": {"weight": 1e0, "value": 0.0},
+ }
+
+ # smpl optimization
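+ # fit the SMPL body to the image by minimizing normal-map and silhouette differences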
+
+ loop_smpl = tqdm(range(config_dict['loop_smpl']))
+
+ for _ in loop_smpl:
+
+ optimizer_smpl.zero_grad()
+
+ # 6d_rot to rot_mat
+ optimed_orient_mat = rot6d_to_rotmat(optimed_orient.view(-1,6)).unsqueeze(0)
+ optimed_pose_mat = rot6d_to_rotmat(optimed_pose.view(-1,6)).unsqueeze(0)
+
+ if dataset_param["hps_type"] != "pixie":
+ smpl_out = dataset.smpl_model(
+ betas=optimed_betas,
+ body_pose=optimed_pose_mat,
+ global_orient=optimed_orient_mat,
+ pose2rot=False,
+ )
+
+ smpl_verts = ((smpl_out.vertices) +
+ optimed_trans) * data["scale"]
+ else:
+ smpl_verts, _, _ = dataset.smpl_model(
+ shape_params=optimed_betas,
+ expression_params=tensor2variable(data["exp"], device),
+ body_pose=optimed_pose_mat,
+ global_pose=optimed_orient_mat,
+ jaw_pose=tensor2variable(data["jaw_pose"], device),
+ left_hand_pose=tensor2variable(
+ data["left_hand_pose"], device),
+ right_hand_pose=tensor2variable(
+ data["right_hand_pose"], device),
+ )
+
+ smpl_verts = (smpl_verts + optimed_trans) * data["scale"]
+
+ # render optimized mesh (normal, T_normal, image [-1,1])
+ in_tensor["T_normal_F"], in_tensor["T_normal_B"] = dataset.render_normal(
+ smpl_verts *
+ torch.tensor([1.0, -1.0, -1.0]
+ ).to(device), in_tensor["smpl_faces"]
+ )
+ T_mask_F, T_mask_B = dataset.render.get_silhouette_image()
+
+ with torch.no_grad():
+ in_tensor["normal_F"], in_tensor["normal_B"] = model.netG.normal_filter(
+ in_tensor
+ )
+
+ diff_F_smpl = torch.abs(
+ in_tensor["T_normal_F"] - in_tensor["normal_F"])
+ diff_B_smpl = torch.abs(
+ in_tensor["T_normal_B"] - in_tensor["normal_B"])
+
+ losses["normal"]["value"] = (diff_F_smpl + diff_B_smpl).mean()
+
+ # silhouette loss
+ smpl_arr = torch.cat([T_mask_F, T_mask_B], dim=-1)[0]
+ gt_arr = torch.cat(
+ [in_tensor["normal_F"][0], in_tensor["normal_B"][0]], dim=2
+ ).permute(1, 2, 0)
+ gt_arr = ((gt_arr + 1.0) * 0.5).to(device)
+ bg_color = (
+ torch.Tensor([0.5, 0.5, 0.5]).unsqueeze(
+ 0).unsqueeze(0).to(device)
+ )
+ gt_arr = ((gt_arr - bg_color).sum(dim=-1) != 0.0).float()
+ diff_S = torch.abs(smpl_arr - gt_arr)
+ losses["silhouette"]["value"] = diff_S.mean()
+
+ # Weighted sum of the losses
+ smpl_loss = 0.0
+ pbar_desc = "Body Fitting --- "
+ for k in ["normal", "silhouette"]:
+ pbar_desc += f"{k}: {losses[k]['value'] * losses[k]['weight']:.3f} | "
+ smpl_loss += losses[k]["value"] * losses[k]["weight"]
+ pbar_desc += f"Total: {smpl_loss:.3f}"
+ loop_smpl.set_description(pbar_desc)
+
+ smpl_loss.backward()
+ optimizer_smpl.step()
+ scheduler_smpl.step(smpl_loss)
+ in_tensor["smpl_verts"] = smpl_verts * \
+ torch.tensor([1.0, 1.0, -1.0]).to(device)
+
+ # visualize the optimization process
+ # 1. SMPL Fitting
+ # 2. Clothes Refinement
+
+ os.makedirs(os.path.join(config_dict['out_dir'], cfg.name,
+ "refinement"), exist_ok=True)
+
+ # visualize the final results in self-rotation mode
+ os.makedirs(os.path.join(config_dict['out_dir'],
+ cfg.name, "vid"), exist_ok=True)
+
+ # final results rendered as image
+ # 1. Render the final fitted SMPL (xxx_smpl.png)
+ # 2. Render the final reconstructed clothed human (xxx_cloth.png)
+ # 3. Blend the original image with predicted cloth normal (xxx_overlap.png)
+
+ os.makedirs(os.path.join(config_dict['out_dir'],
+ cfg.name, "png"), exist_ok=True)
+
+ # final reconstruction meshes
+ # 1. SMPL mesh (xxx_smpl.obj)
+ # 2. SMPL params (xxx_smpl.npy)
+ # 3. clothed mesh (xxx_recon.obj)
+ # 4. remeshed clothed mesh (xxx_remesh.obj)
+ # 5. refined clothed mesh (xxx_refine.obj)
+
+ os.makedirs(os.path.join(config_dict['out_dir'],
+ cfg.name, "obj"), exist_ok=True)
+
+ norm_pred_F = (
+ ((in_tensor["normal_F"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
+ .detach()
+ .cpu()
+ .numpy()
+ .astype(np.uint8)
+ )
+
+ norm_pred_B = (
+ ((in_tensor["normal_B"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
+ .detach()
+ .cpu()
+ .numpy()
+ .astype(np.uint8)
+ )
+
+ norm_orig_F = unwrap(norm_pred_F, data)
+ norm_orig_B = unwrap(norm_pred_B, data)
+
+ mask_orig = unwrap(
+ np.repeat(
+ data["mask"].permute(1, 2, 0).detach().cpu().numpy(), 3, axis=2
+ ).astype(np.uint8),
+ data,
+ )
+ rgb_norm_F = blend_rgb_norm(data["ori_image"], norm_orig_F, mask_orig)
+ rgb_norm_B = blend_rgb_norm(data["ori_image"], norm_orig_B, mask_orig)
+
+ Image.fromarray(
+ np.concatenate(
+ [data["ori_image"].astype(np.uint8), rgb_norm_F, rgb_norm_B], axis=1)
+ ).save(os.path.join(config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png"))
+
+ smpl_obj = trimesh.Trimesh(
+ in_tensor["smpl_verts"].detach().cpu()[0] *
+ torch.tensor([1.0, -1.0, 1.0]),
+ in_tensor['smpl_faces'].detach().cpu()[0],
+ process=False,
+ maintains_order=True
+ )
+ smpl_obj.visual.vertex_colors = (smpl_obj.vertex_normals+1.0)*255.0*0.5
+ smpl_obj.export(
+ f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj")
+ smpl_obj.export(
+ f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.glb")
+
+ smpl_info = {'betas': optimed_betas,
+ 'pose': optimed_pose_mat,
+ 'orient': optimed_orient_mat,
+ 'trans': optimed_trans}
+
+ np.save(
+ f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.npy", smpl_info, allow_pickle=True)
+
+ # ------------------------------------------------------------------------------------------------------------------
+
+ # cloth optimization
+
+ # cloth recon
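+ # run the implicit reconstruction conditioned on the fitted SMPL body
+ # (SMPL visibility and correspondence features; voxelized SMPL features for PaMIR)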
+ in_tensor.update(
+ dataset.compute_vis_cmap(
+ in_tensor["smpl_verts"][0], in_tensor["smpl_faces"][0]
+ )
+ )
+
+ if cfg.net.prior_type == "pamir":
+ in_tensor.update(
+ dataset.compute_voxel_verts(
+ optimed_pose,
+ optimed_orient,
+ optimed_betas,
+ optimed_trans,
+ data["scale"],
+ )
+ )
+
+ with torch.no_grad():
+ verts_pr, faces_pr, _ = model.test_single(in_tensor)
+
+ recon_obj = trimesh.Trimesh(
+ verts_pr, faces_pr, process=False, maintains_order=True
+ )
+ recon_obj.visual.vertex_colors = (
+ recon_obj.vertex_normals+1.0)*255.0*0.5
+ recon_obj.export(
+ os.path.join(config_dict['out_dir'], cfg.name,
+ f"obj/{data['name']}_recon.obj")
+ )
+
+ # Isotropic Explicit Remeshing for better geometry topology
+ verts_refine, faces_refine = remesh(os.path.join(config_dict['out_dir'], cfg.name,
+ f"obj/{data['name']}_recon.obj"), 0.5, device)
+
+ # define local_affine deform verts
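+ # each vertex gets its own small affine transform; the stiffness loss penalizes differences
+ # between neighbouring transforms and the rigid loss keeps det(R) close to 1 (see losses above)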
+ mesh_pr = Meshes(verts_refine, faces_refine).to(device)
+ local_affine_model = LocalAffine(
+ mesh_pr.verts_padded().shape[1], mesh_pr.verts_padded().shape[0], mesh_pr.edges_packed()).to(device)
+ optimizer_cloth = torch.optim.Adam(
+ [{'params': local_affine_model.parameters()}], lr=1e-4, amsgrad=True)
+
+ scheduler_cloth = torch.optim.lr_scheduler.ReduceLROnPlateau(
+ optimizer_cloth,
+ mode="min",
+ factor=0.1,
+ verbose=0,
+ min_lr=1e-5,
+ patience=config_dict['patience'],
+ )
+
+ final = None
+
+ if config_dict['loop_cloth'] > 0:
+
+ loop_cloth = tqdm(range(config_dict['loop_cloth']))
+
+ for _ in loop_cloth:
+
+ optimizer_cloth.zero_grad()
+
+ deformed_verts, stiffness, rigid = local_affine_model(
+ verts_refine.to(device), return_stiff=True)
+ mesh_pr = mesh_pr.update_padded(deformed_verts)
+
+ # losses for laplacian, edge, normal consistency
+ update_mesh_shape_prior_losses(mesh_pr, losses)
+
+ in_tensor["P_normal_F"], in_tensor["P_normal_B"] = dataset.render_normal(
+ mesh_pr.verts_padded(), mesh_pr.faces_padded())
+
+ diff_F_cloth = torch.abs(
+ in_tensor["P_normal_F"] - in_tensor["normal_F"])
+ diff_B_cloth = torch.abs(
+ in_tensor["P_normal_B"] - in_tensor["normal_B"])
+
+ losses["cloth"]["value"] = (diff_F_cloth + diff_B_cloth).mean()
+ losses["stiffness"]["value"] = torch.mean(stiffness)
+ losses["rigid"]["value"] = torch.mean(rigid)
+
+ # Weighted sum of the losses
+ cloth_loss = torch.tensor(0.0, requires_grad=True).to(device)
+ pbar_desc = "Cloth Refinement --- "
+
+ for k in losses.keys():
+ if k not in ["normal", "silhouette"] and losses[k]["weight"] > 0.0:
+ cloth_loss = cloth_loss + \
+ losses[k]["value"] * losses[k]["weight"]
+ pbar_desc += f"{k}:{losses[k]['value']* losses[k]['weight']:.5f} | "
+
+ pbar_desc += f"Total: {cloth_loss:.5f}"
+ loop_cloth.set_description(pbar_desc)
+
+ # update params
+ cloth_loss.backward()
+ optimizer_cloth.step()
+ scheduler_cloth.step(cloth_loss)
+
+ final = trimesh.Trimesh(
+ mesh_pr.verts_packed().detach().squeeze(0).cpu(),
+ mesh_pr.faces_packed().detach().squeeze(0).cpu(),
+ process=False, maintains_order=True
+ )
+
+ # only with front texture
+ tex_colors = query_color(
+ mesh_pr.verts_packed().detach().squeeze(0).cpu(),
+ mesh_pr.faces_packed().detach().squeeze(0).cpu(),
+ in_tensor["image"],
+ device=device,
+ )
+
+ # full normal textures
+ norm_colors = (mesh_pr.verts_normals_padded().squeeze(
+ 0).detach().cpu() + 1.0) * 0.5 * 255.0
+
+ final.visual.vertex_colors = tex_colors
+ final.export(
+ f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj")
+
+ final.visual.vertex_colors = norm_colors
+ final.export(
+ f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb")
+
+ # always export the visualized video regardless of cloth refinement
+ verts_lst = [smpl_obj.vertices, final.vertices]
+ faces_lst = [smpl_obj.faces, final.faces]
+
+ # self-rotated video
+ dataset.render.load_meshes(
+ verts_lst, faces_lst)
+ dataset.render.get_rendered_video(
+ [data["ori_image"], rgb_norm_F, rgb_norm_B],
+ os.path.join(config_dict['out_dir'], cfg.name,
+ f"vid/{data['name']}_cloth.mp4"),
+ )
+
+ smpl_obj_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj"
+ smpl_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.glb"
+ smpl_npy_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.npy"
+ refine_obj_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj"
+ refine_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb"
+
+ video_path = os.path.join(
+ config_dict['out_dir'], cfg.name, f"vid/{data['name']}_cloth.mp4")
+ overlap_path = os.path.join(
+ config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")
+
+ # clean all the variables
+ for element in dir():
+ if 'path' not in element:
+ del locals()[element]
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ return [smpl_glb_path, smpl_obj_path, smpl_npy_path,
+ refine_glb_path, refine_obj_path,
+ video_path, video_path, overlap_path]
\ No newline at end of file
diff --git a/configs/icon-filter.yaml b/configs/icon-filter.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47a0cc21a60f0ee50129df9f03f0ce24be557473
--- /dev/null
+++ b/configs/icon-filter.yaml
@@ -0,0 +1,25 @@
+name: icon-filter
+ckpt_dir: "./data/ckpt/"
+resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/icon-filter.ckpt"
+normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt"
+
+test_mode: True
+batch_size: 1
+
+net:
+ mlp_dim: [256, 512, 256, 128, 1]
+ res_layers: [2,3,4]
+ num_stack: 2
+ prior_type: "icon" # icon/pamir/icon
+ use_filter: True
+ in_geo: (('normal_F',3), ('normal_B',3))
+ in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
+ smpl_feats: ['sdf', 'norm', 'vis', 'cmap']
+ gtype: 'HGPIFuNet'
+ norm_mlp: 'batch'
+ hourglass_dim: 6
+ smpl_dim: 7
+
+# user defined
+mcube_res: 512 # occupancy field resolution, higher --> more details
+clean_mesh: False # if True, will remove floating pieces
diff --git a/configs/icon-nofilter.yaml b/configs/icon-nofilter.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8ceae86d5024a81ec177d69b189b825cdcb7605c
--- /dev/null
+++ b/configs/icon-nofilter.yaml
@@ -0,0 +1,25 @@
+name: icon-nofilter
+ckpt_dir: "./data/ckpt/"
+resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/icon-nofilter.ckpt"
+normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt"
+
+test_mode: True
+batch_size: 1
+
+net:
+ mlp_dim: [256, 512, 256, 128, 1]
+ res_layers: [2,3,4]
+ num_stack: 2
+ prior_type: "icon" # icon/pamir/icon
+ use_filter: False
+ in_geo: (('normal_F',3), ('normal_B',3))
+ in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
+ smpl_feats: ['sdf', 'norm', 'vis', 'cmap']
+ gtype: 'HGPIFuNet'
+ norm_mlp: 'batch'
+ hourglass_dim: 6
+ smpl_dim: 7
+
+# user defined
+mcube_res: 512 # occupancy field resolution, higher --> more details
+clean_mesh: False # if True, will remove floating pieces
\ No newline at end of file
diff --git a/configs/pamir.yaml b/configs/pamir.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a59a7531d24216c48666cc939a281755a45edcfa
--- /dev/null
+++ b/configs/pamir.yaml
@@ -0,0 +1,24 @@
+name: pamir
+ckpt_dir: "./data/ckpt/"
+resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/pamir.ckpt"
+normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt"
+
+test_mode: True
+batch_size: 1
+
+net:
+ mlp_dim: [256, 512, 256, 128, 1]
+ res_layers: [2,3,4]
+ num_stack: 2
+ prior_type: "pamir" # icon/pamir/icon
+ use_filter: True
+ in_geo: (('image',3), ('normal_F',3), ('normal_B',3))
+ in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
+ gtype: 'HGPIFuNet'
+ norm_mlp: 'batch'
+ hourglass_dim: 6
+ voxel_dim: 7
+
+# user defined
+mcube_res: 512 # occupancy field resolution, higher --> more details
+clean_mesh: False # if True, will remove floating pieces
\ No newline at end of file
diff --git a/configs/pifu.yaml b/configs/pifu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..69438539d03913ac51c527517e3f33e19e1c9bb3
--- /dev/null
+++ b/configs/pifu.yaml
@@ -0,0 +1,24 @@
+name: pifu
+ckpt_dir: "./data/ckpt/"
+resume_path: "https://huggingface.co/Yuliang/ICON/resolve/main/pifu.ckpt"
+normal_path: "https://huggingface.co/Yuliang/ICON/resolve/main/normal.ckpt"
+
+test_mode: True
+batch_size: 1
+
+net:
+ mlp_dim: [256, 512, 256, 128, 1]
+ res_layers: [2,3,4]
+ num_stack: 2
+ prior_type: "pifu" # icon/pamir/icon
+ use_filter: True
+ in_geo: (('image',3), ('normal_F',3), ('normal_B',3))
+ in_nml: (('image',3), ('T_normal_F',3), ('T_normal_B',3))
+ gtype: 'HGPIFuNet'
+ norm_mlp: 'batch'
+ hourglass_dim: 12
+
+
+# user defined
+mcube_res: 512 # occupancy field resolution, higher --> more details
+clean_mesh: False # if True, will remove floating pieces
\ No newline at end of file
diff --git a/lib/pymaf/configs/pymaf_config.yaml b/lib/pymaf/configs/pymaf_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5e4bb7fff6145ebd520dc3db51b0acede783129d
--- /dev/null
+++ b/lib/pymaf/configs/pymaf_config.yaml
@@ -0,0 +1,47 @@
+SOLVER:
+ MAX_ITER: 500000
+ TYPE: Adam
+ BASE_LR: 0.00005
+ GAMMA: 0.1
+ STEPS: [0]
+ EPOCHS: [0]
+DEBUG: False
+LOGDIR: ''
+DEVICE: cuda
+NUM_WORKERS: 8
+SEED_VALUE: -1
+LOSS:
+ KP_2D_W: 300.0
+ KP_3D_W: 300.0
+ SHAPE_W: 0.06
+ POSE_W: 60.0
+ VERT_W: 0.0
+ INDEX_WEIGHTS: 2.0
+ # Loss weights for surface parts. (24 Parts)
+ PART_WEIGHTS: 0.3
+ # Loss weights for UV regression.
+ POINT_REGRESSION_WEIGHTS: 0.5
+TRAIN:
+ NUM_WORKERS: 8
+ BATCH_SIZE: 64
+ PIN_MEMORY: True
+TEST:
+ BATCH_SIZE: 32
+MODEL:
+ PyMAF:
+ BACKBONE: 'res50'
+ MLP_DIM: [256, 128, 64, 5]
+ N_ITER: 3
+ AUX_SUPV_ON: True
+ DP_HEATMAP_SIZE: 56
+RES_MODEL:
+ DECONV_WITH_BIAS: False
+ NUM_DECONV_LAYERS: 3
+ NUM_DECONV_FILTERS:
+ - 256
+ - 256
+ - 256
+ NUM_DECONV_KERNELS:
+ - 4
+ - 4
+ - 4
\ No newline at end of file
diff --git a/lib/pymaf/core/__init__.py b/lib/pymaf/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/pymaf/core/train_options.py b/lib/pymaf/core/train_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..15657f6d55ae39f93896c149e290f54d1a594538
--- /dev/null
+++ b/lib/pymaf/core/train_options.py
@@ -0,0 +1,135 @@
+import argparse
+
+
+class TrainOptions():
+ def __init__(self):
+ self.parser = argparse.ArgumentParser()
+
+ gen = self.parser.add_argument_group('General')
+ gen.add_argument(
+ '--resume',
+ dest='resume',
+ default=False,
+ action='store_true',
+ help='Resume from checkpoint (use latest checkpoint by default)')
+
+ io = self.parser.add_argument_group('io')
+ io.add_argument('--log_dir',
+ default='logs',
+ help='Directory to store logs')
+ io.add_argument(
+ '--pretrained_checkpoint',
+ default=None,
+ help='Load a pretrained checkpoint at the beginning of training')
+
+ train = self.parser.add_argument_group('Training Options')
+ train.add_argument('--num_epochs',
+ type=int,
+ default=200,
+ help='Total number of training epochs')
+ train.add_argument('--regressor',
+ type=str,
+ choices=['hmr', 'pymaf_net'],
+ default='pymaf_net',
+ help='Name of the SMPL regressor.')
+ train.add_argument('--cfg_file',
+ type=str,
+ default='./configs/pymaf_config.yaml',
+ help='config file path for PyMAF.')
+ train.add_argument(
+ '--img_res',
+ type=int,
+ default=224,
+ help='Rescale bounding boxes to size [img_res, img_res] before feeding them into the network'
+ )
+ train.add_argument(
+ '--rot_factor',
+ type=float,
+ default=30,
+ help='Random rotation in the range [-rot_factor, rot_factor]')
+ train.add_argument(
+ '--noise_factor',
+ type=float,
+ default=0.4,
+ help='Randomly multiply pixel values with factor in the range [1-noise_factor, 1+noise_factor]'
+ )
+ train.add_argument(
+ '--scale_factor',
+ type=float,
+ default=0.25,
+ help='Rescale bounding boxes by a factor of [1-scale_factor,1+scale_factor]'
+ )
+ train.add_argument(
+ '--openpose_train_weight',
+ default=0.,
+ help='Weight for OpenPose keypoints during training')
+ train.add_argument('--gt_train_weight',
+ default=1.,
+ help='Weight for GT keypoints during training')
+ train.add_argument('--eval_dataset',
+ type=str,
+ default='h36m-p2-mosh',
+ help='Name of the evaluation dataset.')
+ train.add_argument('--single_dataset',
+ default=False,
+ action='store_true',
+ help='Use a single dataset')
+ train.add_argument('--single_dataname',
+ type=str,
+ default='h36m',
+ help='Name of the single dataset.')
+ train.add_argument('--eval_pve',
+ default=False,
+ action='store_true',
+ help='evaluate PVE')
+ train.add_argument('--overwrite',
+ default=False,
+ action='store_true',
+ help='overwrite the latest checkpoint')
+
+ train.add_argument('--distributed',
+ action='store_true',
+ help='Use distributed training')
+ train.add_argument('--dist_backend',
+ default='nccl',
+ type=str,
+ help='distributed backend')
+ train.add_argument('--dist_url',
+ default='tcp://127.0.0.1:10356',
+ type=str,
+ help='url used to set up distributed training')
+ train.add_argument('--world_size',
+ default=1,
+ type=int,
+ help='number of nodes for distributed training')
+ train.add_argument("--local_rank", default=0, type=int)
+ train.add_argument('--rank',
+ default=0,
+ type=int,
+ help='node rank for distributed training')
+ train.add_argument(
+ '--multiprocessing_distributed',
+ action='store_true',
+ help='Use multi-processing distributed training to launch '
+ 'N processes per node, which has N GPUs. This is the '
+ 'fastest way to use PyTorch for either single node or '
+ 'multi node data parallel training')
+
+ misc = self.parser.add_argument_group('Misc Options')
+ misc.add_argument('--misc',
+ help="Modify config options using the command-line",
+ default=None,
+ nargs=argparse.REMAINDER)
+ return
+
+ def parse_args(self):
+ """Parse input arguments."""
+ self.args = self.parser.parse_args()
+ self.save_dump()
+ return self.args
+
+ def save_dump(self):
+ """Store all argument values to a json file.
+ The default location is logs/expname/args.json.
+ """
+ pass
\ No newline at end of file
diff --git a/lib/pymaf/core/base_trainer.py b/lib/pymaf/core/base_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f06856c7038fbd652a3dc077a872da66d3585dec
--- /dev/null
+++ b/lib/pymaf/core/base_trainer.py
@@ -0,0 +1,107 @@
+# This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/utils/base_trainer.py
+from __future__ import division
+import logging
+from utils import CheckpointSaver
+from tensorboardX import SummaryWriter
+
+import torch
+from tqdm import tqdm
+
+tqdm.monitor_interval = 0
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseTrainer(object):
+ """Base class for Trainer objects.
+ Takes care of checkpointing/logging/resuming training.
+ """
+
+ def __init__(self, options):
+ self.options = options
+ if options.multiprocessing_distributed:
+ self.device = torch.device('cuda', options.gpu)
+ else:
+ self.device = torch.device(
+ 'cuda' if torch.cuda.is_available() else 'cpu')
+ # override this function to define your model, optimizers etc.
+ self.saver = CheckpointSaver(save_dir=options.checkpoint_dir,
+ overwrite=options.overwrite)
+ if options.rank == 0:
+ self.summary_writer = SummaryWriter(self.options.summary_dir)
+ self.init_fn()
+
+ self.checkpoint = None
+ if options.resume and self.saver.exists_checkpoint():
+ self.checkpoint = self.saver.load_checkpoint(
+ self.models_dict, self.optimizers_dict)
+
+ if self.checkpoint is None:
+ self.epoch_count = 0
+ self.step_count = 0
+ else:
+ self.epoch_count = self.checkpoint['epoch']
+ self.step_count = self.checkpoint['total_step_count']
+
+ if self.checkpoint is not None:
+ self.checkpoint_batch_idx = self.checkpoint['batch_idx']
+ else:
+ self.checkpoint_batch_idx = 0
+
+ self.best_performance = float('inf')
+
+ def load_pretrained(self, checkpoint_file=None):
+ """Load a pretrained checkpoint.
+ This is different from resuming training using --resume.
+ """
+ if checkpoint_file is not None:
+ checkpoint = torch.load(checkpoint_file)
+ for model in self.models_dict:
+ if model in checkpoint:
+ self.models_dict[model].load_state_dict(checkpoint[model],
+ strict=True)
+ print(f'Checkpoint {model} loaded')
+
+ def move_dict_to_device(self, dict, device, tensor2float=False):
+ for k, v in dict.items():
+ if isinstance(v, torch.Tensor):
+ if tensor2float:
+ dict[k] = v.float().to(device)
+ else:
+ dict[k] = v.to(device)
+
+ # The following methods (with the possible exception of test) have to be implemented in the derived classes
+ def train(self, epoch):
+ raise NotImplementedError('You need to provide a train method')
+
+ def init_fn(self):
+ raise NotImplementedError('You need to provide an init_fn method')
+
+ def train_step(self, input_batch):
+ raise NotImplementedError('You need to provide a train_step method')
+
+ def train_summaries(self, input_batch):
+ raise NotImplementedError(
+ 'You need to provide a train_summaries method')
+
+ def visualize(self, input_batch):
+ raise NotImplementedError('You need to provide a visualize method')
+
+ def validate(self):
+ pass
+
+ def test(self):
+ pass
+
+ def evaluate(self):
+ pass
+
+ def fit(self):
+ # Run training for num_epochs epochs
+ for epoch in tqdm(range(self.epoch_count, self.options.num_epochs),
+ total=self.options.num_epochs,
+ initial=self.epoch_count):
+ self.epoch_count = epoch
+ self.train(epoch)
+ return
\ No newline at end of file
diff --git a/lib/pymaf/core/cfgs.py b/lib/pymaf/core/cfgs.py
new file mode 100644
index 0000000000000000000000000000000000000000..09ac4fa48483aa9e595b7e4b27dfa7426cb11d33
--- /dev/null
+++ b/lib/pymaf/core/cfgs.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os
+import json
+from yacs.config import CfgNode as CN
+
+# Configuration variables
+cfg = CN(new_allowed=True)
+
+cfg.OUTPUT_DIR = 'results'
+cfg.DEVICE = 'cuda'
+cfg.DEBUG = False
+cfg.LOGDIR = ''
+cfg.VAL_VIS_BATCH_FREQ = 200
+cfg.TRAIN_VIS_ITER_FERQ = 1000
+cfg.SEED_VALUE = -1
+
+cfg.TRAIN = CN(new_allowed=True)
+
+cfg.LOSS = CN(new_allowed=True)
+cfg.LOSS.KP_2D_W = 300.0
+cfg.LOSS.KP_3D_W = 300.0
+cfg.LOSS.SHAPE_W = 0.06
+cfg.LOSS.POSE_W = 60.0
+cfg.LOSS.VERT_W = 0.0
+
+# Loss weights for dense correspondences
+cfg.LOSS.INDEX_WEIGHTS = 2.0
+# Loss weights for surface parts. (24 Parts)
+cfg.LOSS.PART_WEIGHTS = 0.3
+# Loss weights for UV regression.
+cfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5
+
+cfg.MODEL = CN(new_allowed=True)
+
+cfg.MODEL.PyMAF = CN(new_allowed=True)
+
+# switch
+cfg.TRAIN.VAL_LOOP = True
+
+cfg.TEST = CN(new_allowed=True)
+
+
+def get_cfg_defaults():
+ """Get a yacs CfgNode object with default values for my_project."""
+ # Return a clone so that the defaults will not be altered
+ # This is for the "local variable" use pattern
+ # return cfg.clone()
+ return cfg
+
+
+def update_cfg(cfg_file):
+ # cfg = get_cfg_defaults()
+ cfg.merge_from_file(cfg_file)
+ # return cfg.clone()
+ return cfg
+
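+# Usage sketch: values from a YAML file override the defaults defined above, e.g.
+#   cfg = update_cfg('lib/pymaf/configs/pymaf_config.yaml')
+#   assert cfg.MODEL.PyMAF.N_ITER == 3  # set by the YAML; untouched keys keep their defaults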
+
+def parse_args(args):
+ cfg_file = args.cfg_file
+ if args.cfg_file is not None:
+ cfg = update_cfg(args.cfg_file)
+ else:
+ cfg = get_cfg_defaults()
+
+ # if args.misc is not None:
+ # cfg.merge_from_list(args.misc)
+
+ return cfg
+
+
+def parse_args_extend(args):
+ if args.resume:
+ if not os.path.exists(args.log_dir):
+ raise ValueError(
+ 'Experiment is set to resume mode, but the log directory does not exist.'
+ )
+
+ # load log's cfg
+ cfg_file = os.path.join(args.log_dir, 'cfg.yaml')
+ cfg = update_cfg(cfg_file)
+
+ if args.misc is not None:
+ cfg.merge_from_list(args.misc)
+ else:
+ parse_args(args)
diff --git a/lib/pymaf/core/constants.py b/lib/pymaf/core/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..69f507432f3adfd73164a15d03f86f02c3db5287
--- /dev/null
+++ b/lib/pymaf/core/constants.py
@@ -0,0 +1,153 @@
+# This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/constants.py
+FOCAL_LENGTH = 5000.
+IMG_RES = 224
+
+# Mean and standard deviation for normalizing input image
+IMG_NORM_MEAN = [0.485, 0.456, 0.406]
+IMG_NORM_STD = [0.229, 0.224, 0.225]
+"""
+We create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.
+We keep a superset of 24 joints such that we include all joints from every dataset.
+If a dataset doesn't provide annotations for a specific joint, we simply ignore it.
+The joints used here are the following:
+"""
+JOINT_NAMES = [
+ # 25 OpenPose joints (in the order provided by OpenPose)
+ 'OP Nose',
+ 'OP Neck',
+ 'OP RShoulder',
+ 'OP RElbow',
+ 'OP RWrist',
+ 'OP LShoulder',
+ 'OP LElbow',
+ 'OP LWrist',
+ 'OP MidHip',
+ 'OP RHip',
+ 'OP RKnee',
+ 'OP RAnkle',
+ 'OP LHip',
+ 'OP LKnee',
+ 'OP LAnkle',
+ 'OP REye',
+ 'OP LEye',
+ 'OP REar',
+ 'OP LEar',
+ 'OP LBigToe',
+ 'OP LSmallToe',
+ 'OP LHeel',
+ 'OP RBigToe',
+ 'OP RSmallToe',
+ 'OP RHeel',
+ # 24 Ground Truth joints (superset of joints from different datasets)
+ 'Right Ankle',
+ 'Right Knee',
+ 'Right Hip', # 2
+ 'Left Hip',
+ 'Left Knee', # 4
+ 'Left Ankle',
+ 'Right Wrist', # 6
+ 'Right Elbow',
+ 'Right Shoulder', # 8
+ 'Left Shoulder',
+ 'Left Elbow', # 10
+ 'Left Wrist',
+ 'Neck (LSP)', # 12
+ 'Top of Head (LSP)',
+ 'Pelvis (MPII)', # 14
+ 'Thorax (MPII)',
+ 'Spine (H36M)', # 16
+ 'Jaw (H36M)',
+ 'Head (H36M)', # 18
+ 'Nose',
+ 'Left Eye',
+ 'Right Eye',
+ 'Left Ear',
+ 'Right Ear'
+]
+
+# Dict containing the joints in numerical order
+JOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}
+
+# Map joints to SMPL joints
+JOINT_MAP = {
+ 'OP Nose': 24,
+ 'OP Neck': 12,
+ 'OP RShoulder': 17,
+ 'OP RElbow': 19,
+ 'OP RWrist': 21,
+ 'OP LShoulder': 16,
+ 'OP LElbow': 18,
+ 'OP LWrist': 20,
+ 'OP MidHip': 0,
+ 'OP RHip': 2,
+ 'OP RKnee': 5,
+ 'OP RAnkle': 8,
+ 'OP LHip': 1,
+ 'OP LKnee': 4,
+ 'OP LAnkle': 7,
+ 'OP REye': 25,
+ 'OP LEye': 26,
+ 'OP REar': 27,
+ 'OP LEar': 28,
+ 'OP LBigToe': 29,
+ 'OP LSmallToe': 30,
+ 'OP LHeel': 31,
+ 'OP RBigToe': 32,
+ 'OP RSmallToe': 33,
+ 'OP RHeel': 34,
+ 'Right Ankle': 8,
+ 'Right Knee': 5,
+ 'Right Hip': 45,
+ 'Left Hip': 46,
+ 'Left Knee': 4,
+ 'Left Ankle': 7,
+ 'Right Wrist': 21,
+ 'Right Elbow': 19,
+ 'Right Shoulder': 17,
+ 'Left Shoulder': 16,
+ 'Left Elbow': 18,
+ 'Left Wrist': 20,
+ 'Neck (LSP)': 47,
+ 'Top of Head (LSP)': 48,
+ 'Pelvis (MPII)': 49,
+ 'Thorax (MPII)': 50,
+ 'Spine (H36M)': 51,
+ 'Jaw (H36M)': 52,
+ 'Head (H36M)': 53,
+ 'Nose': 24,
+ 'Left Eye': 26,
+ 'Right Eye': 25,
+ 'Left Ear': 28,
+ 'Right Ear': 27
+}
+
+# Joint selectors
+# Indices to get the 14 LSP joints from the 17 H36M joints
+H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
+H36M_TO_J14 = H36M_TO_J17[:14]
+# Indices to get the 14 LSP joints from the ground truth joints
+J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]
+J24_TO_J14 = J24_TO_J17[:14]
+J24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]
+J24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]
+
+# Permutation of SMPL pose parameters when flipping the shape
+SMPL_JOINTS_FLIP_PERM = [
+ 0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21,
+ 20, 23, 22
+]
+SMPL_POSE_FLIP_PERM = []
+for i in SMPL_JOINTS_FLIP_PERM:
+ SMPL_POSE_FLIP_PERM.append(3 * i)
+ SMPL_POSE_FLIP_PERM.append(3 * i + 1)
+ SMPL_POSE_FLIP_PERM.append(3 * i + 2)
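+# e.g. joints 1 and 2 swap, so the axis-angle components at indices (3, 4, 5)
+# are exchanged with those at (6, 7, 8)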
+# Permutation indices for the 24 ground truth joints
+J24_FLIP_PERM = [
+ 5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21,
+ 20, 23, 22
+]
+# Permutation indices for the full set of 49 joints
+J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\
+ + [25+i for i in J24_FLIP_PERM]
+SMPL_J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\
+ + [25+i for i in SMPL_JOINTS_FLIP_PERM]
\ No newline at end of file
diff --git a/lib/pymaf/core/fits_dict.py b/lib/pymaf/core/fits_dict.py
new file mode 100644
index 0000000000000000000000000000000000000000..43a3415dd0395ac3d481a3497b0397a60bc5ef90
--- /dev/null
+++ b/lib/pymaf/core/fits_dict.py
@@ -0,0 +1,133 @@
+'''
+This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/train/fits_dict.py
+'''
+import os
+import cv2
+import torch
+import numpy as np
+from torchgeometry import angle_axis_to_rotation_matrix, rotation_matrix_to_angle_axis
+
+from core import path_config, constants
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class FitsDict():
+ """ Dictionary keeping track of the best fit per image in the training set """
+
+ def __init__(self, options, train_dataset):
+ self.options = options
+ self.train_dataset = train_dataset
+ self.fits_dict = {}
+ self.valid_fit_state = {}
+ # array used to flip SMPL pose parameters
+ self.flipped_parts = torch.tensor(constants.SMPL_POSE_FLIP_PERM,
+ dtype=torch.int64)
+ # Load dictionary state
+ for ds_name, ds in train_dataset.dataset_dict.items():
+ if ds_name in ['h36m']:
+ dict_file = os.path.join(path_config.FINAL_FITS_DIR,
+ ds_name + '.npy')
+ self.fits_dict[ds_name] = torch.from_numpy(np.load(dict_file))
+ self.valid_fit_state[ds_name] = torch.ones(len(
+ self.fits_dict[ds_name]),
+ dtype=torch.uint8)
+ else:
+ dict_file = os.path.join(path_config.FINAL_FITS_DIR,
+ ds_name + '.npz')
+ fits_dict = np.load(dict_file)
+ opt_pose = torch.from_numpy(fits_dict['pose'])
+ opt_betas = torch.from_numpy(fits_dict['betas'])
+ opt_valid_fit = torch.from_numpy(fits_dict['valid_fit']).to(
+ torch.uint8)
+ self.fits_dict[ds_name] = torch.cat([opt_pose, opt_betas],
+ dim=1)
+ self.valid_fit_state[ds_name] = opt_valid_fit
+
+ if not options.single_dataset:
+ for ds in train_dataset.datasets:
+ if ds.dataset not in ['h36m']:
+ ds.pose = self.fits_dict[ds.dataset][:, :72].numpy()
+ ds.betas = self.fits_dict[ds.dataset][:, 72:].numpy()
+ ds.has_smpl = self.valid_fit_state[ds.dataset].numpy()
+
+ def save(self):
+ """ Save dictionary state to disk """
+ for ds_name in self.train_dataset.dataset_dict.keys():
+ dict_file = os.path.join(self.options.checkpoint_dir,
+ ds_name + '_fits.npy')
+ np.save(dict_file, self.fits_dict[ds_name].cpu().numpy())
+
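+ # Usage sketch: fits_dict[(dataset_names, indices, rot_angles, is_flipped)] returns
+ # the stored best-fit SMPL (pose, betas), flipped/rotated to match the augmented sample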
+ def __getitem__(self, x):
+ """ Retrieve dictionary entries """
+ dataset_name, ind, rot, is_flipped = x
+ batch_size = len(dataset_name)
+ pose = torch.zeros((batch_size, 72))
+ betas = torch.zeros((batch_size, 10))
+ for ds, i, n in zip(dataset_name, ind, range(batch_size)):
+ params = self.fits_dict[ds][i]
+ pose[n, :] = params[:72]
+ betas[n, :] = params[72:]
+ pose = pose.clone()
+ # Apply flipping and rotation
+ pose = self.flip_pose(self.rotate_pose(pose, rot), is_flipped)
+ betas = betas.clone()
+ return pose, betas
+
+ def get_vaild_state(self, dataset_name, ind):
+ batch_size = len(dataset_name)
+ valid_fit = torch.zeros(batch_size, dtype=torch.uint8)
+ for ds, i, n in zip(dataset_name, ind, range(batch_size)):
+ valid_fit[n] = self.valid_fit_state[ds][i]
+ valid_fit = valid_fit.clone()
+ return valid_fit
+
+ def __setitem__(self, x, val):
+ """ Update dictionary entries """
+ dataset_name, ind, rot, is_flipped, update = x
+ pose, betas = val
+ batch_size = len(dataset_name)
+ # Undo flipping and rotation
+ pose = self.rotate_pose(self.flip_pose(pose, is_flipped), -rot)
+ params = torch.cat((pose, betas), dim=-1).cpu()
+ for ds, i, n in zip(dataset_name, ind, range(batch_size)):
+ if update[n]:
+ self.fits_dict[ds][i] = params[n]
+
+ def flip_pose(self, pose, is_flipped):
+ """flip SMPL pose parameters"""
+ is_flipped = is_flipped.byte()
+ pose_f = pose.clone()
+ pose_f[is_flipped, :] = pose[is_flipped][:, self.flipped_parts]
+ # we also negate the second and the third dimension of the axis-angle representation
+ pose_f[is_flipped, 1::3] *= -1
+ pose_f[is_flipped, 2::3] *= -1
+ return pose_f
+
+ def rotate_pose(self, pose, rot):
+ """Rotate SMPL pose parameters by rot degrees"""
+ pose = pose.clone()
+ cos = torch.cos(-np.pi * rot / 180.)
+ sin = torch.sin(-np.pi * rot / 180.)
+ zeros = torch.zeros_like(cos)
+ r3 = torch.zeros(cos.shape[0], 1, 3, device=cos.device)
+ r3[:, 0, -1] = 1
+ R = torch.cat([
+ torch.stack([cos, -sin, zeros], dim=-1).unsqueeze(1),
+ torch.stack([sin, cos, zeros], dim=-1).unsqueeze(1), r3
+ ],
+ dim=1)
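+ # R is a batch of rotation matrices about the camera z-axis by -rot degrees; it is
+ # applied to the global orientation (the first 3 axis-angle parameters) below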
+ global_pose = pose[:, :3]
+ global_pose_rotmat = angle_axis_to_rotation_matrix(global_pose)
+ global_pose_rotmat_3b3 = global_pose_rotmat[:, :3, :3]
+ global_pose_rotmat_3b3 = torch.matmul(R, global_pose_rotmat_3b3)
+ global_pose_rotmat[:, :3, :3] = global_pose_rotmat_3b3
+ global_pose_rotmat = global_pose_rotmat[:, :-1, :-1].cpu().numpy()
+ global_pose_np = np.zeros((global_pose.shape[0], 3))
+ for i in range(global_pose.shape[0]):
+ aa, _ = cv2.Rodrigues(global_pose_rotmat[i])
+ global_pose_np[i, :] = aa.squeeze()
+ pose[:, :3] = torch.from_numpy(global_pose_np).to(pose.device)
+ return pose
\ No newline at end of file
diff --git a/lib/pymaf/core/path_config.py b/lib/pymaf/core/path_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a053b972c515f83133c62b753eb8d31f1c16588a
--- /dev/null
+++ b/lib/pymaf/core/path_config.py
@@ -0,0 +1,24 @@
+"""
+This script is borrowed and extended from https://github.com/nkolot/SPIN/blob/master/path_config.py
+path configuration
+This file contains definitions of useful data structures and the paths
+for the datasets and data files necessary to run the code.
+Things you need to change: the *_ROOT variables that indicate the path to each dataset
+"""
+import os
+from huggingface_hub import hf_hub_url, cached_download
+
+# pymaf
+pymaf_data_dir = hf_hub_url('Yuliang/PyMAF', '')
+smpl_data_dir = hf_hub_url('Yuliang/SMPL', '')
+SMPL_MODEL_DIR = os.path.join(smpl_data_dir, 'models/smpl')
+
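+# NOTE: the files below are fetched from the Hugging Face Hub at import time; this
+# assumes a valid access token is exported in the ICON environment variable, e.g.
+#   export ICON=<your_huggingface_token>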
+SMPL_MEAN_PARAMS = cached_download(os.path.join(pymaf_data_dir, 'smpl_mean_params.npz'), use_auth_token=os.environ['ICON'])
+MESH_DOWNSAMPLEING = cached_download(os.path.join(pymaf_data_dir, 'mesh_downsampling.npz'), use_auth_token=os.environ['ICON'])
+CUBE_PARTS_FILE = cached_download(os.path.join(pymaf_data_dir, 'cube_parts.npy'), use_auth_token=os.environ['ICON'])
+JOINT_REGRESSOR_TRAIN_EXTRA = cached_download(os.path.join(pymaf_data_dir, 'J_regressor_extra.npy'), use_auth_token=os.environ['ICON'])
+JOINT_REGRESSOR_H36M = cached_download(os.path.join(pymaf_data_dir, 'J_regressor_h36m.npy'), use_auth_token=os.environ['ICON'])
+VERTEX_TEXTURE_FILE = cached_download(os.path.join(pymaf_data_dir, 'vertex_texture.npy'), use_auth_token=os.environ['ICON'])
+SMPL_MEAN_PARAMS = cached_download(os.path.join(pymaf_data_dir, 'smpl_mean_params.npz'), use_auth_token=os.environ['ICON'])
+CHECKPOINT_FILE = cached_download(os.path.join(pymaf_data_dir, 'pretrained_model/PyMAF_model_checkpoint.pt'), use_auth_token=os.environ['ICON'])
+
diff --git a/lib/pymaf/models/__init__.py b/lib/pymaf/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..af3f32ff51b1640179b30dfe22c4e3cf50c7c98c
--- /dev/null
+++ b/lib/pymaf/models/__init__.py
@@ -0,0 +1,3 @@
+from .hmr import hmr
+from .pymaf_net import pymaf_net
+from .smpl import SMPL
\ No newline at end of file
diff --git a/lib/pymaf/models/pymaf_net.py b/lib/pymaf/models/pymaf_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..2807abaa3c7da0be6913d2fd68cb0ad1721e2bf1
--- /dev/null
+++ b/lib/pymaf/models/pymaf_net.py
@@ -0,0 +1,362 @@
+import torch
+import torch.nn as nn
+import numpy as np
+
+from lib.pymaf.utils.geometry import rot6d_to_rotmat, projection, rotation_matrix_to_angle_axis
+from .maf_extractor import MAF_Extractor
+from .smpl import SMPL, SMPL_MODEL_DIR, SMPL_MEAN_PARAMS, H36M_TO_J14
+from .hmr import ResNet_Backbone
+from .res_module import IUV_predict_layer
+from lib.common.config import cfg
+import logging
+
+logger = logging.getLogger(__name__)
+
+BN_MOMENTUM = 0.1
+
+
+class Regressor(nn.Module):
+ def __init__(self, feat_dim, smpl_mean_params):
+ super().__init__()
+
+ npose = 24 * 6
+
+ self.fc1 = nn.Linear(feat_dim + npose + 13, 1024)
+ self.drop1 = nn.Dropout()
+ self.fc2 = nn.Linear(1024, 1024)
+ self.drop2 = nn.Dropout()
+ self.decpose = nn.Linear(1024, npose)
+ self.decshape = nn.Linear(1024, 10)
+ self.deccam = nn.Linear(1024, 3)
+ nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
+ nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
+ nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
+
+ self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=64, create_transl=False)
+
+ mean_params = np.load(smpl_mean_params)
+ init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
+ init_shape = torch.from_numpy(
+ mean_params['shape'][:].astype('float32')).unsqueeze(0)
+ init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
+ self.register_buffer('init_pose', init_pose)
+ self.register_buffer('init_shape', init_shape)
+ self.register_buffer('init_cam', init_cam)
+
+ def forward(self,
+ x,
+ init_pose=None,
+ init_shape=None,
+ init_cam=None,
+ n_iter=1,
+ J_regressor=None):
+ batch_size = x.shape[0]
+
+ if init_pose is None:
+ init_pose = self.init_pose.expand(batch_size, -1)
+ if init_shape is None:
+ init_shape = self.init_shape.expand(batch_size, -1)
+ if init_cam is None:
+ init_cam = self.init_cam.expand(batch_size, -1)
+
+ pred_pose = init_pose
+ pred_shape = init_shape
+ pred_cam = init_cam
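+ # Iterative error feedback: each pass concatenates the current estimates with the
+ # input feature x and predicts residual updates to pose, shape and camera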
+ for i in range(n_iter):
+ xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)
+ xc = self.fc1(xc)
+ xc = self.drop1(xc)
+ xc = self.fc2(xc)
+ xc = self.drop2(xc)
+ pred_pose = self.decpose(xc) + pred_pose
+ pred_shape = self.decshape(xc) + pred_shape
+ pred_cam = self.deccam(xc) + pred_cam
+
+ pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
+
+ pred_output = self.smpl(betas=pred_shape,
+ body_pose=pred_rotmat[:, 1:],
+ global_orient=pred_rotmat[:, 0].unsqueeze(1),
+ pose2rot=False)
+
+ pred_vertices = pred_output.vertices
+ pred_joints = pred_output.joints
+ pred_smpl_joints = pred_output.smpl_joints
+ pred_keypoints_2d = projection(pred_joints, pred_cam)
+ pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3,
+ 3)).reshape(
+ -1, 72)
+
+ if J_regressor is not None:
+ pred_joints = torch.matmul(J_regressor, pred_vertices)
+ pred_pelvis = pred_joints[:, [0], :].clone()
+ pred_joints = pred_joints[:, H36M_TO_J14, :]
+ pred_joints = pred_joints - pred_pelvis
+
+ output = {
+ 'theta': torch.cat([pred_cam, pred_shape, pose], dim=1),
+ 'verts': pred_vertices,
+ 'kp_2d': pred_keypoints_2d,
+ 'kp_3d': pred_joints,
+ 'smpl_kp_3d': pred_smpl_joints,
+ 'rotmat': pred_rotmat,
+ 'pred_cam': pred_cam,
+ 'pred_shape': pred_shape,
+ 'pred_pose': pred_pose,
+ }
+ return output
+
+ def forward_init(self,
+ x,
+ init_pose=None,
+ init_shape=None,
+ init_cam=None,
+ n_iter=1,
+ J_regressor=None):
+ batch_size = x.shape[0]
+
+ if init_pose is None:
+ init_pose = self.init_pose.expand(batch_size, -1)
+ if init_shape is None:
+ init_shape = self.init_shape.expand(batch_size, -1)
+ if init_cam is None:
+ init_cam = self.init_cam.expand(batch_size, -1)
+
+ pred_pose = init_pose
+ pred_shape = init_shape
+ pred_cam = init_cam
+
+ pred_rotmat = rot6d_to_rotmat(pred_pose.contiguous()).view(
+ batch_size, 24, 3, 3)
+
+ pred_output = self.smpl(betas=pred_shape,
+ body_pose=pred_rotmat[:, 1:],
+ global_orient=pred_rotmat[:, 0].unsqueeze(1),
+ pose2rot=False)
+
+ pred_vertices = pred_output.vertices
+ pred_joints = pred_output.joints
+ pred_smpl_joints = pred_output.smpl_joints
+ pred_keypoints_2d = projection(pred_joints, pred_cam)
+ pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3,
+ 3)).reshape(
+ -1, 72)
+
+ if J_regressor is not None:
+ pred_joints = torch.matmul(J_regressor, pred_vertices)
+ pred_pelvis = pred_joints[:, [0], :].clone()
+ pred_joints = pred_joints[:, H36M_TO_J14, :]
+ pred_joints = pred_joints - pred_pelvis
+
+ output = {
+ 'theta': torch.cat([pred_cam, pred_shape, pose], dim=1),
+ 'verts': pred_vertices,
+ 'kp_2d': pred_keypoints_2d,
+ 'kp_3d': pred_joints,
+ 'smpl_kp_3d': pred_smpl_joints,
+ 'rotmat': pred_rotmat,
+ 'pred_cam': pred_cam,
+ 'pred_shape': pred_shape,
+ 'pred_pose': pred_pose,
+ }
+ return output
+
+
+class PyMAF(nn.Module):
+ """ PyMAF based Deep Regressor for Human Mesh Recovery
+ PyMAF: 3D Human Pose and Shape Regression with Pyramidal Mesh Alignment Feedback Loop, in ICCV, 2021
+ """
+
+ def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS, pretrained=True):
+ super().__init__()
+ self.feature_extractor = ResNet_Backbone(
+ model=cfg.MODEL.PyMAF.BACKBONE, pretrained=pretrained)
+
+ # deconv layers
+ self.inplanes = self.feature_extractor.inplanes
+ self.deconv_with_bias = cfg.RES_MODEL.DECONV_WITH_BIAS
+ self.deconv_layers = self._make_deconv_layer(
+ cfg.RES_MODEL.NUM_DECONV_LAYERS,
+ cfg.RES_MODEL.NUM_DECONV_FILTERS,
+ cfg.RES_MODEL.NUM_DECONV_KERNELS,
+ )
+
+ self.maf_extractor = nn.ModuleList()
+ for _ in range(cfg.MODEL.PyMAF.N_ITER):
+ self.maf_extractor.append(MAF_Extractor())
+ ma_feat_len = self.maf_extractor[-1].Dmap.shape[
+ 0] * cfg.MODEL.PyMAF.MLP_DIM[-1]
+
+ grid_size = 21
+ xv, yv = torch.meshgrid([
+ torch.linspace(-1, 1, grid_size),
+ torch.linspace(-1, 1, grid_size)
+ ])
+ points_grid = torch.stack([xv.reshape(-1),
+ yv.reshape(-1)]).unsqueeze(0)
+ self.register_buffer('points_grid', points_grid)
+ grid_feat_len = grid_size * grid_size * cfg.MODEL.PyMAF.MLP_DIM[-1]
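+ # The first regressor samples features on this uniform 21x21 grid (no mesh yet);
+ # later iterations sample at the projected, downsampled SMPL vertices instead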
+
+ self.regressor = nn.ModuleList()
+ for i in range(cfg.MODEL.PyMAF.N_ITER):
+ if i == 0:
+ ref_infeat_dim = grid_feat_len
+ else:
+ ref_infeat_dim = ma_feat_len
+ self.regressor.append(
+ Regressor(feat_dim=ref_infeat_dim,
+ smpl_mean_params=smpl_mean_params))
+
+ dp_feat_dim = 256
+ self.with_uv = cfg.LOSS.POINT_REGRESSION_WEIGHTS > 0
+ if cfg.MODEL.PyMAF.AUX_SUPV_ON:
+ self.dp_head = IUV_predict_layer(feat_dim=dp_feat_dim)
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
+ """
+ Deconv_layer used in Simple Baselines:
+ Xiao et al. Simple Baselines for Human Pose Estimation and Tracking
+ https://github.com/microsoft/human-pose-estimation.pytorch
+ """
+ assert num_layers == len(num_filters), \
+ 'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
+ assert num_layers == len(num_kernels), \
+ 'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
+
+ def _get_deconv_cfg(deconv_kernel, index):
+ if deconv_kernel == 4:
+ padding = 1
+ output_padding = 0
+ elif deconv_kernel == 3:
+ padding = 1
+ output_padding = 1
+ elif deconv_kernel == 2:
+ padding = 0
+ output_padding = 0
+
+ return deconv_kernel, padding, output_padding
+
+ layers = []
+ for i in range(num_layers):
+ kernel, padding, output_padding = _get_deconv_cfg(
+ num_kernels[i], i)
+
+ planes = num_filters[i]
+ layers.append(
+ nn.ConvTranspose2d(in_channels=self.inplanes,
+ out_channels=planes,
+ kernel_size=kernel,
+ stride=2,
+ padding=padding,
+ output_padding=output_padding,
+ bias=self.deconv_with_bias))
+ layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
+ layers.append(nn.ReLU(inplace=True))
+ self.inplanes = planes
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x, J_regressor=None):
+
+ batch_size = x.shape[0]
+
+ # spatial features and global features
+ s_feat, g_feat = self.feature_extractor(x)
+
+ assert cfg.MODEL.PyMAF.N_ITER >= 0 and cfg.MODEL.PyMAF.N_ITER <= 3
+ if cfg.MODEL.PyMAF.N_ITER == 1:
+ deconv_blocks = [self.deconv_layers]
+ elif cfg.MODEL.PyMAF.N_ITER == 2:
+ deconv_blocks = [self.deconv_layers[0:6], self.deconv_layers[6:9]]
+ elif cfg.MODEL.PyMAF.N_ITER == 3:
+ deconv_blocks = [
+ self.deconv_layers[0:3], self.deconv_layers[3:6],
+ self.deconv_layers[6:9]
+ ]
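+ # Each deconv stage (ConvTranspose2d + BN + ReLU) upsamples the spatial features;
+ # one stage is consumed per alignment loop, so later loops see finer feature maps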
+
+ out_list = {}
+
+ # initial parameters
+ # TODO: remove the initial mesh generation during forward to reduce runtime
+ # by generating the initial mesh beforehand: smpl_output = self.init_smpl
+ smpl_output = self.regressor[0].forward_init(g_feat,
+ J_regressor=J_regressor)
+
+ out_list['smpl_out'] = [smpl_output]
+ out_list['dp_out'] = []
+
+ # for visualization
+ vis_feat_list = [s_feat.detach()]
+
+ # parameter predictions
+ for rf_i in range(cfg.MODEL.PyMAF.N_ITER):
+ pred_cam = smpl_output['pred_cam']
+ pred_shape = smpl_output['pred_shape']
+ pred_pose = smpl_output['pred_pose']
+
+ pred_cam = pred_cam.detach()
+ pred_shape = pred_shape.detach()
+ pred_pose = pred_pose.detach()
+
+ s_feat_i = deconv_blocks[rf_i](s_feat)
+ s_feat = s_feat_i
+ vis_feat_list.append(s_feat_i.detach())
+
+ self.maf_extractor[rf_i].im_feat = s_feat_i
+ self.maf_extractor[rf_i].cam = pred_cam
+
+ if rf_i == 0:
+ sample_points = torch.transpose(
+ self.points_grid.expand(batch_size, -1, -1), 1, 2)
+ ref_feature = self.maf_extractor[rf_i].sampling(sample_points)
+ else:
+ pred_smpl_verts = smpl_output['verts'].detach()
+ # TODO: use a more sparse SMPL implementation (with 431 vertices) for acceleration
+ pred_smpl_verts_ds = torch.matmul(
+ self.maf_extractor[rf_i].Dmap.unsqueeze(0),
+ pred_smpl_verts) # [B, 431, 3]
+ ref_feature = self.maf_extractor[rf_i](
+ pred_smpl_verts_ds) # [B, 431 * n_feat]
+
+ smpl_output = self.regressor[rf_i](ref_feature,
+ pred_pose,
+ pred_shape,
+ pred_cam,
+ n_iter=1,
+ J_regressor=J_regressor)
+ out_list['smpl_out'].append(smpl_output)
+
+ if self.training and cfg.MODEL.PyMAF.AUX_SUPV_ON:
+ iuv_out_dict = self.dp_head(s_feat)
+ out_list['dp_out'].append(iuv_out_dict)
+
+ return out_list
+
+
+def pymaf_net(smpl_mean_params, pretrained=True):
+ """ Constructs an PyMAF model with ResNet50 backbone.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = PyMAF(smpl_mean_params, pretrained)
+ return model
diff --git a/lib/pymaf/models/smpl.py b/lib/pymaf/models/smpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad0059acc3d88d7bf13d6bca25ed9da1b82bb5fe
--- /dev/null
+++ b/lib/pymaf/models/smpl.py
@@ -0,0 +1,92 @@
+# This script is borrowed from https://github.com/nkolot/SPIN/blob/master/models/smpl.py
+
+import torch
+import numpy as np
+from lib.smplx import SMPL as _SMPL
+from lib.smplx.body_models import ModelOutput
+from lib.smplx.lbs import vertices2joints
+from collections import namedtuple
+
+from lib.pymaf.core import path_config, constants
+
+SMPL_MEAN_PARAMS = path_config.SMPL_MEAN_PARAMS
+SMPL_MODEL_DIR = path_config.SMPL_MODEL_DIR
+
+# Indices to get the 14 LSP joints from the 17 H36M joints
+H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
+H36M_TO_J14 = H36M_TO_J17[:14]
+
+
+class SMPL(_SMPL):
+ """ Extension of the official SMPL implementation to support more joints """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
+ J_regressor_extra = np.load(path_config.JOINT_REGRESSOR_TRAIN_EXTRA)
+ self.register_buffer(
+ 'J_regressor_extra',
+ torch.tensor(J_regressor_extra, dtype=torch.float32))
+ self.joint_map = torch.tensor(joints, dtype=torch.long)
+ self.ModelOutput = namedtuple(
+ 'ModelOutput_', ModelOutput._fields + (
+ 'smpl_joints',
+ 'joints_J19',
+ ))
+ self.ModelOutput.__new__.__defaults__ = (None, ) * len(
+ self.ModelOutput._fields)
+
+ def forward(self, *args, **kwargs):
+ kwargs['get_skin'] = True
+ smpl_output = super().forward(*args, **kwargs)
+ extra_joints = vertices2joints(self.J_regressor_extra,
+ smpl_output.vertices)
+ # smpl_output.joints: [B, 45, 3] extra_joints: [B, 9, 3]
+ vertices = smpl_output.vertices
+ joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
+ smpl_joints = smpl_output.joints[:, :24]
+ joints = joints[:, self.joint_map, :] # [B, 49, 3]
+ joints_J24 = joints[:, -24:, :]
+ joints_J19 = joints_J24[:, constants.J24_TO_J19, :]
+ output = self.ModelOutput(vertices=vertices,
+ global_orient=smpl_output.global_orient,
+ body_pose=smpl_output.body_pose,
+ joints=joints,
+ joints_J19=joints_J19,
+ smpl_joints=smpl_joints,
+ betas=smpl_output.betas,
+ full_pose=smpl_output.full_pose)
+ return output
+
+
+def get_smpl_faces():
+ smpl = SMPL(SMPL_MODEL_DIR, batch_size=1, create_transl=False)
+ return smpl.faces
+
+
+def get_part_joints(smpl_joints):
+ batch_size = smpl_joints.shape[0]
+
+ # part_joints = torch.zeros().to(smpl_joints.device)
+
+ one_seg_pairs = [(0, 1), (0, 2), (0, 3), (3, 6), (9, 12), (9, 13), (9, 14),
+ (12, 15), (13, 16), (14, 17)]
+ two_seg_pairs = [(1, 4), (2, 5), (4, 7), (5, 8), (16, 18), (17, 19),
+ (18, 20), (19, 21)]
+
+ one_seg_pairs.extend(two_seg_pairs)
+
+ single_joints = [10, 11, 15, 22, 23]
+
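+ # Part joints are the midpoints of each joint pair above plus the remaining single
+ # joints (feet, head, hands), giving one representative location per body part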
+ part_joints = []
+
+ for j_p in one_seg_pairs:
+ new_joint = torch.mean(smpl_joints[:, j_p], dim=1, keepdim=True)
+ part_joints.append(new_joint)
+
+ for j_p in single_joints:
+ part_joints.append(smpl_joints[:, j_p:j_p + 1])
+
+ part_joints = torch.cat(part_joints, dim=1)
+
+ return part_joints
diff --git a/lib/pymaf/models/hmr.py b/lib/pymaf/models/hmr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb1cd6b7e2e4581f2c5d9cb5e952049b5a075e1
--- /dev/null
+++ b/lib/pymaf/models/hmr.py
@@ -0,0 +1,303 @@
+# This script is borrowed from https://github.com/nkolot/SPIN/blob/master/models/hmr.py
+
+import torch
+import torch.nn as nn
+import torchvision.models.resnet as resnet
+import numpy as np
+import math
+from lib.pymaf.utils.geometry import rot6d_to_rotmat
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+BN_MOMENTUM = 0.1
+
+
+class Bottleneck(nn.Module):
+ """ Redefinition of Bottleneck residual block
+ Adapted from the official PyTorch implementation
+ """
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super().__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.conv2 = nn.Conv2d(planes,
+ planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * 4)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class ResNet_Backbone(nn.Module):
+ """ Feature Extrator with ResNet backbone
+ """
+
+ def __init__(self, model='res50', pretrained=True):
+ if model == 'res50':
+ block, layers = Bottleneck, [3, 4, 6, 3]
+ else:
+ pass # TODO
+
+ self.inplanes = 64
+ super().__init__()
+ npose = 24 * 6
+ self.conv1 = nn.Conv2d(3,
+ 64,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+ self.avgpool = nn.AvgPool2d(7, stride=1)
+
+ if pretrained:
+ resnet_imagenet = resnet.resnet50(pretrained=True)
+ self.load_state_dict(resnet_imagenet.state_dict(), strict=False)
+ logger.info('loaded resnet50 imagenet pretrained model')
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
+ assert num_layers == len(num_filters), \
+ 'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
+ assert num_layers == len(num_kernels), \
+ 'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'
+
+ def _get_deconv_cfg(deconv_kernel, index):
+ if deconv_kernel == 4:
+ padding = 1
+ output_padding = 0
+ elif deconv_kernel == 3:
+ padding = 1
+ output_padding = 1
+ elif deconv_kernel == 2:
+ padding = 0
+ output_padding = 0
+
+ return deconv_kernel, padding, output_padding
+
+ layers = []
+ for i in range(num_layers):
+ kernel, padding, output_padding = _get_deconv_cfg(
+ num_kernels[i], i)
+
+ planes = num_filters[i]
+ layers.append(
+ nn.ConvTranspose2d(in_channels=self.inplanes,
+ out_channels=planes,
+ kernel_size=kernel,
+ stride=2,
+ padding=padding,
+ output_padding=output_padding,
+ bias=self.deconv_with_bias))
+ layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
+ layers.append(nn.ReLU(inplace=True))
+ self.inplanes = planes
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+
+ batch_size = x.shape[0]
+
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+
+ x1 = self.layer1(x)
+ x2 = self.layer2(x1)
+ x3 = self.layer3(x2)
+ x4 = self.layer4(x3)
+
+ xf = self.avgpool(x4)
+ xf = xf.view(xf.size(0), -1)
+
+ x_featmap = x4
+
+ return x_featmap, xf
+
+
+class HMR(nn.Module):
+ """ SMPL Iterative Regressor with ResNet50 backbone
+ """
+
+ def __init__(self, block, layers, smpl_mean_params):
+ self.inplanes = 64
+ super().__init__()
+ npose = 24 * 6
+ self.conv1 = nn.Conv2d(3,
+ 64,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+ self.avgpool = nn.AvgPool2d(7, stride=1)
+ self.fc1 = nn.Linear(512 * block.expansion + npose + 13, 1024)
+ self.drop1 = nn.Dropout()
+ self.fc2 = nn.Linear(1024, 1024)
+ self.drop2 = nn.Dropout()
+ self.decpose = nn.Linear(1024, npose)
+ self.decshape = nn.Linear(1024, 10)
+ self.deccam = nn.Linear(1024, 3)
+ nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
+ nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
+ nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
+
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+
+ mean_params = np.load(smpl_mean_params)
+ init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)
+ init_shape = torch.from_numpy(
+ mean_params['shape'][:].astype('float32')).unsqueeze(0)
+ init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)
+ self.register_buffer('init_pose', init_pose)
+ self.register_buffer('init_shape', init_shape)
+ self.register_buffer('init_cam', init_cam)
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def forward(self,
+ x,
+ init_pose=None,
+ init_shape=None,
+ init_cam=None,
+ n_iter=3):
+
+ batch_size = x.shape[0]
+
+ if init_pose is None:
+ init_pose = self.init_pose.expand(batch_size, -1)
+ if init_shape is None:
+ init_shape = self.init_shape.expand(batch_size, -1)
+ if init_cam is None:
+ init_cam = self.init_cam.expand(batch_size, -1)
+
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+
+ x1 = self.layer1(x)
+ x2 = self.layer2(x1)
+ x3 = self.layer3(x2)
+ x4 = self.layer4(x3)
+
+ xf = self.avgpool(x4)
+ xf = xf.view(xf.size(0), -1)
+
+ pred_pose = init_pose
+ pred_shape = init_shape
+ pred_cam = init_cam
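+ # Iterative error feedback (n_iter=3 by default): regress residual updates to pose,
+ # shape and camera from the pooled ResNet feature and the current estimates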
+ for i in range(n_iter):
+ xc = torch.cat([xf, pred_pose, pred_shape, pred_cam], 1)
+ xc = self.fc1(xc)
+ xc = self.drop1(xc)
+ xc = self.fc2(xc)
+ xc = self.drop2(xc)
+ pred_pose = self.decpose(xc) + pred_pose
+ pred_shape = self.decshape(xc) + pred_shape
+ pred_cam = self.deccam(xc) + pred_cam
+
+ pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
+
+ return pred_rotmat, pred_shape, pred_cam
+
+
+def hmr(smpl_mean_params, pretrained=True, **kwargs):
+ """ Constructs an HMR model with ResNet50 backbone.
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)
+ if pretrained:
+ resnet_imagenet = resnet.resnet50(pretrained=True)
+ model.load_state_dict(resnet_imagenet.state_dict(), strict=False)
+ return model
diff --git a/lib/pymaf/models/maf_extractor.py b/lib/pymaf/models/maf_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..66a228d7992ea284f2374b6cfb7343c5a47c902a
--- /dev/null
+++ b/lib/pymaf/models/maf_extractor.py
@@ -0,0 +1,135 @@
+# This script is borrowed and extended from https://github.com/shunsukesaito/PIFu/blob/master/lib/model/SurfaceClassifier.py
+
+from packaging import version
+import torch
+import scipy
+import numpy as np
+import torch.nn as nn
+import torch.nn.functional as F
+
+from lib.common.config import cfg
+from lib.pymaf.utils.geometry import projection
+from lib.pymaf.core.path_config import MESH_DOWNSAMPLEING
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class MAF_Extractor(nn.Module):
+ ''' Mesh-aligned Feature Extractor
+ As discussed in the paper, we extract mesh-aligned features based on the 2D projection of the mesh vertices.
+ The features extracted from the spatial feature maps go through an MLP for dimension reduction.
+ '''
+
+ def __init__(self, device=torch.device('cuda')):
+ super().__init__()
+
+ self.device = device
+ self.filters = []
+ self.num_views = 1
+ filter_channels = cfg.MODEL.PyMAF.MLP_DIM
+ self.last_op = nn.ReLU(True)
+
+ for l in range(0, len(filter_channels) - 1):
+ if 0 != l:
+ self.filters.append(
+ nn.Conv1d(filter_channels[l] + filter_channels[0],
+ filter_channels[l + 1], 1))
+ else:
+ self.filters.append(
+ nn.Conv1d(filter_channels[l], filter_channels[l + 1], 1))
+
+ self.add_module("conv%d" % l, self.filters[l])
+
+ self.im_feat = None
+ self.cam = None
+
+ # downsample SMPL mesh and assign part labels
+ # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz
+ smpl_mesh_graph = np.load(MESH_DOWNSAMPLEING,
+ allow_pickle=True,
+ encoding='latin1')
+
+ A = smpl_mesh_graph['A']
+ U = smpl_mesh_graph['U']
+ D = smpl_mesh_graph['D'] # shape: (2,)
+
+ # downsampling
+ ptD = []
+ for i in range(len(D)):
+ d = scipy.sparse.coo_matrix(D[i])
+ idx = torch.LongTensor(np.array([d.row, d.col]))  # avoid shadowing the loop variable i
+ vals = torch.FloatTensor(d.data)
+ ptD.append(torch.sparse.FloatTensor(idx, vals, d.shape))
+
+ # downsampling mapping from 6890 points to 431 points
+ # ptD[0].to_dense() - Size: [1723, 6890]
+ # ptD[1].to_dense() - Size: [431, 1723]
+ Dmap = torch.matmul(ptD[1].to_dense(),
+ ptD[0].to_dense()) # 6890 -> 431
+ self.register_buffer('Dmap', Dmap)
+
+ def reduce_dim(self, feature):
+ '''
+ Dimension reduction by multi-layer perceptrons
+ :param feature: list of [B, C_s, N] point-wise features before dimension reduction
+ :return: [B, C_p x N] concatenation of point-wise features after dimension reduction
+ '''
+ y = feature
+ tmpy = feature
+ for i, f in enumerate(self.filters):
+ y = self._modules['conv' +
+ str(i)](y if i == 0 else torch.cat([y, tmpy], 1))
+ if i != len(self.filters) - 1:
+ y = F.leaky_relu(y)
+ if self.num_views > 1 and i == len(self.filters) // 2:
+ y = y.view(-1, self.num_views, y.shape[1],
+ y.shape[2]).mean(dim=1)
+ tmpy = feature.view(-1, self.num_views, feature.shape[1],
+ feature.shape[2]).mean(dim=1)
+
+ y = self.last_op(y)
+
+ y = y.view(y.shape[0], -1)
+ return y
+
+ def sampling(self, points, im_feat=None, z_feat=None):
+ '''
+ Given 2D points, sample the point-wise features for each point,
+ the dimension of point-wise features will be reduced from C_s to C_p by MLP.
+ Image features should be pre-computed before this call.
+ :param points: [B, N, 2] image coordinates of points
+ :im_feat: [B, C_s, H_s, W_s] spatial feature maps
+ :return: [B, C_p x N] concatenation of point-wise features after dimension reduction
+ '''
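+ # Note: points are expected in the normalized [-1, 1] range used by F.grid_sample,
+ # i.e. already projected with the weak-perspective camera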
+ if im_feat is None:
+ im_feat = self.im_feat
+
+ batch_size = im_feat.shape[0]
+
+ if version.parse(torch.__version__) >= version.parse('1.3.0'):
+ # Default grid_sample behavior has changed to align_corners=False since 1.3.0.
+ point_feat = torch.nn.functional.grid_sample(
+ im_feat, points.unsqueeze(2), align_corners=True)[..., 0]
+ else:
+ point_feat = torch.nn.functional.grid_sample(
+ im_feat, points.unsqueeze(2))[..., 0]
+
+ mesh_align_feat = self.reduce_dim(point_feat)
+ return mesh_align_feat
+
+ def forward(self, p, s_feat=None, cam=None, **kwargs):
+ ''' Returns mesh-aligned features for the 3D mesh points.
+ Args:
+ p (tensor): [B, N_m, 3] mesh vertices
+ s_feat (tensor): [B, C_s, H_s, W_s] spatial feature maps
+ cam (tensor): [B, 3] camera
+ Return:
+ mesh_align_feat (tensor): [B, C_p x N_m] mesh-aligned features
+ '''
+ if cam is None:
+ cam = self.cam
+ p_proj_2d = projection(p, cam, retain_z=False)
+ mesh_align_feat = self.sampling(p_proj_2d, s_feat)
+ return mesh_align_feat
diff --git a/lib/pymaf/models/res_module.py b/lib/pymaf/models/res_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..60c6a1a87326e80c68fa0c058206af4062c39d00
--- /dev/null
+++ b/lib/pymaf/models/res_module.py
@@ -0,0 +1,385 @@
+# code brought in part from https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from collections import OrderedDict
+import os
+from lib.pymaf.core.cfgs import cfg
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+BN_MOMENTUM = 0.1
+
+
+def conv3x3(in_planes, out_planes, stride=1, bias=False, groups=1):
+ """3x3 convolution with padding"""
+ return nn.Conv2d(in_planes * groups,
+ out_planes * groups,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=bias,
+ groups=groups)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):
+ super().__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)
+ self.bn1 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes, groups=groups)
+ self.bn2 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):
+ super().__init__()
+ self.conv1 = nn.Conv2d(inplanes * groups,
+ planes * groups,
+ kernel_size=1,
+ bias=False,
+ groups=groups)
+ self.bn1 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)
+ self.conv2 = nn.Conv2d(planes * groups,
+ planes * groups,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False,
+ groups=groups)
+ self.bn2 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)
+ self.conv3 = nn.Conv2d(planes * groups,
+ planes * self.expansion * groups,
+ kernel_size=1,
+ bias=False,
+ groups=groups)
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion * groups,
+ momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+resnet_spec = {
+ 18: (BasicBlock, [2, 2, 2, 2]),
+ 34: (BasicBlock, [3, 4, 6, 3]),
+ 50: (Bottleneck, [3, 4, 6, 3]),
+ 101: (Bottleneck, [3, 4, 23, 3]),
+ 152: (Bottleneck, [3, 8, 36, 3])
+}
+
+
+class IUV_predict_layer(nn.Module):
+ def __init__(self,
+ feat_dim=256,
+ final_cov_k=3,
+ part_out_dim=25,
+ with_uv=True):
+ super().__init__()
+
+ self.with_uv = with_uv
+ if self.with_uv:
+ self.predict_u = nn.Conv2d(in_channels=feat_dim,
+ out_channels=25,
+ kernel_size=final_cov_k,
+ stride=1,
+ padding=1 if final_cov_k == 3 else 0)
+
+ self.predict_v = nn.Conv2d(in_channels=feat_dim,
+ out_channels=25,
+ kernel_size=final_cov_k,
+ stride=1,
+ padding=1 if final_cov_k == 3 else 0)
+
+ self.predict_ann_index = nn.Conv2d(
+ in_channels=feat_dim,
+ out_channels=15,
+ kernel_size=final_cov_k,
+ stride=1,
+ padding=1 if final_cov_k == 3 else 0)
+
+ self.predict_uv_index = nn.Conv2d(in_channels=feat_dim,
+ out_channels=25,
+ kernel_size=final_cov_k,
+ stride=1,
+ padding=1 if final_cov_k == 3 else 0)
+
+ self.inplanes = feat_dim
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
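+ # forward() returns dense auxiliary predictions in DensePose-style IUV format:
+ # a 25-channel UV part index, a 15-channel annotation index and, when with_uv is
+ # True, per-part U/V regression maps at the same spatial resolution as the input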
+ def forward(self, x):
+ return_dict = {}
+
+ predict_uv_index = self.predict_uv_index(x)
+ predict_ann_index = self.predict_ann_index(x)
+
+ return_dict['predict_uv_index'] = predict_uv_index
+ return_dict['predict_ann_index'] = predict_ann_index
+
+ if self.with_uv:
+ predict_u = self.predict_u(x)
+ predict_v = self.predict_v(x)
+ return_dict['predict_u'] = predict_u
+ return_dict['predict_v'] = predict_v
+ else:
+ return_dict['predict_u'] = None
+ return_dict['predict_v'] = None
+ # return_dict['predict_u'] = torch.zeros(predict_uv_index.shape).to(predict_uv_index.device)
+ # return_dict['predict_v'] = torch.zeros(predict_uv_index.shape).to(predict_uv_index.device)
+
+ return return_dict
+
+
+class SmplResNet(nn.Module):
+ def __init__(self,
+ resnet_nums,
+ in_channels=3,
+ num_classes=229,
+ last_stride=2,
+ n_extra_feat=0,
+ truncate=0,
+ **kwargs):
+ super().__init__()
+
+ self.inplanes = 64
+ self.truncate = truncate
+ # extra = cfg.MODEL.EXTRA
+ # self.deconv_with_bias = extra.DECONV_WITH_BIAS
+ block, layers = resnet_spec[resnet_nums]
+
+ self.conv1 = nn.Conv2d(in_channels,
+ 64,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False)
+ self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+ self.layer1 = self._make_layer(block, 64, layers[0])
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+ self.layer3 = self._make_layer(block, 256, layers[2],
+ stride=2) if truncate < 2 else None
+ self.layer4 = self._make_layer(
+ block, 512, layers[3],
+ stride=last_stride) if truncate < 1 else None
+
+ self.avg_pooling = nn.AdaptiveAvgPool2d(1)
+
+ self.num_classes = num_classes
+ if num_classes > 0:
+ self.final_layer = nn.Linear(512 * block.expansion, num_classes)
+ nn.init.xavier_uniform_(self.final_layer.weight, gain=0.01)
+
+ self.n_extra_feat = n_extra_feat
+ if n_extra_feat > 0:
+ self.trans_conv = nn.Sequential(
+ nn.Conv2d(n_extra_feat + 512 * block.expansion,
+ 512 * block.expansion,
+ kernel_size=1,
+ bias=False),
+ nn.BatchNorm2d(512 * block.expansion, momentum=BN_MOMENTUM),
+ nn.ReLU(True))
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x, infeat=None):
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+
+ x1 = self.layer1(x)
+ x2 = self.layer2(x1)
+ x3 = self.layer3(x2) if self.truncate < 2 else x2
+ x4 = self.layer4(x3) if self.truncate < 1 else x3
+
+ if infeat is not None:
+ x4 = self.trans_conv(torch.cat([infeat, x4], 1))
+
+ if self.num_classes > 0:
+ xp = self.avg_pooling(x4)
+ cls = self.final_layer(xp.view(xp.size(0), -1))
+ if not cfg.DANET.USE_MEAN_PARA:
+ # for non-negative scale
+ scale = F.relu(cls[:, 0]).unsqueeze(1)
+ cls = torch.cat((scale, cls[:, 1:]), dim=1)
+ else:
+ cls = None
+
+ return cls, {'x4': x4}
+
+ def init_weights(self, pretrained=''):
+ if os.path.isfile(pretrained):
+ logger.info('=> loading pretrained model {}'.format(pretrained))
+ # self.load_state_dict(pretrained_state_dict, strict=False)
+ checkpoint = torch.load(pretrained)
+ if isinstance(checkpoint, OrderedDict):
+ # state_dict = checkpoint
+ state_dict_old = self.state_dict()
+ for key in state_dict_old.keys():
+ if key in checkpoint.keys():
+ if state_dict_old[key].shape != checkpoint[key].shape:
+ del checkpoint[key]
+ state_dict = checkpoint
+ elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
+ state_dict_old = checkpoint['state_dict']
+ state_dict = OrderedDict()
+ # delete 'module.' because it is saved from DataParallel module
+ for key in state_dict_old.keys():
+ if key.startswith('module.'):
+ # state_dict[key[7:]] = state_dict[key]
+ # state_dict.pop(key)
+ state_dict[key[7:]] = state_dict_old[key]
+ else:
+ state_dict[key] = state_dict_old[key]
+ else:
+ raise RuntimeError(
+ 'No state_dict found in checkpoint file {}'.format(
+ pretrained))
+ self.load_state_dict(state_dict, strict=False)
+ else:
+ logger.error('=> imagenet pretrained model does not exist')
+ logger.error('=> please download it first')
+ raise ValueError('imagenet pretrained model does not exist')
+
+
+class LimbResLayers(nn.Module):
+ def __init__(self,
+ resnet_nums,
+ inplanes,
+ outplanes=None,
+ groups=1,
+ **kwargs):
+ super().__init__()
+
+ self.inplanes = inplanes
+ block, layers = resnet_spec[resnet_nums]
+ self.outplanes = 512 if outplanes is None else outplanes
+ self.layer4 = self._make_layer(block,
+ self.outplanes,
+ layers[3],
+ stride=2,
+ groups=groups)
+
+ self.avg_pooling = nn.AdaptiveAvgPool2d(1)
+
+ def _make_layer(self, block, planes, blocks, stride=1, groups=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(self.inplanes * groups,
+ planes * block.expansion * groups,
+ kernel_size=1,
+ stride=stride,
+ bias=False,
+ groups=groups),
+ nn.BatchNorm2d(planes * block.expansion * groups,
+ momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(
+ block(self.inplanes, planes, stride, downsample, groups=groups))
+ self.inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(block(self.inplanes, planes, groups=groups))
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ x = self.layer4(x)
+ x = self.avg_pooling(x)
+
+ return x
\ No newline at end of file
diff --git a/lib/pymaf/utils/__init__.py b/lib/pymaf/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/pymaf/utils/geometry.py b/lib/pymaf/utils/geometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..d65db5895f5546d9a119955703243c4d4917892c
--- /dev/null
+++ b/lib/pymaf/utils/geometry.py
@@ -0,0 +1,435 @@
+import torch
+import numpy as np
+from torch.nn import functional as F
+"""
+Useful geometric operations, e.g. Perspective projection and a differentiable Rodrigues formula
+Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR
+"""
+
+
+def batch_rodrigues(theta):
+ """Convert axis-angle representation to rotation matrix.
+ Args:
+ theta: size = [B, 3]
+ Returns:
+ Rotation matrix corresponding to the axis-angle input -- size = [B, 3, 3]
+ """
+ l1norm = torch.norm(theta + 1e-8, p=2, dim=1)
+ angle = torch.unsqueeze(l1norm, -1)
+ normalized = torch.div(theta, angle)
+ angle = angle * 0.5
+ v_cos = torch.cos(angle)
+ v_sin = torch.sin(angle)
+ quat = torch.cat([v_cos, v_sin * normalized], dim=1)
+ return quat_to_rotmat(quat)
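+
+# Illustrative usage (a minimal sketch, not part of the original code):
+#   theta = torch.zeros(2, 3)    # two zero rotations in axis-angle form
+#   R = batch_rodrigues(theta)   # -> shape [2, 3, 3], each close to the identity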
+
+
+def quat_to_rotmat(quat):
+ """Convert quaternion coefficients to rotation matrix.
+ Args:
+ quat: size = [B, 4] 4 <===>(w, x, y, z)
+ Returns:
+ Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
+ """
+ norm_quat = quat
+ norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
+ w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
+
+ B = quat.size(0)
+
+ w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
+ wx, wy, wz = w * x, w * y, w * z
+ xy, xz, yz = x * y, x * z, y * z
+
+ rotMat = torch.stack([
+ w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy,
+ w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz,
+ w2 - x2 - y2 + z2
+ ],
+ dim=1).view(B, 3, 3)
+ return rotMat
+
+
+def rotation_matrix_to_angle_axis(rotation_matrix):
+ """
+ This function is borrowed from https://github.com/kornia/kornia
+ Convert 3x4 rotation matrix to Rodrigues vector
+ Args:
+ rotation_matrix (Tensor): rotation matrix.
+ Returns:
+ Tensor: Rodrigues vector transformation.
+ Shape:
+ - Input: :math:`(N, 3, 4)`
+ - Output: :math:`(N, 3)`
+ Example:
+ >>> input = torch.rand(2, 3, 4) # Nx3x4
+ >>> output = tgm.rotation_matrix_to_angle_axis(input) # Nx3
+ """
+ if rotation_matrix.shape[1:] == (3, 3):
+ rot_mat = rotation_matrix.reshape(-1, 3, 3)
+ hom = torch.tensor([0, 0, 1],
+ dtype=torch.float32,
+ device=rotation_matrix.device).reshape(
+ 1, 3, 1).expand(rot_mat.shape[0], -1, -1)
+ rotation_matrix = torch.cat([rot_mat, hom], dim=-1)
+
+ quaternion = rotation_matrix_to_quaternion(rotation_matrix)
+ aa = quaternion_to_angle_axis(quaternion)
+ aa[torch.isnan(aa)] = 0.0
+ return aa
+
+
+def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
+ """
+ This function is borrowed from https://github.com/kornia/kornia
+ Convert quaternion vector to angle axis of rotation.
+ Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
+ Args:
+ quaternion (torch.Tensor): tensor with quaternions.
+ Return:
+ torch.Tensor: tensor with angle axis of rotation.
+ Shape:
+ - Input: :math:`(*, 4)` where `*` means, any number of dimensions
+ - Output: :math:`(*, 3)`
+ Example:
+ >>> quaternion = torch.rand(2, 4) # Nx4
+ >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3
+ """
+ if not torch.is_tensor(quaternion):
+ raise TypeError("Input type is not a torch.Tensor. Got {}".format(
+ type(quaternion)))
+
+ if not quaternion.shape[-1] == 4:
+ raise ValueError(
+ "Input must be a tensor of shape Nx4 or 4. Got {}".format(
+ quaternion.shape))
+ # unpack input and compute conversion
+ q1: torch.Tensor = quaternion[..., 1]
+ q2: torch.Tensor = quaternion[..., 2]
+ q3: torch.Tensor = quaternion[..., 3]
+ sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
+
+ sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
+ cos_theta: torch.Tensor = quaternion[..., 0]
+ two_theta: torch.Tensor = 2.0 * torch.where(
+ cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta),
+ torch.atan2(sin_theta, cos_theta))
+
+ k_pos: torch.Tensor = two_theta / sin_theta
+ k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
+ k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
+
+ angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
+ angle_axis[..., 0] += q1 * k
+ angle_axis[..., 1] += q2 * k
+ angle_axis[..., 2] += q3 * k
+ return angle_axis
+
+
+def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
+ """
+ This function is borrowed from https://github.com/kornia/kornia
+ Convert 3x4 rotation matrix to 4d quaternion vector
+ This algorithm is based on algorithm described in
+ https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
+ Args:
+ rotation_matrix (Tensor): the rotation matrix to convert.
+ Return:
+ Tensor: the rotation in quaternion
+ Shape:
+ - Input: :math:`(N, 3, 4)`
+ - Output: :math:`(N, 4)`
+ Example:
+ >>> input = torch.rand(4, 3, 4) # Nx3x4
+ >>> output = tgm.rotation_matrix_to_quaternion(input) # Nx4
+ """
+ if not torch.is_tensor(rotation_matrix):
+ raise TypeError("Input type is not a torch.Tensor. Got {}".format(
+ type(rotation_matrix)))
+
+ if len(rotation_matrix.shape) > 3:
+ raise ValueError(
+ "Input size must be a three dimensional tensor. Got {}".format(
+ rotation_matrix.shape))
+ if not rotation_matrix.shape[-2:] == (3, 4):
+ raise ValueError(
+ "Input size must be a N x 3 x 4 tensor. Got {}".format(
+ rotation_matrix.shape))
+
+ rmat_t = torch.transpose(rotation_matrix, 1, 2)
+
+ mask_d2 = rmat_t[:, 2, 2] < eps
+
+ mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
+ mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]
+
+ t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
+ q0 = torch.stack([
+ rmat_t[:, 1, 2] - rmat_t[:, 2, 1], t0,
+ rmat_t[:, 0, 1] + rmat_t[:, 1, 0], rmat_t[:, 2, 0] + rmat_t[:, 0, 2]
+ ], -1)
+ t0_rep = t0.repeat(4, 1).t()
+
+ t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
+ q1 = torch.stack([
+ rmat_t[:, 2, 0] - rmat_t[:, 0, 2], rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
+ t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]
+ ], -1)
+ t1_rep = t1.repeat(4, 1).t()
+
+ t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
+ q2 = torch.stack([
+ rmat_t[:, 0, 1] - rmat_t[:, 1, 0], rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
+ rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2
+ ], -1)
+ t2_rep = t2.repeat(4, 1).t()
+
+ t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
+ q3 = torch.stack([
+ t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
+ rmat_t[:, 2, 0] - rmat_t[:, 0, 2], rmat_t[:, 0, 1] - rmat_t[:, 1, 0]
+ ], -1)
+ t3_rep = t3.repeat(4, 1).t()
+
+ mask_c0 = mask_d2 * mask_d0_d1
+ mask_c1 = mask_d2 * ~mask_d0_d1
+ mask_c2 = ~mask_d2 * mask_d0_nd1
+ mask_c3 = ~mask_d2 * ~mask_d0_nd1
+ mask_c0 = mask_c0.view(-1, 1).type_as(q0)
+ mask_c1 = mask_c1.view(-1, 1).type_as(q1)
+ mask_c2 = mask_c2.view(-1, 1).type_as(q2)
+ mask_c3 = mask_c3.view(-1, 1).type_as(q3)
+
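+ # Each candidate q0..q3 above assumes a different diagonal entry of the rotation
+ # matrix dominates; the masks select, per batch element, the numerically stable branch.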
+ q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
+ q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 + # noqa
+ t2_rep * mask_c2 + t3_rep * mask_c3) # noqa
+ q *= 0.5
+ return q
+
+
+def rot6d_to_rotmat(x):
+ """Convert 6D rotation representation to 3x3 rotation matrix.
+ Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
+ Input:
+ (B,6) Batch of 6-D rotation representations
+ Output:
+ (B,3,3) Batch of corresponding rotation matrices
+ """
+ x = x.view(-1, 3, 2)
+ a1 = x[:, :, 0]
+ a2 = x[:, :, 1]
+ b1 = F.normalize(a1)
+ b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
+ b3 = torch.cross(b1, b2)
+ return torch.stack((b1, b2, b3), dim=-1)
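+
+# The construction above is a Gram-Schmidt step: b1 is the normalized first column,
+# b2 is the second column with its component along b1 removed and normalized, and
+# b3 = b1 x b2 completes the right-handed frame.
+# Illustrative usage (a minimal sketch, not part of the original code):
+#   x = torch.randn(4, 6)
+#   R = rot6d_to_rotmat(x)   # -> shape [4, 3, 3], valid rotation matrices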
+
+
+def projection(pred_joints, pred_camera, retain_z=False):
+ pred_cam_t = torch.stack([
+ pred_camera[:, 1], pred_camera[:, 2], 2 * 5000. /
+ (224. * pred_camera[:, 0] + 1e-9)
+ ],
+ dim=-1)
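+ # pred_camera holds a weak-perspective camera (s, tx, ty); the equivalent perspective
+ # translation uses depth t_z = 2 * f / (img_res * s) with f = 5000 and img_res = 224.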
+ batch_size = pred_joints.shape[0]
+ camera_center = torch.zeros(batch_size, 2, device=pred_joints.device)
+ pred_keypoints_2d = perspective_projection(
+ pred_joints,
+ rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1,
+ -1).to(pred_joints.device),
+ translation=pred_cam_t,
+ focal_length=5000.,
+ camera_center=camera_center,
+ retain_z=retain_z)
+ # Normalize keypoints to [-1,1]
+ pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)
+ return pred_keypoints_2d
+
+
+def perspective_projection(points,
+ rotation,
+ translation,
+ focal_length,
+ camera_center,
+ retain_z=False):
+ """
+ This function computes the perspective projection of a set of points.
+ Input:
+ points (bs, N, 3): 3D points
+ rotation (bs, 3, 3): Camera rotation
+ translation (bs, 3): Camera translation
+ focal_length (bs,) or scalar: Focal length
+ camera_center (bs, 2): Camera center
+ """
+ batch_size = points.shape[0]
+ K = torch.zeros([batch_size, 3, 3], device=points.device)
+ K[:, 0, 0] = focal_length
+ K[:, 1, 1] = focal_length
+ K[:, 2, 2] = 1.
+ K[:, :-1, -1] = camera_center
+
+ # Transform points
+ points = torch.einsum('bij,bkj->bki', rotation, points)
+ points = points + translation.unsqueeze(1)
+
+ # Apply perspective distortion
+ projected_points = points / points[:, :, -1].unsqueeze(-1)
+
+ # Apply camera intrinsics
+ projected_points = torch.einsum('bij,bkj->bki', K, projected_points)
+
+ if retain_z:
+ return projected_points
+ else:
+ return projected_points[:, :, :-1]
+
+
+def estimate_translation_np(S,
+ joints_2d,
+ joints_conf,
+ focal_length=5000,
+ img_size=224):
+ """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
+ Input:
+ S: (25, 3) 3D joint locations
+ joints: (25, 3) 2D joint locations and confidence
+ Returns:
+ (3,) camera translation vector
+ """
+
+ num_joints = S.shape[0]
+ # focal length
+ f = np.array([focal_length, focal_length])
+ # optical center
+ center = np.array([img_size / 2., img_size / 2.])
+
+ # transformations
+ Z = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)
+ XY = np.reshape(S[:, 0:2], -1)
+ O = np.tile(center, num_joints)
+ F = np.tile(f, num_joints)
+ weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)
+
+ # least squares
+ Q = np.array([
+ F * np.tile(np.array([1, 0]), num_joints),
+ F * np.tile(np.array([0, 1]), num_joints),
+ O - np.reshape(joints_2d, -1)
+ ]).T
+ c = (np.reshape(joints_2d, -1) - O) * Z - F * XY
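+ # Each joint contributes two linear equations in the unknown translation (tx, ty, tz):
+ # f * (X + tx) = (u - cx) * (Z + tz) and the analogous equation in y. The rows of Q
+ # hold the coefficients of the translation and c holds the constant terms.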
+
+ # weighted least squares
+ W = np.diagflat(weight2)
+ Q = np.dot(W, Q)
+ c = np.dot(W, c)
+
+ # square matrix
+ A = np.dot(Q.T, Q)
+ b = np.dot(Q.T, c)
+
+ # solution
+ trans = np.linalg.solve(A, b)
+
+ return trans
+
+
+def estimate_translation(S, joints_2d, focal_length=5000., img_size=224.):
+ """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
+ Input:
+ S: (B, 49, 3) 3D joint locations
+ joints: (B, 49, 3) 2D joint locations and confidence
+ Returns:
+ (B, 3) camera translation vectors
+ """
+
+ device = S.device
+ # Use only joints 25:49 (GT joints)
+ S = S[:, 25:, :].cpu().numpy()
+ joints_2d = joints_2d[:, 25:, :].cpu().numpy()
+ joints_conf = joints_2d[:, :, -1]
+ joints_2d = joints_2d[:, :, :-1]
+ trans = np.zeros((S.shape[0], 3), dtype=np.float32)
+ # Find the translation for each example in the batch
+ for i in range(S.shape[0]):
+ S_i = S[i]
+ joints_i = joints_2d[i]
+ conf_i = joints_conf[i]
+ trans[i] = estimate_translation_np(S_i,
+ joints_i,
+ conf_i,
+ focal_length=focal_length,
+ img_size=img_size)
+ return torch.from_numpy(trans).to(device)
+
+
+def Rot_y(angle, category='torch', prepend_dim=True, device=None):
+ '''Rotate around y-axis by angle
+ Args:
+ category: 'torch' or 'numpy'
+ prepend_dim: prepend an extra dimension
+ Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
+ '''
+ m = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
+ [-np.sin(angle), 0., np.cos(angle)]])
+ if category == 'torch':
+ if prepend_dim:
+ return torch.tensor(m, dtype=torch.float,
+ device=device).unsqueeze(0)
+ else:
+ return torch.tensor(m, dtype=torch.float, device=device)
+ elif category == 'numpy':
+ if prepend_dim:
+ return np.expand_dims(m, 0)
+ else:
+ return m
+ else:
+ raise ValueError("category must be 'torch' or 'numpy'")
+
+
+def Rot_x(angle, category='torch', prepend_dim=True, device=None):
+ '''Rotate around x-axis by angle
+ Args:
+ category: 'torch' or 'numpy'
+ prepend_dim: prepend an extra dimension
+ Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
+ '''
+ m = np.array([[1., 0., 0.], [0., np.cos(angle), -np.sin(angle)],
+ [0., np.sin(angle), np.cos(angle)]])
+ if category == 'torch':
+ if prepend_dim:
+ return torch.tensor(m, dtype=torch.float,
+ device=device).unsqueeze(0)
+ else:
+ return torch.tensor(m, dtype=torch.float, device=device)
+ elif category == 'numpy':
+ if prepend_dim:
+ return np.expand_dims(m, 0)
+ else:
+ return m
+ else:
+ raise ValueError("category must be 'torch' or 'numpy'")
+
+
+def Rot_z(angle, category='torch', prepend_dim=True, device=None):
+ '''Rotate around z-axis by angle
+ Args:
+ category: 'torch' or 'numpy'
+ prepend_dim: prepend an extra dimension
+ Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
+ '''
+ m = np.array([[np.cos(angle), -np.sin(angle), 0.],
+ [np.sin(angle), np.cos(angle), 0.], [0., 0., 1.]])
+ if category == 'torch':
+ if prepend_dim:
+ return torch.tensor(m, dtype=torch.float,
+ device=device).unsqueeze(0)
+ else:
+ return torch.tensor(m, dtype=torch.float, device=device)
+ elif category == 'numpy':
+ if prepend_dim:
+ return np.expand_dims(m, 0)
+ else:
+ return m
+ else:
+ raise ValueError("category must be 'torch' or 'numpy'")
\ No newline at end of file
diff --git a/lib/pymaf/utils/imutils.py b/lib/pymaf/utils/imutils.py
new file mode 100644
index 0000000000000000000000000000000000000000..000c9357842036abe496bdafe8223458ca12bb75
--- /dev/null
+++ b/lib/pymaf/utils/imutils.py
@@ -0,0 +1,491 @@
+"""
+This file contains functions that are used to perform data augmentation.
+"""
+import cv2
+import io
+import torch
+import numpy as np
+from PIL import Image
+from rembg import remove
+from rembg.session_factory import new_session
+from torchvision.models import detection
+
+from lib.pymaf.core import constants
+from lib.pymaf.utils.streamer import aug_matrix
+from lib.common.cloth_extraction import load_segmentation
+from torchvision import transforms
+
+
+def load_img(img_file):
+
+ img = cv2.imread(img_file, cv2.IMREAD_UNCHANGED)
+ if len(img.shape) == 2:
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+ if not img_file.endswith("png"):
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ else:
+ img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGR)
+
+ return img
+
+
+def get_bbox(img, det):
+
+ input = np.float32(img)
+ input = (input / 255.0 -
+ (0.5, 0.5, 0.5)) / (0.5, 0.5, 0.5) # TO [-1.0, 1.0]
+ input = input.transpose(2, 0, 1) # TO [3 x H x W]
+ bboxes, probs = det(torch.from_numpy(input).float().unsqueeze(0))
+
+ probs = probs.unsqueeze(3)
+ bboxes = (bboxes * probs).sum(dim=1, keepdim=True) / probs.sum(
+ dim=1, keepdim=True)
+ bbox = bboxes[0, 0, 0].cpu().numpy()
+
+ return bbox
+
+
+def get_transformer(input_res):
+
+ image_to_tensor = transforms.Compose([
+ transforms.Resize(input_res),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ mask_to_tensor = transforms.Compose([
+ transforms.Resize(input_res),
+ transforms.ToTensor(),
+ transforms.Normalize((0.0, ), (1.0, ))
+ ])
+
+ image_to_pymaf_tensor = transforms.Compose([
+ transforms.Resize(size=224),
+ transforms.Normalize(mean=constants.IMG_NORM_MEAN,
+ std=constants.IMG_NORM_STD)
+ ])
+
+ image_to_pixie_tensor = transforms.Compose([
+ transforms.Resize(224)
+ ])
+
+ def image_to_hybrik_tensor(img):
+ # mean
+ img[0].add_(-0.406)
+ img[1].add_(-0.457)
+ img[2].add_(-0.480)
+
+ # std
+ img[0].div_(0.225)
+ img[1].div_(0.224)
+ img[2].div_(0.229)
+ return img
+
+ return [image_to_tensor, mask_to_tensor, image_to_pymaf_tensor, image_to_pixie_tensor, image_to_hybrik_tensor]
+
+
+def process_image(img_file, hps_type, input_res=512, device=None, seg_path=None):
+ """Read image, do preprocessing and possibly crop it according to the bounding box.
+ If there are bounding box annotations, use them to crop the image.
+ If no bounding box is specified but openpose detections are available, use them to get the bounding box.
+ """
+
+ [image_to_tensor, mask_to_tensor, image_to_pymaf_tensor,
+ image_to_pixie_tensor, image_to_hybrik_tensor] = get_transformer(input_res)
+
+ img_ori = load_img(img_file)
+
+ in_height, in_width, _ = img_ori.shape
+ M = aug_matrix(in_width, in_height, input_res*2, input_res*2)
+
+ # from rectangle to square
+ img_for_crop = cv2.warpAffine(img_ori, M[0:2, :],
+ (input_res*2, input_res*2), flags=cv2.INTER_CUBIC)
+
+ # detection for bbox
+ detector = detection.maskrcnn_resnet50_fpn(pretrained=True)
+ detector.eval()
+ predictions = detector(
+ [torch.from_numpy(img_for_crop).permute(2, 0, 1) / 255.])[0]
+ human_ids = torch.where(
+ predictions["scores"] == predictions["scores"][predictions['labels'] == 1].max())
+ bbox = predictions["boxes"][human_ids, :].flatten().detach().cpu().numpy()
+
+ width = bbox[2] - bbox[0]
+ height = bbox[3] - bbox[1]
+ center = np.array([(bbox[0] + bbox[2]) / 2.0,
+ (bbox[1] + bbox[3]) / 2.0])
+
+ scale = max(height, width) / 180
+
+ if hps_type == 'hybrik':
+ img_np = crop_for_hybrik(img_for_crop, center,
+ np.array([scale * 180, scale * 180]))
+ else:
+ img_np, cropping_parameters = crop(
+ img_for_crop, center, scale, (input_res, input_res))
+
+ img_pil = Image.fromarray(remove(img_np, post_process_mask=True, session=new_session("u2net")))
+
+ # for icon
+ img_rgb = image_to_tensor(img_pil.convert("RGB"))
+ img_mask = torch.tensor(1.0) - (mask_to_tensor(img_pil.split()[-1]) <
+ torch.tensor(0.5)).float()
+ img_tensor = img_rgb * img_mask
+
+ # for hps
+ img_hps = img_np.astype(np.float32) / 255.
+ img_hps = torch.from_numpy(img_hps).permute(2, 0, 1)
+
+ if hps_type == 'bev':
+ img_hps = img_np[:, :, [2, 1, 0]]
+ elif hps_type == 'hybrik':
+ img_hps = image_to_hybrik_tensor(img_hps).unsqueeze(0).to(device)
+ elif hps_type != 'pixie':
+ img_hps = image_to_pymaf_tensor(img_hps).unsqueeze(0).to(device)
+ else:
+ img_hps = image_to_pixie_tensor(img_hps).unsqueeze(0).to(device)
+
+ # uncrop params
+ uncrop_param = {'center': center,
+ 'scale': scale,
+ 'ori_shape': img_ori.shape,
+ 'box_shape': img_np.shape,
+ 'crop_shape': img_for_crop.shape,
+ 'M': M}
+
+ if seg_path is not None:
+ segmentations = load_segmentation(seg_path, (in_height, in_width))
+ seg_coord_normalized = []
+ for seg in segmentations:
+ coord_normalized = []
+ for xy in seg['coordinates']:
+ xy_h = np.vstack((xy[:, 0], xy[:, 1], np.ones(len(xy)))).T
+ warped_indeces = M[0:2, :] @ xy_h[:, :, None]
+ warped_indeces = np.array(warped_indeces).astype(int)
+ warped_indeces.resize((warped_indeces.shape[:2]))
+
+ # cropped_indeces = crop_segmentation(warped_indeces, center, scale, (input_res, input_res), img_np.shape)
+ cropped_indeces = crop_segmentation(
+ warped_indeces, (input_res, input_res), cropping_parameters)
+
+ indices = np.vstack(
+ (cropped_indeces[:, 0], cropped_indeces[:, 1])).T
+
+ # Convert to NDC coordinates
+ seg_cropped_normalized = 2*(indices / input_res) - 1
+ # Empirically determined scaling factors (most likely absorbing a scale factor from
+ # the cropping pipeline): divide by 50 on the vertical axis and by 40 on the
+ # horizontal axis to account for the curvature of the human body.
+ seg_cropped_normalized[:, 0] = (1 / 40) * seg_cropped_normalized[:, 0]
+ seg_cropped_normalized[:, 1] = (1 / 50) * seg_cropped_normalized[:, 1]
+ coord_normalized.append(seg_cropped_normalized)
+
+ seg['coord_normalized'] = coord_normalized
+ seg_coord_normalized.append(seg)
+
+ return img_tensor, img_hps, img_ori, img_mask, uncrop_param, seg_coord_normalized
+
+ return img_tensor, img_hps, img_ori, img_mask, uncrop_param
+
+
+def get_transform(center, scale, res):
+ """Generate transformation matrix."""
+ h = 200 * scale
+ t = np.zeros((3, 3))
+ t[0, 0] = float(res[1]) / h
+ t[1, 1] = float(res[0]) / h
+ t[0, 2] = res[1] * (-float(center[0]) / h + .5)
+ t[1, 2] = res[0] * (-float(center[1]) / h + .5)
+ t[2, 2] = 1
+
+ return t
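+
+# The matrix maps a square box of side 200 * scale centered at `center` onto an output
+# image of size `res`; the factor 200 matches the scale convention used in
+# lib/pymaf/utils/transforms.py.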
+
+
+def transform(pt, center, scale, res, invert=0):
+ """Transform pixel location to different reference."""
+ t = get_transform(center, scale, res)
+ if invert:
+ t = np.linalg.inv(t)
+ new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
+ new_pt = np.dot(t, new_pt)
+ return np.around(new_pt[:2]).astype(np.int16)
+
+
+def crop(img, center, scale, res):
+ """Crop image according to the supplied bounding box."""
+
+ # Upper left point
+ ul = np.array(transform([0, 0], center, scale, res, invert=1))
+
+ # Bottom right point
+ br = np.array(transform(res, center, scale, res, invert=1))
+
+ new_shape = [br[1] - ul[1], br[0] - ul[0]]
+ if len(img.shape) > 2:
+ new_shape += [img.shape[2]]
+ new_img = np.zeros(new_shape)
+
+ # Range to fill new array
+ new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
+ new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
+
+ # Range to sample from original image
+ old_x = max(0, ul[0]), min(len(img[0]), br[0])
+ old_y = max(0, ul[1]), min(len(img), br[1])
+
+ new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]
+ ] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
+ if len(img.shape) == 2:
+ new_img = np.array(Image.fromarray(new_img).resize(res))
+ else:
+ new_img = np.array(Image.fromarray(
+ new_img.astype(np.uint8)).resize(res))
+
+ return new_img, (old_x, new_x, old_y, new_y, new_shape)
+
+
+def crop_segmentation(org_coord, res, cropping_parameters):
+ old_x, new_x, old_y, new_y, new_shape = cropping_parameters
+
+ new_coord = np.zeros((org_coord.shape))
+ new_coord[:, 0] = new_x[0] + (org_coord[:, 0] - old_x[0])
+ new_coord[:, 1] = new_y[0] + (org_coord[:, 1] - old_y[0])
+
+ new_coord[:, 0] = res[0] * (new_coord[:, 0] / new_shape[1])
+ new_coord[:, 1] = res[1] * (new_coord[:, 1] / new_shape[0])
+
+ return new_coord
+
+
+def crop_for_hybrik(img, center, scale):
+ inp_h, inp_w = (256, 256)
+ trans = get_affine_transform(center, scale, 0, [inp_w, inp_h])
+ new_img = cv2.warpAffine(
+ img, trans, (int(inp_w), int(inp_h)), flags=cv2.INTER_LINEAR)
+ return new_img
+
+
+def get_affine_transform(center,
+ scale,
+ rot,
+ output_size,
+ shift=np.array([0, 0], dtype=np.float32),
+ inv=0):
+
+ def get_dir(src_point, rot_rad):
+ """Rotate the point by `rot_rad` degree."""
+ sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+
+ src_result = [0, 0]
+ src_result[0] = src_point[0] * cs - src_point[1] * sn
+ src_result[1] = src_point[0] * sn + src_point[1] * cs
+
+ return src_result
+
+ def get_3rd_point(a, b):
+ """Return vector c that perpendicular to (a - b)."""
+ direct = a - b
+ return b + np.array([-direct[1], direct[0]], dtype=np.float32)
+
+ if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
+ scale = np.array([scale, scale])
+
+ scale_tmp = scale
+ src_w = scale_tmp[0]
+ dst_w = output_size[0]
+ dst_h = output_size[1]
+
+ rot_rad = np.pi * rot / 180
+ src_dir = get_dir([0, src_w * -0.5], rot_rad)
+ dst_dir = np.array([0, dst_w * -0.5], np.float32)
+
+ src = np.zeros((3, 2), dtype=np.float32)
+ dst = np.zeros((3, 2), dtype=np.float32)
+ src[0, :] = center + scale_tmp * shift
+ src[1, :] = center + src_dir + scale_tmp * shift
+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+
+ src[2:, :] = get_3rd_point(src[0, :], src[1, :])
+ dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
+
+ if inv:
+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+ else:
+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+ return trans
+
+
+def corner_align(ul, br):
+
+ if ul[1]-ul[0] != br[1]-br[0]:
+ ul[1] = ul[0]+br[1]-br[0]
+
+ return ul, br
+
+
+def uncrop(img, center, scale, orig_shape):
+ """'Undo' the image cropping/resizing.
+ This function is used when evaluating mask/part segmentation.
+ """
+
+ res = img.shape[:2]
+
+ # Upper left point
+ ul = np.array(transform([0, 0], center, scale, res, invert=1))
+ # Bottom right point
+ br = np.array(transform(res, center, scale, res, invert=1))
+
+ # quick fix
+ ul, br = corner_align(ul, br)
+
+ # size of cropped image
+ crop_shape = [br[1] - ul[1], br[0] - ul[0]]
+ new_img = np.zeros(orig_shape, dtype=np.uint8)
+
+ # Range to fill new array
+ new_x = max(0, -ul[0]), min(br[0], orig_shape[1]) - ul[0]
+ new_y = max(0, -ul[1]), min(br[1], orig_shape[0]) - ul[1]
+
+ # Range to sample from original image
+ old_x = max(0, ul[0]), min(orig_shape[1], br[0])
+ old_y = max(0, ul[1]), min(orig_shape[0], br[1])
+
+ img = np.array(Image.fromarray(img.astype(np.uint8)).resize(crop_shape))
+
+ new_img[old_y[0]:old_y[1], old_x[0]:old_x[1]
+ ] = img[new_y[0]:new_y[1], new_x[0]:new_x[1]]
+
+ return new_img
+
+
+def rot_aa(aa, rot):
+ """Rotate axis angle parameters."""
+ # pose parameters
+ R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],
+ [np.sin(np.deg2rad(-rot)),
+ np.cos(np.deg2rad(-rot)), 0], [0, 0, 1]])
+ # find the rotation of the body in camera frame
+ per_rdg, _ = cv2.Rodrigues(aa)
+ # apply the global rotation to the global orientation
+ resrot, _ = cv2.Rodrigues(np.dot(R, per_rdg))
+ aa = (resrot.T)[0]
+ return aa
+
+
+def flip_img(img):
+ """Flip rgb images or masks.
+ channels come last, e.g. (256,256,3).
+ """
+ img = np.fliplr(img)
+ return img
+
+
+def flip_kp(kp, is_smpl=False):
+ """Flip keypoints."""
+ if len(kp) == 24:
+ if is_smpl:
+ flipped_parts = constants.SMPL_JOINTS_FLIP_PERM
+ else:
+ flipped_parts = constants.J24_FLIP_PERM
+ elif len(kp) == 49:
+ if is_smpl:
+ flipped_parts = constants.SMPL_J49_FLIP_PERM
+ else:
+ flipped_parts = constants.J49_FLIP_PERM
+ kp = kp[flipped_parts]
+ kp[:, 0] = -kp[:, 0]
+ return kp
+
+
+def flip_pose(pose):
+ """Flip pose.
+ The flipping is based on SMPL parameters.
+ """
+ flipped_parts = constants.SMPL_POSE_FLIP_PERM
+ pose = pose[flipped_parts]
+ # we also negate the second and the third dimension of the axis-angle
+ pose[1::3] = -pose[1::3]
+ pose[2::3] = -pose[2::3]
+ return pose
+
+
+def normalize_2d_kp(kp_2d, crop_size=224, inv=False):
+ # Normalize keypoints between -1, 1
+ if not inv:
+ ratio = 1.0 / crop_size
+ kp_2d = 2.0 * kp_2d * ratio - 1.0
+ else:
+ ratio = 1.0 / crop_size
+ kp_2d = (kp_2d + 1.0) / (2 * ratio)
+
+ return kp_2d
+
+
+def generate_heatmap(joints, heatmap_size, sigma=1, joints_vis=None):
+ '''
+ param joints: [num_joints, 3]
+ param joints_vis: [num_joints, 3]
+ return: target, target_weight(1: visible, 0: invisible)
+ '''
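+ # The joint coordinates are expected in normalized [0, 1] image space: with
+ # heatmap_size=56 and sigma=1, each visible joint yields a 56 x 56 map with a small
+ # gaussian centered at (x * 56, y * 56); joints falling outside the map get weight 0.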
+ num_joints = joints.shape[0]
+ device = joints.device
+ cur_device = torch.device(device.type, device.index)
+ if not hasattr(heatmap_size, '__len__'):
+ # width height
+ heatmap_size = [heatmap_size, heatmap_size]
+ assert len(heatmap_size) == 2
+ target_weight = np.ones((num_joints, 1), dtype=np.float32)
+ if joints_vis is not None:
+ target_weight[:, 0] = joints_vis[:, 0]
+ target = torch.zeros((num_joints, heatmap_size[1], heatmap_size[0]),
+ dtype=torch.float32,
+ device=cur_device)
+
+ tmp_size = sigma * 3
+
+ for joint_id in range(num_joints):
+ mu_x = int(joints[joint_id][0] * heatmap_size[0] + 0.5)
+ mu_y = int(joints[joint_id][1] * heatmap_size[1] + 0.5)
+ # Check that any part of the gaussian is in-bounds
+ ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
+ if ul[0] >= heatmap_size[0] or ul[1] >= heatmap_size[1] \
+ or br[0] < 0 or br[1] < 0:
+ # If not, just return the image as is
+ target_weight[joint_id] = 0
+ continue
+
+ # # Generate gaussian
+ size = 2 * tmp_size + 1
+ # x = np.arange(0, size, 1, np.float32)
+ # y = x[:, np.newaxis]
+ # x0 = y0 = size // 2
+ # # The gaussian is not normalized, we want the center value to equal 1
+ # g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
+ # g = torch.from_numpy(g.astype(np.float32))
+
+ x = torch.arange(0, size, dtype=torch.float32, device=cur_device)
+ y = x.unsqueeze(-1)
+ x0 = y0 = size // 2
+ # The gaussian is not normalized, we want the center value to equal 1
+ g = torch.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
+
+ # Usable gaussian range
+ g_x = max(0, -ul[0]), min(br[0], heatmap_size[0]) - ul[0]
+ g_y = max(0, -ul[1]), min(br[1], heatmap_size[1]) - ul[1]
+ # Image range
+ img_x = max(0, ul[0]), min(br[0], heatmap_size[0])
+ img_y = max(0, ul[1]), min(br[1], heatmap_size[1])
+
+ v = target_weight[joint_id]
+ if v > 0.5:
+ target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
+ g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
+
+ return target, target_weight
\ No newline at end of file
diff --git a/lib/pymaf/utils/streamer.py b/lib/pymaf/utils/streamer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ab3074b2cf5b87f9332afa6239d0911efeff1ac
--- /dev/null
+++ b/lib/pymaf/utils/streamer.py
@@ -0,0 +1,142 @@
+import cv2
+import torch
+import numpy as np
+import imageio
+
+
+def aug_matrix(w1, h1, w2, h2):
+ dx = (w2 - w1) / 2.0
+ dy = (h2 - h1) / 2.0
+
+ matrix_trans = np.array([[1.0, 0, dx],
+ [0, 1.0, dy],
+ [0, 0, 1.0]])
+
+ scale = np.min([float(w2)/w1, float(h2)/h1])
+
+ M = get_affine_matrix(
+ center=(w2 / 2.0, h2 / 2.0),
+ translate=(0, 0),
+ scale=scale)
+
+ M = np.array(M + [0., 0., 1.]).reshape(3, 3)
+ M = M.dot(matrix_trans)
+
+ return M
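+
+# Illustrative usage (a minimal sketch, not part of the original code): for some
+# 480 x 640 x 3 image `img`, embed it in a 1024 x 1024 canvas by first translating it
+# to the canvas center and then scaling by min(1024/640, 1024/480) about that center:
+#   M = aug_matrix(640, 480, 1024, 1024)                  # 3 x 3 affine matrix
+#   warped = cv2.warpAffine(img, M[0:2, :], (1024, 1024))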
+
+
+def get_affine_matrix(center, translate, scale):
+ cx, cy = center
+ tx, ty = translate
+
+ M = [1, 0, 0,
+ 0, 1, 0]
+ M = [x * scale for x in M]
+
+ # Apply inverse of center translation: RSS * C^-1
+ M[2] += M[0] * (-cx) + M[1] * (-cy)
+ M[5] += M[3] * (-cx) + M[4] * (-cy)
+
+ # Apply center translation: T * C * RSS * C^-1
+ M[2] += cx + tx
+ M[5] += cy + ty
+ return M
+
+
+class BaseStreamer():
+ """This streamer will return images at 512x512 size.
+ """
+
+ def __init__(self,
+ width=512, height=512, pad=True,
+ mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+ **kwargs):
+ self.width = width
+ self.height = height
+ self.pad = pad
+ self.mean = np.array(mean)
+ self.std = np.array(std)
+
+ self.loader = self.create_loader()
+
+ def create_loader(self):
+ raise NotImplementedError
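+ # The unreachable yield below turns this stub into a generator, so __init__ can call
+ # create_loader() without raising; NotImplementedError only fires when a subclass
+ # forgets to override this method and the loader is first iterated.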
+ yield np.zeros((600, 400, 3)) # in RGB (0, 255)
+
+ def __getitem__(self, index):
+ image = next(self.loader)
+ in_height, in_width, _ = image.shape
+ M = aug_matrix(in_width, in_height, self.width, self.height)
+ image = cv2.warpAffine(
+ image, M[0:2, :], (self.width, self.height), flags=cv2.INTER_CUBIC)
+
+ input = np.float32(image)
+ input = (input / 255.0 - self.mean) / self.std # TO [-1.0, 1.0]
+ input = input.transpose(2, 0, 1) # TO [3 x H x W]
+ return torch.from_numpy(input).float()
+
+ def __len__(self):
+ raise NotImplementedError
+
+
+class CaptureStreamer(BaseStreamer):
+ """This streamer takes webcam as input.
+ """
+
+ def __init__(self, id=0, width=512, height=512, pad=True, **kwargs):
+ super().__init__(width, height, pad, **kwargs)
+ self.capture = cv2.VideoCapture(id)
+
+ def create_loader(self):
+ while True:
+ _, image = self.capture.read()
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB
+ yield image
+
+ def __len__(self):
+ return 100_000_000
+
+ def __del__(self):
+ self.capture.release()
+
+
+class VideoListStreamer(BaseStreamer):
+ """This streamer takes a list of video files as input.
+ """
+
+ def __init__(self, files, width=512, height=512, pad=True, **kwargs):
+ super().__init__(width, height, pad, **kwargs)
+ self.files = files
+ self.captures = [imageio.get_reader(f) for f in files]
+ self.nframes = sum([int(cap._meta["fps"] * cap._meta["duration"])
+ for cap in self.captures])
+
+ def create_loader(self):
+ for capture in self.captures:
+ for image in capture: # RGB
+ yield image
+
+ def __len__(self):
+ return self.nframes
+
+ def __del__(self):
+ for capture in self.captures:
+ capture.close()
+
+
+class ImageListStreamer(BaseStreamer):
+ """This streamer takes a list of image files as input.
+ """
+
+ def __init__(self, files, width=512, height=512, pad=True, **kwargs):
+ super().__init__(width, height, pad, **kwargs)
+ self.files = files
+
+ def create_loader(self):
+ for f in self.files:
+ image = cv2.imread(f, cv2.IMREAD_UNCHANGED)[:, :, 0:3]
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # RGB
+ yield image
+
+ def __len__(self):
+ return len(self.files)
\ No newline at end of file
diff --git a/lib/pymaf/utils/transforms.py b/lib/pymaf/utils/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..db0bf42829bf07a1a22d962c1378098ba6e26e9d
--- /dev/null
+++ b/lib/pymaf/utils/transforms.py
@@ -0,0 +1,78 @@
+# ------------------------------------------------------------------------------
+# Copyright (c) Microsoft
+# Licensed under the MIT License.
+# Written by Bin Xiao (Bin.Xiao@microsoft.com)
+# ------------------------------------------------------------------------------
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import cv2
+import numpy as np
+
+
+def transform_preds(coords, center, scale, output_size):
+ target_coords = np.zeros(coords.shape)
+ trans = get_affine_transform(center, scale, 0, output_size, inv=1)
+ for p in range(coords.shape[0]):
+ target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
+ return target_coords
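+
+# Illustrative usage (a minimal sketch, not part of the original code): for some
+# (K, 2) array `coords_hm` of 64 x 64 heatmap coordinates, map them back into the
+# original image, assuming the crop was a 224 px box (scale = 224 / 200 = 1.12)
+# centered at (112, 112):
+#   coords_img = transform_preds(coords_hm, np.array([112., 112.]), 1.12, [64, 64])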
+
+
+def get_affine_transform(center,
+ scale,
+ rot,
+ output_size,
+ shift=np.array([0, 0], dtype=np.float32),
+ inv=0):
+ if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
+ # print(scale)
+ scale = np.array([scale, scale])
+
+ scale_tmp = scale * 200.0
+ src_w = scale_tmp[0]
+ dst_w = output_size[0]
+ dst_h = output_size[1]
+
+ rot_rad = np.pi * rot / 180
+ src_dir = get_dir([0, src_w * -0.5], rot_rad)
+ dst_dir = np.array([0, dst_w * -0.5], np.float32)
+
+ src = np.zeros((3, 2), dtype=np.float32)
+ dst = np.zeros((3, 2), dtype=np.float32)
+ src[0, :] = center + scale_tmp * shift
+ src[1, :] = center + src_dir + scale_tmp * shift
+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
+
+ src[2:, :] = get_3rd_point(src[0, :], src[1, :])
+ dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
+
+ if inv:
+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
+ else:
+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
+
+ return trans
+
+
+def affine_transform(pt, t):
+ new_pt = np.array([pt[0], pt[1], 1.]).T
+ new_pt = np.dot(t, new_pt)
+ return new_pt[:2]
+
+
+def get_3rd_point(a, b):
+ direct = a - b
+ return b + np.array([-direct[1], direct[0]], dtype=np.float32)
+
+
+def get_dir(src_point, rot_rad):
+ sn, cs = np.sin(rot_rad), np.cos(rot_rad)
+
+ src_result = [0, 0]
+ src_result[0] = src_point[0] * cs - src_point[1] * sn
+ src_result[1] = src_point[0] * sn + src_point[1] * cs
+
+ return src_result
\ No newline at end of file
diff --git a/lib/renderer/__init__.py b/lib/renderer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/renderer/camera.py b/lib/renderer/camera.py
new file mode 100644
index 0000000000000000000000000000000000000000..fde488826c8eba933a00485d39cdc21d4d2dae8f
--- /dev/null
+++ b/lib/renderer/camera.py
@@ -0,0 +1,226 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import cv2
+import numpy as np
+
+from .glm import ortho
+
+
+class Camera:
+ def __init__(self, width=1600, height=1200):
+ # Focal Length
+ # equivalent 50mm
+ focal = np.sqrt(width * width + height * height)
+ self.focal_x = focal
+ self.focal_y = focal
+ # Principal Point Offset
+ self.principal_x = width / 2
+ self.principal_y = height / 2
+ # Axis Skew
+ self.skew = 0
+ # Image Size
+ self.width = width
+ self.height = height
+
+ self.near = 1
+ self.far = 10
+
+ # Camera Center
+ self.center = np.array([0, 0, 1.6])
+ self.direction = np.array([0, 0, -1])
+ self.right = np.array([1, 0, 0])
+ self.up = np.array([0, 1, 0])
+
+ self.ortho_ratio = None
+
+ def sanity_check(self):
+ self.center = self.center.reshape([-1])
+ self.direction = self.direction.reshape([-1])
+ self.right = self.right.reshape([-1])
+ self.up = self.up.reshape([-1])
+
+ assert len(self.center) == 3
+ assert len(self.direction) == 3
+ assert len(self.right) == 3
+ assert len(self.up) == 3
+
+ @staticmethod
+ def normalize_vector(v):
+ v_norm = np.linalg.norm(v)
+ return v if v_norm == 0 else v / v_norm
+
+ def get_real_z_value(self, z):
+ z_near = self.near
+ z_far = self.far
+ z_n = 2.0 * z - 1.0
+ z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near))
+ return z_e
+
+ def get_rotation_matrix(self):
+ rot_mat = np.eye(3)
+ s = self.right
+ s = self.normalize_vector(s)
+ rot_mat[0, :] = s
+ u = self.up
+ u = self.normalize_vector(u)
+ rot_mat[1, :] = -u
+ rot_mat[2, :] = self.normalize_vector(self.direction)
+
+ return rot_mat
+
+ def get_translation_vector(self):
+ rot_mat = self.get_rotation_matrix()
+ trans = -np.dot(rot_mat, self.center)
+ return trans
+
+ def get_intrinsic_matrix(self):
+ int_mat = np.eye(3)
+
+ int_mat[0, 0] = self.focal_x
+ int_mat[1, 1] = self.focal_y
+ int_mat[0, 1] = self.skew
+ int_mat[0, 2] = self.principal_x
+ int_mat[1, 2] = self.principal_y
+
+ return int_mat
+
+ def get_projection_matrix(self):
+ ext_mat = self.get_extrinsic_matrix()
+ int_mat = self.get_intrinsic_matrix()
+
+ return np.matmul(int_mat, ext_mat)
+
+ def get_extrinsic_matrix(self):
+ rot_mat = self.get_rotation_matrix()
+ int_mat = self.get_intrinsic_matrix()
+ trans = self.get_translation_vector()
+
+ extrinsic = np.eye(4)
+ extrinsic[:3, :3] = rot_mat
+ extrinsic[:3, 3] = trans
+
+ return extrinsic[:3, :]
+
+ def set_rotation_matrix(self, rot_mat):
+ self.direction = rot_mat[2, :]
+ self.up = -rot_mat[1, :]
+ self.right = rot_mat[0, :]
+
+ def set_intrinsic_matrix(self, int_mat):
+ self.focal_x = int_mat[0, 0]
+ self.focal_y = int_mat[1, 1]
+ self.skew = int_mat[0, 1]
+ self.principal_x = int_mat[0, 2]
+ self.principal_y = int_mat[1, 2]
+
+ def set_projection_matrix(self, proj_mat):
+ res = cv2.decomposeProjectionMatrix(proj_mat)
+ int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2]
+ camera_center = camera_center_homo[0:3] / camera_center_homo[3]
+ camera_center = camera_center.reshape(-1)
+ int_mat = int_mat / int_mat[2][2]
+
+ self.set_intrinsic_matrix(int_mat)
+ self.set_rotation_matrix(rot_mat)
+ self.center = camera_center
+
+ self.sanity_check()
+
+ def get_gl_matrix(self):
+ z_near = self.near
+ z_far = self.far
+ rot_mat = self.get_rotation_matrix()
+ int_mat = self.get_intrinsic_matrix()
+ trans = self.get_translation_vector()
+
+ extrinsic = np.eye(4)
+ extrinsic[:3, :3] = rot_mat
+ extrinsic[:3, 3] = trans
+ axis_adj = np.eye(4)
+ axis_adj[2, 2] = -1
+ axis_adj[1, 1] = -1
+ model_view = np.matmul(axis_adj, extrinsic)
+
+ projective = np.zeros([4, 4])
+ projective[:2, :2] = int_mat[:2, :2]
+ projective[:2, 2:3] = -int_mat[:2, 2:3]
+ projective[3, 2] = -1
+ projective[2, 2] = (z_near + z_far)
+ projective[2, 3] = (z_near * z_far)
+
+ if self.ortho_ratio is None:
+ ndc = ortho(0, self.width, 0, self.height, z_near, z_far)
+ perspective = np.matmul(ndc, projective)
+ else:
+ perspective = ortho(-self.width * self.ortho_ratio / 2,
+ self.width * self.ortho_ratio / 2,
+ -self.height * self.ortho_ratio / 2,
+ self.height * self.ortho_ratio / 2, z_near,
+ z_far)
+
+ return perspective, model_view
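+
+ # Illustrative usage of the Camera class (a minimal sketch, not part of the original code):
+ #   cam = Camera(width=512, height=512)
+ #   cam.center = np.array([0, 0, 2.0])
+ #   persp, model_view = cam.get_gl_matrix()   # two 4x4 matrices for an OpenGL pipeline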
+
+
+def KRT_from_P(proj_mat, normalize_K=True):
+ res = cv2.decomposeProjectionMatrix(proj_mat)
+ K, Rot, camera_center_homog = res[0], res[1], res[2]
+ camera_center = camera_center_homog[0:3] / camera_center_homog[3]
+ trans = -Rot.dot(camera_center)
+ if normalize_K:
+ K = K / K[2][2]
+ return K, Rot, trans
+
+
+def MVP_from_P(proj_mat, width, height, near=0.1, far=10000):
+ '''
+ Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix
+ :param proj_mat: OpenCV camera projection matrix
+ :param width: Image width
+ :param height: Image height
+ :param near: Z near value
+ :param far: Z far value
+ :return: OpenGL projection matrix and model view matrix
+ '''
+ res = cv2.decomposeProjectionMatrix(proj_mat)
+ K, Rot, camera_center_homog = res[0], res[1], res[2]
+ camera_center = camera_center_homog[0:3] / camera_center_homog[3]
+ trans = -Rot.dot(camera_center)
+ K = K / K[2][2]
+
+ extrinsic = np.eye(4)
+ extrinsic[:3, :3] = Rot
+ extrinsic[:3, 3:4] = trans
+ axis_adj = np.eye(4)
+ axis_adj[2, 2] = -1
+ axis_adj[1, 1] = -1
+ model_view = np.matmul(axis_adj, extrinsic)
+
+ zFar = far
+ zNear = near
+ projective = np.zeros([4, 4])
+ projective[:2, :2] = K[:2, :2]
+ projective[:2, 2:3] = -K[:2, 2:3]
+ projective[3, 2] = -1
+ projective[2, 2] = (zNear + zFar)
+ projective[2, 3] = (zNear * zFar)
+
+ ndc = ortho(0, width, 0, height, zNear, zFar)
+
+ perspective = np.matmul(ndc, projective)
+
+ return perspective, model_view
diff --git a/lib/renderer/gl/__init__.py b/lib/renderer/gl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/renderer/gl/data/color.fs b/lib/renderer/gl/data/color.fs
new file mode 100644
index 0000000000000000000000000000000000000000..e28b6f65605bb380428c84c326d00d9e1445a899
--- /dev/null
+++ b/lib/renderer/gl/data/color.fs
@@ -0,0 +1,20 @@
+#version 330 core
+
+layout (location = 0) out vec4 FragColor;
+layout (location = 1) out vec4 FragNormal;
+layout (location = 2) out vec4 FragDepth;
+
+in vec3 Color;
+in vec3 CamNormal;
+in vec3 depth;
+
+
+void main()
+{
+ FragColor = vec4(Color,1.0);
+
+ vec3 cam_norm_normalized = normalize(CamNormal);
+ vec3 rgb = (cam_norm_normalized + 1.0) / 2.0;
+ FragNormal = vec4(rgb, 1.0);
+ FragDepth = vec4(depth.xyz, 1.0);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/color.vs b/lib/renderer/gl/data/color.vs
new file mode 100644
index 0000000000000000000000000000000000000000..1256f7eb3f3605b8f848d452fcf601d5a18b95e2
--- /dev/null
+++ b/lib/renderer/gl/data/color.vs
@@ -0,0 +1,29 @@
+#version 330 core
+
+layout (location = 0) in vec3 a_Position;
+layout (location = 1) in vec3 a_Color;
+layout (location = 2) in vec3 a_Normal;
+
+out vec3 CamNormal;
+out vec3 CamPos;
+out vec3 Color;
+out vec3 depth;
+
+
+uniform mat3 RotMat;
+uniform mat4 NormMat;
+uniform mat4 ModelMat;
+uniform mat4 PerspMat;
+
+void main()
+{
+ vec3 a_Position = (NormMat * vec4(a_Position,1.0)).xyz;
+ gl_Position = PerspMat * ModelMat * vec4(RotMat * a_Position, 1.0);
+ Color = a_Color;
+
+ mat3 R = mat3(ModelMat) * RotMat;
+ CamNormal = (R * a_Normal);
+
+ depth = vec3(gl_Position.z / gl_Position.w);
+
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/normal.fs b/lib/renderer/gl/data/normal.fs
new file mode 100644
index 0000000000000000000000000000000000000000..9e2770952e27d9265ccb100833245beed3ebebe5
--- /dev/null
+++ b/lib / renderer / gl / data /normal.fs
@@ -0,0 +1,12 @@
+#version 330
+
+out vec4 FragColor;
+
+in vec3 CamNormal;
+
+void main()
+{
+ vec3 cam_norm_normalized = normalize(CamNormal);
+ vec3 rgb = (cam_norm_normalized + 1.0) / 2.0;
+ FragColor = vec4(rgb, 1.0);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/normal.vs b/lib/renderer/gl/data/normal.vs
new file mode 100644
index 0000000000000000000000000000000000000000..a0f7f50b1cedfd677843b2a60cf9051b2134c347
--- /dev/null
+++ b/lib / renderer / gl / data /normal.vs
@@ -0,0 +1,15 @@
+#version 330
+
+layout (location = 0) in vec3 Position;
+layout (location = 1) in vec3 Normal;
+
+out vec3 CamNormal;
+
+uniform mat4 ModelMat;
+uniform mat4 PerspMat;
+
+void main()
+{
+ gl_Position = PerspMat * ModelMat * vec4(Position, 1.0);
+ CamNormal = (ModelMat * vec4(Normal, 0.0)).xyz;
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/prt.fs b/lib/renderer/gl/data/prt.fs
new file mode 100644
index 0000000000000000000000000000000000000000..3737e2a5b51d5b001cdb2f3d8157793caa98fd54
--- /dev/null
+++ b/lib/renderer/gl/data/prt.fs
@@ -0,0 +1,157 @@
+#version 330
+
+uniform vec3 SHCoeffs[9];
+uniform uint analytic;
+
+uniform uint hasNormalMap;
+uniform uint hasAlbedoMap;
+
+uniform sampler2D AlbedoMap;
+uniform sampler2D NormalMap;
+
+in VertexData {
+ vec3 Position;
+ vec3 Depth;
+ vec3 ModelNormal;
+ vec2 Texcoord;
+ vec3 Tangent;
+ vec3 Bitangent;
+ vec3 PRT1;
+ vec3 PRT2;
+ vec3 PRT3;
+ vec3 Label;
+} VertexIn;
+
+layout (location = 0) out vec4 FragColor;
+layout (location = 1) out vec4 FragNormal;
+layout (location = 2) out vec4 FragPosition;
+layout (location = 3) out vec4 FragAlbedo;
+layout (location = 4) out vec4 FragShading;
+layout (location = 5) out vec4 FragPRT1;
+layout (location = 6) out vec4 FragPRT2;
+// layout (location = 7) out vec4 FragPRT3;
+layout (location = 7) out vec4 FragLabel;
+
+
+vec4 gammaCorrection(vec4 vec, float g)
+{
+ return vec4(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g), vec.w);
+}
+
+vec3 gammaCorrection(vec3 vec, float g)
+{
+ return vec3(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g));
+}
+
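+// The constants c1..c5 below are the standard coefficients for an order-2 spherical
+// harmonic irradiance approximation (Ramamoorthi & Hanrahan).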
+void evaluateH(vec3 n, out float H[9])
+{
+ float c1 = 0.429043, c2 = 0.511664,
+ c3 = 0.743125, c4 = 0.886227, c5 = 0.247708;
+
+ H[0] = c4;
+ H[1] = 2.0 * c2 * n[1];
+ H[2] = 2.0 * c2 * n[2];
+ H[3] = 2.0 * c2 * n[0];
+ H[4] = 2.0 * c1 * n[0] * n[1];
+ H[5] = 2.0 * c1 * n[1] * n[2];
+ H[6] = c3 * n[2] * n[2] - c5;
+ H[7] = 2.0 * c1 * n[2] * n[0];
+ H[8] = c1 * (n[0] * n[0] - n[1] * n[1]);
+}
+
+vec3 evaluateLightingModel(vec3 normal)
+{
+ float H[9];
+ evaluateH(normal, H);
+ vec3 res = vec3(0.0);
+ for (int i = 0; i < 9; i++) {
+ res += H[i] * SHCoeffs[i];
+ }
+ return res;
+}
+
+// nC: coarse geometry normal, nH: fine normal from normal map
+vec3 evaluateLightingModelHybrid(vec3 nC, vec3 nH, mat3 prt)
+{
+ float HC[9], HH[9];
+ evaluateH(nC, HC);
+ evaluateH(nH, HH);
+
+ vec3 res = vec3(0.0);
+ vec3 shadow = vec3(0.0);
+ vec3 unshadow = vec3(0.0);
+ for(int i = 0; i < 3; ++i){
+ for(int j = 0; j < 3; ++j){
+ int id = i*3+j;
+ res += HH[id]* SHCoeffs[id];
+ shadow += prt[i][j] * SHCoeffs[id];
+ unshadow += HC[id] * SHCoeffs[id];
+ }
+ }
+ vec3 ratio = clamp(shadow/unshadow,0.0,1.0);
+ res = ratio * res;
+
+ return res;
+}
+
+vec3 evaluateLightingModelPRT(mat3 prt)
+{
+ vec3 res = vec3(0.0);
+ for(int i = 0; i < 3; ++i){
+ for(int j = 0; j < 3; ++j){
+ res += prt[i][j] * SHCoeffs[i*3+j];
+ }
+ }
+
+ return res;
+}
+
+void main()
+{
+ vec2 uv = VertexIn.Texcoord;
+ vec3 nC = normalize(VertexIn.ModelNormal);
+ vec3 nml = nC;
+ mat3 prt = mat3(VertexIn.PRT1, VertexIn.PRT2, VertexIn.PRT3);
+
+ if(hasAlbedoMap == uint(0))
+ FragAlbedo = vec4(1.0);
+ else
+ FragAlbedo = texture(AlbedoMap, uv);//gammaCorrection(texture(AlbedoMap, uv), 1.0/2.2);
+
+ if(hasNormalMap == uint(0))
+ {
+ if(analytic == uint(0))
+ FragShading = vec4(evaluateLightingModelPRT(prt), 1.0f);
+ else
+ FragShading = vec4(evaluateLightingModel(nC), 1.0f);
+ }
+ else
+ {
+ vec3 n_tan = normalize(texture(NormalMap, uv).rgb*2.0-vec3(1.0));
+
+ mat3 TBN = mat3(normalize(VertexIn.Tangent),normalize(VertexIn.Bitangent),nC);
+ vec3 nH = normalize(TBN * n_tan);
+
+ if(analytic == uint(0))
+ FragShading = vec4(evaluateLightingModelHybrid(nC,nH,prt),1.0f);
+ else
+ FragShading = vec4(evaluateLightingModel(nH), 1.0f);
+
+ nml = nH;
+ }
+
+ FragShading = gammaCorrection(FragShading, 2.2);
+ FragColor = clamp(FragAlbedo * FragShading, 0.0, 1.0);
+ FragNormal = vec4(0.5*(nml+vec3(1.0)), 1.0);
+ FragPosition = vec4(VertexIn.Depth.xyz, 1.0);
+ FragShading = vec4(clamp(0.5*FragShading.xyz, 0.0, 1.0),1.0);
+ // FragColor = gammaCorrection(clamp(FragAlbedo * FragShading, 0.0, 1.0),2.2);
+ // FragNormal = vec4(0.5*(nml+vec3(1.0)), 1.0);
+ // FragPosition = vec4(VertexIn.Position,VertexIn.Depth.x);
+ // FragShading = vec4(gammaCorrection(clamp(0.5*FragShading.xyz, 0.0, 1.0),2.2),1.0);
+ // FragAlbedo = gammaCorrection(FragAlbedo,2.2);
+ FragPRT1 = vec4(VertexIn.PRT1,1.0);
+ FragPRT2 = vec4(VertexIn.PRT2,1.0);
+ // FragPRT3 = vec4(VertexIn.PRT3,1.0);
+ FragLabel = vec4(VertexIn.Label,1.0);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/prt.vs b/lib/renderer/gl/data/prt.vs
new file mode 100644
index 0000000000000000000000000000000000000000..60cc800f20a5cc765a433e08e634684f014f7513
--- /dev/null
+++ b/lib/renderer/gl/data/prt.vs
@@ -0,0 +1,156 @@
+#version 330
+
+layout (location = 0) in vec3 a_Position;
+layout (location = 1) in vec3 a_Normal;
+layout (location = 2) in vec2 a_TextureCoord;
+layout (location = 3) in vec3 a_Tangent;
+layout (location = 4) in vec3 a_Bitangent;
+layout (location = 5) in vec3 a_PRT1;
+layout (location = 6) in vec3 a_PRT2;
+layout (location = 7) in vec3 a_PRT3;
+layout (location = 8) in vec3 a_Label;
+out VertexData {
+ vec3 Position;
+ vec3 Depth;
+ vec3 ModelNormal;
+ vec2 Texcoord;
+ vec3 Tangent;
+ vec3 Bitangent;
+ vec3 PRT1;
+ vec3 PRT2;
+ vec3 PRT3;
+ vec3 Label;
+} VertexOut;
+uniform mat3 RotMat;
+uniform mat4 NormMat;
+uniform mat4 ModelMat;
+uniform mat4 PerspMat;
+float s_c3 = 0.94617469575; // (3*sqrt(5))/(4*sqrt(pi))
+float s_c4 = -0.31539156525;// (-sqrt(5))/(4*sqrt(pi))
+float s_c5 = 0.54627421529; // (sqrt(15))/(4*sqrt(pi))
+float s_c_scale = 1.0/0.91529123286551084;
+float s_c_scale_inv = 0.91529123286551084;
+float s_rc2 = 1.5853309190550713*s_c_scale;
+float s_c4_div_c3 = s_c4/s_c3;
+float s_c4_div_c3_x2 = (s_c4/s_c3)*2.0;
+float s_scale_dst2 = s_c3 * s_c_scale_inv;
+float s_scale_dst4 = s_c5 * s_c_scale_inv;
+void OptRotateBand0(float x[1], mat3 R, out float dst[1])
+{
+ dst[0] = x[0];
+}
+// 9 multiplies
+void OptRotateBand1(float x[3], mat3 R, out float dst[3])
+{
+ // derived from SlowRotateBand1
+ dst[0] = ( R[1][1])*x[0] + (-R[1][2])*x[1] + ( R[1][0])*x[2];
+ dst[1] = (-R[2][1])*x[0] + ( R[2][2])*x[1] + (-R[2][0])*x[2];
+ dst[2] = ( R[0][1])*x[0] + (-R[0][2])*x[1] + ( R[0][0])*x[2];
+}
+// 48 multiplies
+void OptRotateBand2(float x[5], mat3 R, out float dst[5])
+{
+ // Sparse matrix multiply
+ float sh0 = x[3] + x[4] + x[4] - x[1];
+ float sh1 = x[0] + s_rc2*x[2] + x[3] + x[4];
+ float sh2 = x[0];
+ float sh3 = -x[3];
+ float sh4 = -x[1];
+
+ // Rotations. R0 and R1 just use the raw matrix columns
+ float r2x = R[0][0] + R[0][1];
+ float r2y = R[1][0] + R[1][1];
+ float r2z = R[2][0] + R[2][1];
+
+ float r3x = R[0][0] + R[0][2];
+ float r3y = R[1][0] + R[1][2];
+ float r3z = R[2][0] + R[2][2];
+
+ float r4x = R[0][1] + R[0][2];
+ float r4y = R[1][1] + R[1][2];
+ float r4z = R[2][1] + R[2][2];
+
+ // dense matrix multiplication one column at a time
+
+ // column 0
+ float sh0_x = sh0 * R[0][0];
+ float sh0_y = sh0 * R[1][0];
+ float d0 = sh0_x * R[1][0];
+ float d1 = sh0_y * R[2][0];
+ float d2 = sh0 * (R[2][0] * R[2][0] + s_c4_div_c3);
+ float d3 = sh0_x * R[2][0];
+ float d4 = sh0_x * R[0][0] - sh0_y * R[1][0];
+
+ // column 1
+ float sh1_x = sh1 * R[0][2];
+ float sh1_y = sh1 * R[1][2];
+ d0 += sh1_x * R[1][2];
+ d1 += sh1_y * R[2][2];
+ d2 += sh1 * (R[2][2] * R[2][2] + s_c4_div_c3);
+ d3 += sh1_x * R[2][2];
+ d4 += sh1_x * R[0][2] - sh1_y * R[1][2];
+
+ // column 2
+ float sh2_x = sh2 * r2x;
+ float sh2_y = sh2 * r2y;
+ d0 += sh2_x * r2y;
+ d1 += sh2_y * r2z;
+ d2 += sh2 * (r2z * r2z + s_c4_div_c3_x2);
+ d3 += sh2_x * r2z;
+ d4 += sh2_x * r2x - sh2_y * r2y;
+
+ // column 3
+ float sh3_x = sh3 * r3x;
+ float sh3_y = sh3 * r3y;
+ d0 += sh3_x * r3y;
+ d1 += sh3_y * r3z;
+ d2 += sh3 * (r3z * r3z + s_c4_div_c3_x2);
+ d3 += sh3_x * r3z;
+ d4 += sh3_x * r3x - sh3_y * r3y;
+
+ // column 4
+ float sh4_x = sh4 * r4x;
+ float sh4_y = sh4 * r4y;
+ d0 += sh4_x * r4y;
+ d1 += sh4_y * r4z;
+ d2 += sh4 * (r4z * r4z + s_c4_div_c3_x2);
+ d3 += sh4_x * r4z;
+ d4 += sh4_x * r4x - sh4_y * r4y;
+
+ // extra multipliers
+ dst[0] = d0;
+ dst[1] = -d1;
+ dst[2] = d2 * s_scale_dst2;
+ dst[3] = -d3;
+ dst[4] = d4 * s_scale_dst4;
+}
+void main()
+{
+ // normalization
+ vec3 pos = (NormMat * vec4(a_Position,1.0)).xyz;
+ mat3 R = mat3(ModelMat) * RotMat;
+ VertexOut.ModelNormal = (R * a_Normal);
+ VertexOut.Position = R * pos;
+ VertexOut.Texcoord = a_TextureCoord;
+ VertexOut.Tangent = (R * a_Tangent);
+ VertexOut.Bitangent = (R * a_Bitangent);
+ VertexOut.Label = a_Label;
+ float PRT0, PRT1[3], PRT2[5];
+ PRT0 = a_PRT1[0];
+ PRT1[0] = a_PRT1[1];
+ PRT1[1] = a_PRT1[2];
+ PRT1[2] = a_PRT2[0];
+ PRT2[0] = a_PRT2[1];
+ PRT2[1] = a_PRT2[2];
+ PRT2[2] = a_PRT3[0];
+ PRT2[3] = a_PRT3[1];
+ PRT2[4] = a_PRT3[2];
+ OptRotateBand1(PRT1, R, PRT1);
+ OptRotateBand2(PRT2, R, PRT2);
+ VertexOut.PRT1 = vec3(PRT0,PRT1[0],PRT1[1]);
+ VertexOut.PRT2 = vec3(PRT1[2],PRT2[0],PRT2[1]);
+ VertexOut.PRT3 = vec3(PRT2[2],PRT2[3],PRT2[4]);
+ gl_Position = PerspMat * ModelMat * vec4(RotMat * pos, 1.0);
+
+ VertexOut.Depth = vec3(gl_Position.z / gl_Position.w);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/prt_uv.fs b/lib/renderer/gl/data/prt_uv.fs
new file mode 100644
index 0000000000000000000000000000000000000000..6e90b25c62b41c8cf61afd29333372193047d5f1
--- /dev/null
+++ b/lib/renderer/gl/data/prt_uv.fs
@@ -0,0 +1,141 @@
+#version 330
+
+uniform vec3 SHCoeffs[9];
+uniform uint analytic;
+
+uniform uint hasNormalMap;
+uniform uint hasAlbedoMap;
+
+uniform sampler2D AlbedoMap;
+uniform sampler2D NormalMap;
+
+in VertexData {
+ vec3 Position;
+ vec3 ModelNormal;
+ vec3 CameraNormal;
+ vec2 Texcoord;
+ vec3 Tangent;
+ vec3 Bitangent;
+ vec3 PRT1;
+ vec3 PRT2;
+ vec3 PRT3;
+} VertexIn;
+
+layout (location = 0) out vec4 FragColor;
+layout (location = 1) out vec4 FragPosition;
+layout (location = 2) out vec4 FragNormal;
+
+vec4 gammaCorrection(vec4 vec, float g)
+{
+ return vec4(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g), vec.w);
+}
+
+vec3 gammaCorrection(vec3 vec, float g)
+{
+ return vec3(pow(vec.x, 1.0/g), pow(vec.y, 1.0/g), pow(vec.z, 1.0/g));
+}
+
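+// evaluateH: quadratic SH basis scaled by the irradiance-convolution constants
+// c1..c5 (after Ramamoorthi & Hanrahan, "An Efficient Representation for
+// Irradiance Environment Maps").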
+void evaluateH(vec3 n, out float H[9])
+{
+ float c1 = 0.429043, c2 = 0.511664,
+ c3 = 0.743125, c4 = 0.886227, c5 = 0.247708;
+
+ H[0] = c4;
+ H[1] = 2.0 * c2 * n[1];
+ H[2] = 2.0 * c2 * n[2];
+ H[3] = 2.0 * c2 * n[0];
+ H[4] = 2.0 * c1 * n[0] * n[1];
+ H[5] = 2.0 * c1 * n[1] * n[2];
+ H[6] = c3 * n[2] * n[2] - c5;
+ H[7] = 2.0 * c1 * n[2] * n[0];
+ H[8] = c1 * (n[0] * n[0] - n[1] * n[1]);
+}
+
+vec3 evaluateLightingModel(vec3 normal)
+{
+ float H[9];
+ evaluateH(normal, H);
+ vec3 res = vec3(0.0);
+ for (int i = 0; i < 9; i++) {
+ res += H[i] * SHCoeffs[i];
+ }
+ return res;
+}
+
+// nC: coarse geometry normal, nH: fine normal from normal map
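+// The PRT (shadowed) and analytic (unshadowed) responses on nC give a visibility
+// ratio that modulates the shading evaluated with the detailed normal nH.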
+vec3 evaluateLightingModelHybrid(vec3 nC, vec3 nH, mat3 prt)
+{
+ float HC[9], HH[9];
+ evaluateH(nC, HC);
+ evaluateH(nH, HH);
+
+ vec3 res = vec3(0.0);
+ vec3 shadow = vec3(0.0);
+ vec3 unshadow = vec3(0.0);
+ for(int i = 0; i < 3; ++i){
+ for(int j = 0; j < 3; ++j){
+ int id = i*3+j;
+ res += HH[id]* SHCoeffs[id];
+ shadow += prt[i][j] * SHCoeffs[id];
+ unshadow += HC[id] * SHCoeffs[id];
+ }
+ }
+ vec3 ratio = clamp(shadow/unshadow,0.0,1.0);
+ res = ratio * res;
+
+ return res;
+}
+
+vec3 evaluateLightingModelPRT(mat3 prt)
+{
+ vec3 res = vec3(0.0);
+ for(int i = 0; i < 3; ++i){
+ for(int j = 0; j < 3; ++j){
+ res += prt[i][j] * SHCoeffs[i*3+j];
+ }
+ }
+
+ return res;
+}
+
+void main()
+{
+ vec2 uv = VertexIn.Texcoord;
+ vec3 nM = normalize(VertexIn.ModelNormal);
+ vec3 nC = normalize(VertexIn.CameraNormal);
+ vec3 nml = nC;
+ mat3 prt = mat3(VertexIn.PRT1, VertexIn.PRT2, VertexIn.PRT3);
+
+ vec4 albedo, shading;
+ if(hasAlbedoMap == uint(0))
+ albedo = vec4(1.0);
+ else
+ albedo = texture(AlbedoMap, uv);//gammaCorrection(texture(AlbedoMap, uv), 1.0/2.2);
+
+ if(hasNormalMap == uint(0))
+ {
+ if(analytic == uint(0))
+ shading = vec4(evaluateLightingModelPRT(prt), 1.0f);
+ else
+ shading = vec4(evaluateLightingModel(nC), 1.0f);
+ }
+ else
+ {
+ vec3 n_tan = normalize(texture(NormalMap, uv).rgb*2.0-vec3(1.0));
+
+ mat3 TBN = mat3(normalize(VertexIn.Tangent),normalize(VertexIn.Bitangent),nC);
+ vec3 nH = normalize(TBN * n_tan);
+
+ if(analytic == uint(0))
+ shading = vec4(evaluateLightingModelHybrid(nC,nH,prt),1.0f);
+ else
+ shading = vec4(evaluateLightingModel(nH), 1.0f);
+
+ nml = nH;
+ }
+
+ shading = gammaCorrection(shading, 2.2);
+ FragColor = clamp(albedo * shading, 0.0, 1.0);
+ FragPosition = vec4(VertexIn.Position,1.0);
+ FragNormal = vec4(0.5*(nM+vec3(1.0)),1.0);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/prt_uv.vs b/lib/renderer/gl/data/prt_uv.vs
new file mode 100644
index 0000000000000000000000000000000000000000..c3cc0a2f2b381032780e450020a96d353168cded
--- /dev/null
+++ b/lib/renderer/gl/data/prt_uv.vs
@@ -0,0 +1,168 @@
+#version 330
+
+layout (location = 0) in vec3 a_Position;
+layout (location = 1) in vec3 a_Normal;
+layout (location = 2) in vec2 a_TextureCoord;
+layout (location = 3) in vec3 a_Tangent;
+layout (location = 4) in vec3 a_Bitangent;
+layout (location = 5) in vec3 a_PRT1;
+layout (location = 6) in vec3 a_PRT2;
+layout (location = 7) in vec3 a_PRT3;
+
+out VertexData {
+ vec3 Position;
+ vec3 ModelNormal;
+ vec3 CameraNormal;
+ vec2 Texcoord;
+ vec3 Tangent;
+ vec3 Bitangent;
+ vec3 PRT1;
+ vec3 PRT2;
+ vec3 PRT3;
+} VertexOut;
+
+uniform mat3 RotMat;
+uniform mat4 NormMat;
+uniform mat4 ModelMat;
+uniform mat4 PerspMat;
+
+#define pi 3.1415926535897932384626433832795
+
+float s_c3 = 0.94617469575; // (3*sqrt(5))/(4*sqrt(pi))
+float s_c4 = -0.31539156525;// (-sqrt(5))/(4*sqrt(pi))
+float s_c5 = 0.54627421529; // (sqrt(15))/(4*sqrt(pi))
+
+float s_c_scale = 1.0/0.91529123286551084;
+float s_c_scale_inv = 0.91529123286551084;
+
+float s_rc2 = 1.5853309190550713*s_c_scale;
+float s_c4_div_c3 = s_c4/s_c3;
+float s_c4_div_c3_x2 = (s_c4/s_c3)*2.0;
+
+float s_scale_dst2 = s_c3 * s_c_scale_inv;
+float s_scale_dst4 = s_c5 * s_c_scale_inv;
+
+void OptRotateBand0(float x[1], mat3 R, out float dst[1])
+{
+ dst[0] = x[0];
+}
+
+// 9 multiplies
+void OptRotateBand1(float x[3], mat3 R, out float dst[3])
+{
+ // derived from SlowRotateBand1
+ dst[0] = ( R[1][1])*x[0] + (-R[1][2])*x[1] + ( R[1][0])*x[2];
+ dst[1] = (-R[2][1])*x[0] + ( R[2][2])*x[1] + (-R[2][0])*x[2];
+ dst[2] = ( R[0][1])*x[0] + (-R[0][2])*x[1] + ( R[0][0])*x[2];
+}
+
+// 48 multiplies
+void OptRotateBand2(float x[5], mat3 R, out float dst[5])
+{
+ // Sparse matrix multiply
+ float sh0 = x[3] + x[4] + x[4] - x[1];
+ float sh1 = x[0] + s_rc2*x[2] + x[3] + x[4];
+ float sh2 = x[0];
+ float sh3 = -x[3];
+ float sh4 = -x[1];
+
+ // Rotations. R0 and R1 just use the raw matrix columns
+ float r2x = R[0][0] + R[0][1];
+ float r2y = R[1][0] + R[1][1];
+ float r2z = R[2][0] + R[2][1];
+
+ float r3x = R[0][0] + R[0][2];
+ float r3y = R[1][0] + R[1][2];
+ float r3z = R[2][0] + R[2][2];
+
+ float r4x = R[0][1] + R[0][2];
+ float r4y = R[1][1] + R[1][2];
+ float r4z = R[2][1] + R[2][2];
+
+ // dense matrix multiplication one column at a time
+
+ // column 0
+ float sh0_x = sh0 * R[0][0];
+ float sh0_y = sh0 * R[1][0];
+ float d0 = sh0_x * R[1][0];
+ float d1 = sh0_y * R[2][0];
+ float d2 = sh0 * (R[2][0] * R[2][0] + s_c4_div_c3);
+ float d3 = sh0_x * R[2][0];
+ float d4 = sh0_x * R[0][0] - sh0_y * R[1][0];
+
+ // column 1
+ float sh1_x = sh1 * R[0][2];
+ float sh1_y = sh1 * R[1][2];
+ d0 += sh1_x * R[1][2];
+ d1 += sh1_y * R[2][2];
+ d2 += sh1 * (R[2][2] * R[2][2] + s_c4_div_c3);
+ d3 += sh1_x * R[2][2];
+ d4 += sh1_x * R[0][2] - sh1_y * R[1][2];
+
+ // column 2
+ float sh2_x = sh2 * r2x;
+ float sh2_y = sh2 * r2y;
+ d0 += sh2_x * r2y;
+ d1 += sh2_y * r2z;
+ d2 += sh2 * (r2z * r2z + s_c4_div_c3_x2);
+ d3 += sh2_x * r2z;
+ d4 += sh2_x * r2x - sh2_y * r2y;
+
+ // column 3
+ float sh3_x = sh3 * r3x;
+ float sh3_y = sh3 * r3y;
+ d0 += sh3_x * r3y;
+ d1 += sh3_y * r3z;
+ d2 += sh3 * (r3z * r3z + s_c4_div_c3_x2);
+ d3 += sh3_x * r3z;
+ d4 += sh3_x * r3x - sh3_y * r3y;
+
+ // column 4
+ float sh4_x = sh4 * r4x;
+ float sh4_y = sh4 * r4y;
+ d0 += sh4_x * r4y;
+ d1 += sh4_y * r4z;
+ d2 += sh4 * (r4z * r4z + s_c4_div_c3_x2);
+ d3 += sh4_x * r4z;
+ d4 += sh4_x * r4x - sh4_y * r4y;
+
+ // extra multipliers
+ dst[0] = d0;
+ dst[1] = -d1;
+ dst[2] = d2 * s_scale_dst2;
+ dst[3] = -d3;
+ dst[4] = d4 * s_scale_dst4;
+}
+
+void main()
+{
+ // normalization
+ mat3 R = mat3(ModelMat) * RotMat;
+ VertexOut.ModelNormal = a_Normal;
+ VertexOut.CameraNormal = (R * a_Normal);
+ VertexOut.Position = a_Position;
+ VertexOut.Texcoord = a_TextureCoord;
+ VertexOut.Tangent = (R * a_Tangent);
+ VertexOut.Bitangent = (R * a_Bitangent);
+ float PRT0, PRT1[3], PRT2[5];
+ PRT0 = a_PRT1[0];
+ PRT1[0] = a_PRT1[1];
+ PRT1[1] = a_PRT1[2];
+ PRT1[2] = a_PRT2[0];
+ PRT2[0] = a_PRT2[1];
+ PRT2[1] = a_PRT2[2];
+ PRT2[2] = a_PRT3[0];
+ PRT2[3] = a_PRT3[1];
+ PRT2[4] = a_PRT3[2];
+
+ OptRotateBand1(PRT1, R, PRT1);
+ OptRotateBand2(PRT2, R, PRT2);
+
+ VertexOut.PRT1 = vec3(PRT0,PRT1[0],PRT1[1]);
+ VertexOut.PRT2 = vec3(PRT1[2],PRT2[0],PRT2[1]);
+ VertexOut.PRT3 = vec3(PRT2[2],PRT2[3],PRT2[4]);
+
+ gl_Position = vec4(a_TextureCoord, 0.0, 1.0) - vec4(0.5, 0.5, 0, 0);
+ gl_Position[0] *= 2.0;
+ gl_Position[1] *= 2.0;
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/quad.fs b/lib/renderer/gl/data/quad.fs
new file mode 100644
index 0000000000000000000000000000000000000000..f43502f2352ca2adf19d11e809946b51498df5a5
--- /dev/null
+++ b/lib/renderer/gl/data/quad.fs
@@ -0,0 +1,11 @@
+#version 330 core
+out vec4 FragColor;
+
+in vec2 TexCoord;
+
+uniform sampler2D screenTexture;
+
+void main()
+{
+ FragColor = texture(screenTexture, TexCoord);
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/data/quad.vs b/lib/renderer/gl/data/quad.vs
new file mode 100644
index 0000000000000000000000000000000000000000..811044631a1f29f5b45c490b2d40297f3127b6ea
--- /dev/null
+++ b/lib/renderer/gl/data/quad.vs
@@ -0,0 +1,11 @@
+#version 330 core
+layout (location = 0) in vec2 aPos;
+layout (location = 1) in vec2 aTexCoord;
+
+out vec2 TexCoord;
+
+void main()
+{
+ gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0);
+ TexCoord = aTexCoord;
+}
\ No newline at end of file
diff --git a/lib/renderer/gl/framework.py b/lib/renderer/gl/framework.py
new file mode 100644
index 0000000000000000000000000000000000000000..90bc4c64a9f855d455da70f085358118a2f49b15
--- /dev/null
+++ b/lib/renderer/gl/framework.py
@@ -0,0 +1,95 @@
+# Mario Rosasco, 2016
+# adapted from framework.cpp, Copyright (C) 2010-2012 by Jason L. McKesson
+# This file is licensed under the MIT License.
+#
+# NB: Unlike in the framework.cpp organization, the main loop is contained
+# in the tutorial files, not in this framework file. Additionally, a copy of
+# this module file must exist in the same directory as the tutorial files
+# to be imported properly.
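+#
+# A typical usage sketch (the shader file names are just examples that ship in
+# this repository's data/ directory):
+#   vert = loadShader(GL_VERTEX_SHADER, 'quad.vs')
+#   frag = loadShader(GL_FRAGMENT_SHADER, 'quad.fs')
+#   program = createProgram([vert, frag])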
+
+import os
+from OpenGL.GL import *
+
+
+# Function that creates and compiles shaders according to the given type (a GL enum value) and
+# shader program (a file containing a GLSL program).
+def loadShader(shaderType, shaderFile):
+ # check if file exists, get full path name
+ strFilename = findFileOrThrow(shaderFile)
+ shaderData = None
+ with open(strFilename, 'r') as f:
+ shaderData = f.read()
+
+ shader = glCreateShader(shaderType)
+ glShaderSource(
+ shader,
+ shaderData) # note that this is a simpler function call than in C
+
+ # This shader compilation is more explicit than the one used in
+ # framework.cpp, which relies on a glutil wrapper function.
+ # This is made explicit here mainly to decrease dependence on pyOpenGL
+ # utilities and wrappers, which docs caution may change in future versions.
+ glCompileShader(shader)
+
+ status = glGetShaderiv(shader, GL_COMPILE_STATUS)
+ if status == GL_FALSE:
+ # Note that getting the error log is much simpler in Python than in C/C++
+ # and does not require explicit handling of the string buffer
+ strInfoLog = glGetShaderInfoLog(shader)
+ strShaderType = ""
+ if shaderType is GL_VERTEX_SHADER:
+ strShaderType = "vertex"
+ elif shaderType is GL_GEOMETRY_SHADER:
+ strShaderType = "geometry"
+ elif shaderType is GL_FRAGMENT_SHADER:
+ strShaderType = "fragment"
+
+ print("Compilation failure for " + strShaderType + " shader:\n" +
+ str(strInfoLog))
+
+ return shader
+
+
+# Function that accepts a list of shaders, compiles them, and returns a handle to the compiled program
+def createProgram(shaderList):
+ program = glCreateProgram()
+
+ for shader in shaderList:
+ glAttachShader(program, shader)
+
+ glLinkProgram(program)
+
+ status = glGetProgramiv(program, GL_LINK_STATUS)
+ if status == GL_FALSE:
+ # Note that getting the error log is much simpler in Python than in C/C++
+ # and does not require explicit handling of the string buffer
+ strInfoLog = glGetProgramInfoLog(program)
+ print("Linker failure: \n" + str(strInfoLog))
+
+ for shader in shaderList:
+ glDetachShader(program, shader)
+
+ return program
+
+
+# Helper function to locate and open the target file (passed in as a string).
+# Returns the full path to the file as a string.
+def findFileOrThrow(strBasename):
+ # Keep constant names in C-style convention, for readability
+ # when comparing to C(/C++) code.
+ if os.path.isfile(strBasename):
+ return strBasename
+
+ LOCAL_FILE_DIR = "data" + os.sep
+ GLOBAL_FILE_DIR = os.path.dirname(
+ os.path.abspath(__file__)) + os.sep + "data" + os.sep
+
+ strFilename = LOCAL_FILE_DIR + strBasename
+ if os.path.isfile(strFilename):
+ return strFilename
+
+ strFilename = GLOBAL_FILE_DIR + strBasename
+ if os.path.isfile(strFilename):
+ return strFilename
+
+ raise IOError('Could not find target file ' + strBasename)
\ No newline at end of file
diff --git a/lib/renderer/gl/glcontext.py b/lib/renderer/gl/glcontext.py
new file mode 100644
index 0000000000000000000000000000000000000000..0601b79d856e83675d219dde3feade7a9f2c836c
--- /dev/null
+++ b/lib/renderer/gl/glcontext.py
@@ -0,0 +1,136 @@
+"""Headless GPU-accelerated OpenGL context creation on Google Colaboratory.
+
+Typical usage:
+
+ # Optional PyOpenGL configuration can be done here.
+ # import OpenGL
+ # OpenGL.ERROR_CHECKING = True
+
+ # 'glcontext' must be imported before any OpenGL.* API.
+ from lucid.misc.gl.glcontext import create_opengl_context
+
+ # Now it's safe to import OpenGL and EGL functions
+ import OpenGL.GL as gl
+
+ # create_opengl_context() creates a GL context that is attached to an
+ # offscreen surface of the specified size. Note that rendering to buffers
+ # of other sizes and formats is still possible with OpenGL Framebuffers.
+ #
+ # Users are expected to directly use the EGL API in case more advanced
+ # context management is required.
+ width, height = 640, 480
+ create_opengl_context((width, height))
+
+ # OpenGL context is available here.
+
+"""
+
+from __future__ import print_function
+
+# pylint: disable=unused-import,g-import-not-at-top,g-statement-before-imports
+
+try:
+ import OpenGL
+except:
+ print('This module depends on PyOpenGL.')
+ print('Please run "\033[1m!pip install -q pyopengl\033[0m" '
+ 'prior to importing this module.')
+ raise
+
+import ctypes
+from ctypes import pointer, util
+import os
+
+os.environ['PYOPENGL_PLATFORM'] = 'egl'
+
+# OpenGL loading workaround.
+#
+# * PyOpenGL tries to load libGL, but we need libOpenGL, see [1,2].
+# This could have been solved by a symlink libGL->libOpenGL, but:
+#
+# * Python 2.7 can't find libGL and libEGL due to a bug (see [3])
+# in ctypes.util that was only fixed in Python 3.6.
+#
+# So, the only solution I've found is to monkeypatch ctypes.util
+# [1] https://devblogs.nvidia.com/egl-eye-opengl-visualization-without-x-server/
+# [2] https://devblogs.nvidia.com/linking-opengl-server-side-rendering/
+# [3] https://bugs.python.org/issue9998
+_find_library_old = ctypes.util.find_library
+try:
+
+ def _find_library_new(name):
+ return {
+ 'GL': 'libOpenGL.so',
+ 'EGL': 'libEGL.so',
+ }.get(name, _find_library_old(name))
+
+ util.find_library = _find_library_new
+ import OpenGL.GL as gl
+ import OpenGL.EGL as egl
+except:
+ print('Unable to load OpenGL libraries. '
+ 'Make sure you use GPU-enabled backend.')
+ print('Press "Runtime->Change runtime type" and set '
+ '"Hardware accelerator" to GPU.')
+ raise
+finally:
+ util.find_library = _find_library_old
+
+
+def create_opengl_context(surface_size=(640, 480)):
+ """Create offscreen OpenGL context and make it current.
+
+ Users are expected to directly use EGL API in case more advanced
+ context management is required.
+
+ Args:
+ surface_size: (width, height), size of the offscreen rendering surface.
+ """
+ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
+
+ major, minor = egl.EGLint(), egl.EGLint()
+ egl.eglInitialize(egl_display, pointer(major), pointer(minor))
+
+ config_attribs = [
+ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8,
+ egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24,
+ egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE
+ ]
+ config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs)
+
+ num_configs = egl.EGLint()
+ egl_cfg = egl.EGLConfig()
+ egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1,
+ pointer(num_configs))
+
+ width, height = surface_size
+ pbuffer_attribs = [
+ egl.EGL_WIDTH,
+ width,
+ egl.EGL_HEIGHT,
+ height,
+ egl.EGL_NONE,
+ ]
+ pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs)
+ egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg,
+ pbuffer_attribs)
+
+ egl.eglBindAPI(egl.EGL_OPENGL_API)
+
+ context_attribs = None
+ # context_attribs = [
+ # egl.EGL_CONTEXT_MAJOR_VERSION,
+ # 4,
+ # egl.EGL_CONTEXT_MINOR_VERSION,
+ # 1,
+ # egl.EGL_NONE,
+ # ]
+
+ egl_context = egl.eglCreateContext(egl_display, egl_cfg,
+ egl.EGL_NO_CONTEXT, context_attribs)
+ egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
+
+ buffer_type = egl.EGLint()
+ out = egl.eglQueryContext(egl_display, egl_context,
+ egl.EGL_CONTEXT_CLIENT_VERSION, buffer_type)
+ # print(buffer_type)
\ No newline at end of file
diff --git a/lib/renderer/gl/init_gl.py b/lib/renderer/gl/init_gl.py
new file mode 100644
index 0000000000000000000000000000000000000000..92d7e1a90b54d7a568e5849d6400bd960e14958a
--- /dev/null
+++ b/lib/renderer/gl/init_gl.py
@@ -0,0 +1,24 @@
+_glut_window = None
+_context_inited = None
+
+
+def initialize_GL_context(width=512, height=512, egl=False):
+ '''
+ default context uses GLUT
+ '''
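+ # For example (a sketch): initialize_GL_context(512, 512, egl=True) creates a
+ # headless EGL context, while egl=False opens an on-screen GLUT window.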
+ if not egl:
+ import OpenGL.GLUT as GLUT
+ display_mode = GLUT.GLUT_DOUBLE | GLUT.GLUT_RGB | GLUT.GLUT_DEPTH
+ global _glut_window
+ if _glut_window is None:
+ GLUT.glutInit()
+ GLUT.glutInitDisplayMode(display_mode)
+ GLUT.glutInitWindowSize(width, height)
+ GLUT.glutInitWindowPosition(0, 0)
+ _glut_window = GLUT.glutCreateWindow("My Render.")
+ else:
+ from .glcontext import create_opengl_context
+ global _context_inited
+ if _context_inited is None:
+ create_opengl_context((width, height))
+ _context_inited = True
\ No newline at end of file
diff --git a/lib/renderer/gl/norm_render.py b/lib/renderer/gl/norm_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa1ca7ea15139f3eae8d375a9a52223fee507dda
--- /dev/null
+++ b/lib/renderer/gl/norm_render.py
@@ -0,0 +1,75 @@
+'''
+MIT License
+Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+'''
+from OpenGL.GLUT import *
+
+from .render2 import Render
+
+
+class NormRender(Render):
+ def __init__(self,
+ width=1600,
+ height=1200,
+ name='Cam Renderer',
+ program_files=['simple.fs', 'simple.vs'],
+ color_size=1,
+ ms_rate=1):
+ Render.__init__(self, width, height, name, program_files, color_size,
+ ms_rate)
+ self.camera = None
+
+ glutDisplayFunc(self.display)
+ glutKeyboardFunc(self.keyboard)
+
+ def set_camera(self, camera):
+ self.camera = camera
+ self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix()
+
+ def set_matrices(self, projection, modelview):
+ self.projection_matrix = projection
+ self.model_view_matrix = modelview
+
+ def keyboard(self, key, x, y):
+ # w/s move along the view direction, a/d strafe, space/x move up/down, i/o shift the near/far planes
+ eps = 1
+ # print(key)
+ if key == b'w':
+ self.camera.center += eps * self.camera.direction
+ elif key == b's':
+ self.camera.center -= eps * self.camera.direction
+ if key == b'a':
+ self.camera.center -= eps * self.camera.right
+ elif key == b'd':
+ self.camera.center += eps * self.camera.right
+ if key == b' ':
+ self.camera.center += eps * self.camera.up
+ elif key == b'x':
+ self.camera.center -= eps * self.camera.up
+ elif key == b'i':
+ self.camera.near += 0.1 * eps
+ self.camera.far += 0.1 * eps
+ elif key == b'o':
+ self.camera.near -= 0.1 * eps
+ self.camera.far -= 0.1 * eps
+
+ self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix(
+ )
+
+ def show(self):
+ glutMainLoop()
\ No newline at end of file
diff --git a/lib/renderer/gl/render.py b/lib/renderer/gl/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a7e8da2bb1d041ecb74596f4cd9b73a4b30db27
--- /dev/null
+++ b/lib/renderer/gl/render.py
@@ -0,0 +1,380 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from ctypes import *
+
+import numpy as np
+from .framework import *
+
+GLUT = None
+
+
+# NOTE: Render class assumes GL context is created already.
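+# A minimal setup sketch using names from this repository (sizes are arbitrary):
+#   from .init_gl import initialize_GL_context
+#   initialize_GL_context(width=512, height=512, egl=True)
+#   renderer = Render(width=512, height=512, egl=True)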
+class Render:
+ def __init__(self,
+ width=1600,
+ height=1200,
+ name='GL Renderer',
+ program_files=['simple.fs', 'simple.vs'],
+ color_size=1,
+ ms_rate=1,
+ egl=False):
+ self.width = width
+ self.height = height
+ self.name = name
+ self.use_inverse_depth = False
+ self.egl = egl
+
+ glEnable(GL_DEPTH_TEST)
+
+ glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE)
+ glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE)
+ glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE)
+
+ # init program
+ shader_list = []
+
+ for program_file in program_files:
+ _, ext = os.path.splitext(program_file)
+ if ext == '.vs':
+ shader_list.append(loadShader(GL_VERTEX_SHADER, program_file))
+ elif ext == '.fs':
+ shader_list.append(loadShader(GL_FRAGMENT_SHADER,
+ program_file))
+ elif ext == '.gs':
+ shader_list.append(loadShader(GL_GEOMETRY_SHADER,
+ program_file))
+
+ self.program = createProgram(shader_list)
+
+ for shader in shader_list:
+ glDeleteShader(shader)
+
+ # Init uniform variables
+ self.model_mat_unif = glGetUniformLocation(self.program, 'ModelMat')
+ self.persp_mat_unif = glGetUniformLocation(self.program, 'PerspMat')
+
+ self.vertex_buffer = glGenBuffers(1)
+
+ # Init screen quad program and buffer
+ self.quad_program, self.quad_buffer = self.init_quad_program()
+
+ # Configure frame buffer
+ self.frame_buffer = glGenFramebuffers(1)
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+
+ self.intermediate_fbo = None
+ if ms_rate > 1:
+ # Configure texture buffer to render to
+ self.color_buffer = []
+ for i in range(color_size):
+ color_buffer = glGenTextures(1)
+ multi_sample_rate = ms_rate
+ glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR)
+ glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
+ multi_sample_rate, GL_RGBA32F,
+ self.width, self.height, GL_TRUE)
+ glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i,
+ GL_TEXTURE_2D_MULTISAMPLE, color_buffer,
+ 0)
+ self.color_buffer.append(color_buffer)
+
+ self.render_buffer = glGenRenderbuffers(1)
+ glBindRenderbuffer(GL_RENDERBUFFER, self.render_buffer)
+ glRenderbufferStorageMultisample(GL_RENDERBUFFER,
+ multi_sample_rate,
+ GL_DEPTH24_STENCIL8, self.width,
+ self.height)
+ glBindRenderbuffer(GL_RENDERBUFFER, 0)
+ glFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER, self.render_buffer)
+
+ attachments = []
+ for i in range(color_size):
+ attachments.append(GL_COLOR_ATTACHMENT0 + i)
+ glDrawBuffers(color_size, attachments)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ self.intermediate_fbo = glGenFramebuffers(1)
+ glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo)
+
+ self.screen_texture = []
+ for i in range(color_size):
+ screen_texture = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, screen_texture)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width,
+ self.height, 0, GL_RGBA, GL_FLOAT, None)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D,
+ screen_texture, 0)
+ self.screen_texture.append(screen_texture)
+
+ glDrawBuffers(color_size, attachments)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ else:
+ self.color_buffer = []
+ for i in range(color_size):
+ color_buffer = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, color_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width,
+ self.height, 0, GL_RGBA, GL_FLOAT, None)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D,
+ color_buffer, 0)
+ self.color_buffer.append(color_buffer)
+
+ # Configure depth texture map to render to
+ self.depth_buffer = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, self.depth_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE,
+ GL_COMPARE_R_TO_TEXTURE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width,
+ self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_TEXTURE_2D, self.depth_buffer, 0)
+
+ attachments = []
+ for i in range(color_size):
+ attachments.append(GL_COLOR_ATTACHMENT0 + i)
+ glDrawBuffers(color_size, attachments)
+ self.screen_texture = self.color_buffer
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ # Configure texture buffer if needed
+ self.render_texture = None
+
+ # NOTE: the original render_texture only supports one input;
+ # render_texture_v2 is a tentative workaround for that limitation
+ self.render_texture_v2 = {}
+
+ # Inner storage for buffer data
+ self.vertex_data = None
+ self.vertex_dim = None
+ self.n_vertices = None
+
+ self.model_view_matrix = None
+ self.projection_matrix = None
+
+ if not egl:
+ global GLUT
+ import OpenGL.GLUT as GLUT
+ GLUT.glutDisplayFunc(self.display)
+
+ def init_quad_program(self):
+ shader_list = []
+
+ shader_list.append(loadShader(GL_VERTEX_SHADER, "quad.vs"))
+ shader_list.append(loadShader(GL_FRAGMENT_SHADER, "quad.fs"))
+
+ the_program = createProgram(shader_list)
+
+ for shader in shader_list:
+ glDeleteShader(shader)
+
+ # vertex attributes for a quad that fills the entire screen in Normalized Device Coordinates.
+ # positions # texCoords
+ quad_vertices = np.array([
+ -1.0, 1.0, 0.0, 1.0, -1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0,
+ -1.0, 1.0, 0.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0
+ ])
+
+ quad_buffer = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, quad_buffer)
+ glBufferData(GL_ARRAY_BUFFER, quad_vertices, GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ return the_program, quad_buffer
+
+ def set_mesh(self, vertices, faces):
+ self.vertex_data = vertices[faces.reshape([-1])]
+ self.vertex_dim = self.vertex_data.shape[1]
+ self.n_vertices = self.vertex_data.shape[0]
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
+ glBufferData(GL_ARRAY_BUFFER, self.vertex_data, GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def set_viewpoint(self, projection, model_view):
+ self.projection_matrix = projection
+ self.model_view_matrix = model_view
+
+ def draw_init(self):
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+ glEnable(GL_DEPTH_TEST)
+
+ glClearColor(0.0, 0.0, 0.0, 0.0)
+ if self.use_inverse_depth:
+ glDepthFunc(GL_GREATER)
+ glClearDepth(0.0)
+ else:
+ glDepthFunc(GL_LESS)
+ glClearDepth(1.0)
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
+
+ def draw_end(self):
+ if self.intermediate_fbo is not None:
+ for i in range(len(self.color_buffer)):
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, self.frame_buffer)
+ glReadBuffer(GL_COLOR_ATTACHMENT0 + i)
+ glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.intermediate_fbo)
+ glDrawBuffer(GL_COLOR_ATTACHMENT0 + i)
+ glBlitFramebuffer(0, 0, self.width, self.height, 0, 0,
+ self.width, self.height, GL_COLOR_BUFFER_BIT,
+ GL_NEAREST)
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ glDepthFunc(GL_LESS)
+ glClearDepth(1.0)
+
+ def draw(self):
+ self.draw_init()
+
+ glUseProgram(self.program)
+ glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE,
+ self.model_view_matrix.transpose())
+ glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE,
+ self.projection_matrix.transpose())
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
+
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)
+
+ glDisableVertexAttribArray(0)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ glUseProgram(0)
+
+ self.draw_end()
+
+ def get_color(self, color_id=0):
+ glBindFramebuffer(
+ GL_FRAMEBUFFER, self.intermediate_fbo
+ if self.intermediate_fbo is not None else self.frame_buffer)
+ glReadBuffer(GL_COLOR_ATTACHMENT0 + color_id)
+ data = glReadPixels(0,
+ 0,
+ self.width,
+ self.height,
+ GL_RGBA,
+ GL_FLOAT,
+ outputType=None)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ rgb = data.reshape(self.height, self.width, -1)
+ rgb = np.flip(rgb, 0)
+ return rgb
+
+ def get_z_value(self):
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+ data = glReadPixels(0,
+ 0,
+ self.width,
+ self.height,
+ GL_DEPTH_COMPONENT,
+ GL_FLOAT,
+ outputType=None)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ z = data.reshape(self.height, self.width)
+ z = np.flip(z, 0)
+ return z
+
+ def display(self):
+ self.draw()
+
+ if not self.egl:
+ # First we draw a scene.
+ # Notice the result is stored in the texture buffer.
+
+ # Then we return to the default frame buffer since we will display on the screen.
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ # Do the clean-up.
+ glClearColor(0.0, 0.0, 0.0, 0.0)
+ glClear(GL_COLOR_BUFFER_BIT)
+
+ # We draw a rectangle which covers the whole screen.
+ glUseProgram(self.quad_program)
+ glBindBuffer(GL_ARRAY_BUFFER, self.quad_buffer)
+
+ size_of_double = 8
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, 2, GL_DOUBLE, GL_FALSE,
+ 4 * size_of_double, None)
+ glEnableVertexAttribArray(1)
+ glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE,
+ 4 * size_of_double,
+ c_void_p(2 * size_of_double))
+
+ glDisable(GL_DEPTH_TEST)
+
+ # The stored texture is then mapped to this rectangle.
+ # properly assign the color buffer texture
+ glActiveTexture(GL_TEXTURE0)
+ glBindTexture(GL_TEXTURE_2D, self.screen_texture[0])
+ glUniform1i(
+ glGetUniformLocation(self.quad_program, 'screenTexture'), 0)
+
+ glDrawArrays(GL_TRIANGLES, 0, 6)
+
+ glDisableVertexAttribArray(1)
+ glDisableVertexAttribArray(0)
+
+ glEnable(GL_DEPTH_TEST)
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+ glUseProgram(0)
+
+ GLUT.glutSwapBuffers()
+ GLUT.glutPostRedisplay()
+
+ def show(self):
+ if not self.egl:
+ GLUT.glutMainLoop()
\ No newline at end of file
diff --git a/lib/renderer/gl/cam_render.py b/lib/renderer/gl/cam_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..6379b5d8a8b443fb1281eb7e025d4fc4d63c523d
--- /dev/null
+++ b/lib/renderer/gl/cam_render.py
@@ -0,0 +1,80 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from .render import Render
+
+GLUT = None
+
+
+class CamRender(Render):
+ def __init__(self,
+ width=1600,
+ height=1200,
+ name='Cam Renderer',
+ program_files=['simple.fs', 'simple.vs'],
+ color_size=1,
+ ms_rate=1,
+ egl=False):
+ Render.__init__(self,
+ width,
+ height,
+ name,
+ program_files,
+ color_size,
+ ms_rate=ms_rate,
+ egl=egl)
+ self.camera = None
+
+ if not egl:
+ global GLUT
+ import OpenGL.GLUT as GLUT
+ GLUT.glutDisplayFunc(self.display)
+ GLUT.glutKeyboardFunc(self.keyboard)
+
+ def set_camera(self, camera):
+ self.camera = camera
+ self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix()
+
+ def keyboard(self, key, x, y):
+ # w/s move along the view direction, a/d strafe, space/x move up/down, i/o shift the near/far planes
+ eps = 1
+ # print(key)
+ if key == b'w':
+ self.camera.center += eps * self.camera.direction
+ elif key == b's':
+ self.camera.center -= eps * self.camera.direction
+ if key == b'a':
+ self.camera.center -= eps * self.camera.right
+ elif key == b'd':
+ self.camera.center += eps * self.camera.right
+ if key == b' ':
+ self.camera.center += eps * self.camera.up
+ elif key == b'x':
+ self.camera.center -= eps * self.camera.up
+ elif key == b'i':
+ self.camera.near += 0.1 * eps
+ self.camera.far += 0.1 * eps
+ elif key == b'o':
+ self.camera.near -= 0.1 * eps
+ self.camera.far -= 0.1 * eps
+
+ self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix(
+ )
+
+ def show(self):
+ if GLUT is not None:
+ GLUT.glutMainLoop()
\ No newline at end of file
diff --git a/lib/renderer/gl/color_render.py b/lib/renderer/gl/color_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fd7b51db4816e8feafde843335acaa60856c174
--- /dev/null
+++ b/lib/renderer/gl/color_render.py
@@ -0,0 +1,158 @@
+'''
+MIT License
+Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+'''
+import numpy as np
+import random
+
+from .framework import *
+from .cam_render import CamRender
+
+
+class ColorRender(CamRender):
+ def __init__(self, width=1600, height=1200, name='Color Renderer', egl=False):
+ program_files = ['color.vs', 'color.fs']
+ CamRender.__init__(self,
+ width,
+ height,
+ name,
+ program_files=program_files,
+ color_size=3, egl=egl)
+
+ # WARNING: this differs from vertex_buffer and vertex_data in Render
+ self.vert_buffer = {}
+ self.vert_data = {}
+
+ # normal
+ self.norm_buffer = {}
+ self.norm_data = {}
+
+ self.color_buffer = {}
+ self.color_data = {}
+
+ self.vertex_dim = {}
+ self.n_vertices = {}
+
+ self.rot_mat_unif = glGetUniformLocation(self.program, 'RotMat')
+ self.rot_matrix = np.eye(3)
+
+ self.norm_mat_unif = glGetUniformLocation(self.program, 'NormMat')
+ self.normalize_matrix = np.eye(4)
+
+ def set_norm_mat(self, scale, center):
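+ # N maps a point x to scale * (x - center), i.e. a uniform scaling about the given center.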
+ N = np.eye(4)
+ N[:3, :3] = scale * np.eye(3)
+ N[:3, 3] = -scale * center
+
+ self.normalize_matrix = N
+
+ def set_mesh(self, vertices, faces, color, normals, mat_name='all'):
+
+ self.vert_data[mat_name] = vertices[faces.reshape([-1])]
+ self.n_vertices[mat_name] = self.vert_data[mat_name].shape[0]
+ self.vertex_dim[mat_name] = self.vert_data[mat_name].shape[1]
+ self.color_data[mat_name] = color[faces.reshape([-1])]
+ self.norm_data[mat_name] = normals[faces.reshape([-1])]
+
+ if mat_name not in self.vert_buffer.keys():
+ self.vert_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.vert_data[mat_name], GL_STATIC_DRAW)
+
+ if mat_name not in self.color_buffer.keys():
+ self.color_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.color_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.color_data[mat_name],
+ GL_STATIC_DRAW)
+
+ if mat_name not in self.norm_buffer.keys():
+ self.norm_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.norm_data[mat_name], GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def cleanup(self):
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ for key in self.vert_data:
+ glDeleteBuffers(1, [self.vert_buffer[key]])
+ glDeleteBuffers(1, [self.color_buffer[key]])
+ glDeleteBuffers(1, [self.norm_buffer[key]])
+
+ self.norm_buffer = {}
+ self.norm_data = {}
+
+ self.vert_buffer = {}
+ self.vert_data = {}
+
+ self.color_buffer = {}
+ self.color_data = {}
+
+ self.render_texture_mat = {}
+
+ self.vertex_dim = {}
+ self.n_vertices = {}
+
+ def draw(self):
+ self.draw_init()
+
+ glEnable(GL_MULTISAMPLE)
+
+ glUseProgram(self.program)
+ glUniformMatrix4fv(self.norm_mat_unif, 1, GL_FALSE,
+ self.normalize_matrix.transpose())
+ glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE,
+ self.model_view_matrix.transpose())
+ glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE,
+ self.projection_matrix.transpose())
+ glUniformMatrix3fv(self.rot_mat_unif, 1, GL_FALSE,
+ self.rot_matrix.transpose())
+
+ for mat in self.vert_buffer:
+
+ # Handle vertex buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat])
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, self.vertex_dim[mat], GL_DOUBLE, GL_FALSE,
+ 0, None)
+
+ # Handle color buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.color_buffer[mat])
+ glEnableVertexAttribArray(1)
+ glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle normal buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat])
+ glEnableVertexAttribArray(2)
+ glVertexAttribPointer(2, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glDrawArrays(GL_TRIANGLES, 0, self.n_vertices[mat])
+
+ glDisableVertexAttribArray(2)
+ glDisableVertexAttribArray(1)
+ glDisableVertexAttribArray(0)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ glUseProgram(0)
+
+ glDisable(GL_MULTISAMPLE)
+
+ self.draw_end()
\ No newline at end of file
diff --git a/lib/renderer/gl/normal_render.py b/lib/renderer/gl/normal_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..02cc72311564d615793c2e53863b7230d0a17a5b
--- /dev/null
+++ b/lib/renderer/gl/normal_render.py
@@ -0,0 +1,93 @@
+'''
+MIT License
+Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+'''
+import numpy as np
+import math
+
+from .framework import *
+from .norm_render import NormRender
+
+
+class NormalRender(NormRender):
+ def __init__(self, width=1600, height=1200, name='Normal Renderer'):
+ NormRender.__init__(self,
+ width,
+ height,
+ name,
+ program_files=['normal.vs', 'normal.fs'])
+
+ self.norm_buffer = glGenBuffers(1)
+
+ self.norm_data = None
+
+ def set_normal_mesh(self, vertices, faces, norms, face_normals):
+ NormRender.set_mesh(self, vertices, faces)
+
+ self.norm_data = norms[face_normals.reshape([-1])]
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer)
+ glBufferData(GL_ARRAY_BUFFER, self.norm_data, GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def euler_to_rot_mat(self, r_x, r_y, r_z):
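+ # Rotations about x, y and z composed as R = R_z @ R_y @ R_x (x applied first).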
+ R_x = np.array([[1, 0, 0], [0, math.cos(r_x), -math.sin(r_x)],
+ [0, math.sin(r_x), math.cos(r_x)]])
+
+ R_y = np.array([[math.cos(r_y), 0, math.sin(r_y)], [0, 1, 0],
+ [-math.sin(r_y), 0, math.cos(r_y)]])
+
+ R_z = np.array([[math.cos(r_z), -math.sin(r_z), 0],
+ [math.sin(r_z), math.cos(r_z), 0], [0, 0, 1]])
+
+ R = np.dot(R_z, np.dot(R_y, R_x))
+
+ return R
+
+ def draw(self):
+ self.draw_init()
+
+ glUseProgram(self.program)
+ glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE,
+ self.model_view_matrix.transpose())
+ glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE,
+ self.projection_matrix.transpose())
+
+ # Handle vertex buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
+
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle normal buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer)
+
+ glEnableVertexAttribArray(1)
+ glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)
+
+ glDisableVertexAttribArray(1)
+ glDisableVertexAttribArray(0)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ glUseProgram(0)
+
+ self.draw_end()
\ No newline at end of file
diff --git a/lib/renderer/gl/prt_render.py b/lib/renderer/gl/prt_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8b3ce290c6d8b7e0a3eb894230a99b0329a7818
--- /dev/null
+++ b/lib/renderer/gl/prt_render.py
@@ -0,0 +1,450 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+import random
+
+from .framework import *
+from .cam_render import CamRender
+
+
+class PRTRender(CamRender):
+ def __init__(self,
+ width=1600,
+ height=1200,
+ name='PRT Renderer',
+ uv_mode=False,
+ ms_rate=1,
+ egl=False):
+ program_files = ['prt.vs', 'prt.fs'
+ ] if not uv_mode else ['prt_uv.vs', 'prt_uv.fs']
+ CamRender.__init__(self,
+ width,
+ height,
+ name,
+ program_files=program_files,
+ color_size=8,
+ ms_rate=ms_rate,
+ egl=egl)
+
+ # WARNING: this differs from vertex_buffer and vertex_data in Render
+ self.vert_buffer = {}
+ self.vert_data = {}
+
+ self.vert_label_buffer = {}
+ self.vert_label_data = {}
+
+ self.norm_buffer = {}
+ self.norm_data = {}
+
+ self.tan_buffer = {}
+ self.tan_data = {}
+
+ self.btan_buffer = {}
+ self.btan_data = {}
+
+ self.prt1_buffer = {}
+ self.prt1_data = {}
+
+ self.prt2_buffer = {}
+ self.prt2_data = {}
+
+ self.prt3_buffer = {}
+ self.prt3_data = {}
+
+ self.uv_buffer = {}
+ self.uv_data = {}
+
+ self.render_texture_mat = {}
+
+ self.vertex_dim = {}
+ self.n_vertices = {}
+ self.label_dim = {}
+
+ self.norm_mat_unif = glGetUniformLocation(self.program, 'NormMat')
+ self.normalize_matrix = np.eye(4)
+
+ self.shcoeff_unif = glGetUniformLocation(self.program, 'SHCoeffs')
+ self.shcoeffs = np.zeros((9, 3))
+ self.shcoeffs[0, :] = 1.0
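+ # default lighting: only the constant (band-0) SH coefficient is set, i.e. uniform ambient light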
+ #self.shcoeffs[1:,:] = np.random.rand(8,3)
+
+ self.hasAlbedoUnif = glGetUniformLocation(self.program, 'hasAlbedoMap')
+ self.hasNormalUnif = glGetUniformLocation(self.program, 'hasNormalMap')
+
+ self.analyticUnif = glGetUniformLocation(self.program, 'analytic')
+ self.analytic = False
+
+ self.rot_mat_unif = glGetUniformLocation(self.program, 'RotMat')
+ self.rot_matrix = np.eye(3)
+
+ def set_texture(self, mat_name, smplr_name, texture):
+ # texture_image: H x W x 3
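+ # the texture is expected to be an 8-bit RGB image; it is flipped vertically
+ # to match GL's bottom-left texture origin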
+ width = texture.shape[1]
+ height = texture.shape[0]
+ texture = np.flip(texture, 0)
+ img_data = np.frombuffer(texture.tobytes(), np.uint8)
+
+ if mat_name not in self.render_texture_mat:
+ self.render_texture_mat[mat_name] = {}
+ if smplr_name in self.render_texture_mat[mat_name].keys():
+ glDeleteTextures([self.render_texture_mat[mat_name][smplr_name]])
+ del self.render_texture_mat[mat_name][smplr_name]
+
+ self.render_texture_mat[mat_name][smplr_name] = glGenTextures(1)
+ glActiveTexture(GL_TEXTURE0)
+
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
+ glBindTexture(GL_TEXTURE_2D,
+ self.render_texture_mat[mat_name][smplr_name])
+
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB,
+ GL_UNSIGNED_BYTE, img_data)
+
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 3)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR_MIPMAP_LINEAR)
+
+ glGenerateMipmap(GL_TEXTURE_2D)
+
+ def set_albedo(self, texture_image, mat_name='all'):
+ self.set_texture(mat_name, 'AlbedoMap', texture_image)
+
+ def set_normal_map(self, texture_image, mat_name='all'):
+ self.set_texture(mat_name, 'NormalMap', texture_image)
+
+ def set_mesh(self,
+ vertices,
+ faces,
+ norms,
+ faces_nml,
+ uvs,
+ faces_uvs,
+ prt,
+ faces_prt,
+ tans,
+ bitans,
+ verts_label=None,
+ mat_name='all'):
+
+ self.vert_data[mat_name] = vertices[faces.reshape([-1])]
+ self.vert_label_data[mat_name] = verts_label[faces.reshape([-1])]
+ self.n_vertices[mat_name] = self.vert_data[mat_name].shape[0]
+ self.vertex_dim[mat_name] = self.vert_data[mat_name].shape[1]
+ self.label_dim[mat_name] = self.vert_label_data[mat_name].shape[1]
+
+ if mat_name not in self.vert_buffer.keys():
+ self.vert_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.vert_data[mat_name], GL_STATIC_DRAW)
+
+ if mat_name not in self.vert_label_buffer.keys():
+ self.vert_label_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_label_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.vert_label_data[mat_name],
+ GL_STATIC_DRAW)
+
+ self.uv_data[mat_name] = uvs[faces_uvs.reshape([-1])]
+ if mat_name not in self.uv_buffer.keys():
+ self.uv_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.uv_data[mat_name], GL_STATIC_DRAW)
+
+ self.norm_data[mat_name] = norms[faces_nml.reshape([-1])]
+ if mat_name not in self.norm_buffer.keys():
+ self.norm_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.norm_data[mat_name], GL_STATIC_DRAW)
+
+ self.tan_data[mat_name] = tans[faces_nml.reshape([-1])]
+ if mat_name not in self.tan_buffer.keys():
+ self.tan_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.tan_data[mat_name], GL_STATIC_DRAW)
+
+ self.btan_data[mat_name] = bitans[faces_nml.reshape([-1])]
+ if mat_name not in self.btan_buffer.keys():
+ self.btan_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.btan_data[mat_name], GL_STATIC_DRAW)
+
+ self.prt1_data[mat_name] = prt[faces_prt.reshape([-1])][:, :3]
+ self.prt2_data[mat_name] = prt[faces_prt.reshape([-1])][:, 3:6]
+ self.prt3_data[mat_name] = prt[faces_prt.reshape([-1])][:, 6:]
+
+ if mat_name not in self.prt1_buffer.keys():
+ self.prt1_buffer[mat_name] = glGenBuffers(1)
+ if mat_name not in self.prt2_buffer.keys():
+ self.prt2_buffer[mat_name] = glGenBuffers(1)
+ if mat_name not in self.prt3_buffer.keys():
+ self.prt3_buffer[mat_name] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.prt1_data[mat_name], GL_STATIC_DRAW)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.prt2_data[mat_name], GL_STATIC_DRAW)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[mat_name])
+ glBufferData(GL_ARRAY_BUFFER, self.prt3_data[mat_name], GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def set_mesh_mtl(self,
+ vertices,
+ faces,
+ norms,
+ faces_nml,
+ uvs,
+ faces_uvs,
+ tans,
+ bitans,
+ prt,
+ verts_label=None):
+ for key in faces:
+ self.vert_data[key] = vertices[faces[key].reshape([-1])]
+ self.vert_label_data[key] = verts_label[faces[key].reshape([-1])]
+ self.n_vertices[key] = self.vert_data[key].shape[0]
+ self.vertex_dim[key] = self.vert_data[key].shape[1]
+ self.label_dim[key] = self.vert_label_data[key].shape[1]
+
+ if key not in self.vert_buffer.keys():
+ self.vert_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.vert_data[key], GL_STATIC_DRAW)
+
+ if key not in self.vert_label_buffer.keys():
+ self.vert_label_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_label_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.vert_label_data[key],
+ GL_STATIC_DRAW)
+
+ self.uv_data[key] = uvs[faces_uvs[key].reshape([-1])]
+ if key not in self.uv_buffer.keys():
+ self.uv_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.uv_data[key], GL_STATIC_DRAW)
+
+ self.norm_data[key] = norms[faces_nml[key].reshape([-1])]
+ if key not in self.norm_buffer.keys():
+ self.norm_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.norm_data[key], GL_STATIC_DRAW)
+
+ self.tan_data[key] = tans[faces_nml[key].reshape([-1])]
+ if key not in self.tan_buffer.keys():
+ self.tan_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.tan_data[key], GL_STATIC_DRAW)
+
+ self.btan_data[key] = bitans[faces_nml[key].reshape([-1])]
+ if key not in self.btan_buffer.keys():
+ self.btan_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.btan_data[key], GL_STATIC_DRAW)
+
+ self.prt1_data[key] = prt[faces[key].reshape([-1])][:, :3]
+ self.prt2_data[key] = prt[faces[key].reshape([-1])][:, 3:6]
+ self.prt3_data[key] = prt[faces[key].reshape([-1])][:, 6:]
+
+ if key not in self.prt1_buffer.keys():
+ self.prt1_buffer[key] = glGenBuffers(1)
+ if key not in self.prt2_buffer.keys():
+ self.prt2_buffer[key] = glGenBuffers(1)
+ if key not in self.prt3_buffer.keys():
+ self.prt3_buffer[key] = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.prt1_data[key], GL_STATIC_DRAW)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.prt2_data[key], GL_STATIC_DRAW)
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[key])
+ glBufferData(GL_ARRAY_BUFFER, self.prt3_data[key], GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def cleanup(self):
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+ for key in self.vert_data:
+ glDeleteBuffers(1, [self.vert_buffer[key]])
+ glDeleteBuffers(1, [self.norm_buffer[key]])
+ glDeleteBuffers(1, [self.uv_buffer[key]])
+ glDeleteBuffers(1, [self.vert_label_buffer[key]])
+
+ glDeleteBuffers(1, [self.tan_buffer[key]])
+ glDeleteBuffers(1, [self.btan_buffer[key]])
+ glDeleteBuffers(1, [self.prt1_buffer[key]])
+ glDeleteBuffers(1, [self.prt2_buffer[key]])
+ glDeleteBuffers(1, [self.prt3_buffer[key]])
+
+
+ for smplr in self.render_texture_mat[key]:
+ glDeleteTextures([self.render_texture_mat[key][smplr]])
+
+ self.vert_buffer = {}
+ self.vert_data = {}
+
+ self.vert_label_buffer = {}
+ self.vert_label_data = {}
+
+ self.norm_buffer = {}
+ self.norm_data = {}
+
+ self.tan_buffer = {}
+ self.tan_data = {}
+
+ self.btan_buffer = {}
+ self.btan_data = {}
+
+ self.prt1_buffer = {}
+ self.prt1_data = {}
+
+ self.prt2_buffer = {}
+ self.prt2_data = {}
+
+ self.prt3_buffer = {}
+ self.prt3_data = {}
+
+ self.uv_buffer = {}
+ self.uv_data = {}
+
+ self.render_texture_mat = {}
+
+ self.vertex_dim = {}
+ self.n_vertices = {}
+ self.label_dim = {}
+
+ def randomize_sh(self):
+ self.shcoeffs[0, :] = 0.8
+ self.shcoeffs[1:, :] = 1.0 * np.random.rand(8, 3)
+
+ def set_sh(self, sh):
+ self.shcoeffs = sh
+
+ def set_norm_mat(self, scale, center):
+ N = np.eye(4)
+ N[:3, :3] = scale * np.eye(3)
+ N[:3, 3] = -scale * center
+
+ self.normalize_matrix = N
+
+ def draw(self):
+ self.draw_init()
+
+ glDisable(GL_BLEND)
+ #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
+ glEnable(GL_MULTISAMPLE)
+
+ glUseProgram(self.program)
+ glUniformMatrix4fv(self.norm_mat_unif, 1, GL_FALSE,
+ self.normalize_matrix.transpose())
+ glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE,
+ self.model_view_matrix.transpose())
+ glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE,
+ self.projection_matrix.transpose())
+
+ if 'AlbedoMap' in self.render_texture_mat['all']:
+ glUniform1ui(self.hasAlbedoUnif, GLuint(1))
+ else:
+ glUniform1ui(self.hasAlbedoUnif, GLuint(0))
+
+ if 'NormalMap' in self.render_texture_mat['all']:
+ glUniform1ui(self.hasNormalUnif, GLuint(1))
+ else:
+ glUniform1ui(self.hasNormalUnif, GLuint(0))
+
+ glUniform1ui(self.analyticUnif,
+ GLuint(1) if self.analytic else GLuint(0))
+
+ glUniform3fv(self.shcoeff_unif, 9, self.shcoeffs)
+
+ glUniformMatrix3fv(self.rot_mat_unif, 1, GL_FALSE,
+ self.rot_matrix.transpose())
+
+ for mat in self.vert_buffer:
+ # Handle vertex buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat])
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, self.vertex_dim[mat], GL_DOUBLE, GL_FALSE,
+ 0, None)
+
+ # Handle normal buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat])
+ glEnableVertexAttribArray(1)
+ glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle uv buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[mat])
+ glEnableVertexAttribArray(2)
+ glVertexAttribPointer(2, 2, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle tan buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[mat])
+ glEnableVertexAttribArray(3)
+ glVertexAttribPointer(3, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle btan buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[mat])
+ glEnableVertexAttribArray(4)
+ glVertexAttribPointer(4, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle PRT buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[mat])
+ glEnableVertexAttribArray(5)
+ glVertexAttribPointer(5, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[mat])
+ glEnableVertexAttribArray(6)
+ glVertexAttribPointer(6, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[mat])
+ glEnableVertexAttribArray(7)
+ glVertexAttribPointer(7, 3, GL_DOUBLE, GL_FALSE, 0, None)
+
+ # Handle vertex label buffer
+ glBindBuffer(GL_ARRAY_BUFFER, self.vert_label_buffer[mat])
+ glEnableVertexAttribArray(8)
+ glVertexAttribPointer(8, self.label_dim[mat], GL_DOUBLE, GL_FALSE,
+ 0, None)
+
+ for i, smplr in enumerate(self.render_texture_mat[mat]):
+ glActiveTexture(GL_TEXTURE0 + i)
+ glBindTexture(GL_TEXTURE_2D,
+ self.render_texture_mat[mat][smplr])
+ glUniform1i(glGetUniformLocation(self.program, smplr), i)
+
+ glDrawArrays(GL_TRIANGLES, 0, self.n_vertices[mat])
+
+ glDisableVertexAttribArray(8)
+ glDisableVertexAttribArray(7)
+ glDisableVertexAttribArray(6)
+ glDisableVertexAttribArray(5)
+ glDisableVertexAttribArray(4)
+ glDisableVertexAttribArray(3)
+ glDisableVertexAttribArray(2)
+ glDisableVertexAttribArray(1)
+ glDisableVertexAttribArray(0)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ glUseProgram(0)
+
+ glDisable(GL_BLEND)
+ glDisable(GL_MULTISAMPLE)
+
+ self.draw_end()
diff --git a/lib/renderer/gl/render2.py b/lib/renderer/gl/render2.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f9f66beda68adaa141af27591c131c5a7999c1e
--- /dev/null
+++ b/lib/renderer/gl/render2.py
@@ -0,0 +1,384 @@
+'''
+MIT License
+Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+'''
+import os
+from ctypes import c_void_p
+
+import numpy as np
+from OpenGL.GLUT import *
+from .framework import *
+
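+# a single GLUT window / GL context is created once and shared by every Render instance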
+_glut_window = None
+
+
+class Render:
+ def __init__(self,
+ width=1600,
+ height=1200,
+ name='GL Renderer',
+ program_files=['simple.fs', 'simple.vs'],
+ color_size=1,
+ ms_rate=1):
+ self.width = width
+ self.height = height
+ self.name = name
+ self.display_mode = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH
+ self.use_inverse_depth = False
+
+ global _glut_window
+ if _glut_window is None:
+ glutInit()
+ glutInitDisplayMode(self.display_mode)
+ glutInitWindowSize(self.width, self.height)
+ glutInitWindowPosition(0, 0)
+ _glut_window = glutCreateWindow("My Render.")
+
+ # glEnable(GL_DEPTH_CLAMP)
+ glEnable(GL_DEPTH_TEST)
+
+ glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE)
+ glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE)
+ glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE)
+
+ # init program
+ shader_list = []
+
+ for program_file in program_files:
+ _, ext = os.path.splitext(program_file)
+ if ext == '.vs':
+ shader_list.append(loadShader(GL_VERTEX_SHADER, program_file))
+ elif ext == '.fs':
+ shader_list.append(loadShader(GL_FRAGMENT_SHADER,
+ program_file))
+ elif ext == '.gs':
+ shader_list.append(loadShader(GL_GEOMETRY_SHADER,
+ program_file))
+
+ self.program = createProgram(shader_list)
+
+ for shader in shader_list:
+ glDeleteShader(shader)
+
+ # Init uniform variables
+ self.model_mat_unif = glGetUniformLocation(self.program, 'ModelMat')
+ self.persp_mat_unif = glGetUniformLocation(self.program, 'PerspMat')
+
+ self.vertex_buffer = glGenBuffers(1)
+
+ # Init screen quad program and buffer
+ self.quad_program, self.quad_buffer = self.init_quad_program()
+
+ # Configure frame buffer
+ self.frame_buffer = glGenFramebuffers(1)
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+
+ self.intermediate_fbo = None
+ if ms_rate > 1:
+ # Configure texture buffer to render to
+ self.color_buffer = []
+ for i in range(color_size):
+ color_buffer = glGenTextures(1)
+ multi_sample_rate = ms_rate
+ glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR)
+ glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
+ multi_sample_rate, GL_RGBA32F,
+ self.width, self.height, GL_TRUE)
+ glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i,
+ GL_TEXTURE_2D_MULTISAMPLE, color_buffer,
+ 0)
+ self.color_buffer.append(color_buffer)
+
+ self.render_buffer = glGenRenderbuffers(1)
+ glBindRenderbuffer(GL_RENDERBUFFER, self.render_buffer)
+ glRenderbufferStorageMultisample(GL_RENDERBUFFER,
+ multi_sample_rate,
+ GL_DEPTH24_STENCIL8, self.width,
+ self.height)
+ glBindRenderbuffer(GL_RENDERBUFFER, 0)
+ glFramebufferRenderbuffer(GL_FRAMEBUFFER,
+ GL_DEPTH_STENCIL_ATTACHMENT,
+ GL_RENDERBUFFER, self.render_buffer)
+
+ attachments = []
+ for i in range(color_size):
+ attachments.append(GL_COLOR_ATTACHMENT0 + i)
+ glDrawBuffers(color_size, attachments)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ self.intermediate_fbo = glGenFramebuffers(1)
+ glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo)
+
+ self.screen_texture = []
+ for i in range(color_size):
+ screen_texture = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, screen_texture)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width,
+ self.height, 0, GL_RGBA, GL_FLOAT, None)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_LINEAR)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_LINEAR)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D,
+ screen_texture, 0)
+ self.screen_texture.append(screen_texture)
+
+ glDrawBuffers(color_size, attachments)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ else:
+ self.color_buffer = []
+ for i in range(color_size):
+ color_buffer = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, color_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
+ GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
+ GL_NEAREST)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width,
+ self.height, 0, GL_RGBA, GL_FLOAT, None)
+ glFramebufferTexture2D(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D,
+ color_buffer, 0)
+ self.color_buffer.append(color_buffer)
+
+ # Configure depth texture map to render to
+ self.depth_buffer = glGenTextures(1)
+ glBindTexture(GL_TEXTURE_2D, self.depth_buffer)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+ glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE,
+ GL_COMPARE_R_TO_TEXTURE)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL)
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width,
+ self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None)
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
+ GL_TEXTURE_2D, self.depth_buffer, 0)
+
+ attachments = []
+ for i in range(color_size):
+ attachments.append(GL_COLOR_ATTACHMENT0 + i)
+ glDrawBuffers(color_size, attachments)
+ self.screen_texture = self.color_buffer
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ # Configure texture buffer if needed
+ self.render_texture = None
+
+        # NOTE: the original render_texture only supports a single input;
+        # render_texture_v2 is a tentative workaround for that limitation
+ self.render_texture_v2 = {}
+
+ # Inner storage for buffer data
+ self.vertex_data = None
+ self.vertex_dim = None
+ self.n_vertices = None
+
+ self.model_view_matrix = None
+ self.projection_matrix = None
+
+ glutDisplayFunc(self.display)
+
+ def init_quad_program(self):
+ shader_list = []
+
+ shader_list.append(loadShader(GL_VERTEX_SHADER, "quad.vs"))
+ shader_list.append(loadShader(GL_FRAGMENT_SHADER, "quad.fs"))
+
+ the_program = createProgram(shader_list)
+
+ for shader in shader_list:
+ glDeleteShader(shader)
+
+ # vertex attributes for a quad that fills the entire screen in Normalized Device Coordinates.
+ # positions # texCoords
+ quad_vertices = np.array([
+ -1.0, 1.0, 0.0, 1.0, -1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0,
+ -1.0, 1.0, 0.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0
+ ])
+
+ quad_buffer = glGenBuffers(1)
+ glBindBuffer(GL_ARRAY_BUFFER, quad_buffer)
+ glBufferData(GL_ARRAY_BUFFER, quad_vertices, GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ return the_program, quad_buffer
+
+ def set_mesh(self, vertices, faces):
+ self.vertex_data = vertices[faces.reshape([-1])]
+ self.vertex_dim = self.vertex_data.shape[1]
+ self.n_vertices = self.vertex_data.shape[0]
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
+ glBufferData(GL_ARRAY_BUFFER, self.vertex_data, GL_STATIC_DRAW)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ def set_viewpoint(self, projection, model_view):
+ self.projection_matrix = projection
+ self.model_view_matrix = model_view
+
+ def draw_init(self):
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+ glEnable(GL_DEPTH_TEST)
+
+ # glClearColor(0.0, 0.0, 0.0, 0.0)
+        glClearColor(1.0, 1.0, 1.0, 0.0)  # white background
+
+ if self.use_inverse_depth:
+ glDepthFunc(GL_GREATER)
+ glClearDepth(0.0)
+ else:
+ glDepthFunc(GL_LESS)
+ glClearDepth(1.0)
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
+
+ def draw_end(self):
+ if self.intermediate_fbo is not None:
+ for i in range(len(self.color_buffer)):
+ glBindFramebuffer(GL_READ_FRAMEBUFFER, self.frame_buffer)
+ glReadBuffer(GL_COLOR_ATTACHMENT0 + i)
+ glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.intermediate_fbo)
+ glDrawBuffer(GL_COLOR_ATTACHMENT0 + i)
+ glBlitFramebuffer(0, 0, self.width, self.height, 0, 0,
+ self.width, self.height, GL_COLOR_BUFFER_BIT,
+ GL_NEAREST)
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ glDepthFunc(GL_LESS)
+ glClearDepth(1.0)
+
+ def draw(self):
+ self.draw_init()
+
+ glUseProgram(self.program)
+ glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE,
+ self.model_view_matrix.transpose())
+ glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE,
+ self.projection_matrix.transpose())
+
+ glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)
+
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)
+
+ glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)
+
+ glDisableVertexAttribArray(0)
+
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+
+ glUseProgram(0)
+
+ self.draw_end()
+
+ def get_color(self, color_id=0):
+ glBindFramebuffer(
+ GL_FRAMEBUFFER, self.intermediate_fbo
+ if self.intermediate_fbo is not None else self.frame_buffer)
+ glReadBuffer(GL_COLOR_ATTACHMENT0 + color_id)
+ data = glReadPixels(0,
+ 0,
+ self.width,
+ self.height,
+ GL_RGBA,
+ GL_FLOAT,
+ outputType=None)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ rgb = data.reshape(self.height, self.width, -1)
+ rgb = np.flip(rgb, 0)
+ return rgb
+
+ def get_z_value(self):
+ glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer)
+ data = glReadPixels(0,
+ 0,
+ self.width,
+ self.height,
+ GL_DEPTH_COMPONENT,
+ GL_FLOAT,
+ outputType=None)
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+ z = data.reshape(self.height, self.width)
+ z = np.flip(z, 0)
+ return z
+
+ def display(self):
+ # First we draw a scene.
+ # Notice the result is stored in the texture buffer.
+ self.draw()
+
+ # Then we return to the default frame buffer since we will display on the screen.
+ glBindFramebuffer(GL_FRAMEBUFFER, 0)
+
+ # Do the clean-up.
+ # glClearColor(0.0, 0.0, 0.0, 0.0) #Black background
+        glClearColor(1.0, 1.0, 1.0, 0.0)  # white background
+ glClear(GL_COLOR_BUFFER_BIT)
+
+ # We draw a rectangle which covers the whole screen.
+ glUseProgram(self.quad_program)
+ glBindBuffer(GL_ARRAY_BUFFER, self.quad_buffer)
+
+ size_of_double = 8
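+        # quad vertices are interleaved (x, y, u, v) doubles, hence a stride of 4 * 8 bytes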
+ glEnableVertexAttribArray(0)
+ glVertexAttribPointer(0, 2, GL_DOUBLE, GL_FALSE, 4 * size_of_double,
+ None)
+ glEnableVertexAttribArray(1)
+ glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE, 4 * size_of_double,
+ c_void_p(2 * size_of_double))
+
+ glDisable(GL_DEPTH_TEST)
+
+ # The stored texture is then mapped to this rectangle.
+        # properly assign the color buffer texture
+ glActiveTexture(GL_TEXTURE0)
+ glBindTexture(GL_TEXTURE_2D, self.screen_texture[0])
+ glUniform1i(glGetUniformLocation(self.quad_program, 'screenTexture'),
+ 0)
+
+ glDrawArrays(GL_TRIANGLES, 0, 6)
+
+ glDisableVertexAttribArray(1)
+ glDisableVertexAttribArray(0)
+
+ glEnable(GL_DEPTH_TEST)
+ glBindBuffer(GL_ARRAY_BUFFER, 0)
+ glUseProgram(0)
+
+ glutSwapBuffers()
+ glutPostRedisplay()
+
+ def show(self):
+ glutMainLoop()
\ No newline at end of file
diff --git a/lib/renderer/glm.py b/lib/renderer/glm.py
new file mode 100644
index 0000000000000000000000000000000000000000..5068d464c398710e3480078aad3ad13925d3a73b
--- /dev/null
+++ b/lib/renderer/glm.py
@@ -0,0 +1,143 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+
+
+def vec3(x, y, z):
+ return np.array([x, y, z], dtype=np.float32)
+
+
+def radians(v):
+ return np.radians(v)
+
+
+def identity():
+ return np.identity(4, dtype=np.float32)
+
+
+def empty():
+ return np.zeros([4, 4], dtype=np.float32)
+
+
+def magnitude(v):
+ return np.linalg.norm(v)
+
+
+def normalize(v):
+ m = magnitude(v)
+ return v if m == 0 else v / m
+
+
+def dot(u, v):
+ return np.sum(u * v)
+
+
+def cross(u, v):
+ res = vec3(0, 0, 0)
+ res[0] = u[1] * v[2] - u[2] * v[1]
+ res[1] = u[2] * v[0] - u[0] * v[2]
+ res[2] = u[0] * v[1] - u[1] * v[0]
+ return res
+
+
+# below functions can be optimized
+
+
+def translate(m, v):
+ res = np.copy(m)
+ res[:, 3] = m[:, 0] * v[0] + m[:, 1] * v[1] + m[:, 2] * v[2] + m[:, 3]
+ return res
+
+
+def rotate(m, angle, v):
+ a = angle
+ c = np.cos(a)
+ s = np.sin(a)
+
+ axis = normalize(v)
+ temp = (1 - c) * axis
+
+ rot = empty()
+ rot[0][0] = c + temp[0] * axis[0]
+ rot[0][1] = temp[0] * axis[1] + s * axis[2]
+ rot[0][2] = temp[0] * axis[2] - s * axis[1]
+
+ rot[1][0] = temp[1] * axis[0] - s * axis[2]
+ rot[1][1] = c + temp[1] * axis[1]
+ rot[1][2] = temp[1] * axis[2] + s * axis[0]
+
+ rot[2][0] = temp[2] * axis[0] + s * axis[1]
+ rot[2][1] = temp[2] * axis[1] - s * axis[0]
+ rot[2][2] = c + temp[2] * axis[2]
+
+ res = empty()
+ res[:, 0] = m[:, 0] * rot[0][0] + m[:, 1] * rot[0][1] + m[:, 2] * rot[0][2]
+ res[:, 1] = m[:, 0] * rot[1][0] + m[:, 1] * rot[1][1] + m[:, 2] * rot[1][2]
+ res[:, 2] = m[:, 0] * rot[2][0] + m[:, 1] * rot[2][1] + m[:, 2] * rot[2][2]
+ res[:, 3] = m[:, 3]
+ return res
+
+
+def perspective(fovy, aspect, zNear, zFar):
+ tanHalfFovy = np.tan(fovy / 2)
+
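+    # entries are written in glm's column-major convention; the final .T returns a row-major matrix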
+ res = empty()
+ res[0][0] = 1 / (aspect * tanHalfFovy)
+ res[1][1] = 1 / (tanHalfFovy)
+ res[2][3] = -1
+ res[2][2] = -(zFar + zNear) / (zFar - zNear)
+ res[3][2] = -(2 * zFar * zNear) / (zFar - zNear)
+
+ return res.T
+
+
+def ortho(left, right, bottom, top, zNear, zFar):
+ # res = np.ones([4, 4], dtype=np.float32)
+ res = identity()
+ res[0][0] = 2 / (right - left)
+ res[1][1] = 2 / (top - bottom)
+ res[2][2] = -2 / (zFar - zNear)
+ res[3][0] = -(right + left) / (right - left)
+ res[3][1] = -(top + bottom) / (top - bottom)
+ res[3][2] = -(zFar + zNear) / (zFar - zNear)
+ return res.T
+
+
+def lookat(eye, center, up):
+ f = normalize(center - eye)
+ s = normalize(cross(f, up))
+ u = cross(s, f)
+
+ res = identity()
+ res[0][0] = s[0]
+ res[1][0] = s[1]
+ res[2][0] = s[2]
+ res[0][1] = u[0]
+ res[1][1] = u[1]
+ res[2][1] = u[2]
+ res[0][2] = -f[0]
+ res[1][2] = -f[1]
+ res[2][2] = -f[2]
+ res[3][0] = -dot(s, eye)
+ res[3][1] = -dot(u, eye)
+ res[3][2] = -dot(f, eye)
+ return res.T
+
+
+def transform(d, m):
+ return np.dot(m, d.T).T
\ No newline at end of file
diff --git a/lib/renderer/mesh.py b/lib/renderer/mesh.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bba90625694abd908c86089914956b63afe0ed6
--- /dev/null
+++ b/lib/renderer/mesh.py
@@ -0,0 +1,526 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from lib.dataset.mesh_util import SMPLX
+from lib.common.render_utils import face_vertices
+import numpy as np
+import lib.smplx as smplx
+import trimesh
+import torch
+import torch.nn.functional as F
+
+model_init_params = dict(
+ gender='male',
+ model_type='smplx',
+ model_path=SMPLX().model_dir,
+ create_global_orient=False,
+ create_body_pose=False,
+ create_betas=False,
+ create_left_hand_pose=False,
+ create_right_hand_pose=False,
+ create_expression=False,
+ create_jaw_pose=False,
+ create_leye_pose=False,
+ create_reye_pose=False,
+ create_transl=False,
+ num_pca_comps=12)
+
+
+def get_smpl_model(model_type, gender):
+    # NOTE: model_type and gender are currently unused; the global model_init_params are applied
+    return smplx.create(**model_init_params)
+
+
+def normalization(data):
+ _range = np.max(data) - np.min(data)
+ return ((data - np.min(data)) / _range)
+
+
+def sigmoid(x):
+ z = 1 / (1 + np.exp(-x))
+ return z
+
+
+def load_fit_body(fitted_path, scale, smpl_type='smplx', smpl_gender='neutral', noise_dict=None):
+
+ param = np.load(fitted_path, allow_pickle=True)
+ for key in param.keys():
+ param[key] = torch.as_tensor(param[key])
+
+ smpl_model = get_smpl_model(smpl_type, smpl_gender)
+ model_forward_params = dict(betas=param['betas'],
+ global_orient=param['global_orient'],
+ body_pose=param['body_pose'],
+ left_hand_pose=param['left_hand_pose'],
+ right_hand_pose=param['right_hand_pose'],
+ jaw_pose=param['jaw_pose'],
+ leye_pose=param['leye_pose'],
+ reye_pose=param['reye_pose'],
+ expression=param['expression'],
+ return_verts=True)
+
+ if noise_dict is not None:
+ model_forward_params.update(noise_dict)
+
+ smpl_out = smpl_model(**model_forward_params)
+
+ smpl_verts = (
+ (smpl_out.vertices[0] * param['scale'] + param['translation']) * scale).detach()
+ smpl_joints = (
+ (smpl_out.joints[0] * param['scale'] + param['translation']) * scale).detach()
+ smpl_mesh = trimesh.Trimesh(smpl_verts,
+ smpl_model.faces,
+ process=False, maintain_order=True)
+
+ return smpl_mesh, smpl_joints
+
+
+def load_ori_fit_body(fitted_path, smpl_type='smplx', smpl_gender='neutral'):
+
+ param = np.load(fitted_path, allow_pickle=True)
+ for key in param.keys():
+ param[key] = torch.as_tensor(param[key])
+
+ smpl_model = get_smpl_model(smpl_type, smpl_gender)
+ model_forward_params = dict(betas=param['betas'],
+ global_orient=param['global_orient'],
+ body_pose=param['body_pose'],
+ left_hand_pose=param['left_hand_pose'],
+ right_hand_pose=param['right_hand_pose'],
+ jaw_pose=param['jaw_pose'],
+ leye_pose=param['leye_pose'],
+ reye_pose=param['reye_pose'],
+ expression=param['expression'],
+ return_verts=True)
+
+ smpl_out = smpl_model(**model_forward_params)
+
+ smpl_verts = smpl_out.vertices[0].detach()
+ smpl_mesh = trimesh.Trimesh(smpl_verts,
+ smpl_model.faces,
+ process=False, maintain_order=True)
+
+ return smpl_mesh
+
+
+def save_obj_mesh(mesh_path, verts, faces):
+ file = open(mesh_path, 'w')
+ for v in verts:
+ file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
+ for f in faces:
+ f_plus = f + 1
+ file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
+ file.close()
+
+
+# https://github.com/ratcave/wavefront_reader
+def read_mtlfile(fname):
+ materials = {}
+ with open(fname) as f:
+ lines = f.read().splitlines()
+
+ for line in lines:
+ if line:
+ split_line = line.strip().split(' ', 1)
+ if len(split_line) < 2:
+ continue
+
+ prefix, data = split_line[0], split_line[1]
+ if 'newmtl' in prefix:
+ material = {}
+ materials[data] = material
+ elif materials:
+ if data:
+ split_data = data.strip().split(' ')
+
+                    # assume texture maps live in the same directory as the .mtl file
+                    # WARNING: do not include spaces in your filenames!
+ if 'map' in prefix:
+ material[prefix] = split_data[-1].split('\\')[-1]
+ elif len(split_data) > 1:
+ material[prefix] = tuple(float(d) for d in split_data)
+ else:
+ try:
+ material[prefix] = int(data)
+ except ValueError:
+ material[prefix] = float(data)
+
+ return materials
+
+
+def load_obj_mesh_mtl(mesh_file):
+ vertex_data = []
+ norm_data = []
+ uv_data = []
+
+ face_data = []
+ face_norm_data = []
+ face_uv_data = []
+
+ # face per material
+ face_data_mat = {}
+ face_norm_data_mat = {}
+ face_uv_data_mat = {}
+
+ # current material name
+ mtl_data = None
+ cur_mat = None
+
+ if isinstance(mesh_file, str):
+ f = open(mesh_file, "r")
+ else:
+ f = mesh_file
+ for line in f:
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ if line.startswith('#'):
+ continue
+ values = line.split()
+ if not values:
+ continue
+
+ if values[0] == 'v':
+ v = list(map(float, values[1:4]))
+ vertex_data.append(v)
+ elif values[0] == 'vn':
+ vn = list(map(float, values[1:4]))
+ norm_data.append(vn)
+ elif values[0] == 'vt':
+ vt = list(map(float, values[1:3]))
+ uv_data.append(vt)
+ elif values[0] == 'mtllib':
+ mtl_data = read_mtlfile(
+ mesh_file.replace(mesh_file.split('/')[-1], values[1]))
+ elif values[0] == 'usemtl':
+ cur_mat = values[1]
+ elif values[0] == 'f':
+ # local triangle data
+ l_face_data = []
+ l_face_uv_data = []
+ l_face_norm_data = []
+
+ # quad mesh
+ if len(values) > 4:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[0]) if int(x.split('/')[0])
+ < 0 else int(x.split('/')[0]) - 1, values[1:4]))
+ l_face_data.append(f)
+ f = list(
+ map(
+ lambda x: int(x.split('/')[0])
+ if int(x.split('/')[0]) < 0 else int(x.split('/')[0]) -
+ 1, [values[3], values[4], values[1]]))
+ l_face_data.append(f)
+ # tri mesh
+ else:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[0]) if int(x.split('/')[0])
+ < 0 else int(x.split('/')[0]) - 1, values[1:4]))
+ l_face_data.append(f)
+ # deal with texture
+ if len(values[1].split('/')) >= 2:
+ # quad mesh
+ if len(values) > 4:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[1])
+ if int(x.split('/')[1]) < 0 else int(
+ x.split('/')[1]) - 1, values[1:4]))
+ l_face_uv_data.append(f)
+ f = list(
+ map(
+ lambda x: int(x.split('/')[1])
+ if int(x.split('/')[1]) < 0 else int(
+ x.split('/')[1]) - 1,
+ [values[3], values[4], values[1]]))
+ l_face_uv_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[1]) != 0:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[1])
+ if int(x.split('/')[1]) < 0 else int(
+ x.split('/')[1]) - 1, values[1:4]))
+ l_face_uv_data.append(f)
+ # deal with normal
+ if len(values[1].split('/')) == 3:
+ # quad mesh
+ if len(values) > 4:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[2])
+ if int(x.split('/')[2]) < 0 else int(
+ x.split('/')[2]) - 1, values[1:4]))
+ l_face_norm_data.append(f)
+ f = list(
+ map(
+ lambda x: int(x.split('/')[2])
+ if int(x.split('/')[2]) < 0 else int(
+ x.split('/')[2]) - 1,
+ [values[3], values[4], values[1]]))
+ l_face_norm_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[2]) != 0:
+ f = list(
+ map(
+ lambda x: int(x.split('/')[2])
+ if int(x.split('/')[2]) < 0 else int(
+ x.split('/')[2]) - 1, values[1:4]))
+ l_face_norm_data.append(f)
+
+ face_data += l_face_data
+ face_uv_data += l_face_uv_data
+ face_norm_data += l_face_norm_data
+
+ if cur_mat is not None:
+ if cur_mat not in face_data_mat.keys():
+ face_data_mat[cur_mat] = []
+ if cur_mat not in face_uv_data_mat.keys():
+ face_uv_data_mat[cur_mat] = []
+ if cur_mat not in face_norm_data_mat.keys():
+ face_norm_data_mat[cur_mat] = []
+ face_data_mat[cur_mat] += l_face_data
+ face_uv_data_mat[cur_mat] += l_face_uv_data
+ face_norm_data_mat[cur_mat] += l_face_norm_data
+
+ vertices = np.array(vertex_data)
+ faces = np.array(face_data)
+
+ norms = np.array(norm_data)
+ norms = normalize_v3(norms)
+ face_normals = np.array(face_norm_data)
+
+ uvs = np.array(uv_data)
+ face_uvs = np.array(face_uv_data)
+
+ out_tuple = (vertices, faces, norms, face_normals, uvs, face_uvs)
+
+ if cur_mat is not None and mtl_data is not None:
+ for key in face_data_mat:
+ face_data_mat[key] = np.array(face_data_mat[key])
+ face_uv_data_mat[key] = np.array(face_uv_data_mat[key])
+ face_norm_data_mat[key] = np.array(face_norm_data_mat[key])
+
+ out_tuple += (face_data_mat, face_norm_data_mat, face_uv_data_mat,
+ mtl_data)
+
+ return out_tuple
+
+
+def load_scan(mesh_file, with_normal=False, with_texture=False):
+ vertex_data = []
+ norm_data = []
+ uv_data = []
+
+ face_data = []
+ face_norm_data = []
+ face_uv_data = []
+
+ if isinstance(mesh_file, str):
+ f = open(mesh_file, "r")
+ else:
+ f = mesh_file
+ for line in f:
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ if line.startswith('#'):
+ continue
+ values = line.split()
+ if not values:
+ continue
+
+ if values[0] == 'v':
+ v = list(map(float, values[1:4]))
+ vertex_data.append(v)
+ elif values[0] == 'vn':
+ vn = list(map(float, values[1:4]))
+ norm_data.append(vn)
+ elif values[0] == 'vt':
+ vt = list(map(float, values[1:3]))
+ uv_data.append(vt)
+
+ elif values[0] == 'f':
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[0]),
+ [values[3], values[4], values[1]]))
+ face_data.append(f)
+ # tri mesh
+ else:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+
+ # deal with texture
+ if len(values[1].split('/')) >= 2:
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[1]), values[1:4]))
+ face_uv_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[1]),
+ [values[3], values[4], values[1]]))
+ face_uv_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[1]) != 0:
+ f = list(map(lambda x: int(x.split('/')[1]), values[1:4]))
+ face_uv_data.append(f)
+ # deal with normal
+ if len(values[1].split('/')) == 3:
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[2]), values[1:4]))
+ face_norm_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[2]),
+ [values[3], values[4], values[1]]))
+ face_norm_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[2]) != 0:
+ f = list(map(lambda x: int(x.split('/')[2]), values[1:4]))
+ face_norm_data.append(f)
+
+ vertices = np.array(vertex_data)
+ faces = np.array(face_data) - 1
+
+ if with_texture and with_normal:
+ uvs = np.array(uv_data)
+ face_uvs = np.array(face_uv_data) - 1
+ norms = np.array(norm_data)
+ if norms.shape[0] == 0:
+ norms = compute_normal(vertices, faces)
+ face_normals = faces
+ else:
+ norms = normalize_v3(norms)
+ face_normals = np.array(face_norm_data) - 1
+ return vertices, faces, norms, face_normals, uvs, face_uvs
+
+ if with_texture:
+ uvs = np.array(uv_data)
+ face_uvs = np.array(face_uv_data) - 1
+ return vertices, faces, uvs, face_uvs
+
+ if with_normal:
+ norms = np.array(norm_data)
+ norms = normalize_v3(norms)
+ face_normals = np.array(face_norm_data) - 1
+ return vertices, faces, norms, face_normals
+
+ return vertices, faces
+
+
+def normalize_v3(arr):
+ ''' Normalize a numpy array of 3 component vectors shape=(n,3) '''
+ lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2)
+ eps = 0.00000001
+ lens[lens < eps] = eps
+ arr[:, 0] /= lens
+ arr[:, 1] /= lens
+ arr[:, 2] /= lens
+ return arr
+
+
+def compute_normal(vertices, faces):
+ # Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal
+ norm = np.zeros(vertices.shape, dtype=vertices.dtype)
+ # Create an indexed view into the vertex array using the array of three indices for triangles
+ tris = vertices[faces]
+ # Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle
+ n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
+    # n is now an array of per-triangle normals. The length of each normal depends on the triangle's
+    # area, so we normalize them here so that the next step weights each normal equally.
+ normalize_v3(n)
+ # now we have a normalized array of normals, one per triangle, i.e., per triangle normals.
+ # But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle,
+ # the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards.
+ # The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array
+ norm[faces[:, 0]] += n
+ norm[faces[:, 1]] += n
+ norm[faces[:, 2]] += n
+ normalize_v3(norm)
+
+ return norm
+
+
+def compute_normal_batch(vertices, faces):
+
+ bs, nv = vertices.shape[:2]
+ bs, nf = faces.shape[:2]
+
+ vert_norm = torch.zeros(bs * nv, 3).type_as(vertices)
+ tris = face_vertices(vertices, faces)
+ face_norm = F.normalize(torch.cross(tris[:, :, 1] - tris[:, :, 0],
+ tris[:, :, 2] - tris[:, :, 0]),
+ dim=-1)
+
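+    # offset face indices per batch item so every face indexes into one flattened (bs * nv, 3) buffer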
+ faces = (faces +
+ (torch.arange(bs).type_as(faces) * nv)[:, None, None]).view(
+ -1, 3)
+
+ vert_norm[faces[:, 0]] += face_norm.view(-1, 3)
+ vert_norm[faces[:, 1]] += face_norm.view(-1, 3)
+ vert_norm[faces[:, 2]] += face_norm.view(-1, 3)
+
+ vert_norm = F.normalize(vert_norm, dim=-1).view(bs, nv, 3)
+
+ return vert_norm
+
+
+# compute tangent and bitangent
+def compute_tangent(vertices, faces, normals, uvs, faceuvs):
+    # NOTE: this could be numerically unstable for normals close to the up axis [0, 1, 0]
+    # but other current solutions are pretty freaky somehow
+ c1 = np.cross(normals, np.array([0, 1, 0.0]))
+ tan = c1
+ normalize_v3(tan)
+ btan = np.cross(normals, tan)
+
+ # NOTE: traditional version is below
+
+ # pts_tris = vertices[faces]
+ # uv_tris = uvs[faceuvs]
+
+ # W = np.stack([pts_tris[::, 1] - pts_tris[::, 0], pts_tris[::, 2] - pts_tris[::, 0]],2)
+ # UV = np.stack([uv_tris[::, 1] - uv_tris[::, 0], uv_tris[::, 2] - uv_tris[::, 0]], 1)
+
+ # for i in range(W.shape[0]):
+ # W[i,::] = W[i,::].dot(np.linalg.inv(UV[i,::]))
+
+ # tan = np.zeros(vertices.shape, dtype=vertices.dtype)
+ # tan[faces[:,0]] += W[:,:,0]
+ # tan[faces[:,1]] += W[:,:,0]
+ # tan[faces[:,2]] += W[:,:,0]
+
+ # btan = np.zeros(vertices.shape, dtype=vertices.dtype)
+ # btan[faces[:,0]] += W[:,:,1]
+ # btan[faces[:,1]] += W[:,:,1]
+ # btan[faces[:,2]] += W[:,:,1]
+
+ # normalize_v3(tan)
+
+ # ndott = np.sum(normals*tan, 1, keepdims=True)
+ # tan = tan - ndott * normals
+
+ # normalize_v3(btan)
+ # normalize_v3(tan)
+
+ # tan[np.sum(np.cross(normals, tan) * btan, 1) < 0,:] *= -1.0
+
+ return tan, btan
diff --git a/lib/renderer/opengl_util.py b/lib/renderer/opengl_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..e68bdc2b3c069f6035bcb9c89da5d92ef1a73c54
--- /dev/null
+++ b/lib/renderer/opengl_util.py
@@ -0,0 +1,369 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os
+
+from lib.renderer.mesh import load_scan, compute_tangent
+from lib.renderer.camera import Camera
+import cv2
+import math
+import random
+import numpy as np
+import pyexr
+from tqdm import tqdm
+
+
+def render_result(rndr, shader_id, path, mask=False):
+
+ cam_render = rndr.get_color(shader_id)
+ cam_render = cv2.cvtColor(cam_render, cv2.COLOR_RGBA2BGRA)
+
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ if shader_id != 2:
+ cv2.imwrite(path, np.uint8(255.0 * cam_render))
+ else:
+ cam_render[:, :, -1] -= 0.5
+ cam_render[:, :, -1] *= 2.0
+ if not mask:
+ cv2.imwrite(path, np.uint8(255.0 / 2.0 * (cam_render + 1.0)))
+ else:
+ cv2.imwrite(path, np.uint8(-1.0 * cam_render[:, :, [3]]))
+
+
+def make_rotate(rx, ry, rz):
+ sinX = np.sin(rx)
+ sinY = np.sin(ry)
+ sinZ = np.sin(rz)
+
+ cosX = np.cos(rx)
+ cosY = np.cos(ry)
+ cosZ = np.cos(rz)
+
+ Rx = np.zeros((3, 3))
+ Rx[0, 0] = 1.0
+ Rx[1, 1] = cosX
+ Rx[1, 2] = -sinX
+ Rx[2, 1] = sinX
+ Rx[2, 2] = cosX
+
+ Ry = np.zeros((3, 3))
+ Ry[0, 0] = cosY
+ Ry[0, 2] = sinY
+ Ry[1, 1] = 1.0
+ Ry[2, 0] = -sinY
+ Ry[2, 2] = cosY
+
+ Rz = np.zeros((3, 3))
+ Rz[0, 0] = cosZ
+ Rz[0, 1] = -sinZ
+ Rz[1, 0] = sinZ
+ Rz[1, 1] = cosZ
+ Rz[2, 2] = 1.0
+
+ R = np.matmul(np.matmul(Rz, Ry), Rx)
+ return R
+
+
+def rotateSH(SH, R):
+ SHn = SH
+
+ # 1st order
+ SHn[1] = R[1, 1] * SH[1] - R[1, 2] * SH[2] + R[1, 0] * SH[3]
+ SHn[2] = -R[2, 1] * SH[1] + R[2, 2] * SH[2] - R[2, 0] * SH[3]
+ SHn[3] = R[0, 1] * SH[1] - R[0, 2] * SH[2] + R[0, 0] * SH[3]
+
+ # 2nd order
+ SHn[4:, 0] = rotateBand2(SH[4:, 0], R)
+ SHn[4:, 1] = rotateBand2(SH[4:, 1], R)
+ SHn[4:, 2] = rotateBand2(SH[4:, 2], R)
+
+ return SHn
+
+
+def rotateBand2(x, R):
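+    # closed-form rotation of the five band-2 (l = 2) SH coefficients x by the rotation matrix R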
+ s_c3 = 0.94617469575
+ s_c4 = -0.31539156525
+ s_c5 = 0.54627421529
+
+ s_c_scale = 1.0 / 0.91529123286551084
+ s_c_scale_inv = 0.91529123286551084
+
+ s_rc2 = 1.5853309190550713 * s_c_scale
+ s_c4_div_c3 = s_c4 / s_c3
+ s_c4_div_c3_x2 = (s_c4 / s_c3) * 2.0
+
+ s_scale_dst2 = s_c3 * s_c_scale_inv
+ s_scale_dst4 = s_c5 * s_c_scale_inv
+
+ sh0 = x[3] + x[4] + x[4] - x[1]
+ sh1 = x[0] + s_rc2 * x[2] + x[3] + x[4]
+ sh2 = x[0]
+ sh3 = -x[3]
+ sh4 = -x[1]
+
+ r2x = R[0][0] + R[0][1]
+ r2y = R[1][0] + R[1][1]
+ r2z = R[2][0] + R[2][1]
+
+ r3x = R[0][0] + R[0][2]
+ r3y = R[1][0] + R[1][2]
+ r3z = R[2][0] + R[2][2]
+
+ r4x = R[0][1] + R[0][2]
+ r4y = R[1][1] + R[1][2]
+ r4z = R[2][1] + R[2][2]
+
+ sh0_x = sh0 * R[0][0]
+ sh0_y = sh0 * R[1][0]
+ d0 = sh0_x * R[1][0]
+ d1 = sh0_y * R[2][0]
+ d2 = sh0 * (R[2][0] * R[2][0] + s_c4_div_c3)
+ d3 = sh0_x * R[2][0]
+ d4 = sh0_x * R[0][0] - sh0_y * R[1][0]
+
+ sh1_x = sh1 * R[0][2]
+ sh1_y = sh1 * R[1][2]
+ d0 += sh1_x * R[1][2]
+ d1 += sh1_y * R[2][2]
+ d2 += sh1 * (R[2][2] * R[2][2] + s_c4_div_c3)
+ d3 += sh1_x * R[2][2]
+ d4 += sh1_x * R[0][2] - sh1_y * R[1][2]
+
+ sh2_x = sh2 * r2x
+ sh2_y = sh2 * r2y
+ d0 += sh2_x * r2y
+ d1 += sh2_y * r2z
+ d2 += sh2 * (r2z * r2z + s_c4_div_c3_x2)
+ d3 += sh2_x * r2z
+ d4 += sh2_x * r2x - sh2_y * r2y
+
+ sh3_x = sh3 * r3x
+ sh3_y = sh3 * r3y
+ d0 += sh3_x * r3y
+ d1 += sh3_y * r3z
+ d2 += sh3 * (r3z * r3z + s_c4_div_c3_x2)
+ d3 += sh3_x * r3z
+ d4 += sh3_x * r3x - sh3_y * r3y
+
+ sh4_x = sh4 * r4x
+ sh4_y = sh4 * r4y
+ d0 += sh4_x * r4y
+ d1 += sh4_y * r4z
+ d2 += sh4 * (r4z * r4z + s_c4_div_c3_x2)
+ d3 += sh4_x * r4z
+ d4 += sh4_x * r4x - sh4_y * r4y
+
+ dst = x
+ dst[0] = d0
+ dst[1] = -d1
+ dst[2] = d2 * s_scale_dst2
+ dst[3] = -d3
+ dst[4] = d4 * s_scale_dst4
+
+ return dst
+
+
+def load_calib(param, render_size=512):
+ # pixel unit / world unit
+ ortho_ratio = param['ortho_ratio']
+ # world unit / model unit
+ scale = param['scale']
+ # camera center world coordinate
+ center = param['center']
+ # model rotation
+ R = param['R']
+
+ translate = -np.matmul(R, center).reshape(3, 1)
+ extrinsic = np.concatenate([R, translate], axis=1)
+ extrinsic = np.concatenate(
+ [extrinsic, np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
+ # Match camera space to image pixel space
+ scale_intrinsic = np.identity(4)
+ scale_intrinsic[0, 0] = scale / ortho_ratio
+ scale_intrinsic[1, 1] = -scale / ortho_ratio
+ scale_intrinsic[2, 2] = scale / ortho_ratio
+ # Match image pixel space to image uv space
+ uv_intrinsic = np.identity(4)
+ uv_intrinsic[0, 0] = 1.0 / float(render_size // 2)
+ uv_intrinsic[1, 1] = 1.0 / float(render_size // 2)
+ uv_intrinsic[2, 2] = 1.0 / float(render_size // 2)
+
+ intrinsic = np.matmul(uv_intrinsic, scale_intrinsic)
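+    # the calibration matrix stacks the 4x4 extrinsic on top of the 4x4 intrinsic (8x4 in total)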
+ calib = np.concatenate([extrinsic, intrinsic], axis=0)
+ return calib
+
+
+def render_prt_ortho(out_path,
+ folder_name,
+ subject_name,
+ shs,
+ rndr,
+ rndr_uv,
+ im_size,
+ angl_step=4,
+ n_light=1,
+ pitch=[0]):
+ cam = Camera(width=im_size, height=im_size)
+ cam.ortho_ratio = 0.4 * (512 / im_size)
+ cam.near = -100
+ cam.far = 100
+ cam.sanity_check()
+
+ # set path for obj, prt
+ mesh_file = os.path.join(folder_name, subject_name + '_100k.obj')
+ if not os.path.exists(mesh_file):
+ print('ERROR: obj file does not exist!!', mesh_file)
+ return
+ prt_file = os.path.join(folder_name, 'bounce', 'bounce0.txt')
+ if not os.path.exists(prt_file):
+ print('ERROR: prt file does not exist!!!', prt_file)
+ return
+ face_prt_file = os.path.join(folder_name, 'bounce', 'face.npy')
+ if not os.path.exists(face_prt_file):
+        print('ERROR: face prt file does not exist!!!', face_prt_file)
+ return
+ text_file = os.path.join(folder_name, 'tex', subject_name + '_dif_2k.jpg')
+ if not os.path.exists(text_file):
+ print('ERROR: dif file does not exist!!', text_file)
+ return
+
+ texture_image = cv2.imread(text_file)
+ texture_image = cv2.cvtColor(texture_image, cv2.COLOR_BGR2RGB)
+
+ vertices, faces, normals, faces_normals, textures, face_textures = load_scan(
+ mesh_file, with_normal=True, with_texture=True)
+ vmin = vertices.min(0)
+ vmax = vertices.max(0)
+ up_axis = 1 if (vmax - vmin).argmax() == 1 else 2
+
+ vmed = np.median(vertices, 0)
+ vmed[up_axis] = 0.5 * (vmax[up_axis] + vmin[up_axis])
+ y_scale = 180 / (vmax[up_axis] - vmin[up_axis])
+
+ rndr.set_norm_mat(y_scale, vmed)
+ rndr_uv.set_norm_mat(y_scale, vmed)
+
+ tan, bitan = compute_tangent(vertices, faces, normals, textures,
+ face_textures)
+ prt = np.loadtxt(prt_file)
+ face_prt = np.load(face_prt_file)
+ rndr.set_mesh(vertices, faces, normals, faces_normals, textures,
+ face_textures, prt, face_prt, tan, bitan)
+ rndr.set_albedo(texture_image)
+
+ rndr_uv.set_mesh(vertices, faces, normals, faces_normals, textures,
+ face_textures, prt, face_prt, tan, bitan)
+ rndr_uv.set_albedo(texture_image)
+
+ os.makedirs(os.path.join(out_path, 'GEO', 'OBJ', subject_name),
+ exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'PARAM', subject_name), exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'RENDER', subject_name), exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'MASK', subject_name), exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'UV_RENDER', subject_name),
+ exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'UV_MASK', subject_name), exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'UV_POS', subject_name), exist_ok=True)
+ os.makedirs(os.path.join(out_path, 'UV_NORMAL', subject_name),
+ exist_ok=True)
+
+ if not os.path.exists(os.path.join(out_path, 'val.txt')):
+ f = open(os.path.join(out_path, 'val.txt'), 'w')
+ f.close()
+
+ # copy obj file
+ cmd = 'cp %s %s' % (mesh_file,
+ os.path.join(out_path, 'GEO', 'OBJ', subject_name))
+ print(cmd)
+ os.system(cmd)
+
+ for p in pitch:
+ for y in tqdm(range(0, 360, angl_step)):
+ R = np.matmul(make_rotate(math.radians(p), 0, 0),
+ make_rotate(0, math.radians(y), 0))
+ if up_axis == 2:
+ R = np.matmul(R, make_rotate(math.radians(90), 0, 0))
+
+ rndr.rot_matrix = R
+ rndr_uv.rot_matrix = R
+ rndr.set_camera(cam)
+ rndr_uv.set_camera(cam)
+
+ for j in range(n_light):
+ sh_id = random.randint(0, shs.shape[0] - 1)
+ sh = shs[sh_id]
+ sh_angle = 0.2 * np.pi * (random.random() - 0.5)
+ sh = rotateSH(sh, make_rotate(0, sh_angle, 0).T)
+
+ dic = {
+ 'sh': sh,
+ 'ortho_ratio': cam.ortho_ratio,
+ 'scale': y_scale,
+ 'center': vmed,
+ 'R': R
+ }
+
+ rndr.set_sh(sh)
+ rndr.analytic = False
+ rndr.use_inverse_depth = False
+ rndr.display()
+
+ out_all_f = rndr.get_color(0)
+ out_mask = out_all_f[:, :, 3]
+ out_all_f = cv2.cvtColor(out_all_f, cv2.COLOR_RGBA2BGR)
+
+ np.save(
+ os.path.join(out_path, 'PARAM', subject_name,
+ '%d_%d_%02d.npy' % (y, p, j)), dic)
+ cv2.imwrite(
+ os.path.join(out_path, 'RENDER', subject_name,
+ '%d_%d_%02d.jpg' % (y, p, j)),
+ 255.0 * out_all_f)
+ cv2.imwrite(
+ os.path.join(out_path, 'MASK', subject_name,
+ '%d_%d_%02d.png' % (y, p, j)),
+ 255.0 * out_mask)
+
+ rndr_uv.set_sh(sh)
+ rndr_uv.analytic = False
+ rndr_uv.use_inverse_depth = False
+ rndr_uv.display()
+
+ uv_color = rndr_uv.get_color(0)
+ uv_color = cv2.cvtColor(uv_color, cv2.COLOR_RGBA2BGR)
+ cv2.imwrite(
+ os.path.join(out_path, 'UV_RENDER', subject_name,
+ '%d_%d_%02d.jpg' % (y, p, j)),
+ 255.0 * uv_color)
+
+ if y == 0 and j == 0 and p == pitch[0]:
+ uv_pos = rndr_uv.get_color(1)
+ uv_mask = uv_pos[:, :, 3]
+ cv2.imwrite(
+ os.path.join(out_path, 'UV_MASK', subject_name,
+ '00.png'), 255.0 * uv_mask)
+
+ data = {
+ 'default': uv_pos[:, :, :3]
+ } # default is a reserved name
+ pyexr.write(
+ os.path.join(out_path, 'UV_POS', subject_name,
+ '00.exr'), data)
+
+ uv_nml = rndr_uv.get_color(2)
+ uv_nml = cv2.cvtColor(uv_nml, cv2.COLOR_RGBA2BGR)
+ cv2.imwrite(
+ os.path.join(out_path, 'UV_NORMAL', subject_name,
+ '00.png'), 255.0 * uv_nml)
\ No newline at end of file
diff --git a/lib/renderer/prt_util.py b/lib/renderer/prt_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..de8290ca7a326774b319f09bbb444dc82e9161d7
--- /dev/null
+++ b/lib/renderer/prt_util.py
@@ -0,0 +1,199 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os
+import trimesh
+import numpy as np
+import math
+from scipy.special import sph_harm
+import argparse
+from tqdm import tqdm
+
+
+def factratio(N, D):
+ if N >= D:
+ prod = 1.0
+ for i in range(D + 1, N + 1):
+ prod *= i
+ return prod
+ else:
+ prod = 1.0
+ for i in range(N + 1, D + 1):
+ prod *= i
+ return 1.0 / prod
+
+
+def KVal(M, L):
+ return math.sqrt(((2 * L + 1) / (4 * math.pi)) * (factratio(L - M, L + M)))
+
+
+def AssociatedLegendre(M, L, x):
+ if M < 0 or M > L or np.max(np.abs(x)) > 1.0:
+ return np.zeros_like(x)
+
+ pmm = np.ones_like(x)
+ if M > 0:
+ somx2 = np.sqrt((1.0 + x) * (1.0 - x))
+ fact = 1.0
+ for i in range(1, M + 1):
+ pmm = -pmm * fact * somx2
+ fact = fact + 2
+
+ if L == M:
+ return pmm
+ else:
+ pmmp1 = x * (2 * M + 1) * pmm
+ if L == M + 1:
+ return pmmp1
+ else:
+ pll = np.zeros_like(x)
+ for i in range(M + 2, L + 1):
+ pll = (x * (2 * i - 1) * pmmp1 - (i + M - 1) * pmm) / (i - M)
+ pmm = pmmp1
+ pmmp1 = pll
+ return pll
+
+
+def SphericalHarmonic(M, L, theta, phi):
+ if M > 0:
+ return math.sqrt(2.0) * KVal(M, L) * np.cos(
+ M * phi) * AssociatedLegendre(M, L, np.cos(theta))
+ elif M < 0:
+ return math.sqrt(2.0) * KVal(-M, L) * np.sin(
+ -M * phi) * AssociatedLegendre(-M, L, np.cos(theta))
+ else:
+ return KVal(0, L) * AssociatedLegendre(0, L, np.cos(theta))
+
+
+def save_obj(mesh_path, verts):
+ file = open(mesh_path, 'w')
+ for v in verts:
+ file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
+ file.close()
+
+
+def sampleSphericalDirections(n):
+ xv = np.random.rand(n, n)
+ yv = np.random.rand(n, n)
+ theta = np.arccos(1 - 2 * xv)
+ phi = 2.0 * math.pi * yv
+
+ phi = phi.reshape(-1)
+ theta = theta.reshape(-1)
+
+ vx = -np.sin(theta) * np.cos(phi)
+ vy = -np.sin(theta) * np.sin(phi)
+ vz = np.cos(theta)
+ return np.stack([vx, vy, vz], 1), phi, theta
+
+
+def getSHCoeffs(order, phi, theta):
+ shs = []
+ for n in range(0, order + 1):
+ for m in range(-n, n + 1):
+ s = SphericalHarmonic(m, n, theta, phi)
+ shs.append(s)
+
+ return np.stack(shs, 1)
+
+
+def computePRT(mesh_path, scale, n, order):
+
+ prt_dir = os.path.join(os.path.dirname(mesh_path), "prt")
+ bounce_path = os.path.join(prt_dir, "bounce.npy")
+ face_path = os.path.join(prt_dir, "face.npy")
+
+ os.makedirs(prt_dir, exist_ok=True)
+
+ PRT = None
+ F = None
+
+ if os.path.exists(bounce_path) and os.path.exists(face_path):
+
+ PRT = np.load(bounce_path)
+ F = np.load(face_path)
+
+ else:
+
+ mesh = trimesh.load(mesh_path,
+ skip_materials=True,
+ process=False,
+ maintain_order=True)
+ mesh.vertices *= scale
+
+ vectors_orig, phi, theta = sampleSphericalDirections(n)
+ SH_orig = getSHCoeffs(order, phi, theta)
+
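+        # Monte-Carlo weight: n * n directions are sampled, each covering 4*pi / (n*n) steradians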
+ w = 4.0 * math.pi / (n * n)
+
+ origins = mesh.vertices
+ normals = mesh.vertex_normals
+ n_v = origins.shape[0]
+
+ origins = np.repeat(origins[:, None], n, axis=1).reshape(-1, 3)
+ normals = np.repeat(normals[:, None], n, axis=1).reshape(-1, 3)
+ PRT_all = None
+ for i in range(n):
+ SH = np.repeat(SH_orig[None, (i * n):((i + 1) * n)], n_v,
+ axis=0).reshape(-1, SH_orig.shape[1])
+ vectors = np.repeat(vectors_orig[None, (i * n):((i + 1) * n)],
+ n_v,
+ axis=0).reshape(-1, 3)
+
+ dots = (vectors * normals).sum(1)
+ front = (dots > 0.0)
+
+ delta = 1e-3 * min(mesh.bounding_box.extents)
+
+ hits = mesh.ray.intersects_any(origins + delta * normals, vectors)
+ nohits = np.logical_and(front, np.logical_not(hits))
+
+            PRT = (nohits.astype(float) * dots)[:, None] * SH
+
+ if PRT_all is not None:
+ PRT_all += (PRT.reshape(-1, n, SH.shape[1]).sum(1))
+ else:
+ PRT_all = (PRT.reshape(-1, n, SH.shape[1]).sum(1))
+
+ PRT = w * PRT_all
+ F = mesh.faces
+
+ np.save(bounce_path, PRT)
+ np.save(face_path, F)
+
+    # NOTE: trimesh sometimes breaks the original vertex order, but the topology will not change.
+    # When loading PRT in another program, use the triangle list returned by trimesh.
+
+ return PRT, F
+
+
+def testPRT(obj_path, n=40):
+
+ os.makedirs(os.path.join(os.path.dirname(obj_path),
+ f'../bounce/{os.path.basename(obj_path)[:-4]}'),
+ exist_ok=True)
+
+    PRT, F = computePRT(obj_path, 1.0, n, 2)  # assumes a unit scale for the test mesh
+ np.savetxt(
+ os.path.join(os.path.dirname(obj_path),
+ f'../bounce/{os.path.basename(obj_path)[:-4]}',
+ 'bounce.npy'), PRT)
+ np.save(
+ os.path.join(os.path.dirname(obj_path),
+ f'../bounce/{os.path.basename(obj_path)[:-4]}',
+ 'face.npy'), F)
\ No newline at end of file
diff --git a/lib/common/__init__.py b/lib/common/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/common/seg3d_utils.py b/lib/common/seg3d_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a86a6d37daa3586a30066a856a6b9a8d361d1d5d
--- /dev/null
+++ b/lib/common/seg3d_utils.py
@@ -0,0 +1,390 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+
+
+def plot_mask2D(mask,
+ title="",
+ point_coords=None,
+ figsize=10,
+ point_marker_size=5):
+ '''
+ Simple plotting tool to show intermediate mask predictions and points
+ where PointRend is applied.
+ Args:
+ mask (Tensor): mask prediction of shape HxW
+ title (str): title for the plot
+ point_coords ((Tensor, Tensor)): x and y point coordinates
+ figsize (int): size of the figure to plot
+ point_marker_size (int): marker size for points
+ '''
+
+ H, W = mask.shape
+ plt.figure(figsize=(figsize, figsize))
+ if title:
+ title += ", "
+ plt.title("{}resolution {}x{}".format(title, H, W), fontsize=30)
+ plt.ylabel(H, fontsize=30)
+ plt.xlabel(W, fontsize=30)
+ plt.xticks([], [])
+ plt.yticks([], [])
+ plt.imshow(mask.detach(),
+ interpolation="nearest",
+ cmap=plt.get_cmap('gray'))
+ if point_coords is not None:
+ plt.scatter(x=point_coords[0],
+ y=point_coords[1],
+ color="red",
+ s=point_marker_size,
+ clip_on=True)
+ plt.xlim(-0.5, W - 0.5)
+ plt.ylim(H - 0.5, -0.5)
+ plt.show()
+
+
+def plot_mask3D(mask=None,
+ title="",
+ point_coords=None,
+ figsize=1500,
+ point_marker_size=8,
+ interactive=True):
+ '''
+ Simple plotting tool to show intermediate mask predictions and points
+ where PointRend is applied.
+ Args:
+ mask (Tensor): mask prediction of shape DxHxW
+ title (str): title for the plot
+ point_coords ((Tensor, Tensor, Tensor)): x and y and z point coordinates
+ figsize (int): size of the figure to plot
+ point_marker_size (int): marker size for points
+ '''
+ import trimesh
+ import vtkplotter
+ from skimage import measure
+
+ vp = vtkplotter.Plotter(title=title, size=(figsize, figsize))
+ vis_list = []
+
+ if mask is not None:
+ mask = mask.detach().to("cpu").numpy()
+ mask = mask.transpose(2, 1, 0)
+
+ # marching cube to find surface
+ verts, faces, normals, values = measure.marching_cubes_lewiner(
+ mask, 0.5, gradient_direction='ascent')
+
+ # create a mesh
+ mesh = trimesh.Trimesh(verts, faces)
+ mesh.visual.face_colors = [200, 200, 250, 100]
+ vis_list.append(mesh)
+
+ if point_coords is not None:
+ point_coords = torch.stack(point_coords, 1).to("cpu").numpy()
+
+ # import numpy as np
+ # select_x = np.logical_and(point_coords[:, 0] >= 16, point_coords[:, 0] <= 112)
+ # select_y = np.logical_and(point_coords[:, 1] >= 48, point_coords[:, 1] <= 272)
+ # select_z = np.logical_and(point_coords[:, 2] >= 16, point_coords[:, 2] <= 112)
+ # select = np.logical_and(np.logical_and(select_x, select_y), select_z)
+ # point_coords = point_coords[select, :]
+
+ pc = vtkplotter.Points(point_coords, r=point_marker_size, c='red')
+ vis_list.append(pc)
+
+ vp.show(*vis_list,
+ bg="white",
+ axes=1,
+ interactive=interactive,
+ azimuth=30,
+ elevation=30)
+
+
+def create_grid3D(min, max, steps):
+ if type(min) is int:
+ min = (min, min, min) # (x, y, z)
+ if type(max) is int:
+        max = (max, max, max)  # (x, y, z)
+ if type(steps) is int:
+ steps = (steps, steps, steps) # (x, y, z)
+ arrangeX = torch.linspace(min[0], max[0], steps[0]).long()
+ arrangeY = torch.linspace(min[1], max[1], steps[1]).long()
+ arrangeZ = torch.linspace(min[2], max[2], steps[2]).long()
+    gridD, gridH, gridW = torch.meshgrid([arrangeZ, arrangeY, arrangeX])
+    coords = torch.stack([gridW, gridH,
+                          gridD])  # [3, steps[0], steps[1], steps[2]]
+ coords = coords.view(3, -1).t() # [N, 3]
+ return coords
+
+
+def create_grid2D(min, max, steps):
+ if type(min) is int:
+ min = (min, min) # (x, y)
+ if type(max) is int:
+ max = (max, max) # (x, y)
+ if type(steps) is int:
+ steps = (steps, steps) # (x, y)
+ arrangeX = torch.linspace(min[0], max[0], steps[0]).long()
+ arrangeY = torch.linspace(min[1], max[1], steps[1]).long()
+    gridH, gridW = torch.meshgrid([arrangeY, arrangeX])
+    coords = torch.stack([gridW, gridH])  # [2, steps[0], steps[1]]
+ coords = coords.view(2, -1).t() # [N, 2]
+ return coords
+
+
+class SmoothConv2D(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size=3):
+ super().__init__()
+ assert kernel_size % 2 == 1, "kernel_size for smooth_conv must be odd: {3, 5, ...}"
+ self.padding = (kernel_size - 1) // 2
+
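+        # constant box-filter weights (mean over the k x k window), stored in
+        # the conv layout (out_channels, in_channels, kH, kW)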
+        weight = torch.ones(
+            (out_channels, in_channels, kernel_size, kernel_size),
+            dtype=torch.float32) / (kernel_size**2)
+ self.register_buffer('weight', weight)
+
+ def forward(self, input):
+ return F.conv2d(input, self.weight, padding=self.padding)
+
+
+class SmoothConv3D(nn.Module):
+ def __init__(self, in_channels, out_channels, kernel_size=3):
+ super().__init__()
+ assert kernel_size % 2 == 1, "kernel_size for smooth_conv must be odd: {3, 5, ...}"
+ self.padding = (kernel_size - 1) // 2
+
+        weight = torch.ones(
+            (out_channels, in_channels, kernel_size, kernel_size, kernel_size),
+            dtype=torch.float32) / (kernel_size**3)
+ self.register_buffer('weight', weight)
+
+ def forward(self, input):
+ return F.conv3d(input, self.weight, padding=self.padding)
+
+
+def build_smooth_conv3D(in_channels=1,
+ out_channels=1,
+ kernel_size=3,
+ padding=1):
+ smooth_conv = torch.nn.Conv3d(in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ padding=padding)
+    smooth_conv.weight.data = torch.ones(
+        (out_channels, in_channels, kernel_size, kernel_size, kernel_size),
+        dtype=torch.float32) / (kernel_size**3)
+ smooth_conv.bias.data = torch.zeros(out_channels)
+ return smooth_conv
+
+
+def build_smooth_conv2D(in_channels=1,
+ out_channels=1,
+ kernel_size=3,
+ padding=1):
+ smooth_conv = torch.nn.Conv2d(in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ padding=padding)
+    smooth_conv.weight.data = torch.ones(
+        (out_channels, in_channels, kernel_size, kernel_size),
+        dtype=torch.float32) / (kernel_size**2)
+ smooth_conv.bias.data = torch.zeros(out_channels)
+ return smooth_conv
+
+
+def get_uncertain_point_coords_on_grid3D(uncertainty_map, num_points,
+ **kwargs):
+ """
+ Find `num_points` most uncertain points from `uncertainty_map` grid.
+ Args:
+ uncertainty_map (Tensor): A tensor of shape (N, 1, H, W, D) that contains uncertainty
+ values for a set of points on a regular H x W x D grid.
+ num_points (int): The number of points P to select.
+ Returns:
+ point_indices (Tensor): A tensor of shape (N, P) that contains indices from
+ [0, H x W x D) of the most uncertain points.
+ point_coords (Tensor): A tensor of shape (N, P, 3) that contains [0, 1] x [0, 1] normalized
+ coordinates of the most uncertain points from the H x W x D grid.
+ """
+ R, _, D, H, W = uncertainty_map.shape
+ # h_step = 1.0 / float(H)
+ # w_step = 1.0 / float(W)
+ # d_step = 1.0 / float(D)
+
+ num_points = min(D * H * W, num_points)
+ point_scores, point_indices = torch.topk(uncertainty_map.view(
+ R, D * H * W),
+ k=num_points,
+ dim=1)
+ point_coords = torch.zeros(R,
+ num_points,
+ 3,
+ dtype=torch.float,
+ device=uncertainty_map.device)
+ # point_coords[:, :, 0] = h_step / 2.0 + (point_indices // (W * D)).to(torch.float) * h_step
+ # point_coords[:, :, 1] = w_step / 2.0 + (point_indices % (W * D) // D).to(torch.float) * w_step
+ # point_coords[:, :, 2] = d_step / 2.0 + (point_indices % D).to(torch.float) * d_step
+ point_coords[:, :, 0] = (point_indices % W).to(torch.float) # x
+ point_coords[:, :, 1] = (point_indices % (H * W) // W).to(torch.float) # y
+ point_coords[:, :, 2] = (point_indices // (H * W)).to(torch.float) # z
+ print(f"resolution {D} x {H} x {W}", point_scores.min(),
+ point_scores.max())
+ return point_indices, point_coords
+
+
+def get_uncertain_point_coords_on_grid3D_faster(uncertainty_map, num_points,
+ clip_min):
+ """
+ Find `num_points` most uncertain points from `uncertainty_map` grid.
+ Args:
+ uncertainty_map (Tensor): A tensor of shape (N, 1, D, H, W) that contains uncertainty
+ values for a set of points on a regular D x H x W grid.
+ num_points (int): The number of points P to select.
+ clip_min (float): only locations with uncertainty >= clip_min are considered.
+ Returns:
+ point_indices (Tensor): A tensor of shape (N, P) that contains indices from
+ [0, D x H x W) of the most uncertain points.
+ point_coords (Tensor): A tensor of shape (N, P, 3) that contains the integer (x, y, z)
+ grid coordinates of the most uncertain points on the D x H x W grid.
+ """
+ R, _, D, H, W = uncertainty_map.shape
+ # h_step = 1.0 / float(H)
+ # w_step = 1.0 / float(W)
+ # d_step = 1.0 / float(D)
+
+ assert R == 1, "batchsize > 1 is not implemented!"
+ uncertainty_map = uncertainty_map.view(D * H * W)
+ indices = (uncertainty_map >= clip_min).nonzero().squeeze(1)
+ num_points = min(num_points, indices.size(0))
+ point_scores, point_indices = torch.topk(uncertainty_map[indices],
+ k=num_points,
+ dim=0)
+ point_indices = indices[point_indices].unsqueeze(0)
+
+ point_coords = torch.zeros(R,
+ num_points,
+ 3,
+ dtype=torch.float,
+ device=uncertainty_map.device)
+ # point_coords[:, :, 0] = h_step / 2.0 + (point_indices // (W * D)).to(torch.float) * h_step
+ # point_coords[:, :, 1] = w_step / 2.0 + (point_indices % (W * D) // D).to(torch.float) * w_step
+ # point_coords[:, :, 2] = d_step / 2.0 + (point_indices % D).to(torch.float) * d_step
+ point_coords[:, :, 0] = (point_indices % W).to(torch.float) # x
+ point_coords[:, :, 1] = (point_indices % (H * W) // W).to(torch.float) # y
+ point_coords[:, :, 2] = (point_indices // (H * W)).to(torch.float) # z
+ # print (f"resolution {D} x {H} x {W}", point_scores.min(), point_scores.max())
+ return point_indices, point_coords
+
+
+def get_uncertain_point_coords_on_grid2D(uncertainty_map, num_points,
+ **kwargs):
+ """
+ Find `num_points` most uncertain points from `uncertainty_map` grid.
+ Args:
+ uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
+ values for a set of points on a regular H x W grid.
+ num_points (int): The number of points P to select.
+ Returns:
+ point_indices (Tensor): A tensor of shape (N, P) that contains indices from
+ [0, H x W) of the most uncertain points.
+ point_coords (Tensor): A tensor of shape (N, P, 2) that contains the integer (x, y)
+ grid coordinates of the most uncertain points on the H x W grid.
+ """
+ R, _, H, W = uncertainty_map.shape
+ # h_step = 1.0 / float(H)
+ # w_step = 1.0 / float(W)
+
+ num_points = min(H * W, num_points)
+ point_scores, point_indices = torch.topk(uncertainty_map.view(R, H * W),
+ k=num_points,
+ dim=1)
+ point_coords = torch.zeros(R,
+ num_points,
+ 2,
+ dtype=torch.long,
+ device=uncertainty_map.device)
+ # point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
+ # point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
+ point_coords[:, :, 0] = (point_indices % W).to(torch.long)
+ point_coords[:, :, 1] = (point_indices // W).to(torch.long)
+ # print (point_scores.min(), point_scores.max())
+ return point_indices, point_coords
+
+
+def get_uncertain_point_coords_on_grid2D_faster(uncertainty_map, num_points,
+ clip_min):
+ """
+ Find `num_points` most uncertain points from `uncertainty_map` grid.
+ Args:
+ uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
+ values for a set of points on a regular H x W grid.
+ num_points (int): The number of points P to select.
+ clip_min (float): only locations with uncertainty >= clip_min are considered.
+ Returns:
+ point_indices (Tensor): A tensor of shape (N, P) that contains indices from
+ [0, H x W) of the most uncertain points.
+ point_coords (Tensor): A tensor of shape (N, P, 2) that contains the integer (x, y)
+ grid coordinates of the most uncertain points on the H x W grid.
+ """
+ R, _, H, W = uncertainty_map.shape
+ # h_step = 1.0 / float(H)
+ # w_step = 1.0 / float(W)
+
+ assert R == 1, "batchsize > 1 is not implemented!"
+ uncertainty_map = uncertainty_map.view(H * W)
+ indices = (uncertainty_map >= clip_min).nonzero().squeeze(1)
+ num_points = min(num_points, indices.size(0))
+ point_scores, point_indices = torch.topk(uncertainty_map[indices],
+ k=num_points,
+ dim=0)
+ point_indices = indices[point_indices].unsqueeze(0)
+
+ point_coords = torch.zeros(R,
+ num_points,
+ 2,
+ dtype=torch.long,
+ device=uncertainty_map.device)
+ # point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
+ # point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
+ point_coords[:, :, 0] = (point_indices % W).to(torch.long)
+ point_coords[:, :, 1] = (point_indices // W).to(torch.long)
+ # print (point_scores.min(), point_scores.max())
+ return point_indices, point_coords
+
+
+def calculate_uncertainty(logits, classes=None, balance_value=0.5):
+ """
+ We estimate uncertainty as the L1 distance between `balance_value` and the logit prediction in
+ `logits` for the foreground class in `classes`.
+ Args:
+ logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
+ class-agnostic, where R is the total number of predicted masks in all images and C is
+ the number of foreground classes. The values are logits.
+ classes (list): A list of length R that contains either the predicted or the ground-truth class
+ for each predicted mask.
+ Returns:
+ scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
+ the most uncertain locations having the highest uncertainty score.
+ """
+ if logits.shape[1] == 1:
+ gt_class_logits = logits
+ else:
+ gt_class_logits = logits[
+ torch.arange(logits.shape[0], device=logits.device),
+ classes].unsqueeze(1)
+ return -torch.abs(gt_class_logits - balance_value)
\ No newline at end of file
diff --git a/lib/common/cloth_extraction.py b/lib/common/cloth_extraction.py
new file mode 100644
index 0000000000000000000000000000000000000000..934c2d3b572e7b5ca476ea1147c1d1bb51463c56
--- /dev/null
+++ b/lib/common/cloth_extraction.py
@@ -0,0 +1,170 @@
+import numpy as np
+import json
+import os
+import itertools
+import trimesh
+from matplotlib.path import Path
+from collections import Counter
+from sklearn.neighbors import KNeighborsClassifier
+
+
+def load_segmentation(path, shape):
+ """
+ Load the clothing segmentation annotations for a given image
+ Arguments:
+ path: path to the segmentation json file
+ shape: shape of the source image (currently unused)
+ Returns:
+ Returns a list of segmentations, each a dict with the category name, category id and polygon coordinates
+ """
+ with open(path) as json_file:
+ seg_dict = json.load(json_file)
+ segmentations = []
+ for key, val in seg_dict.items():
+ if not key.startswith('item'):
+ continue
+
+ # Each item can have multiple polygons. Combine them to one
+ # segmentation_coord = list(itertools.chain.from_iterable(val['segmentation']))
+ # segmentation_coord = np.round(np.array(segmentation_coord)).astype(int)
+
+ coordinates = []
+ for segmentation_coord in val['segmentation']:
+ # The format before is [x1,y1, x2, y2, ....]
+ x = segmentation_coord[::2]
+ y = segmentation_coord[1::2]
+ xy = np.vstack((x, y)).T
+ coordinates.append(xy)
+
+ segmentations.append(
+ {'type': val['category_name'], 'type_id': val['category_id'], 'coordinates': coordinates})
+
+ return segmentations
+
+
+def smpl_to_recon_labels(recon, smpl, k=1):
+ """
+ Get the bodypart labels for the recon object by using the labels from the corresponding smpl object
+ Arguments:
+ recon: trimesh object (fully clothed model)
+ smpl: trimesh object (smpl model)
+ k: number of nearest neighbours to use
+ Returns:
+ Returns a dictionary containing the bodypart and the corresponding indices
+ """
+ smpl_vert_segmentation = json.load(
+ open(os.path.join(os.path.dirname(__file__), 'smpl_vert_segmentation.json')))
+ n = smpl.vertices.shape[0]
+ y = np.array([None] * n)
+ for key, val in smpl_vert_segmentation.items():
+ y[val] = key
+
+ classifier = KNeighborsClassifier(n_neighbors=k)
+ classifier.fit(smpl.vertices, y)
+
+ y_pred = classifier.predict(recon.vertices)
+
+ recon_labels = {}
+ for key in smpl_vert_segmentation.keys():
+ recon_labels[key] = list(np.argwhere(
+ y_pred == key).flatten().astype(int))
+
+ return recon_labels
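+
+# Illustrative usage (a sketch; `recon` and `smpl` are trimesh.Trimesh objects):
+#   labels = smpl_to_recon_labels(recon, smpl)
+#   left_arm_idx = labels['leftArm']  # reconstruction vertex indices labelled as left arm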
+
+
+def extract_cloth(recon, segmentation, K, R, t, smpl=None):
+ """
+ Extract a portion of a mesh using 2d segmentation coordinates
+ Arguments:
+ recon: fully clothed mesh
+ segmentation: segmentation dict with the 2D polygon coordinates (NDC)
+ K: intrinsic matrix of the projection
+ R: rotation matrix of the projection
+ t: translation vector of the projection
+ smpl: optional SMPL trimesh; if given, body parts unlikely to belong to the garment are removed
+ Returns:
+ Returns a submesh using the segmentation coordinates
+ """
+ seg_coord = segmentation['coord_normalized']
+ mesh = trimesh.Trimesh(recon.vertices, recon.faces)
+ extrinsic = np.zeros((3, 4))
+ extrinsic[:3, :3] = R
+ extrinsic[:, 3] = t
+ P = K[:3, :3] @ extrinsic
+
+ P_inv = np.linalg.pinv(P)
+
+ # Each segmentation can contain multiple polygons
+ # We need to check them separately
+ points_so_far = []
+ faces = recon.faces
+ for polygon in seg_coord:
+ n = len(polygon)
+ coords_h = np.hstack((polygon, np.ones((n, 1))))
+ # Apply the inverse projection to the homogeneous 2D coordinates to get the corresponding 3D coordinates
+ XYZ = P_inv @ coords_h[:, :, None]
+ XYZ = XYZ.reshape((XYZ.shape[0], XYZ.shape[1]))
+ XYZ = XYZ[:, :3] / XYZ[:, 3, None]
+
+ p = Path(XYZ[:, :2])
+
+ grid = p.contains_points(recon.vertices[:, :2])
+ indices = np.argwhere(grid)
+ points_so_far += list(indices.flatten())
+
+ if smpl is not None:
+ num_verts = recon.vertices.shape[0]
+ recon_labels = smpl_to_recon_labels(recon, smpl)
+ body_parts_to_remove = ['rightHand', 'leftToeBase', 'leftFoot', 'rightFoot', 'head',
+ 'leftHandIndex1', 'rightHandIndex1', 'rightToeBase', 'leftHand', 'rightHand']
+ seg_type = segmentation['type_id']
+
+ # Remove additional bodyparts that are most likely not part of the segmentation but might intersect (e.g. hand in front of torso)
+ # https://github.com/switchablenorms/DeepFashion2
+ # Short sleeve clothes
+ if seg_type in (1, 3, 10):
+ body_parts_to_remove += ['leftForeArm', 'rightForeArm']
+ # No sleeves at all or lower body clothes
+ elif seg_type in (5, 6, 8, 9, 12, 13):
+ body_parts_to_remove += ['leftForeArm',
+ 'rightForeArm', 'leftArm', 'rightArm']
+ # Shorts
+ elif seg_type == 7:
+ body_parts_to_remove += ['leftLeg', 'rightLeg',
+ 'leftForeArm', 'rightForeArm', 'leftArm', 'rightArm']
+
+ verts_to_remove = list(itertools.chain.from_iterable(
+ [recon_labels[part] for part in body_parts_to_remove]))
+
+ label_mask = np.zeros(num_verts, dtype=bool)
+ label_mask[verts_to_remove] = True
+
+ seg_mask = np.zeros(num_verts, dtype=bool)
+ seg_mask[points_so_far] = True
+
+ # Remove points that belong to other bodyparts
+ # If a vertex in points_so_far also belongs to a body part to remove, it should be dropped
+ extra_verts_to_remove = np.logical_and(seg_mask, label_mask)
+
+ combine_mask = np.zeros(num_verts, dtype=bool)
+ combine_mask[points_so_far] = True
+ combine_mask[extra_verts_to_remove] = False
+
+ all_indices = np.argwhere(combine_mask).flatten()
+
+ i_x = np.where(np.in1d(faces[:, 0], all_indices))[0]
+ i_y = np.where(np.in1d(faces[:, 1], all_indices))[0]
+ i_z = np.where(np.in1d(faces[:, 2], all_indices))[0]
+
+ faces_to_keep = np.array(list(set(i_x).union(i_y).union(i_z)))
+ mask = np.zeros(len(recon.faces), dtype=bool)
+ if len(faces_to_keep) > 0:
+ mask[faces_to_keep] = True
+
+ mesh.update_faces(mask)
+ mesh.remove_unreferenced_vertices()
+
+ # mesh.rezero()
+
+ return mesh
+
+ return None
\ No newline at end of file
diff --git a/lib/common/config.py b/lib/common/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a69e8ca0ab7e42ae38055bb0b31e07ba42a4aed
--- /dev/null
+++ b/lib/common/config.py
@@ -0,0 +1,218 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from yacs.config import CfgNode as CN
+import os
+
+_C = CN(new_allowed=True)
+
+# needed by trainer
+_C.name = 'default'
+_C.gpus = [0]
+_C.test_gpus = [1]
+_C.root = "./data/"
+_C.ckpt_dir = './data/ckpt/'
+_C.resume_path = ''
+_C.normal_path = ''
+_C.corr_path = ''
+_C.results_path = './data/results/'
+_C.projection_mode = 'orthogonal'
+_C.num_views = 1
+_C.sdf = False
+_C.sdf_clip = 5.0
+
+_C.lr_G = 1e-3
+_C.lr_C = 1e-3
+_C.lr_N = 2e-4
+_C.weight_decay = 0.0
+_C.momentum = 0.0
+_C.optim = 'RMSprop'
+_C.schedule = [5, 10, 15]
+_C.gamma = 0.1
+
+_C.overfit = False
+_C.resume = False
+_C.test_mode = False
+_C.test_uv = False
+_C.draw_geo_thres = 0.60
+_C.num_sanity_val_steps = 2
+_C.fast_dev = 0
+_C.get_fit = False
+_C.agora = False
+_C.optim_cloth = False
+_C.optim_body = False
+_C.mcube_res = 256
+_C.clean_mesh = True
+_C.remesh = False
+
+_C.batch_size = 4
+_C.num_threads = 8
+
+_C.num_epoch = 10
+_C.freq_plot = 0.01
+_C.freq_show_train = 0.1
+_C.freq_show_val = 0.2
+_C.freq_eval = 0.5
+_C.accu_grad_batch = 4
+
+_C.test_items = ['sv', 'mv', 'mv-fusion', 'hybrid', 'dc-pred', 'gt']
+
+_C.net = CN()
+_C.net.gtype = 'HGPIFuNet'
+_C.net.ctype = 'resnet18'
+_C.net.classifierIMF = 'MultiSegClassifier'
+_C.net.netIMF = 'resnet18'
+_C.net.norm = 'group'
+_C.net.norm_mlp = 'group'
+_C.net.norm_color = 'group'
+_C.net.hg_down = 'ave_pool'
+_C.net.num_views = 1
+
+# kernel_size, stride, dilation, padding
+
+_C.net.conv1 = [7, 2, 1, 3]
+_C.net.conv3x3 = [3, 1, 1, 1]
+
+_C.net.num_stack = 4
+_C.net.num_hourglass = 2
+_C.net.hourglass_dim = 256
+_C.net.voxel_dim = 32
+_C.net.resnet_dim = 120
+_C.net.mlp_dim = [320, 1024, 512, 256, 128, 1]
+_C.net.mlp_dim_knn = [320, 1024, 512, 256, 128, 3]
+_C.net.mlp_dim_color = [513, 1024, 512, 256, 128, 3]
+_C.net.mlp_dim_multiseg = [1088, 2048, 1024, 500]
+_C.net.res_layers = [2, 3, 4]
+_C.net.filter_dim = 256
+_C.net.smpl_dim = 3
+
+_C.net.cly_dim = 3
+_C.net.soft_dim = 64
+_C.net.z_size = 200.0
+_C.net.N_freqs = 10
+_C.net.geo_w = 0.1
+_C.net.norm_w = 0.1
+_C.net.dc_w = 0.1
+_C.net.C_cat_to_G = False
+
+_C.net.skip_hourglass = True
+_C.net.use_tanh = True
+_C.net.soft_onehot = True
+_C.net.no_residual = True
+_C.net.use_attention = False
+
+_C.net.prior_type = "sdf"
+_C.net.smpl_feats = ['sdf', 'cmap', 'norm', 'vis']
+_C.net.use_filter = True
+_C.net.use_cc = False
+_C.net.use_PE = False
+_C.net.use_IGR = False
+_C.net.in_geo = ()
+_C.net.in_nml = ()
+
+_C.dataset = CN()
+_C.dataset.root = ''
+_C.dataset.set_splits = [0.95, 0.04]
+_C.dataset.types = [
+ "3dpeople", "axyz", "renderpeople", "renderpeople_p27", "humanalloy"
+]
+_C.dataset.scales = [1.0, 100.0, 1.0, 1.0, 100.0 / 39.37]
+_C.dataset.rp_type = "pifu900"
+_C.dataset.th_type = 'train'
+_C.dataset.input_size = 512
+_C.dataset.rotation_num = 3
+_C.dataset.num_precomp = 10 # Number of segmentation classifiers
+_C.dataset.num_multiseg = 500 # Number of categories per classifier
+_C.dataset.num_knn = 10 # for loss/error
+_C.dataset.num_knn_dis = 20 # for accuracy
+_C.dataset.num_verts_max = 20000
+_C.dataset.zray_type = False
+_C.dataset.online_smpl = False
+_C.dataset.noise_type = ['z-trans', 'pose', 'beta']
+_C.dataset.noise_scale = [0.0, 0.0, 0.0]
+_C.dataset.num_sample_geo = 10000
+_C.dataset.num_sample_color = 0
+_C.dataset.num_sample_seg = 0
+_C.dataset.num_sample_knn = 10000
+
+_C.dataset.sigma_geo = 5.0
+_C.dataset.sigma_color = 0.10
+_C.dataset.sigma_seg = 0.10
+_C.dataset.thickness_threshold = 20.0
+_C.dataset.ray_sample_num = 2
+_C.dataset.semantic_p = False
+_C.dataset.remove_outlier = False
+
+_C.dataset.train_bsize = 1.0
+_C.dataset.val_bsize = 1.0
+_C.dataset.test_bsize = 1.0
+
+
+def get_cfg_defaults():
+ """Get a yacs CfgNode object with default values for my_project."""
+ # Return a clone so that the defaults will not be altered
+ # This is for the "local variable" use pattern
+ return _C.clone()
+
+
+# Alternatively, provide a way to import the defaults as
+# a global singleton:
+cfg = _C # users can `from config import cfg`
+
+# cfg = get_cfg_defaults()
+# cfg.merge_from_file('./configs/example.yaml')
+
+# # Now override from a list (opts could come from the command line)
+# opts = ['dataset.root', './data/XXXX', 'learning_rate', '1e-2']
+# cfg.merge_from_list(opts)
+
+
+def update_cfg(cfg_file):
+ # cfg = get_cfg_defaults()
+ _C.merge_from_file(cfg_file)
+ # return cfg.clone()
+ return _C
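+
+# Illustrative override flow (a sketch; the yaml path below is only an example):
+#   cfg = update_cfg('./configs/example.yaml')
+#   cfg.merge_from_list(['batch_size', 2, 'mcube_res', 512])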
+
+
+def parse_args(args):
+ cfg_file = args.cfg_file
+ if args.cfg_file is not None:
+ cfg = update_cfg(args.cfg_file)
+ else:
+ cfg = get_cfg_defaults()
+
+ # if args.misc is not None:
+ # cfg.merge_from_list(args.misc)
+
+ return cfg
+
+
+def parse_args_extend(args):
+ if args.resume:
+ if not os.path.exists(args.log_dir):
+ raise ValueError(
+ 'Experiment is set to resume mode, but the log directory does not exist.'
+ )
+
+ # load log's cfg
+ cfg_file = os.path.join(args.log_dir, 'cfg.yaml')
+ cfg = update_cfg(cfg_file)
+
+ if args.misc is not None:
+ cfg.merge_from_list(args.misc)
+ else:
+ parse_args(args)
diff --git a/lib/common/render.py b/lib/common/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d8719232bbfd851102cfdd604eb5ea5ffba07a9
--- /dev/null
+++ b/lib/common/render.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from pytorch3d.renderer import (
+ BlendParams,
+ blending,
+ look_at_view_transform,
+ FoVOrthographicCameras,
+ PointLights,
+ RasterizationSettings,
+ PointsRasterizationSettings,
+ PointsRenderer,
+ AlphaCompositor,
+ PointsRasterizer,
+ MeshRenderer,
+ MeshRasterizer,
+ SoftPhongShader,
+ SoftSilhouetteShader,
+ TexturesVertex,
+)
+from pytorch3d.renderer.mesh import TexturesVertex
+from pytorch3d.structures import Meshes
+
+import os, subprocess
+
+from lib.dataset.mesh_util import SMPLX, get_visibility
+import lib.common.render_utils as util
+import torch
+import numpy as np
+from PIL import Image
+from tqdm import tqdm
+import cv2
+import math
+from termcolor import colored
+
+
+def image2vid(images, vid_path):
+
+ w, h = images[0].size
+ videodims = (w, h)
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
+ video = cv2.VideoWriter(vid_path, fourcc, 30, videodims)
+ for image in images:
+ video.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
+ video.release()
+
+
+def query_color(verts, faces, image, device):
+ """query colors from points and image
+ Args:
+ verts ([N, 3]): [query verts]
+ faces ([M, 3]): [query faces]
+ image ([B, 3, H, W]): [full image]
+ Returns:
+ [np.float]: [return colors]
+ """
+
+ verts = verts.float().to(device)
+ faces = faces.long().to(device)
+
+ (xy, z) = verts.split([2, 1], dim=1)
+ visibility = get_visibility(xy, z, faces[:, [0, 2, 1]]).flatten()
+ uv = xy.unsqueeze(0).unsqueeze(2) # [B, N, 2]
+ uv = uv * torch.tensor([1.0, -1.0]).type_as(uv)
+ colors = (torch.nn.functional.grid_sample(image, uv, align_corners=True)[
+ 0, :, :, 0].permute(1, 0) + 1.0) * 0.5 * 255.0
+ colors[visibility == 0.0] = ((Meshes(verts.unsqueeze(0), faces.unsqueeze(
+ 0)).verts_normals_padded().squeeze(0) + 1.0) * 0.5 * 255.0)[visibility == 0.0]
+
+ return colors.detach().cpu()
+
+
+class cleanShader(torch.nn.Module):
+ def __init__(self, device="cpu", cameras=None, blend_params=None):
+ super().__init__()
+ self.cameras = cameras
+ self.blend_params = blend_params if blend_params is not None else BlendParams()
+
+ def forward(self, fragments, meshes, **kwargs):
+ cameras = kwargs.get("cameras", self.cameras)
+ if cameras is None:
+ msg = "Cameras must be specified either at initialization \
+ or in the forward pass of TexturedSoftPhongShader"
+
+ raise ValueError(msg)
+
+ # get renderer output
+ blend_params = kwargs.get("blend_params", self.blend_params)
+ texels = meshes.sample_textures(fragments)
+ images = blending.softmax_rgb_blend(
+ texels, fragments, blend_params, znear=-256, zfar=256
+ )
+
+ return images
+
+
+class Render:
+ def __init__(self, size=512, device=torch.device("cuda:0")):
+ self.device = device
+ self.mesh_y_center = 100.0
+ self.dis = 100.0
+ self.scale = 1.0
+ self.size = size
+ self.cam_pos = [(0, 100, 100)]
+
+ self.mesh = None
+ self.deform_mesh = None
+ self.pcd = None
+ self.renderer = None
+ self.meshRas = None
+ self.type = None
+ self.knn = None
+ self.knn_inverse = None
+
+ self.smpl_seg = None
+ self.smpl_cmap = None
+
+ self.smplx = SMPLX()
+
+ self.uv_rasterizer = util.Pytorch3dRasterizer(self.size)
+
+ def get_camera(self, cam_id):
+
+ R, T = look_at_view_transform(
+ eye=[self.cam_pos[cam_id]],
+ at=((0, self.mesh_y_center, 0),),
+ up=((0, 1, 0),),
+ )
+
+ camera = FoVOrthographicCameras(
+ device=self.device,
+ R=R,
+ T=T,
+ znear=100.0,
+ zfar=-100.0,
+ max_y=100.0,
+ min_y=-100.0,
+ max_x=100.0,
+ min_x=-100.0,
+ scale_xyz=(self.scale * np.ones(3),),
+ )
+
+ return camera
+
+ def init_renderer(self, camera, type="clean_mesh", bg="gray"):
+
+ if "mesh" in type:
+
+ # rasterizer
+ self.raster_settings_mesh = RasterizationSettings(
+ image_size=self.size,
+ blur_radius=np.log(1.0 / 1e-4) * 1e-7,
+ faces_per_pixel=30,
+ )
+ self.meshRas = MeshRasterizer(
+ cameras=camera, raster_settings=self.raster_settings_mesh
+ )
+
+ if bg == "black":
+ blendparam = BlendParams(1e-4, 1e-4, (0.0, 0.0, 0.0))
+ elif bg == "white":
+ blendparam = BlendParams(1e-4, 1e-8, (1.0, 1.0, 1.0))
+ elif bg == "gray":
+ blendparam = BlendParams(1e-4, 1e-8, (0.5, 0.5, 0.5))
+
+ if type == "ori_mesh":
+
+ lights = PointLights(
+ device=self.device,
+ ambient_color=((0.8, 0.8, 0.8),),
+ diffuse_color=((0.2, 0.2, 0.2),),
+ specular_color=((0.0, 0.0, 0.0),),
+ location=[[0.0, 200.0, 0.0]],
+ )
+
+ self.renderer = MeshRenderer(
+ rasterizer=self.meshRas,
+ shader=SoftPhongShader(
+ device=self.device,
+ cameras=camera,
+ lights=lights,
+ blend_params=blendparam,
+ ),
+ )
+
+ if type == "silhouette":
+ self.raster_settings_silhouette = RasterizationSettings(
+ image_size=self.size,
+ blur_radius=np.log(1.0 / 1e-4 - 1.0) * 5e-5,
+ faces_per_pixel=50,
+ cull_backfaces=True,
+ )
+
+ self.silhouetteRas = MeshRasterizer(
+ cameras=camera, raster_settings=self.raster_settings_silhouette
+ )
+ self.renderer = MeshRenderer(
+ rasterizer=self.silhouetteRas, shader=SoftSilhouetteShader()
+ )
+
+ if type == "pointcloud":
+ self.raster_settings_pcd = PointsRasterizationSettings(
+ image_size=self.size, radius=0.006, points_per_pixel=10
+ )
+
+ self.pcdRas = PointsRasterizer(
+ cameras=camera, raster_settings=self.raster_settings_pcd
+ )
+ self.renderer = PointsRenderer(
+ rasterizer=self.pcdRas,
+ compositor=AlphaCompositor(background_color=(0, 0, 0)),
+ )
+
+ if type == "clean_mesh":
+
+ self.renderer = MeshRenderer(
+ rasterizer=self.meshRas,
+ shader=cleanShader(
+ device=self.device, cameras=camera, blend_params=blendparam
+ ),
+ )
+
+ def VF2Mesh(self, verts, faces):
+
+ if not torch.is_tensor(verts):
+ verts = torch.tensor(verts)
+ if not torch.is_tensor(faces):
+ faces = torch.tensor(faces)
+
+ if verts.ndimension() == 2:
+ verts = verts.unsqueeze(0).float()
+ if faces.ndimension() == 2:
+ faces = faces.unsqueeze(0).long()
+
+ verts = verts.to(self.device)
+ faces = faces.to(self.device)
+
+ mesh = Meshes(verts, faces).to(self.device)
+
+ mesh.textures = TexturesVertex(
+ verts_features=(mesh.verts_normals_padded() + 1.0) * 0.5
+ )
+
+ return mesh
+
+ def load_meshes(self, verts, faces):
+ """load mesh into the pytorch3d renderer
+ Args:
+ verts ([N,3]): verts
+ faces ([N,3]): faces
+ """
+
+ # camera setting
+ self.scale = 100.0
+ self.mesh_y_center = 0.0
+
+ self.cam_pos = [
+ (0, self.mesh_y_center, 100.0),
+ (100.0, self.mesh_y_center, 0),
+ (0, self.mesh_y_center, -100.0),
+ (-100.0, self.mesh_y_center, 0),
+ ]
+
+ self.type = "color"
+
+ if isinstance(verts, list):
+ self.meshes = []
+ for V, F in zip(verts, faces):
+ self.meshes.append(self.VF2Mesh(V, F))
+ else:
+ self.meshes = [self.VF2Mesh(verts, faces)]
+
+ def get_depth_map(self, cam_ids=[0, 2]):
+
+ depth_maps = []
+ for cam_id in cam_ids:
+ self.init_renderer(self.get_camera(cam_id), "clean_mesh", "gray")
+ fragments = self.meshRas(self.meshes[0])
+ depth_map = fragments.zbuf[..., 0].squeeze(0)
+ if cam_id == 2:
+ depth_map = torch.fliplr(depth_map)
+ depth_maps.append(depth_map)
+
+ return depth_maps
+
+ def get_rgb_image(self, cam_ids=[0, 2]):
+
+ images = []
+ for cam_id in range(len(self.cam_pos)):
+ if cam_id in cam_ids:
+ self.init_renderer(self.get_camera(
+ cam_id), "clean_mesh", "gray")
+ rendered_img = (
+ self.renderer(self.meshes[0])[
+ 0:1, :, :, :3].permute(0, 3, 1, 2)
+ - 0.5
+ ) * 2.0
+ if cam_id == 2 and len(cam_ids) == 2:
+ rendered_img = torch.flip(rendered_img, dims=[3])
+ images.append(rendered_img)
+
+ return images
+
+ def get_rendered_video(self, images, save_path):
+
+ tmp_path = save_path.replace('cloth', 'tmp')
+
+ self.cam_pos = []
+ for angle in range(0, 360, 3):
+ self.cam_pos.append(
+ (
+ 100.0 * math.cos(np.pi / 180 * angle),
+ self.mesh_y_center,
+ 100.0 * math.sin(np.pi / 180 * angle),
+ )
+ )
+
+ old_shape = np.array(images[0].shape[:2])
+ new_shape = np.around(
+ (self.size / old_shape[0]) * old_shape).astype(int)
+
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+ video = cv2.VideoWriter(
+ tmp_path, fourcc, 30, (self.size * len(self.meshes) +
+ new_shape[1] * len(images), self.size)
+ )
+
+ pbar = tqdm(range(len(self.cam_pos)))
+ pbar.set_description(colored(f"exporting video {os.path.basename(save_path)}...", "blue"))
+ for cam_id in pbar:
+ self.init_renderer(self.get_camera(cam_id), "clean_mesh", "gray")
+
+ img_lst = [
+ np.array(Image.fromarray(img).resize(new_shape[::-1])).astype(np.uint8)[
+ :, :, [2, 1, 0]
+ ]
+ for img in images
+ ]
+
+ for mesh in self.meshes:
+ rendered_img = (
+ (self.renderer(mesh)[0, :, :, :3] * 255.0)
+ .detach()
+ .cpu()
+ .numpy()
+ .astype(np.uint8)
+ )
+
+ img_lst.append(rendered_img)
+ final_img = np.concatenate(img_lst, axis=1)
+ video.write(final_img)
+
+ video.release()
+
+ os.system(f'ffmpeg -y -loglevel quiet -stats -i {tmp_path} -c:v libx264 {save_path}')
+
+ def get_silhouette_image(self, cam_ids=[0, 2]):
+
+ images = []
+ for cam_id in range(len(self.cam_pos)):
+ if cam_id in cam_ids:
+ self.init_renderer(self.get_camera(cam_id), "silhouette")
+ rendered_img = self.renderer(self.meshes[0])[0:1, :, :, 3]
+ if cam_id == 2 and len(cam_ids) == 2:
+ rendered_img = torch.flip(rendered_img, dims=[2])
+ images.append(rendered_img)
+
+ return images
diff --git a/lib/common/render_utils.py b/lib/common/render_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..00437ac2c3b243fa01d3916cd130e1437ae5b4d8
--- /dev/null
+++ b/lib/common/render_utils.py
@@ -0,0 +1,221 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import torch
+from torch import nn
+import trimesh
+import math
+from typing import NewType
+from pytorch3d.structures import Meshes
+from pytorch3d.renderer.mesh import rasterize_meshes
+
+Tensor = NewType('Tensor', torch.Tensor)
+
+
+def solid_angles(points: Tensor,
+ triangles: Tensor,
+ thresh: float = 1e-8) -> Tensor:
+ ''' Compute solid angle between the input points and triangles
+ Follows the method described in:
+ The Solid Angle of a Plane Triangle
+ A. VAN OOSTEROM AND J. STRACKEE
+ IEEE TRANSACTIONS ON BIOMEDICAL ENGINEERING,
+ VOL. BME-30, NO. 2, FEBRUARY 1983
+ Parameters
+ -----------
+ points: BxQx3
+ Tensor of input query points
+ triangles: BxFx3x3
+ Target triangles
+ thresh: float
+ float threshold
+ Returns
+ -------
+ solid_angles: BxQxF
+ A tensor containing the solid angle between all query points
+ and input triangles
+ '''
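+ # The code below evaluates tan(omega / 2) as
+ #   (R1 . (R2 x R3)) / (r1*r2*r3 + (R1.R2)*r3 + (R1.R3)*r2 + (R2.R3)*r1)
+ # with Ri the triangle vertices centered on the query point and ri = |Ri|,
+ # then returns omega = 2 * atan2(numerator, denominator).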
+ # Center the triangles on the query points. Size should be BxQxFx3x3
+ centered_tris = triangles[:, None] - points[:, :, None, None]
+
+ # BxQxFx3
+ norms = torch.norm(centered_tris, dim=-1)
+
+ # Should be BxQxFx3
+ cross_prod = torch.cross(centered_tris[:, :, :, 1],
+ centered_tris[:, :, :, 2],
+ dim=-1)
+ # Should be BxQxF
+ numerator = (centered_tris[:, :, :, 0] * cross_prod).sum(dim=-1)
+ del cross_prod
+
+ dot01 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 1]).sum(dim=-1)
+ dot12 = (centered_tris[:, :, :, 1] * centered_tris[:, :, :, 2]).sum(dim=-1)
+ dot02 = (centered_tris[:, :, :, 0] * centered_tris[:, :, :, 2]).sum(dim=-1)
+ del centered_tris
+
+ denominator = (norms.prod(dim=-1) + dot01 * norms[:, :, :, 2] +
+ dot02 * norms[:, :, :, 1] + dot12 * norms[:, :, :, 0])
+ del dot01, dot12, dot02, norms
+
+ # Should be BxQ
+ solid_angle = torch.atan2(numerator, denominator)
+ del numerator, denominator
+
+ torch.cuda.empty_cache()
+
+ return 2 * solid_angle
+
+
+def winding_numbers(points: Tensor,
+ triangles: Tensor,
+ thresh: float = 1e-8) -> Tensor:
+ ''' Uses winding_numbers to compute inside/outside
+ Robust inside-outside segmentation using generalized winding numbers
+ Alec Jacobson,
+ Ladislav Kavan,
+ Olga Sorkine-Hornung
+ Fast Winding Numbers for Soups and Clouds SIGGRAPH 2018
+ Gavin Barill
+ NEIL G. Dickson
+ Ryan Schmidt
+ David I.W. Levin
+ and Alec Jacobson
+ Parameters
+ -----------
+ points: BxQx3
+ Tensor of input query points
+ triangles: BxFx3x3
+ Target triangles
+ thresh: float
+ float threshold
+ Returns
+ -------
+ winding_numbers: BxQ
+ A tensor containing the Generalized winding numbers
+ '''
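+ # For a (nearly) watertight mesh the winding number is ~1 inside and ~0 outside,
+ # so a 0.5 threshold gives an inside/outside test, e.g. (illustrative):
+ #   inside = winding_numbers(points, face_vertices(verts, faces)) > 0.5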
+ # The generalized winding number is the sum of solid angles of the point
+ # with respect to all triangles.
+ return 1 / (4 * math.pi) * solid_angles(points, triangles,
+ thresh=thresh).sum(dim=-1)
+
+
+def batch_contains(verts, faces, points):
+
+ B = verts.shape[0]
+ N = points.shape[1]
+
+ verts = verts.detach().cpu()
+ faces = faces.detach().cpu()
+ points = points.detach().cpu()
+ contains = torch.zeros(B, N)
+
+ for i in range(B):
+ contains[i] = torch.as_tensor(
+ trimesh.Trimesh(verts[i], faces[i]).contains(points[i]))
+
+ return 2.0 * (contains - 0.5)
+
+
+def dict2obj(d):
+ # if isinstance(d, list):
+ # d = [dict2obj(x) for x in d]
+ if not isinstance(d, dict):
+ return d
+
+ class C(object):
+ pass
+
+ o = C()
+ for k in d:
+ o.__dict__[k] = dict2obj(d[k])
+ return o
+
+
+def face_vertices(vertices, faces):
+ """
+ :param vertices: [batch size, number of vertices, 3]
+ :param faces: [batch size, number of faces, 3]
+ :return: [batch size, number of faces, 3, 3]
+ """
+
+ bs, nv = vertices.shape[:2]
+ bs, nf = faces.shape[:2]
+ device = vertices.device
+ faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) *
+ nv)[:, None, None]
+ vertices = vertices.reshape((bs * nv, vertices.shape[-1]))
+
+ return vertices[faces.long()]
+
+
+class Pytorch3dRasterizer(nn.Module):
+ """ Borrowed from https://github.com/facebookresearch/pytorch3d
+ Notice:
+ x,y,z are in image space, normalized
+ can only render squared image now
+ """
+
+ def __init__(self, image_size=224):
+ """
+ use fixed raster_settings for rendering faces
+ """
+ super().__init__()
+ raster_settings = {
+ 'image_size': image_size,
+ 'blur_radius': 0.0,
+ 'faces_per_pixel': 1,
+ 'bin_size': None,
+ 'max_faces_per_bin': None,
+ 'perspective_correct': True,
+ 'cull_backfaces': True,
+ }
+ raster_settings = dict2obj(raster_settings)
+ self.raster_settings = raster_settings
+
+ def forward(self, vertices, faces, attributes=None):
+ fixed_vertices = vertices.clone()
+ fixed_vertices[..., :2] = -fixed_vertices[..., :2]
+ meshes_screen = Meshes(verts=fixed_vertices.float(),
+ faces=faces.long())
+ raster_settings = self.raster_settings
+ pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
+ meshes_screen,
+ image_size=raster_settings.image_size,
+ blur_radius=raster_settings.blur_radius,
+ faces_per_pixel=raster_settings.faces_per_pixel,
+ bin_size=raster_settings.bin_size,
+ max_faces_per_bin=raster_settings.max_faces_per_bin,
+ perspective_correct=raster_settings.perspective_correct,
+ )
+ vismask = (pix_to_face > -1).float()
+ D = attributes.shape[-1]
+ attributes = attributes.clone()
+ attributes = attributes.view(attributes.shape[0] * attributes.shape[1],
+ 3, attributes.shape[-1])
+ N, H, W, K, _ = bary_coords.shape
+ mask = pix_to_face == -1
+ pix_to_face = pix_to_face.clone()
+ pix_to_face[mask] = 0
+ idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
+ pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
+ pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
+ pixel_vals[mask] = 0 # Replace masked values in output.
+ pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2)
+ pixel_vals = torch.cat(
+ [pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1)
+ return pixel_vals
\ No newline at end of file
diff --git a/lib/common/seg3d_lossless.py b/lib/common/seg3d_lossless.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb8dff2e1ba459f53dc5e27a4ee76c6168486203
--- /dev/null
+++ b/lib/common/seg3d_lossless.py
@@ -0,0 +1,604 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+
+from .seg3d_utils import (
+ create_grid3D,
+ plot_mask3D,
+ SmoothConv3D,
+)
+
+import torch
+import torch.nn as nn
+import numpy as np
+import torch.nn.functional as F
+import mcubes
+from kaolin.ops.conversions import voxelgrids_to_trianglemeshes
+import logging
+
+logging.getLogger("lightning").setLevel(logging.ERROR)
+
+
+class Seg3dLossless(nn.Module):
+ def __init__(self,
+ query_func,
+ b_min,
+ b_max,
+ resolutions,
+ channels=1,
+ balance_value=0.5,
+ align_corners=False,
+ visualize=False,
+ debug=False,
+ use_cuda_impl=False,
+ faster=False,
+ use_shadow=False,
+ **kwargs):
+ """
+ align_corners: must match how the ground truth was processed (grid_sample / interpolate)
+ """
+ super().__init__()
+ self.query_func = query_func
+ self.register_buffer(
+ 'b_min',
+ torch.tensor(b_min).float().unsqueeze(1)) # [bz, 1, 3]
+ self.register_buffer(
+ 'b_max',
+ torch.tensor(b_max).float().unsqueeze(1)) # [bz, 1, 3]
+
+ # ti.init(arch=ti.cuda)
+ # self.mciso_taichi = MCISO(dim=3, N=resolutions[-1]-1)
+
+ if type(resolutions[0]) is int:
+ resolutions = torch.tensor([(res, res, res)
+ for res in resolutions])
+ else:
+ resolutions = torch.tensor(resolutions)
+ self.register_buffer('resolutions', resolutions)
+ self.batchsize = self.b_min.size(0)
+ assert self.batchsize == 1
+ self.balance_value = balance_value
+ self.channels = channels
+ assert self.channels == 1
+ self.align_corners = align_corners
+ self.visualize = visualize
+ self.debug = debug
+ self.use_cuda_impl = use_cuda_impl
+ self.faster = faster
+ self.use_shadow = use_shadow
+
+ for resolution in resolutions:
+ assert resolution[0] % 2 == 1 and resolution[1] % 2 == 1, \
+ f"resolution {resolution} need to be odd becuase of align_corner."
+
+ # init first resolution
+ init_coords = create_grid3D(0,
+ resolutions[-1] - 1,
+ steps=resolutions[0]) # [N, 3]
+ init_coords = init_coords.unsqueeze(0).repeat(self.batchsize, 1,
+ 1) # [bz, N, 3]
+ self.register_buffer('init_coords', init_coords)
+
+ # some useful tensors
+ calculated = torch.zeros(
+ (self.resolutions[-1][2], self.resolutions[-1][1],
+ self.resolutions[-1][0]),
+ dtype=torch.bool)
+ self.register_buffer('calculated', calculated)
+
+ gird8_offsets = torch.stack(
+ torch.meshgrid([
+ torch.tensor([-1, 0, 1]),
+ torch.tensor([-1, 0, 1]),
+ torch.tensor([-1, 0, 1])
+ ])).int().view(3, -1).t() # [27, 3]
+ self.register_buffer('gird8_offsets', gird8_offsets)
+
+ # smooth convs
+ self.smooth_conv3x3 = SmoothConv3D(in_channels=1,
+ out_channels=1,
+ kernel_size=3)
+ self.smooth_conv5x5 = SmoothConv3D(in_channels=1,
+ out_channels=1,
+ kernel_size=5)
+ self.smooth_conv7x7 = SmoothConv3D(in_channels=1,
+ out_channels=1,
+ kernel_size=7)
+ self.smooth_conv9x9 = SmoothConv3D(in_channels=1,
+ out_channels=1,
+ kernel_size=9)
+
+ def batch_eval(self, coords, **kwargs):
+ """
+ coords: in the coordinates of last resolution
+ **kwargs: for query_func
+ """
+ coords = coords.detach()
+ # normalize coords to fit in [b_min, b_max]
+ if self.align_corners:
+ coords2D = coords.float() / (self.resolutions[-1] - 1)
+ else:
+ step = 1.0 / self.resolutions[-1].float()
+ coords2D = coords.float() / self.resolutions[-1] + step / 2
+ coords2D = coords2D * (self.b_max - self.b_min) + self.b_min
+ # query function
+ occupancys = self.query_func(**kwargs, points=coords2D)
+ if type(occupancys) is list:
+ occupancys = torch.stack(occupancys) # [bz, C, N]
+ assert len(occupancys.size()) == 3, \
+ "query_func should return a occupancy with shape of [bz, C, N]"
+ return occupancys
+
+ def forward(self, **kwargs):
+ if self.faster:
+ return self._forward_faster(**kwargs)
+ else:
+ return self._forward(**kwargs)
+
+ def _forward_faster(self, **kwargs):
+ """
+ In faster mode, we make the following changes to trade accuracy for speed:
+ 1. no conflict checking: 4.88 fps -> 6.56 fps
+ 2. smooth_conv9x9 ~ smooth_conv3x3 for different resolution
+ 3. last step no examine
+ """
+ final_W = self.resolutions[-1][0]
+ final_H = self.resolutions[-1][1]
+ final_D = self.resolutions[-1][2]
+
+ for resolution in self.resolutions:
+ W, H, D = resolution
+ stride = (self.resolutions[-1] - 1) / (resolution - 1)
+
+ # first step
+ if torch.equal(resolution, self.resolutions[0]):
+ coords = self.init_coords.clone() # torch.long
+ occupancys = self.batch_eval(coords, **kwargs)
+ occupancys = occupancys.view(self.batchsize, self.channels, D,
+ H, W)
+ if (occupancys > 0.5).sum() == 0:
+ # return F.interpolate(
+ # occupancys, size=(final_D, final_H, final_W),
+ # mode="linear", align_corners=True)
+ return None
+
+ if self.visualize:
+ self.plot(occupancys, coords, final_D, final_H, final_W)
+
+ with torch.no_grad():
+ coords_accum = coords / stride
+
+ # last step
+ elif torch.equal(resolution, self.resolutions[-1]):
+
+ with torch.no_grad():
+ # here true is correct!
+ valid = F.interpolate(
+ (occupancys > self.balance_value).float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ # here true is correct!
+ occupancys = F.interpolate(occupancys.float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ # is_boundary = (valid > 0.0) & (valid < 1.0)
+ is_boundary = valid == 0.5
+
+ # next steps
+ else:
+ coords_accum *= 2
+
+ with torch.no_grad():
+ # here true is correct!
+ valid = F.interpolate(
+ (occupancys > self.balance_value).float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ # here true is correct!
+ occupancys = F.interpolate(occupancys.float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ is_boundary = (valid > 0.0) & (valid < 1.0)
+
+ with torch.no_grad():
+ if torch.equal(resolution, self.resolutions[1]):
+ is_boundary = (self.smooth_conv9x9(is_boundary.float())
+ > 0)[0, 0]
+ elif torch.equal(resolution, self.resolutions[2]):
+ is_boundary = (self.smooth_conv7x7(is_boundary.float())
+ > 0)[0, 0]
+ else:
+ is_boundary = (self.smooth_conv3x3(is_boundary.float())
+ > 0)[0, 0]
+
+ coords_accum = coords_accum.long()
+ is_boundary[coords_accum[0, :, 2], coords_accum[0, :, 1],
+ coords_accum[0, :, 0]] = False
+ point_coords = is_boundary.permute(
+ 2, 1, 0).nonzero(as_tuple=False).unsqueeze(0)
+ point_indices = (point_coords[:, :, 2] * H * W +
+ point_coords[:, :, 1] * W +
+ point_coords[:, :, 0])
+
+ R, C, D, H, W = occupancys.shape
+
+ # inferred value
+ coords = point_coords * stride
+
+ if coords.size(1) == 0:
+ continue
+ occupancys_topk = self.batch_eval(coords, **kwargs)
+
+ # put mask point predictions to the right places on the upsampled grid.
+ R, C, D, H, W = occupancys.shape
+ point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
+ occupancys = (occupancys.reshape(R, C, D * H * W).scatter_(
+ 2, point_indices, occupancys_topk).view(R, C, D, H, W))
+
+ with torch.no_grad():
+ voxels = coords / stride
+ coords_accum = torch.cat([voxels, coords_accum],
+ dim=1).unique(dim=1)
+
+ return occupancys[0, 0]
+
+ def _forward(self, **kwargs):
+ """
+ output occupancy field would be:
+ (bz, C, res, res)
+ """
+ final_W = self.resolutions[-1][0]
+ final_H = self.resolutions[-1][1]
+ final_D = self.resolutions[-1][2]
+
+ calculated = self.calculated.clone()
+
+ for resolution in self.resolutions:
+ W, H, D = resolution
+ stride = (self.resolutions[-1] - 1) / (resolution - 1)
+
+ if self.visualize:
+ this_stage_coords = []
+
+ # first step
+ if torch.equal(resolution, self.resolutions[0]):
+ coords = self.init_coords.clone() # torch.long
+ occupancys = self.batch_eval(coords, **kwargs)
+ occupancys = occupancys.view(self.batchsize, self.channels, D,
+ H, W)
+
+ if self.visualize:
+ self.plot(occupancys, coords, final_D, final_H, final_W)
+
+ with torch.no_grad():
+ coords_accum = coords / stride
+ calculated[coords[0, :, 2], coords[0, :, 1],
+ coords[0, :, 0]] = True
+
+ # next steps
+ else:
+ coords_accum *= 2
+
+ with torch.no_grad():
+ # here true is correct!
+ valid = F.interpolate(
+ (occupancys > self.balance_value).float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ # here true is correct!
+ occupancys = F.interpolate(occupancys.float(),
+ size=(D, H, W),
+ mode="trilinear",
+ align_corners=True)
+
+ is_boundary = (valid > 0.0) & (valid < 1.0)
+
+ with torch.no_grad():
+ # TODO
+ if self.use_shadow and torch.equal(resolution,
+ self.resolutions[-1]):
+ # larger z means smaller depth here
+ depth_res = resolution[2].item()
+ depth_index = torch.linspace(0,
+ depth_res - 1,
+ steps=depth_res).to(
+ occupancys.device)
+ depth_index_max = torch.max(
+ (occupancys > self.balance_value) *
+ (depth_index + 1),
+ dim=-1,
+ keepdim=True)[0] - 1
+ shadow = depth_index < depth_index_max
+ is_boundary[shadow] = False
+ is_boundary = is_boundary[0, 0]
+ else:
+ is_boundary = (self.smooth_conv3x3(is_boundary.float())
+ > 0)[0, 0]
+ # is_boundary = is_boundary[0, 0]
+
+ is_boundary[coords_accum[0, :, 2], coords_accum[0, :, 1],
+ coords_accum[0, :, 0]] = False
+ point_coords = is_boundary.permute(
+ 2, 1, 0).nonzero(as_tuple=False).unsqueeze(0)
+ point_indices = (point_coords[:, :, 2] * H * W +
+ point_coords[:, :, 1] * W +
+ point_coords[:, :, 0])
+
+ R, C, D, H, W = occupancys.shape
+ # interpolated value
+ occupancys_interp = torch.gather(
+ occupancys.reshape(R, C, D * H * W), 2,
+ point_indices.unsqueeze(1))
+
+ # inferred value
+ coords = point_coords * stride
+
+ if coords.size(1) == 0:
+ continue
+ occupancys_topk = self.batch_eval(coords, **kwargs)
+ if self.visualize:
+ this_stage_coords.append(coords)
+
+ # put mask point predictions to the right places on the upsampled grid.
+ R, C, D, H, W = occupancys.shape
+ point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
+ occupancys = (occupancys.reshape(R, C, D * H * W).scatter_(
+ 2, point_indices, occupancys_topk).view(R, C, D, H, W))
+
+ with torch.no_grad():
+ # conflicts
+ conflicts = ((occupancys_interp - self.balance_value) *
+ (occupancys_topk - self.balance_value) < 0)[0,
+ 0]
+
+ if self.visualize:
+ self.plot(occupancys, coords, final_D, final_H,
+ final_W)
+
+ voxels = coords / stride
+ coords_accum = torch.cat([voxels, coords_accum],
+ dim=1).unique(dim=1)
+ calculated[coords[0, :, 2], coords[0, :, 1],
+ coords[0, :, 0]] = True
+
+ while conflicts.sum() > 0:
+ if self.use_shadow and torch.equal(resolution,
+ self.resolutions[-1]):
+ break
+
+ with torch.no_grad():
+ conflicts_coords = coords[0, conflicts, :]
+
+ if self.debug:
+ self.plot(occupancys,
+ conflicts_coords.unsqueeze(0),
+ final_D,
+ final_H,
+ final_W,
+ title='conflicts')
+
+ conflicts_boundary = (conflicts_coords.int() +
+ self.gird8_offsets.unsqueeze(1) *
+ stride.int()).reshape(
+ -1, 3).long().unique(dim=0)
+ conflicts_boundary[:, 0] = (
+ conflicts_boundary[:, 0].clamp(
+ 0,
+ calculated.size(2) - 1))
+ conflicts_boundary[:, 1] = (
+ conflicts_boundary[:, 1].clamp(
+ 0,
+ calculated.size(1) - 1))
+ conflicts_boundary[:, 2] = (
+ conflicts_boundary[:, 2].clamp(
+ 0,
+ calculated.size(0) - 1))
+
+ coords = conflicts_boundary[calculated[
+ conflicts_boundary[:, 2], conflicts_boundary[:, 1],
+ conflicts_boundary[:, 0]] == False]
+
+ if self.debug:
+ self.plot(occupancys,
+ coords.unsqueeze(0),
+ final_D,
+ final_H,
+ final_W,
+ title='coords')
+
+ coords = coords.unsqueeze(0)
+ point_coords = coords / stride
+ point_indices = (point_coords[:, :, 2] * H * W +
+ point_coords[:, :, 1] * W +
+ point_coords[:, :, 0])
+
+ R, C, D, H, W = occupancys.shape
+ # interpolated value
+ occupancys_interp = torch.gather(
+ occupancys.reshape(R, C, D * H * W), 2,
+ point_indices.unsqueeze(1))
+
+ # inferred value
+ coords = point_coords * stride
+
+ if coords.size(1) == 0:
+ break
+ occupancys_topk = self.batch_eval(coords, **kwargs)
+ if self.visualize:
+ this_stage_coords.append(coords)
+
+ with torch.no_grad():
+ # conflicts
+ conflicts = ((occupancys_interp - self.balance_value) *
+ (occupancys_topk - self.balance_value) <
+ 0)[0, 0]
+
+ # put mask point predictions to the right places on the upsampled grid.
+ point_indices = point_indices.unsqueeze(1).expand(
+ -1, C, -1)
+ occupancys = (occupancys.reshape(R, C, D * H * W).scatter_(
+ 2, point_indices, occupancys_topk).view(R, C, D, H, W))
+
+ with torch.no_grad():
+ voxels = coords / stride
+ coords_accum = torch.cat([voxels, coords_accum],
+ dim=1).unique(dim=1)
+ calculated[coords[0, :, 2], coords[0, :, 1],
+ coords[0, :, 0]] = True
+
+ if self.visualize:
+ this_stage_coords = torch.cat(this_stage_coords, dim=1)
+ self.plot(occupancys, this_stage_coords, final_D, final_H,
+ final_W)
+
+ return occupancys[0, 0]
+
+ def plot(self,
+ occupancys,
+ coords,
+ final_D,
+ final_H,
+ final_W,
+ title='',
+ **kwargs):
+ final = F.interpolate(occupancys.float(),
+ size=(final_D, final_H, final_W),
+ mode="trilinear",
+ align_corners=True) # here true is correct!
+ x = coords[0, :, 0].to("cpu")
+ y = coords[0, :, 1].to("cpu")
+ z = coords[0, :, 2].to("cpu")
+
+ plot_mask3D(final[0, 0].to("cpu"), title, (x, y, z), **kwargs)
+
+ def find_vertices(self, sdf, direction="front"):
+ '''
+ - direction: "front" | "back" | "left" | "right"
+ '''
+ resolution = sdf.size(2)
+ if direction == "front":
+ pass
+ elif direction == "left":
+ sdf = sdf.permute(2, 1, 0)
+ elif direction == "back":
+ inv_idx = torch.arange(sdf.size(2) - 1, -1, -1).long()
+ sdf = sdf[inv_idx, :, :]
+ elif direction == "right":
+ inv_idx = torch.arange(sdf.size(2) - 1, -1, -1).long()
+ sdf = sdf[:, :, inv_idx]
+ sdf = sdf.permute(2, 1, 0)
+
+ inv_idx = torch.arange(sdf.size(2) - 1, -1, -1).long()
+ sdf = sdf[inv_idx, :, :]
+ sdf_all = sdf.permute(2, 1, 0)
+
+ # shadow
+ grad_v = (sdf_all > 0.5) * torch.linspace(
+ resolution, 1, steps=resolution).to(sdf.device)
+ grad_c = torch.ones_like(sdf_all) * torch.linspace(
+ 0, resolution - 1, steps=resolution).to(sdf.device)
+ max_v, max_c = grad_v.max(dim=2)
+ shadow = grad_c > max_c.view(resolution, resolution, 1)
+ keep = (sdf_all > 0.5) & (~shadow)
+
+ p1 = keep.nonzero(as_tuple=False).t() # [3, N]
+ p2 = p1.clone() # z
+ p2[2, :] = (p2[2, :] - 2).clamp(0, resolution)
+ p3 = p1.clone() # y
+ p3[1, :] = (p3[1, :] - 2).clamp(0, resolution)
+ p4 = p1.clone() # x
+ p4[0, :] = (p4[0, :] - 2).clamp(0, resolution)
+
+ v1 = sdf_all[p1[0, :], p1[1, :], p1[2, :]]
+ v2 = sdf_all[p2[0, :], p2[1, :], p2[2, :]]
+ v3 = sdf_all[p3[0, :], p3[1, :], p3[2, :]]
+ v4 = sdf_all[p4[0, :], p4[1, :], p4[2, :]]
+
+ X = p1[0, :].long() # [N,]
+ Y = p1[1, :].long() # [N,]
+ Z = p2[2, :].float() * (0.5 - v1) / (v2 - v1) + \
+ p1[2, :].float() * (v2 - 0.5) / (v2 - v1) # [N,]
+ Z = Z.clamp(0, resolution)
+
+ # normal
+ norm_z = v2 - v1
+ norm_y = v3 - v1
+ norm_x = v4 - v1
+ # print (v2.min(dim=0)[0], v2.max(dim=0)[0], v3.min(dim=0)[0], v3.max(dim=0)[0])
+
+ norm = torch.stack([norm_x, norm_y, norm_z], dim=1)
+ norm = norm / torch.norm(norm, p=2, dim=1, keepdim=True)
+
+ return X, Y, Z, norm
+
+ def render_normal(self, resolution, X, Y, Z, norm):
+ image = torch.ones((1, 3, resolution, resolution),
+ dtype=torch.float32).to(norm.device)
+ color = (norm + 1) / 2.0
+ color = color.clamp(0, 1)
+ image[0, :, Y, X] = color.t()
+ return image
+
+ def display(self, sdf):
+
+ # render
+ X, Y, Z, norm = self.find_vertices(sdf, direction="front")
+ image1 = self.render_normal(self.resolutions[-1, -1], X, Y, Z, norm)
+ X, Y, Z, norm = self.find_vertices(sdf, direction="left")
+ image2 = self.render_normal(self.resolutions[-1, -1], X, Y, Z, norm)
+ X, Y, Z, norm = self.find_vertices(sdf, direction="right")
+ image3 = self.render_normal(self.resolutions[-1, -1], X, Y, Z, norm)
+ X, Y, Z, norm = self.find_vertices(sdf, direction="back")
+ image4 = self.render_normal(self.resolutions[-1, -1], X, Y, Z, norm)
+
+ image = torch.cat([image1, image2, image3, image4], axis=3)
+ image = image.detach().cpu().numpy()[0].transpose(1, 2, 0) * 255.0
+
+ return np.uint8(image)
+
+ def export_mesh(self, occupancys):
+
+ final = occupancys[1:, 1:, 1:].contiguous()
+
+ if final.shape[0] > 256:
+ # for voxelgrid larger than 256^3, the required GPU memory will be > 9GB
+ # thus we use CPU marching_cube to avoid "CUDA out of memory"
+ occu_arr = final.detach().cpu().numpy() # non-smooth surface
+ # occu_arr = mcubes.smooth(final.detach().cpu().numpy()) # smooth surface
+ vertices, triangles = mcubes.marching_cubes(
+ occu_arr, self.balance_value)
+ verts = torch.as_tensor(vertices[:, [2, 1, 0]])
+ faces = torch.as_tensor(triangles.astype(
+ np.int64), dtype=torch.long)[:, [0, 2, 1]]
+ else:
+ torch.cuda.empty_cache()
+ vertices, triangles = voxelgrids_to_trianglemeshes(
+ final.unsqueeze(0))
+ verts = vertices[0][:, [2, 1, 0]].cpu()
+ faces = triangles[0][:, [0, 2, 1]].cpu()
+
+ return verts, faces
\ No newline at end of file
diff --git a/lib/common/smpl_vert_segmentation.json b/lib/common/smpl_vert_segmentation.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3244cce450e13f1095a1c3af676f4c8fdea5633
--- /dev/null
+++ b/lib/common/smpl_vert_segmentation.json
@@ -0,0 +1,7440 @@
+{
+ "rightHand": [
+ 5442,
+ 5443,
+ 5444,
+ 5445,
+ 5446,
+ 5447,
+ 5448,
+ 5449,
+ 5450,
+ 5451,
+ 5452,
+ 5453,
+ 5454,
+ 5455,
+ 5456,
+ 5457,
+ 5458,
+ 5459,
+ 5460,
+ 5461,
+ 5462,
+ 5463,
+ 5464,
+ 5465,
+ 5466,
+ 5467,
+ 5468,
+ 5469,
+ 5470,
+ 5471,
+ 5472,
+ 5473,
+ 5474,
+ 5475,
+ 5476,
+ 5477,
+ 5478,
+ 5479,
+ 5480,
+ 5481,
+ 5482,
+ 5483,
+ 5484,
+ 5485,
+ 5486,
+ 5487,
+ 5492,
+ 5493,
+ 5494,
+ 5495,
+ 5496,
+ 5497,
+ 5502,
+ 5503,
+ 5504,
+ 5505,
+ 5506,
+ 5507,
+ 5508,
+ 5509,
+ 5510,
+ 5511,
+ 5512,
+ 5513,
+ 5514,
+ 5515,
+ 5516,
+ 5517,
+ 5518,
+ 5519,
+ 5520,
+ 5521,
+ 5522,
+ 5523,
+ 5524,
+ 5525,
+ 5526,
+ 5527,
+ 5530,
+ 5531,
+ 5532,
+ 5533,
+ 5534,
+ 5535,
+ 5536,
+ 5537,
+ 5538,
+ 5539,
+ 5540,
+ 5541,
+ 5542,
+ 5543,
+ 5544,
+ 5545,
+ 5546,
+ 5547,
+ 5548,
+ 5549,
+ 5550,
+ 5551,
+ 5552,
+ 5553,
+ 5554,
+ 5555,
+ 5556,
+ 5557,
+ 5558,
+ 5559,
+ 5560,
+ 5561,
+ 5562,
+ 5569,
+ 5571,
+ 5574,
+ 5575,
+ 5576,
+ 5577,
+ 5578,
+ 5579,
+ 5580,
+ 5581,
+ 5582,
+ 5583,
+ 5588,
+ 5589,
+ 5592,
+ 5593,
+ 5594,
+ 5595,
+ 5596,
+ 5597,
+ 5598,
+ 5599,
+ 5600,
+ 5601,
+ 5602,
+ 5603,
+ 5604,
+ 5605,
+ 5610,
+ 5611,
+ 5612,
+ 5613,
+ 5614,
+ 5621,
+ 5622,
+ 5625,
+ 5631,
+ 5632,
+ 5633,
+ 5634,
+ 5635,
+ 5636,
+ 5637,
+ 5638,
+ 5639,
+ 5640,
+ 5641,
+ 5643,
+ 5644,
+ 5645,
+ 5646,
+ 5649,
+ 5650,
+ 5652,
+ 5653,
+ 5654,
+ 5655,
+ 5656,
+ 5657,
+ 5658,
+ 5659,
+ 5660,
+ 5661,
+ 5662,
+ 5663,
+ 5664,
+ 5667,
+ 5670,
+ 5671,
+ 5672,
+ 5673,
+ 5674,
+ 5675,
+ 5682,
+ 5683,
+ 5684,
+ 5685,
+ 5686,
+ 5687,
+ 5688,
+ 5689,
+ 5690,
+ 5692,
+ 5695,
+ 5697,
+ 5698,
+ 5699,
+ 5700,
+ 5701,
+ 5707,
+ 5708,
+ 5709,
+ 5710,
+ 5711,
+ 5712,
+ 5713,
+ 5714,
+ 5715,
+ 5716,
+ 5717,
+ 5718,
+ 5719,
+ 5720,
+ 5721,
+ 5723,
+ 5724,
+ 5725,
+ 5726,
+ 5727,
+ 5728,
+ 5729,
+ 5730,
+ 5731,
+ 5732,
+ 5735,
+ 5736,
+ 5737,
+ 5738,
+ 5739,
+ 5740,
+ 5745,
+ 5746,
+ 5748,
+ 5749,
+ 5750,
+ 5751,
+ 5752,
+ 6056,
+ 6057,
+ 6066,
+ 6067,
+ 6158,
+ 6159,
+ 6160,
+ 6161,
+ 6162,
+ 6163,
+ 6164,
+ 6165,
+ 6166,
+ 6167,
+ 6168,
+ 6169,
+ 6170,
+ 6171,
+ 6172,
+ 6173,
+ 6174,
+ 6175,
+ 6176,
+ 6177,
+ 6178,
+ 6179,
+ 6180,
+ 6181,
+ 6182,
+ 6183,
+ 6184,
+ 6185,
+ 6186,
+ 6187,
+ 6188,
+ 6189,
+ 6190,
+ 6191,
+ 6192,
+ 6193,
+ 6194,
+ 6195,
+ 6196,
+ 6197,
+ 6198,
+ 6199,
+ 6200,
+ 6201,
+ 6202,
+ 6203,
+ 6204,
+ 6205,
+ 6206,
+ 6207,
+ 6208,
+ 6209,
+ 6210,
+ 6211,
+ 6212,
+ 6213,
+ 6214,
+ 6215,
+ 6216,
+ 6217,
+ 6218,
+ 6219,
+ 6220,
+ 6221,
+ 6222,
+ 6223,
+ 6224,
+ 6225,
+ 6226,
+ 6227,
+ 6228,
+ 6229,
+ 6230,
+ 6231,
+ 6232,
+ 6233,
+ 6234,
+ 6235,
+ 6236,
+ 6237,
+ 6238,
+ 6239
+ ],
+ "rightUpLeg": [
+ 4320,
+ 4321,
+ 4323,
+ 4324,
+ 4333,
+ 4334,
+ 4335,
+ 4336,
+ 4337,
+ 4338,
+ 4339,
+ 4340,
+ 4356,
+ 4357,
+ 4358,
+ 4359,
+ 4360,
+ 4361,
+ 4362,
+ 4363,
+ 4364,
+ 4365,
+ 4366,
+ 4367,
+ 4383,
+ 4384,
+ 4385,
+ 4386,
+ 4387,
+ 4388,
+ 4389,
+ 4390,
+ 4391,
+ 4392,
+ 4393,
+ 4394,
+ 4395,
+ 4396,
+ 4397,
+ 4398,
+ 4399,
+ 4400,
+ 4401,
+ 4419,
+ 4420,
+ 4421,
+ 4422,
+ 4430,
+ 4431,
+ 4432,
+ 4433,
+ 4434,
+ 4435,
+ 4436,
+ 4437,
+ 4438,
+ 4439,
+ 4440,
+ 4441,
+ 4442,
+ 4443,
+ 4444,
+ 4445,
+ 4446,
+ 4447,
+ 4448,
+ 4449,
+ 4450,
+ 4451,
+ 4452,
+ 4453,
+ 4454,
+ 4455,
+ 4456,
+ 4457,
+ 4458,
+ 4459,
+ 4460,
+ 4461,
+ 4462,
+ 4463,
+ 4464,
+ 4465,
+ 4466,
+ 4467,
+ 4468,
+ 4469,
+ 4470,
+ 4471,
+ 4472,
+ 4473,
+ 4474,
+ 4475,
+ 4476,
+ 4477,
+ 4478,
+ 4479,
+ 4480,
+ 4481,
+ 4482,
+ 4483,
+ 4484,
+ 4485,
+ 4486,
+ 4487,
+ 4488,
+ 4489,
+ 4490,
+ 4491,
+ 4492,
+ 4493,
+ 4494,
+ 4495,
+ 4496,
+ 4497,
+ 4498,
+ 4499,
+ 4500,
+ 4501,
+ 4502,
+ 4503,
+ 4504,
+ 4505,
+ 4506,
+ 4507,
+ 4508,
+ 4509,
+ 4510,
+ 4511,
+ 4512,
+ 4513,
+ 4514,
+ 4515,
+ 4516,
+ 4517,
+ 4518,
+ 4519,
+ 4520,
+ 4521,
+ 4522,
+ 4523,
+ 4524,
+ 4525,
+ 4526,
+ 4527,
+ 4528,
+ 4529,
+ 4530,
+ 4531,
+ 4532,
+ 4623,
+ 4624,
+ 4625,
+ 4626,
+ 4627,
+ 4628,
+ 4629,
+ 4630,
+ 4631,
+ 4632,
+ 4633,
+ 4634,
+ 4645,
+ 4646,
+ 4647,
+ 4648,
+ 4649,
+ 4650,
+ 4651,
+ 4652,
+ 4653,
+ 4654,
+ 4655,
+ 4656,
+ 4657,
+ 4658,
+ 4659,
+ 4660,
+ 4670,
+ 4671,
+ 4672,
+ 4673,
+ 4704,
+ 4705,
+ 4706,
+ 4707,
+ 4708,
+ 4709,
+ 4710,
+ 4711,
+ 4712,
+ 4713,
+ 4745,
+ 4746,
+ 4757,
+ 4758,
+ 4759,
+ 4760,
+ 4801,
+ 4802,
+ 4829,
+ 4834,
+ 4835,
+ 4836,
+ 4837,
+ 4838,
+ 4839,
+ 4840,
+ 4841,
+ 4924,
+ 4925,
+ 4926,
+ 4928,
+ 4929,
+ 4930,
+ 4931,
+ 4932,
+ 4933,
+ 4934,
+ 4935,
+ 4936,
+ 4948,
+ 4949,
+ 4950,
+ 4951,
+ 4952,
+ 4970,
+ 4971,
+ 4972,
+ 4973,
+ 4983,
+ 4984,
+ 4985,
+ 4986,
+ 4987,
+ 4988,
+ 4989,
+ 4990,
+ 4991,
+ 4992,
+ 4993,
+ 5004,
+ 5005,
+ 6546,
+ 6547,
+ 6548,
+ 6549,
+ 6552,
+ 6553,
+ 6554,
+ 6555,
+ 6556,
+ 6873,
+ 6877
+ ],
+ "leftArm": [
+ 626,
+ 627,
+ 628,
+ 629,
+ 634,
+ 635,
+ 680,
+ 681,
+ 716,
+ 717,
+ 718,
+ 719,
+ 769,
+ 770,
+ 771,
+ 772,
+ 773,
+ 774,
+ 775,
+ 776,
+ 777,
+ 778,
+ 779,
+ 780,
+ 784,
+ 785,
+ 786,
+ 787,
+ 788,
+ 789,
+ 790,
+ 791,
+ 792,
+ 793,
+ 1231,
+ 1232,
+ 1233,
+ 1234,
+ 1258,
+ 1259,
+ 1260,
+ 1261,
+ 1271,
+ 1281,
+ 1282,
+ 1310,
+ 1311,
+ 1314,
+ 1315,
+ 1340,
+ 1341,
+ 1342,
+ 1343,
+ 1355,
+ 1356,
+ 1357,
+ 1358,
+ 1376,
+ 1377,
+ 1378,
+ 1379,
+ 1380,
+ 1381,
+ 1382,
+ 1383,
+ 1384,
+ 1385,
+ 1386,
+ 1387,
+ 1388,
+ 1389,
+ 1390,
+ 1391,
+ 1392,
+ 1393,
+ 1394,
+ 1395,
+ 1396,
+ 1397,
+ 1398,
+ 1399,
+ 1400,
+ 1402,
+ 1403,
+ 1405,
+ 1406,
+ 1407,
+ 1408,
+ 1409,
+ 1410,
+ 1411,
+ 1412,
+ 1413,
+ 1414,
+ 1415,
+ 1416,
+ 1428,
+ 1429,
+ 1430,
+ 1431,
+ 1432,
+ 1433,
+ 1438,
+ 1439,
+ 1440,
+ 1441,
+ 1442,
+ 1443,
+ 1444,
+ 1445,
+ 1502,
+ 1505,
+ 1506,
+ 1507,
+ 1508,
+ 1509,
+ 1510,
+ 1538,
+ 1541,
+ 1542,
+ 1543,
+ 1545,
+ 1619,
+ 1620,
+ 1621,
+ 1622,
+ 1631,
+ 1632,
+ 1633,
+ 1634,
+ 1635,
+ 1636,
+ 1637,
+ 1638,
+ 1639,
+ 1640,
+ 1641,
+ 1642,
+ 1645,
+ 1646,
+ 1647,
+ 1648,
+ 1649,
+ 1650,
+ 1651,
+ 1652,
+ 1653,
+ 1654,
+ 1655,
+ 1656,
+ 1658,
+ 1659,
+ 1661,
+ 1662,
+ 1664,
+ 1666,
+ 1667,
+ 1668,
+ 1669,
+ 1670,
+ 1671,
+ 1672,
+ 1673,
+ 1674,
+ 1675,
+ 1676,
+ 1677,
+ 1678,
+ 1679,
+ 1680,
+ 1681,
+ 1682,
+ 1683,
+ 1684,
+ 1696,
+ 1697,
+ 1698,
+ 1703,
+ 1704,
+ 1705,
+ 1706,
+ 1707,
+ 1708,
+ 1709,
+ 1710,
+ 1711,
+ 1712,
+ 1713,
+ 1714,
+ 1715,
+ 1716,
+ 1717,
+ 1718,
+ 1719,
+ 1720,
+ 1725,
+ 1731,
+ 1732,
+ 1733,
+ 1734,
+ 1735,
+ 1737,
+ 1739,
+ 1740,
+ 1745,
+ 1746,
+ 1747,
+ 1748,
+ 1749,
+ 1751,
+ 1761,
+ 1830,
+ 1831,
+ 1844,
+ 1845,
+ 1846,
+ 1850,
+ 1851,
+ 1854,
+ 1855,
+ 1858,
+ 1860,
+ 1865,
+ 1866,
+ 1867,
+ 1869,
+ 1870,
+ 1871,
+ 1874,
+ 1875,
+ 1876,
+ 1877,
+ 1878,
+ 1882,
+ 1883,
+ 1888,
+ 1889,
+ 1892,
+ 1900,
+ 1901,
+ 1902,
+ 1903,
+ 1904,
+ 1909,
+ 2819,
+ 2820,
+ 2821,
+ 2822,
+ 2895,
+ 2896,
+ 2897,
+ 2898,
+ 2899,
+ 2900,
+ 2901,
+ 2902,
+ 2903,
+ 2945,
+ 2946,
+ 2974,
+ 2975,
+ 2976,
+ 2977,
+ 2978,
+ 2979,
+ 2980,
+ 2981,
+ 2982,
+ 2983,
+ 2984,
+ 2985,
+ 2986,
+ 2987,
+ 2988,
+ 2989,
+ 2990,
+ 2991,
+ 2992,
+ 2993,
+ 2994,
+ 2995,
+ 2996,
+ 3002,
+ 3013
+ ],
+ "leftLeg": [
+ 995,
+ 998,
+ 999,
+ 1002,
+ 1004,
+ 1005,
+ 1008,
+ 1010,
+ 1012,
+ 1015,
+ 1016,
+ 1018,
+ 1019,
+ 1043,
+ 1044,
+ 1047,
+ 1048,
+ 1049,
+ 1050,
+ 1051,
+ 1052,
+ 1053,
+ 1054,
+ 1055,
+ 1056,
+ 1057,
+ 1058,
+ 1059,
+ 1060,
+ 1061,
+ 1062,
+ 1063,
+ 1064,
+ 1065,
+ 1066,
+ 1067,
+ 1068,
+ 1069,
+ 1070,
+ 1071,
+ 1072,
+ 1073,
+ 1074,
+ 1075,
+ 1076,
+ 1077,
+ 1078,
+ 1079,
+ 1080,
+ 1081,
+ 1082,
+ 1083,
+ 1084,
+ 1085,
+ 1086,
+ 1087,
+ 1088,
+ 1089,
+ 1090,
+ 1091,
+ 1092,
+ 1093,
+ 1094,
+ 1095,
+ 1096,
+ 1097,
+ 1098,
+ 1099,
+ 1100,
+ 1101,
+ 1102,
+ 1103,
+ 1104,
+ 1105,
+ 1106,
+ 1107,
+ 1108,
+ 1109,
+ 1110,
+ 1111,
+ 1112,
+ 1113,
+ 1114,
+ 1115,
+ 1116,
+ 1117,
+ 1118,
+ 1119,
+ 1120,
+ 1121,
+ 1122,
+ 1123,
+ 1124,
+ 1125,
+ 1126,
+ 1127,
+ 1128,
+ 1129,
+ 1130,
+ 1131,
+ 1132,
+ 1133,
+ 1134,
+ 1135,
+ 1136,
+ 1148,
+ 1149,
+ 1150,
+ 1151,
+ 1152,
+ 1153,
+ 1154,
+ 1155,
+ 1156,
+ 1157,
+ 1158,
+ 1175,
+ 1176,
+ 1177,
+ 1178,
+ 1179,
+ 1180,
+ 1181,
+ 1182,
+ 1183,
+ 1369,
+ 1370,
+ 1371,
+ 1372,
+ 1373,
+ 1374,
+ 1375,
+ 1464,
+ 1465,
+ 1466,
+ 1467,
+ 1468,
+ 1469,
+ 1470,
+ 1471,
+ 1472,
+ 1473,
+ 1474,
+ 1522,
+ 1523,
+ 1524,
+ 1525,
+ 1526,
+ 1527,
+ 1528,
+ 1529,
+ 1530,
+ 1531,
+ 1532,
+ 3174,
+ 3175,
+ 3176,
+ 3177,
+ 3178,
+ 3179,
+ 3180,
+ 3181,
+ 3182,
+ 3183,
+ 3184,
+ 3185,
+ 3186,
+ 3187,
+ 3188,
+ 3189,
+ 3190,
+ 3191,
+ 3192,
+ 3193,
+ 3194,
+ 3195,
+ 3196,
+ 3197,
+ 3198,
+ 3199,
+ 3200,
+ 3201,
+ 3202,
+ 3203,
+ 3204,
+ 3205,
+ 3206,
+ 3207,
+ 3208,
+ 3209,
+ 3210,
+ 3319,
+ 3320,
+ 3321,
+ 3322,
+ 3323,
+ 3324,
+ 3325,
+ 3326,
+ 3327,
+ 3328,
+ 3329,
+ 3330,
+ 3331,
+ 3332,
+ 3333,
+ 3334,
+ 3335,
+ 3432,
+ 3433,
+ 3434,
+ 3435,
+ 3436,
+ 3469,
+ 3472,
+ 3473,
+ 3474
+ ],
+ "leftToeBase": [
+ 3211,
+ 3212,
+ 3213,
+ 3214,
+ 3215,
+ 3216,
+ 3217,
+ 3218,
+ 3219,
+ 3220,
+ 3221,
+ 3222,
+ 3223,
+ 3224,
+ 3225,
+ 3226,
+ 3227,
+ 3228,
+ 3229,
+ 3230,
+ 3231,
+ 3232,
+ 3233,
+ 3234,
+ 3235,
+ 3236,
+ 3237,
+ 3238,
+ 3239,
+ 3240,
+ 3241,
+ 3242,
+ 3243,
+ 3244,
+ 3245,
+ 3246,
+ 3247,
+ 3248,
+ 3249,
+ 3250,
+ 3251,
+ 3252,
+ 3253,
+ 3254,
+ 3255,
+ 3256,
+ 3257,
+ 3258,
+ 3259,
+ 3260,
+ 3261,
+ 3262,
+ 3263,
+ 3264,
+ 3265,
+ 3266,
+ 3267,
+ 3268,
+ 3269,
+ 3270,
+ 3271,
+ 3272,
+ 3273,
+ 3274,
+ 3275,
+ 3276,
+ 3277,
+ 3278,
+ 3279,
+ 3280,
+ 3281,
+ 3282,
+ 3283,
+ 3284,
+ 3285,
+ 3286,
+ 3287,
+ 3288,
+ 3289,
+ 3290,
+ 3291,
+ 3292,
+ 3293,
+ 3294,
+ 3295,
+ 3296,
+ 3297,
+ 3298,
+ 3299,
+ 3300,
+ 3301,
+ 3302,
+ 3303,
+ 3304,
+ 3305,
+ 3306,
+ 3307,
+ 3308,
+ 3309,
+ 3310,
+ 3311,
+ 3312,
+ 3313,
+ 3314,
+ 3315,
+ 3316,
+ 3317,
+ 3318,
+ 3336,
+ 3337,
+ 3340,
+ 3342,
+ 3344,
+ 3346,
+ 3348,
+ 3350,
+ 3352,
+ 3354,
+ 3357,
+ 3358,
+ 3360,
+ 3362
+ ],
+ "leftFoot": [
+ 3327,
+ 3328,
+ 3329,
+ 3330,
+ 3331,
+ 3332,
+ 3333,
+ 3334,
+ 3335,
+ 3336,
+ 3337,
+ 3338,
+ 3339,
+ 3340,
+ 3341,
+ 3342,
+ 3343,
+ 3344,
+ 3345,
+ 3346,
+ 3347,
+ 3348,
+ 3349,
+ 3350,
+ 3351,
+ 3352,
+ 3353,
+ 3354,
+ 3355,
+ 3356,
+ 3357,
+ 3358,
+ 3359,
+ 3360,
+ 3361,
+ 3362,
+ 3363,
+ 3364,
+ 3365,
+ 3366,
+ 3367,
+ 3368,
+ 3369,
+ 3370,
+ 3371,
+ 3372,
+ 3373,
+ 3374,
+ 3375,
+ 3376,
+ 3377,
+ 3378,
+ 3379,
+ 3380,
+ 3381,
+ 3382,
+ 3383,
+ 3384,
+ 3385,
+ 3386,
+ 3387,
+ 3388,
+ 3389,
+ 3390,
+ 3391,
+ 3392,
+ 3393,
+ 3394,
+ 3395,
+ 3396,
+ 3397,
+ 3398,
+ 3399,
+ 3400,
+ 3401,
+ 3402,
+ 3403,
+ 3404,
+ 3405,
+ 3406,
+ 3407,
+ 3408,
+ 3409,
+ 3410,
+ 3411,
+ 3412,
+ 3413,
+ 3414,
+ 3415,
+ 3416,
+ 3417,
+ 3418,
+ 3419,
+ 3420,
+ 3421,
+ 3422,
+ 3423,
+ 3424,
+ 3425,
+ 3426,
+ 3427,
+ 3428,
+ 3429,
+ 3430,
+ 3431,
+ 3432,
+ 3433,
+ 3434,
+ 3435,
+ 3436,
+ 3437,
+ 3438,
+ 3439,
+ 3440,
+ 3441,
+ 3442,
+ 3443,
+ 3444,
+ 3445,
+ 3446,
+ 3447,
+ 3448,
+ 3449,
+ 3450,
+ 3451,
+ 3452,
+ 3453,
+ 3454,
+ 3455,
+ 3456,
+ 3457,
+ 3458,
+ 3459,
+ 3460,
+ 3461,
+ 3462,
+ 3463,
+ 3464,
+ 3465,
+ 3466,
+ 3467,
+ 3468,
+ 3469
+ ],
+ "spine1": [
+ 598,
+ 599,
+ 600,
+ 601,
+ 610,
+ 611,
+ 612,
+ 613,
+ 614,
+ 615,
+ 616,
+ 617,
+ 618,
+ 619,
+ 620,
+ 621,
+ 642,
+ 645,
+ 646,
+ 647,
+ 652,
+ 653,
+ 658,
+ 659,
+ 660,
+ 661,
+ 668,
+ 669,
+ 670,
+ 671,
+ 684,
+ 685,
+ 686,
+ 687,
+ 688,
+ 689,
+ 690,
+ 691,
+ 692,
+ 722,
+ 723,
+ 724,
+ 725,
+ 736,
+ 750,
+ 751,
+ 761,
+ 764,
+ 766,
+ 767,
+ 794,
+ 795,
+ 891,
+ 892,
+ 893,
+ 894,
+ 925,
+ 926,
+ 927,
+ 928,
+ 929,
+ 940,
+ 941,
+ 942,
+ 943,
+ 1190,
+ 1191,
+ 1192,
+ 1193,
+ 1194,
+ 1195,
+ 1196,
+ 1197,
+ 1200,
+ 1201,
+ 1202,
+ 1212,
+ 1236,
+ 1252,
+ 1253,
+ 1254,
+ 1255,
+ 1268,
+ 1269,
+ 1270,
+ 1329,
+ 1330,
+ 1348,
+ 1349,
+ 1351,
+ 1420,
+ 1421,
+ 1423,
+ 1424,
+ 1425,
+ 1426,
+ 1436,
+ 1437,
+ 1756,
+ 1757,
+ 1758,
+ 2839,
+ 2840,
+ 2841,
+ 2842,
+ 2843,
+ 2844,
+ 2845,
+ 2846,
+ 2847,
+ 2848,
+ 2849,
+ 2850,
+ 2851,
+ 2870,
+ 2871,
+ 2883,
+ 2906,
+ 2908,
+ 3014,
+ 3017,
+ 3025,
+ 3030,
+ 3033,
+ 3034,
+ 3037,
+ 3039,
+ 3040,
+ 3041,
+ 3042,
+ 3043,
+ 3044,
+ 3076,
+ 3077,
+ 3079,
+ 3480,
+ 3505,
+ 3511,
+ 4086,
+ 4087,
+ 4088,
+ 4089,
+ 4098,
+ 4099,
+ 4100,
+ 4101,
+ 4102,
+ 4103,
+ 4104,
+ 4105,
+ 4106,
+ 4107,
+ 4108,
+ 4109,
+ 4130,
+ 4131,
+ 4134,
+ 4135,
+ 4140,
+ 4141,
+ 4146,
+ 4147,
+ 4148,
+ 4149,
+ 4156,
+ 4157,
+ 4158,
+ 4159,
+ 4172,
+ 4173,
+ 4174,
+ 4175,
+ 4176,
+ 4177,
+ 4178,
+ 4179,
+ 4180,
+ 4210,
+ 4211,
+ 4212,
+ 4213,
+ 4225,
+ 4239,
+ 4240,
+ 4249,
+ 4250,
+ 4255,
+ 4256,
+ 4282,
+ 4283,
+ 4377,
+ 4378,
+ 4379,
+ 4380,
+ 4411,
+ 4412,
+ 4413,
+ 4414,
+ 4415,
+ 4426,
+ 4427,
+ 4428,
+ 4429,
+ 4676,
+ 4677,
+ 4678,
+ 4679,
+ 4680,
+ 4681,
+ 4682,
+ 4683,
+ 4686,
+ 4687,
+ 4688,
+ 4695,
+ 4719,
+ 4735,
+ 4736,
+ 4737,
+ 4740,
+ 4751,
+ 4752,
+ 4753,
+ 4824,
+ 4825,
+ 4828,
+ 4893,
+ 4894,
+ 4895,
+ 4897,
+ 4898,
+ 4899,
+ 4908,
+ 4909,
+ 5223,
+ 5224,
+ 5225,
+ 6300,
+ 6301,
+ 6302,
+ 6303,
+ 6304,
+ 6305,
+ 6306,
+ 6307,
+ 6308,
+ 6309,
+ 6310,
+ 6311,
+ 6312,
+ 6331,
+ 6332,
+ 6342,
+ 6366,
+ 6367,
+ 6475,
+ 6477,
+ 6478,
+ 6481,
+ 6482,
+ 6485,
+ 6487,
+ 6488,
+ 6489,
+ 6490,
+ 6491,
+ 6878
+ ],
+ "spine2": [
+ 570,
+ 571,
+ 572,
+ 573,
+ 584,
+ 585,
+ 586,
+ 587,
+ 588,
+ 589,
+ 590,
+ 591,
+ 592,
+ 593,
+ 594,
+ 595,
+ 596,
+ 597,
+ 602,
+ 603,
+ 604,
+ 605,
+ 606,
+ 607,
+ 608,
+ 609,
+ 622,
+ 623,
+ 624,
+ 625,
+ 638,
+ 639,
+ 640,
+ 641,
+ 643,
+ 644,
+ 648,
+ 649,
+ 650,
+ 651,
+ 666,
+ 667,
+ 672,
+ 673,
+ 674,
+ 675,
+ 680,
+ 681,
+ 682,
+ 683,
+ 693,
+ 694,
+ 695,
+ 696,
+ 697,
+ 698,
+ 699,
+ 700,
+ 701,
+ 702,
+ 703,
+ 704,
+ 713,
+ 714,
+ 715,
+ 716,
+ 717,
+ 726,
+ 727,
+ 728,
+ 729,
+ 730,
+ 731,
+ 732,
+ 733,
+ 735,
+ 737,
+ 738,
+ 739,
+ 740,
+ 741,
+ 742,
+ 743,
+ 744,
+ 745,
+ 746,
+ 747,
+ 748,
+ 749,
+ 752,
+ 753,
+ 754,
+ 755,
+ 756,
+ 757,
+ 758,
+ 759,
+ 760,
+ 762,
+ 763,
+ 803,
+ 804,
+ 805,
+ 806,
+ 811,
+ 812,
+ 813,
+ 814,
+ 817,
+ 818,
+ 819,
+ 820,
+ 821,
+ 824,
+ 825,
+ 826,
+ 827,
+ 828,
+ 895,
+ 896,
+ 930,
+ 931,
+ 1198,
+ 1199,
+ 1213,
+ 1214,
+ 1215,
+ 1216,
+ 1217,
+ 1218,
+ 1219,
+ 1220,
+ 1235,
+ 1237,
+ 1256,
+ 1257,
+ 1271,
+ 1272,
+ 1273,
+ 1279,
+ 1280,
+ 1283,
+ 1284,
+ 1285,
+ 1286,
+ 1287,
+ 1288,
+ 1289,
+ 1290,
+ 1291,
+ 1292,
+ 1293,
+ 1294,
+ 1295,
+ 1296,
+ 1297,
+ 1298,
+ 1299,
+ 1300,
+ 1301,
+ 1302,
+ 1303,
+ 1304,
+ 1305,
+ 1306,
+ 1307,
+ 1308,
+ 1309,
+ 1312,
+ 1313,
+ 1319,
+ 1320,
+ 1346,
+ 1347,
+ 1350,
+ 1352,
+ 1401,
+ 1417,
+ 1418,
+ 1419,
+ 1422,
+ 1427,
+ 1434,
+ 1435,
+ 1503,
+ 1504,
+ 1536,
+ 1537,
+ 1544,
+ 1545,
+ 1753,
+ 1754,
+ 1755,
+ 1759,
+ 1760,
+ 1761,
+ 1762,
+ 1763,
+ 1808,
+ 1809,
+ 1810,
+ 1811,
+ 1816,
+ 1817,
+ 1818,
+ 1819,
+ 1820,
+ 1834,
+ 1835,
+ 1836,
+ 1837,
+ 1838,
+ 1839,
+ 1868,
+ 1879,
+ 1880,
+ 2812,
+ 2813,
+ 2852,
+ 2853,
+ 2854,
+ 2855,
+ 2856,
+ 2857,
+ 2858,
+ 2859,
+ 2860,
+ 2861,
+ 2862,
+ 2863,
+ 2864,
+ 2865,
+ 2866,
+ 2867,
+ 2868,
+ 2869,
+ 2872,
+ 2875,
+ 2876,
+ 2877,
+ 2878,
+ 2881,
+ 2882,
+ 2884,
+ 2885,
+ 2886,
+ 2904,
+ 2905,
+ 2907,
+ 2931,
+ 2932,
+ 2933,
+ 2934,
+ 2935,
+ 2936,
+ 2937,
+ 2941,
+ 2950,
+ 2951,
+ 2952,
+ 2953,
+ 2954,
+ 2955,
+ 2956,
+ 2957,
+ 2958,
+ 2959,
+ 2960,
+ 2961,
+ 2962,
+ 2963,
+ 2964,
+ 2965,
+ 2966,
+ 2967,
+ 2968,
+ 2969,
+ 2970,
+ 2971,
+ 2972,
+ 2973,
+ 2997,
+ 2998,
+ 3006,
+ 3007,
+ 3012,
+ 3015,
+ 3026,
+ 3027,
+ 3028,
+ 3029,
+ 3031,
+ 3032,
+ 3035,
+ 3036,
+ 3038,
+ 3059,
+ 3060,
+ 3061,
+ 3062,
+ 3063,
+ 3064,
+ 3065,
+ 3066,
+ 3067,
+ 3073,
+ 3074,
+ 3075,
+ 3078,
+ 3168,
+ 3169,
+ 3171,
+ 3470,
+ 3471,
+ 3482,
+ 3483,
+ 3495,
+ 3496,
+ 3497,
+ 3498,
+ 3506,
+ 3508,
+ 4058,
+ 4059,
+ 4060,
+ 4061,
+ 4072,
+ 4073,
+ 4074,
+ 4075,
+ 4076,
+ 4077,
+ 4078,
+ 4079,
+ 4080,
+ 4081,
+ 4082,
+ 4083,
+ 4084,
+ 4085,
+ 4090,
+ 4091,
+ 4092,
+ 4093,
+ 4094,
+ 4095,
+ 4096,
+ 4097,
+ 4110,
+ 4111,
+ 4112,
+ 4113,
+ 4126,
+ 4127,
+ 4128,
+ 4129,
+ 4132,
+ 4133,
+ 4136,
+ 4137,
+ 4138,
+ 4139,
+ 4154,
+ 4155,
+ 4160,
+ 4161,
+ 4162,
+ 4163,
+ 4168,
+ 4169,
+ 4170,
+ 4171,
+ 4181,
+ 4182,
+ 4183,
+ 4184,
+ 4185,
+ 4186,
+ 4187,
+ 4188,
+ 4189,
+ 4190,
+ 4191,
+ 4192,
+ 4201,
+ 4202,
+ 4203,
+ 4204,
+ 4207,
+ 4214,
+ 4215,
+ 4216,
+ 4217,
+ 4218,
+ 4219,
+ 4220,
+ 4221,
+ 4223,
+ 4224,
+ 4226,
+ 4227,
+ 4228,
+ 4229,
+ 4230,
+ 4231,
+ 4232,
+ 4233,
+ 4234,
+ 4235,
+ 4236,
+ 4237,
+ 4238,
+ 4241,
+ 4242,
+ 4243,
+ 4244,
+ 4245,
+ 4246,
+ 4247,
+ 4248,
+ 4251,
+ 4252,
+ 4291,
+ 4292,
+ 4293,
+ 4294,
+ 4299,
+ 4300,
+ 4301,
+ 4302,
+ 4305,
+ 4306,
+ 4307,
+ 4308,
+ 4309,
+ 4312,
+ 4313,
+ 4314,
+ 4315,
+ 4381,
+ 4382,
+ 4416,
+ 4417,
+ 4684,
+ 4685,
+ 4696,
+ 4697,
+ 4698,
+ 4699,
+ 4700,
+ 4701,
+ 4702,
+ 4703,
+ 4718,
+ 4720,
+ 4738,
+ 4739,
+ 4754,
+ 4755,
+ 4756,
+ 4761,
+ 4762,
+ 4765,
+ 4766,
+ 4767,
+ 4768,
+ 4769,
+ 4770,
+ 4771,
+ 4772,
+ 4773,
+ 4774,
+ 4775,
+ 4776,
+ 4777,
+ 4778,
+ 4779,
+ 4780,
+ 4781,
+ 4782,
+ 4783,
+ 4784,
+ 4785,
+ 4786,
+ 4787,
+ 4788,
+ 4789,
+ 4792,
+ 4793,
+ 4799,
+ 4800,
+ 4822,
+ 4823,
+ 4826,
+ 4827,
+ 4874,
+ 4890,
+ 4891,
+ 4892,
+ 4896,
+ 4900,
+ 4907,
+ 4910,
+ 4975,
+ 4976,
+ 5007,
+ 5008,
+ 5013,
+ 5014,
+ 5222,
+ 5226,
+ 5227,
+ 5228,
+ 5229,
+ 5230,
+ 5269,
+ 5270,
+ 5271,
+ 5272,
+ 5277,
+ 5278,
+ 5279,
+ 5280,
+ 5281,
+ 5295,
+ 5296,
+ 5297,
+ 5298,
+ 5299,
+ 5300,
+ 5329,
+ 5340,
+ 5341,
+ 6273,
+ 6274,
+ 6313,
+ 6314,
+ 6315,
+ 6316,
+ 6317,
+ 6318,
+ 6319,
+ 6320,
+ 6321,
+ 6322,
+ 6323,
+ 6324,
+ 6325,
+ 6326,
+ 6327,
+ 6328,
+ 6329,
+ 6330,
+ 6333,
+ 6336,
+ 6337,
+ 6340,
+ 6341,
+ 6343,
+ 6344,
+ 6345,
+ 6363,
+ 6364,
+ 6365,
+ 6390,
+ 6391,
+ 6392,
+ 6393,
+ 6394,
+ 6395,
+ 6396,
+ 6398,
+ 6409,
+ 6410,
+ 6411,
+ 6412,
+ 6413,
+ 6414,
+ 6415,
+ 6416,
+ 6417,
+ 6418,
+ 6419,
+ 6420,
+ 6421,
+ 6422,
+ 6423,
+ 6424,
+ 6425,
+ 6426,
+ 6427,
+ 6428,
+ 6429,
+ 6430,
+ 6431,
+ 6432,
+ 6456,
+ 6457,
+ 6465,
+ 6466,
+ 6476,
+ 6479,
+ 6480,
+ 6483,
+ 6484,
+ 6486,
+ 6496,
+ 6497,
+ 6498,
+ 6499,
+ 6500,
+ 6501,
+ 6502,
+ 6503,
+ 6879
+ ],
+ "leftShoulder": [
+ 591,
+ 604,
+ 605,
+ 606,
+ 609,
+ 634,
+ 635,
+ 636,
+ 637,
+ 674,
+ 706,
+ 707,
+ 708,
+ 709,
+ 710,
+ 711,
+ 712,
+ 713,
+ 715,
+ 717,
+ 730,
+ 733,
+ 734,
+ 735,
+ 781,
+ 782,
+ 783,
+ 1238,
+ 1239,
+ 1240,
+ 1241,
+ 1242,
+ 1243,
+ 1244,
+ 1245,
+ 1290,
+ 1291,
+ 1294,
+ 1316,
+ 1317,
+ 1318,
+ 1401,
+ 1402,
+ 1403,
+ 1404,
+ 1509,
+ 1535,
+ 1545,
+ 1808,
+ 1810,
+ 1811,
+ 1812,
+ 1813,
+ 1814,
+ 1815,
+ 1818,
+ 1819,
+ 1821,
+ 1822,
+ 1823,
+ 1824,
+ 1825,
+ 1826,
+ 1827,
+ 1828,
+ 1829,
+ 1830,
+ 1831,
+ 1832,
+ 1833,
+ 1837,
+ 1840,
+ 1841,
+ 1842,
+ 1843,
+ 1844,
+ 1845,
+ 1846,
+ 1847,
+ 1848,
+ 1849,
+ 1850,
+ 1851,
+ 1852,
+ 1853,
+ 1854,
+ 1855,
+ 1856,
+ 1857,
+ 1858,
+ 1859,
+ 1861,
+ 1862,
+ 1863,
+ 1864,
+ 1872,
+ 1873,
+ 1880,
+ 1881,
+ 1884,
+ 1885,
+ 1886,
+ 1887,
+ 1890,
+ 1891,
+ 1893,
+ 1894,
+ 1895,
+ 1896,
+ 1897,
+ 1898,
+ 1899,
+ 2879,
+ 2880,
+ 2881,
+ 2886,
+ 2887,
+ 2888,
+ 2889,
+ 2890,
+ 2891,
+ 2892,
+ 2893,
+ 2894,
+ 2903,
+ 2938,
+ 2939,
+ 2940,
+ 2941,
+ 2942,
+ 2943,
+ 2944,
+ 2945,
+ 2946,
+ 2947,
+ 2948,
+ 2949,
+ 2965,
+ 2967,
+ 2969,
+ 2999,
+ 3000,
+ 3001,
+ 3002,
+ 3003,
+ 3004,
+ 3005,
+ 3008,
+ 3009,
+ 3010,
+ 3011
+ ],
+ "rightShoulder": [
+ 4077,
+ 4091,
+ 4092,
+ 4094,
+ 4095,
+ 4122,
+ 4123,
+ 4124,
+ 4125,
+ 4162,
+ 4194,
+ 4195,
+ 4196,
+ 4197,
+ 4198,
+ 4199,
+ 4200,
+ 4201,
+ 4203,
+ 4207,
+ 4218,
+ 4219,
+ 4222,
+ 4223,
+ 4269,
+ 4270,
+ 4271,
+ 4721,
+ 4722,
+ 4723,
+ 4724,
+ 4725,
+ 4726,
+ 4727,
+ 4728,
+ 4773,
+ 4774,
+ 4778,
+ 4796,
+ 4797,
+ 4798,
+ 4874,
+ 4875,
+ 4876,
+ 4877,
+ 4982,
+ 5006,
+ 5014,
+ 5269,
+ 5271,
+ 5272,
+ 5273,
+ 5274,
+ 5275,
+ 5276,
+ 5279,
+ 5281,
+ 5282,
+ 5283,
+ 5284,
+ 5285,
+ 5286,
+ 5287,
+ 5288,
+ 5289,
+ 5290,
+ 5291,
+ 5292,
+ 5293,
+ 5294,
+ 5298,
+ 5301,
+ 5302,
+ 5303,
+ 5304,
+ 5305,
+ 5306,
+ 5307,
+ 5308,
+ 5309,
+ 5310,
+ 5311,
+ 5312,
+ 5313,
+ 5314,
+ 5315,
+ 5316,
+ 5317,
+ 5318,
+ 5319,
+ 5320,
+ 5322,
+ 5323,
+ 5324,
+ 5325,
+ 5333,
+ 5334,
+ 5341,
+ 5342,
+ 5345,
+ 5346,
+ 5347,
+ 5348,
+ 5351,
+ 5352,
+ 5354,
+ 5355,
+ 5356,
+ 5357,
+ 5358,
+ 5359,
+ 5360,
+ 6338,
+ 6339,
+ 6340,
+ 6345,
+ 6346,
+ 6347,
+ 6348,
+ 6349,
+ 6350,
+ 6351,
+ 6352,
+ 6353,
+ 6362,
+ 6397,
+ 6398,
+ 6399,
+ 6400,
+ 6401,
+ 6402,
+ 6403,
+ 6404,
+ 6405,
+ 6406,
+ 6407,
+ 6408,
+ 6424,
+ 6425,
+ 6428,
+ 6458,
+ 6459,
+ 6460,
+ 6461,
+ 6462,
+ 6463,
+ 6464,
+ 6467,
+ 6468,
+ 6469,
+ 6470
+ ],
+ "rightFoot": [
+ 6727,
+ 6728,
+ 6729,
+ 6730,
+ 6731,
+ 6732,
+ 6733,
+ 6734,
+ 6735,
+ 6736,
+ 6737,
+ 6738,
+ 6739,
+ 6740,
+ 6741,
+ 6742,
+ 6743,
+ 6744,
+ 6745,
+ 6746,
+ 6747,
+ 6748,
+ 6749,
+ 6750,
+ 6751,
+ 6752,
+ 6753,
+ 6754,
+ 6755,
+ 6756,
+ 6757,
+ 6758,
+ 6759,
+ 6760,
+ 6761,
+ 6762,
+ 6763,
+ 6764,
+ 6765,
+ 6766,
+ 6767,
+ 6768,
+ 6769,
+ 6770,
+ 6771,
+ 6772,
+ 6773,
+ 6774,
+ 6775,
+ 6776,
+ 6777,
+ 6778,
+ 6779,
+ 6780,
+ 6781,
+ 6782,
+ 6783,
+ 6784,
+ 6785,
+ 6786,
+ 6787,
+ 6788,
+ 6789,
+ 6790,
+ 6791,
+ 6792,
+ 6793,
+ 6794,
+ 6795,
+ 6796,
+ 6797,
+ 6798,
+ 6799,
+ 6800,
+ 6801,
+ 6802,
+ 6803,
+ 6804,
+ 6805,
+ 6806,
+ 6807,
+ 6808,
+ 6809,
+ 6810,
+ 6811,
+ 6812,
+ 6813,
+ 6814,
+ 6815,
+ 6816,
+ 6817,
+ 6818,
+ 6819,
+ 6820,
+ 6821,
+ 6822,
+ 6823,
+ 6824,
+ 6825,
+ 6826,
+ 6827,
+ 6828,
+ 6829,
+ 6830,
+ 6831,
+ 6832,
+ 6833,
+ 6834,
+ 6835,
+ 6836,
+ 6837,
+ 6838,
+ 6839,
+ 6840,
+ 6841,
+ 6842,
+ 6843,
+ 6844,
+ 6845,
+ 6846,
+ 6847,
+ 6848,
+ 6849,
+ 6850,
+ 6851,
+ 6852,
+ 6853,
+ 6854,
+ 6855,
+ 6856,
+ 6857,
+ 6858,
+ 6859,
+ 6860,
+ 6861,
+ 6862,
+ 6863,
+ 6864,
+ 6865,
+ 6866,
+ 6867,
+ 6868,
+ 6869
+ ],
+ "head": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ 27,
+ 28,
+ 29,
+ 30,
+ 31,
+ 32,
+ 33,
+ 34,
+ 35,
+ 36,
+ 37,
+ 38,
+ 39,
+ 40,
+ 41,
+ 42,
+ 43,
+ 44,
+ 45,
+ 46,
+ 47,
+ 48,
+ 49,
+ 50,
+ 51,
+ 52,
+ 53,
+ 54,
+ 55,
+ 56,
+ 57,
+ 58,
+ 59,
+ 60,
+ 61,
+ 62,
+ 63,
+ 64,
+ 65,
+ 66,
+ 67,
+ 68,
+ 69,
+ 70,
+ 71,
+ 72,
+ 73,
+ 74,
+ 75,
+ 76,
+ 77,
+ 78,
+ 79,
+ 80,
+ 81,
+ 82,
+ 83,
+ 84,
+ 85,
+ 86,
+ 87,
+ 88,
+ 89,
+ 90,
+ 91,
+ 92,
+ 93,
+ 94,
+ 95,
+ 96,
+ 97,
+ 98,
+ 99,
+ 100,
+ 101,
+ 102,
+ 103,
+ 104,
+ 105,
+ 106,
+ 107,
+ 108,
+ 109,
+ 110,
+ 111,
+ 112,
+ 113,
+ 114,
+ 115,
+ 116,
+ 117,
+ 118,
+ 119,
+ 120,
+ 121,
+ 122,
+ 123,
+ 124,
+ 125,
+ 126,
+ 127,
+ 128,
+ 129,
+ 130,
+ 131,
+ 132,
+ 133,
+ 134,
+ 135,
+ 136,
+ 137,
+ 138,
+ 139,
+ 140,
+ 141,
+ 142,
+ 143,
+ 144,
+ 145,
+ 146,
+ 147,
+ 148,
+ 149,
+ 154,
+ 155,
+ 156,
+ 157,
+ 158,
+ 159,
+ 160,
+ 161,
+ 162,
+ 163,
+ 164,
+ 165,
+ 166,
+ 167,
+ 168,
+ 169,
+ 170,
+ 171,
+ 172,
+ 173,
+ 176,
+ 177,
+ 178,
+ 179,
+ 180,
+ 181,
+ 182,
+ 183,
+ 184,
+ 185,
+ 186,
+ 187,
+ 188,
+ 189,
+ 190,
+ 191,
+ 192,
+ 193,
+ 194,
+ 195,
+ 196,
+ 197,
+ 198,
+ 199,
+ 200,
+ 201,
+ 202,
+ 203,
+ 204,
+ 205,
+ 220,
+ 221,
+ 225,
+ 226,
+ 227,
+ 228,
+ 229,
+ 230,
+ 231,
+ 232,
+ 233,
+ 234,
+ 235,
+ 236,
+ 237,
+ 238,
+ 239,
+ 240,
+ 241,
+ 242,
+ 243,
+ 244,
+ 245,
+ 246,
+ 247,
+ 248,
+ 249,
+ 250,
+ 251,
+ 252,
+ 253,
+ 254,
+ 255,
+ 258,
+ 259,
+ 260,
+ 261,
+ 262,
+ 263,
+ 264,
+ 265,
+ 266,
+ 267,
+ 268,
+ 269,
+ 270,
+ 271,
+ 272,
+ 273,
+ 274,
+ 275,
+ 276,
+ 277,
+ 278,
+ 279,
+ 280,
+ 281,
+ 282,
+ 283,
+ 286,
+ 287,
+ 288,
+ 289,
+ 290,
+ 291,
+ 292,
+ 293,
+ 294,
+ 295,
+ 303,
+ 304,
+ 306,
+ 307,
+ 310,
+ 311,
+ 312,
+ 313,
+ 314,
+ 315,
+ 316,
+ 317,
+ 318,
+ 319,
+ 320,
+ 321,
+ 322,
+ 323,
+ 324,
+ 325,
+ 326,
+ 327,
+ 328,
+ 329,
+ 330,
+ 331,
+ 332,
+ 335,
+ 336,
+ 337,
+ 338,
+ 339,
+ 340,
+ 341,
+ 342,
+ 343,
+ 344,
+ 345,
+ 346,
+ 347,
+ 348,
+ 349,
+ 350,
+ 351,
+ 352,
+ 353,
+ 354,
+ 355,
+ 356,
+ 357,
+ 358,
+ 359,
+ 360,
+ 361,
+ 362,
+ 363,
+ 364,
+ 365,
+ 366,
+ 367,
+ 368,
+ 369,
+ 370,
+ 371,
+ 372,
+ 373,
+ 374,
+ 375,
+ 376,
+ 377,
+ 378,
+ 379,
+ 380,
+ 381,
+ 382,
+ 383,
+ 384,
+ 385,
+ 386,
+ 387,
+ 388,
+ 389,
+ 390,
+ 391,
+ 392,
+ 393,
+ 394,
+ 395,
+ 396,
+ 397,
+ 398,
+ 399,
+ 400,
+ 401,
+ 402,
+ 403,
+ 404,
+ 405,
+ 406,
+ 407,
+ 408,
+ 409,
+ 410,
+ 411,
+ 412,
+ 413,
+ 414,
+ 415,
+ 416,
+ 417,
+ 418,
+ 419,
+ 420,
+ 421,
+ 422,
+ 427,
+ 428,
+ 429,
+ 430,
+ 431,
+ 432,
+ 433,
+ 434,
+ 435,
+ 436,
+ 437,
+ 438,
+ 439,
+ 442,
+ 443,
+ 444,
+ 445,
+ 446,
+ 447,
+ 448,
+ 449,
+ 450,
+ 454,
+ 455,
+ 456,
+ 457,
+ 458,
+ 459,
+ 461,
+ 462,
+ 463,
+ 464,
+ 465,
+ 466,
+ 467,
+ 468,
+ 469,
+ 470,
+ 471,
+ 472,
+ 473,
+ 474,
+ 475,
+ 476,
+ 477,
+ 478,
+ 479,
+ 480,
+ 481,
+ 482,
+ 483,
+ 484,
+ 485,
+ 486,
+ 487,
+ 488,
+ 489,
+ 490,
+ 491,
+ 492,
+ 493,
+ 494,
+ 495,
+ 496,
+ 497,
+ 498,
+ 499,
+ 500,
+ 501,
+ 502,
+ 503,
+ 504,
+ 505,
+ 506,
+ 507,
+ 508,
+ 509,
+ 510,
+ 511,
+ 512,
+ 513,
+ 514,
+ 515,
+ 516,
+ 517,
+ 518,
+ 519,
+ 520,
+ 521,
+ 522,
+ 523,
+ 524,
+ 525,
+ 526,
+ 527,
+ 528,
+ 529,
+ 530,
+ 531,
+ 532,
+ 533,
+ 534,
+ 535,
+ 536,
+ 537,
+ 538,
+ 539,
+ 540,
+ 541,
+ 542,
+ 543,
+ 544,
+ 545,
+ 546,
+ 547,
+ 548,
+ 549,
+ 550,
+ 551,
+ 552,
+ 553,
+ 554,
+ 555,
+ 556,
+ 557,
+ 558,
+ 559,
+ 560,
+ 561,
+ 562,
+ 563,
+ 564,
+ 565,
+ 566,
+ 567,
+ 568,
+ 569,
+ 574,
+ 575,
+ 576,
+ 577,
+ 578,
+ 579,
+ 580,
+ 581,
+ 582,
+ 583,
+ 1764,
+ 1765,
+ 1766,
+ 1770,
+ 1771,
+ 1772,
+ 1773,
+ 1774,
+ 1775,
+ 1776,
+ 1777,
+ 1778,
+ 1905,
+ 1906,
+ 1907,
+ 1908,
+ 2779,
+ 2780,
+ 2781,
+ 2782,
+ 2783,
+ 2784,
+ 2785,
+ 2786,
+ 2787,
+ 2788,
+ 2789,
+ 2790,
+ 2791,
+ 2792,
+ 2793,
+ 2794,
+ 2795,
+ 2796,
+ 2797,
+ 2798,
+ 2799,
+ 2800,
+ 2801,
+ 2802,
+ 2803,
+ 2804,
+ 2805,
+ 2806,
+ 2807,
+ 2808,
+ 2809,
+ 2810,
+ 2811,
+ 2814,
+ 2815,
+ 2816,
+ 2817,
+ 2818,
+ 3045,
+ 3046,
+ 3047,
+ 3048,
+ 3051,
+ 3052,
+ 3053,
+ 3054,
+ 3055,
+ 3056,
+ 3058,
+ 3069,
+ 3070,
+ 3071,
+ 3072,
+ 3161,
+ 3162,
+ 3163,
+ 3165,
+ 3166,
+ 3167,
+ 3485,
+ 3486,
+ 3487,
+ 3488,
+ 3489,
+ 3490,
+ 3491,
+ 3492,
+ 3493,
+ 3494,
+ 3499,
+ 3512,
+ 3513,
+ 3514,
+ 3515,
+ 3516,
+ 3517,
+ 3518,
+ 3519,
+ 3520,
+ 3521,
+ 3522,
+ 3523,
+ 3524,
+ 3525,
+ 3526,
+ 3527,
+ 3528,
+ 3529,
+ 3530,
+ 3531,
+ 3532,
+ 3533,
+ 3534,
+ 3535,
+ 3536,
+ 3537,
+ 3538,
+ 3539,
+ 3540,
+ 3541,
+ 3542,
+ 3543,
+ 3544,
+ 3545,
+ 3546,
+ 3547,
+ 3548,
+ 3549,
+ 3550,
+ 3551,
+ 3552,
+ 3553,
+ 3554,
+ 3555,
+ 3556,
+ 3557,
+ 3558,
+ 3559,
+ 3560,
+ 3561,
+ 3562,
+ 3563,
+ 3564,
+ 3565,
+ 3566,
+ 3567,
+ 3568,
+ 3569,
+ 3570,
+ 3571,
+ 3572,
+ 3573,
+ 3574,
+ 3575,
+ 3576,
+ 3577,
+ 3578,
+ 3579,
+ 3580,
+ 3581,
+ 3582,
+ 3583,
+ 3584,
+ 3585,
+ 3586,
+ 3587,
+ 3588,
+ 3589,
+ 3590,
+ 3591,
+ 3592,
+ 3593,
+ 3594,
+ 3595,
+ 3596,
+ 3597,
+ 3598,
+ 3599,
+ 3600,
+ 3601,
+ 3602,
+ 3603,
+ 3604,
+ 3605,
+ 3606,
+ 3607,
+ 3608,
+ 3609,
+ 3610,
+ 3611,
+ 3612,
+ 3613,
+ 3614,
+ 3615,
+ 3616,
+ 3617,
+ 3618,
+ 3619,
+ 3620,
+ 3621,
+ 3622,
+ 3623,
+ 3624,
+ 3625,
+ 3626,
+ 3627,
+ 3628,
+ 3629,
+ 3630,
+ 3631,
+ 3632,
+ 3633,
+ 3634,
+ 3635,
+ 3636,
+ 3637,
+ 3638,
+ 3639,
+ 3640,
+ 3641,
+ 3642,
+ 3643,
+ 3644,
+ 3645,
+ 3646,
+ 3647,
+ 3648,
+ 3649,
+ 3650,
+ 3651,
+ 3652,
+ 3653,
+ 3654,
+ 3655,
+ 3656,
+ 3657,
+ 3658,
+ 3659,
+ 3660,
+ 3661,
+ 3666,
+ 3667,
+ 3668,
+ 3669,
+ 3670,
+ 3671,
+ 3672,
+ 3673,
+ 3674,
+ 3675,
+ 3676,
+ 3677,
+ 3678,
+ 3679,
+ 3680,
+ 3681,
+ 3682,
+ 3683,
+ 3684,
+ 3685,
+ 3688,
+ 3689,
+ 3690,
+ 3691,
+ 3692,
+ 3693,
+ 3694,
+ 3695,
+ 3696,
+ 3697,
+ 3698,
+ 3699,
+ 3700,
+ 3701,
+ 3702,
+ 3703,
+ 3704,
+ 3705,
+ 3706,
+ 3707,
+ 3708,
+ 3709,
+ 3710,
+ 3711,
+ 3712,
+ 3713,
+ 3714,
+ 3715,
+ 3716,
+ 3717,
+ 3732,
+ 3733,
+ 3737,
+ 3738,
+ 3739,
+ 3740,
+ 3741,
+ 3742,
+ 3743,
+ 3744,
+ 3745,
+ 3746,
+ 3747,
+ 3748,
+ 3749,
+ 3750,
+ 3751,
+ 3752,
+ 3753,
+ 3754,
+ 3755,
+ 3756,
+ 3757,
+ 3758,
+ 3759,
+ 3760,
+ 3761,
+ 3762,
+ 3763,
+ 3764,
+ 3765,
+ 3766,
+ 3767,
+ 3770,
+ 3771,
+ 3772,
+ 3773,
+ 3774,
+ 3775,
+ 3776,
+ 3777,
+ 3778,
+ 3779,
+ 3780,
+ 3781,
+ 3782,
+ 3783,
+ 3784,
+ 3785,
+ 3786,
+ 3787,
+ 3788,
+ 3789,
+ 3790,
+ 3791,
+ 3792,
+ 3793,
+ 3794,
+ 3795,
+ 3798,
+ 3799,
+ 3800,
+ 3801,
+ 3802,
+ 3803,
+ 3804,
+ 3805,
+ 3806,
+ 3807,
+ 3815,
+ 3816,
+ 3819,
+ 3820,
+ 3821,
+ 3822,
+ 3823,
+ 3824,
+ 3825,
+ 3826,
+ 3827,
+ 3828,
+ 3829,
+ 3830,
+ 3831,
+ 3832,
+ 3833,
+ 3834,
+ 3835,
+ 3836,
+ 3837,
+ 3838,
+ 3841,
+ 3842,
+ 3843,
+ 3844,
+ 3845,
+ 3846,
+ 3847,
+ 3848,
+ 3849,
+ 3850,
+ 3851,
+ 3852,
+ 3853,
+ 3854,
+ 3855,
+ 3856,
+ 3857,
+ 3858,
+ 3859,
+ 3860,
+ 3861,
+ 3862,
+ 3863,
+ 3864,
+ 3865,
+ 3866,
+ 3867,
+ 3868,
+ 3869,
+ 3870,
+ 3871,
+ 3872,
+ 3873,
+ 3874,
+ 3875,
+ 3876,
+ 3877,
+ 3878,
+ 3879,
+ 3880,
+ 3881,
+ 3882,
+ 3883,
+ 3884,
+ 3885,
+ 3886,
+ 3887,
+ 3888,
+ 3889,
+ 3890,
+ 3891,
+ 3892,
+ 3893,
+ 3894,
+ 3895,
+ 3896,
+ 3897,
+ 3898,
+ 3899,
+ 3900,
+ 3901,
+ 3902,
+ 3903,
+ 3904,
+ 3905,
+ 3906,
+ 3907,
+ 3908,
+ 3909,
+ 3910,
+ 3911,
+ 3912,
+ 3913,
+ 3914,
+ 3915,
+ 3916,
+ 3917,
+ 3922,
+ 3923,
+ 3924,
+ 3925,
+ 3926,
+ 3927,
+ 3928,
+ 3929,
+ 3930,
+ 3931,
+ 3932,
+ 3933,
+ 3936,
+ 3937,
+ 3938,
+ 3939,
+ 3940,
+ 3941,
+ 3945,
+ 3946,
+ 3947,
+ 3948,
+ 3949,
+ 3950,
+ 3951,
+ 3952,
+ 3953,
+ 3954,
+ 3955,
+ 3956,
+ 3957,
+ 3958,
+ 3959,
+ 3960,
+ 3961,
+ 3962,
+ 3963,
+ 3964,
+ 3965,
+ 3966,
+ 3967,
+ 3968,
+ 3969,
+ 3970,
+ 3971,
+ 3972,
+ 3973,
+ 3974,
+ 3975,
+ 3976,
+ 3977,
+ 3978,
+ 3979,
+ 3980,
+ 3981,
+ 3982,
+ 3983,
+ 3984,
+ 3985,
+ 3986,
+ 3987,
+ 3988,
+ 3989,
+ 3990,
+ 3991,
+ 3992,
+ 3993,
+ 3994,
+ 3995,
+ 3996,
+ 3997,
+ 3998,
+ 3999,
+ 4000,
+ 4001,
+ 4002,
+ 4003,
+ 4004,
+ 4005,
+ 4006,
+ 4007,
+ 4008,
+ 4009,
+ 4010,
+ 4011,
+ 4012,
+ 4013,
+ 4014,
+ 4015,
+ 4016,
+ 4017,
+ 4018,
+ 4019,
+ 4020,
+ 4021,
+ 4022,
+ 4023,
+ 4024,
+ 4025,
+ 4026,
+ 4027,
+ 4028,
+ 4029,
+ 4030,
+ 4031,
+ 4032,
+ 4033,
+ 4034,
+ 4035,
+ 4036,
+ 4037,
+ 4038,
+ 4039,
+ 4040,
+ 4041,
+ 4042,
+ 4043,
+ 4044,
+ 4045,
+ 4046,
+ 4047,
+ 4048,
+ 4049,
+ 4050,
+ 4051,
+ 4052,
+ 4053,
+ 4054,
+ 4055,
+ 4056,
+ 4057,
+ 4062,
+ 4063,
+ 4064,
+ 4065,
+ 4066,
+ 4067,
+ 4068,
+ 4069,
+ 4070,
+ 4071,
+ 5231,
+ 5232,
+ 5233,
+ 5235,
+ 5236,
+ 5237,
+ 5238,
+ 5239,
+ 5240,
+ 5241,
+ 5242,
+ 5243,
+ 5366,
+ 5367,
+ 5368,
+ 5369,
+ 6240,
+ 6241,
+ 6242,
+ 6243,
+ 6244,
+ 6245,
+ 6246,
+ 6247,
+ 6248,
+ 6249,
+ 6250,
+ 6251,
+ 6252,
+ 6253,
+ 6254,
+ 6255,
+ 6256,
+ 6257,
+ 6258,
+ 6259,
+ 6260,
+ 6261,
+ 6262,
+ 6263,
+ 6264,
+ 6265,
+ 6266,
+ 6267,
+ 6268,
+ 6269,
+ 6270,
+ 6271,
+ 6272,
+ 6275,
+ 6276,
+ 6277,
+ 6278,
+ 6279,
+ 6492,
+ 6493,
+ 6494,
+ 6495,
+ 6880,
+ 6881,
+ 6882,
+ 6883,
+ 6884,
+ 6885,
+ 6886,
+ 6887,
+ 6888,
+ 6889
+ ],
+ "rightArm": [
+ 4114,
+ 4115,
+ 4116,
+ 4117,
+ 4122,
+ 4125,
+ 4168,
+ 4171,
+ 4204,
+ 4205,
+ 4206,
+ 4207,
+ 4257,
+ 4258,
+ 4259,
+ 4260,
+ 4261,
+ 4262,
+ 4263,
+ 4264,
+ 4265,
+ 4266,
+ 4267,
+ 4268,
+ 4272,
+ 4273,
+ 4274,
+ 4275,
+ 4276,
+ 4277,
+ 4278,
+ 4279,
+ 4280,
+ 4281,
+ 4714,
+ 4715,
+ 4716,
+ 4717,
+ 4741,
+ 4742,
+ 4743,
+ 4744,
+ 4756,
+ 4763,
+ 4764,
+ 4790,
+ 4791,
+ 4794,
+ 4795,
+ 4816,
+ 4817,
+ 4818,
+ 4819,
+ 4830,
+ 4831,
+ 4832,
+ 4833,
+ 4849,
+ 4850,
+ 4851,
+ 4852,
+ 4853,
+ 4854,
+ 4855,
+ 4856,
+ 4857,
+ 4858,
+ 4859,
+ 4860,
+ 4861,
+ 4862,
+ 4863,
+ 4864,
+ 4865,
+ 4866,
+ 4867,
+ 4868,
+ 4869,
+ 4870,
+ 4871,
+ 4872,
+ 4873,
+ 4876,
+ 4877,
+ 4878,
+ 4879,
+ 4880,
+ 4881,
+ 4882,
+ 4883,
+ 4884,
+ 4885,
+ 4886,
+ 4887,
+ 4888,
+ 4889,
+ 4901,
+ 4902,
+ 4903,
+ 4904,
+ 4905,
+ 4906,
+ 4911,
+ 4912,
+ 4913,
+ 4914,
+ 4915,
+ 4916,
+ 4917,
+ 4918,
+ 4974,
+ 4977,
+ 4978,
+ 4979,
+ 4980,
+ 4981,
+ 4982,
+ 5009,
+ 5010,
+ 5011,
+ 5012,
+ 5014,
+ 5088,
+ 5089,
+ 5090,
+ 5091,
+ 5100,
+ 5101,
+ 5102,
+ 5103,
+ 5104,
+ 5105,
+ 5106,
+ 5107,
+ 5108,
+ 5109,
+ 5110,
+ 5111,
+ 5114,
+ 5115,
+ 5116,
+ 5117,
+ 5118,
+ 5119,
+ 5120,
+ 5121,
+ 5122,
+ 5123,
+ 5124,
+ 5125,
+ 5128,
+ 5129,
+ 5130,
+ 5131,
+ 5134,
+ 5135,
+ 5136,
+ 5137,
+ 5138,
+ 5139,
+ 5140,
+ 5141,
+ 5142,
+ 5143,
+ 5144,
+ 5145,
+ 5146,
+ 5147,
+ 5148,
+ 5149,
+ 5150,
+ 5151,
+ 5152,
+ 5153,
+ 5165,
+ 5166,
+ 5167,
+ 5172,
+ 5173,
+ 5174,
+ 5175,
+ 5176,
+ 5177,
+ 5178,
+ 5179,
+ 5180,
+ 5181,
+ 5182,
+ 5183,
+ 5184,
+ 5185,
+ 5186,
+ 5187,
+ 5188,
+ 5189,
+ 5194,
+ 5200,
+ 5201,
+ 5202,
+ 5203,
+ 5204,
+ 5206,
+ 5208,
+ 5209,
+ 5214,
+ 5215,
+ 5216,
+ 5217,
+ 5218,
+ 5220,
+ 5229,
+ 5292,
+ 5293,
+ 5303,
+ 5306,
+ 5309,
+ 5311,
+ 5314,
+ 5315,
+ 5318,
+ 5319,
+ 5321,
+ 5326,
+ 5327,
+ 5328,
+ 5330,
+ 5331,
+ 5332,
+ 5335,
+ 5336,
+ 5337,
+ 5338,
+ 5339,
+ 5343,
+ 5344,
+ 5349,
+ 5350,
+ 5353,
+ 5361,
+ 5362,
+ 5363,
+ 5364,
+ 5365,
+ 5370,
+ 6280,
+ 6281,
+ 6282,
+ 6283,
+ 6354,
+ 6355,
+ 6356,
+ 6357,
+ 6358,
+ 6359,
+ 6360,
+ 6361,
+ 6362,
+ 6404,
+ 6405,
+ 6433,
+ 6434,
+ 6435,
+ 6436,
+ 6437,
+ 6438,
+ 6439,
+ 6440,
+ 6441,
+ 6442,
+ 6443,
+ 6444,
+ 6445,
+ 6446,
+ 6447,
+ 6448,
+ 6449,
+ 6450,
+ 6451,
+ 6452,
+ 6453,
+ 6454,
+ 6455,
+ 6461,
+ 6471
+ ],
+ "leftHandIndex1": [
+ 2027,
+ 2028,
+ 2029,
+ 2030,
+ 2037,
+ 2038,
+ 2039,
+ 2040,
+ 2057,
+ 2067,
+ 2068,
+ 2123,
+ 2124,
+ 2125,
+ 2126,
+ 2127,
+ 2128,
+ 2129,
+ 2130,
+ 2132,
+ 2145,
+ 2146,
+ 2152,
+ 2153,
+ 2154,
+ 2156,
+ 2157,
+ 2158,
+ 2159,
+ 2160,
+ 2161,
+ 2162,
+ 2163,
+ 2164,
+ 2165,
+ 2166,
+ 2167,
+ 2168,
+ 2169,
+ 2177,
+ 2178,
+ 2179,
+ 2181,
+ 2186,
+ 2187,
+ 2190,
+ 2191,
+ 2204,
+ 2205,
+ 2215,
+ 2216,
+ 2217,
+ 2218,
+ 2219,
+ 2220,
+ 2232,
+ 2233,
+ 2245,
+ 2246,
+ 2247,
+ 2258,
+ 2259,
+ 2261,
+ 2262,
+ 2263,
+ 2269,
+ 2270,
+ 2272,
+ 2273,
+ 2274,
+ 2276,
+ 2277,
+ 2280,
+ 2281,
+ 2282,
+ 2283,
+ 2291,
+ 2292,
+ 2293,
+ 2294,
+ 2295,
+ 2296,
+ 2297,
+ 2298,
+ 2299,
+ 2300,
+ 2301,
+ 2302,
+ 2303,
+ 2304,
+ 2305,
+ 2306,
+ 2307,
+ 2308,
+ 2309,
+ 2310,
+ 2311,
+ 2312,
+ 2313,
+ 2314,
+ 2315,
+ 2316,
+ 2317,
+ 2318,
+ 2319,
+ 2320,
+ 2321,
+ 2322,
+ 2323,
+ 2324,
+ 2325,
+ 2326,
+ 2327,
+ 2328,
+ 2329,
+ 2330,
+ 2331,
+ 2332,
+ 2333,
+ 2334,
+ 2335,
+ 2336,
+ 2337,
+ 2338,
+ 2339,
+ 2340,
+ 2341,
+ 2342,
+ 2343,
+ 2344,
+ 2345,
+ 2346,
+ 2347,
+ 2348,
+ 2349,
+ 2350,
+ 2351,
+ 2352,
+ 2353,
+ 2354,
+ 2355,
+ 2356,
+ 2357,
+ 2358,
+ 2359,
+ 2360,
+ 2361,
+ 2362,
+ 2363,
+ 2364,
+ 2365,
+ 2366,
+ 2367,
+ 2368,
+ 2369,
+ 2370,
+ 2371,
+ 2372,
+ 2373,
+ 2374,
+ 2375,
+ 2376,
+ 2377,
+ 2378,
+ 2379,
+ 2380,
+ 2381,
+ 2382,
+ 2383,
+ 2384,
+ 2385,
+ 2386,
+ 2387,
+ 2388,
+ 2389,
+ 2390,
+ 2391,
+ 2392,
+ 2393,
+ 2394,
+ 2395,
+ 2396,
+ 2397,
+ 2398,
+ 2399,
+ 2400,
+ 2401,
+ 2402,
+ 2403,
+ 2404,
+ 2405,
+ 2406,
+ 2407,
+ 2408,
+ 2409,
+ 2410,
+ 2411,
+ 2412,
+ 2413,
+ 2414,
+ 2415,
+ 2416,
+ 2417,
+ 2418,
+ 2419,
+ 2420,
+ 2421,
+ 2422,
+ 2423,
+ 2424,
+ 2425,
+ 2426,
+ 2427,
+ 2428,
+ 2429,
+ 2430,
+ 2431,
+ 2432,
+ 2433,
+ 2434,
+ 2435,
+ 2436,
+ 2437,
+ 2438,
+ 2439,
+ 2440,
+ 2441,
+ 2442,
+ 2443,
+ 2444,
+ 2445,
+ 2446,
+ 2447,
+ 2448,
+ 2449,
+ 2450,
+ 2451,
+ 2452,
+ 2453,
+ 2454,
+ 2455,
+ 2456,
+ 2457,
+ 2458,
+ 2459,
+ 2460,
+ 2461,
+ 2462,
+ 2463,
+ 2464,
+ 2465,
+ 2466,
+ 2467,
+ 2468,
+ 2469,
+ 2470,
+ 2471,
+ 2472,
+ 2473,
+ 2474,
+ 2475,
+ 2476,
+ 2477,
+ 2478,
+ 2479,
+ 2480,
+ 2481,
+ 2482,
+ 2483,
+ 2484,
+ 2485,
+ 2486,
+ 2487,
+ 2488,
+ 2489,
+ 2490,
+ 2491,
+ 2492,
+ 2493,
+ 2494,
+ 2495,
+ 2496,
+ 2497,
+ 2498,
+ 2499,
+ 2500,
+ 2501,
+ 2502,
+ 2503,
+ 2504,
+ 2505,
+ 2506,
+ 2507,
+ 2508,
+ 2509,
+ 2510,
+ 2511,
+ 2512,
+ 2513,
+ 2514,
+ 2515,
+ 2516,
+ 2517,
+ 2518,
+ 2519,
+ 2520,
+ 2521,
+ 2522,
+ 2523,
+ 2524,
+ 2525,
+ 2526,
+ 2527,
+ 2528,
+ 2529,
+ 2530,
+ 2531,
+ 2532,
+ 2533,
+ 2534,
+ 2535,
+ 2536,
+ 2537,
+ 2538,
+ 2539,
+ 2540,
+ 2541,
+ 2542,
+ 2543,
+ 2544,
+ 2545,
+ 2546,
+ 2547,
+ 2548,
+ 2549,
+ 2550,
+ 2551,
+ 2552,
+ 2553,
+ 2554,
+ 2555,
+ 2556,
+ 2557,
+ 2558,
+ 2559,
+ 2560,
+ 2561,
+ 2562,
+ 2563,
+ 2564,
+ 2565,
+ 2566,
+ 2567,
+ 2568,
+ 2569,
+ 2570,
+ 2571,
+ 2572,
+ 2573,
+ 2574,
+ 2575,
+ 2576,
+ 2577,
+ 2578,
+ 2579,
+ 2580,
+ 2581,
+ 2582,
+ 2583,
+ 2584,
+ 2585,
+ 2586,
+ 2587,
+ 2588,
+ 2589,
+ 2590,
+ 2591,
+ 2592,
+ 2593,
+ 2594,
+ 2596,
+ 2597,
+ 2599,
+ 2600,
+ 2601,
+ 2602,
+ 2603,
+ 2604,
+ 2606,
+ 2607,
+ 2609,
+ 2610,
+ 2611,
+ 2612,
+ 2613,
+ 2614,
+ 2615,
+ 2616,
+ 2617,
+ 2618,
+ 2619,
+ 2620,
+ 2621,
+ 2622,
+ 2623,
+ 2624,
+ 2625,
+ 2626,
+ 2627,
+ 2628,
+ 2629,
+ 2630,
+ 2631,
+ 2632,
+ 2633,
+ 2634,
+ 2635,
+ 2636,
+ 2637,
+ 2638,
+ 2639,
+ 2640,
+ 2641,
+ 2642,
+ 2643,
+ 2644,
+ 2645,
+ 2646,
+ 2647,
+ 2648,
+ 2649,
+ 2650,
+ 2651,
+ 2652,
+ 2653,
+ 2654,
+ 2655,
+ 2656,
+ 2657,
+ 2658,
+ 2659,
+ 2660,
+ 2661,
+ 2662,
+ 2663,
+ 2664,
+ 2665,
+ 2666,
+ 2667,
+ 2668,
+ 2669,
+ 2670,
+ 2671,
+ 2672,
+ 2673,
+ 2674,
+ 2675,
+ 2676,
+ 2677,
+ 2678,
+ 2679,
+ 2680,
+ 2681,
+ 2682,
+ 2683,
+ 2684,
+ 2685,
+ 2686,
+ 2687,
+ 2688,
+ 2689,
+ 2690,
+ 2691,
+ 2692,
+ 2693,
+ 2694,
+ 2695,
+ 2696
+ ],
+ "rightLeg": [
+ 4481,
+ 4482,
+ 4485,
+ 4486,
+ 4491,
+ 4492,
+ 4493,
+ 4495,
+ 4498,
+ 4500,
+ 4501,
+ 4505,
+ 4506,
+ 4529,
+ 4532,
+ 4533,
+ 4534,
+ 4535,
+ 4536,
+ 4537,
+ 4538,
+ 4539,
+ 4540,
+ 4541,
+ 4542,
+ 4543,
+ 4544,
+ 4545,
+ 4546,
+ 4547,
+ 4548,
+ 4549,
+ 4550,
+ 4551,
+ 4552,
+ 4553,
+ 4554,
+ 4555,
+ 4556,
+ 4557,
+ 4558,
+ 4559,
+ 4560,
+ 4561,
+ 4562,
+ 4563,
+ 4564,
+ 4565,
+ 4566,
+ 4567,
+ 4568,
+ 4569,
+ 4570,
+ 4571,
+ 4572,
+ 4573,
+ 4574,
+ 4575,
+ 4576,
+ 4577,
+ 4578,
+ 4579,
+ 4580,
+ 4581,
+ 4582,
+ 4583,
+ 4584,
+ 4585,
+ 4586,
+ 4587,
+ 4588,
+ 4589,
+ 4590,
+ 4591,
+ 4592,
+ 4593,
+ 4594,
+ 4595,
+ 4596,
+ 4597,
+ 4598,
+ 4599,
+ 4600,
+ 4601,
+ 4602,
+ 4603,
+ 4604,
+ 4605,
+ 4606,
+ 4607,
+ 4608,
+ 4609,
+ 4610,
+ 4611,
+ 4612,
+ 4613,
+ 4614,
+ 4615,
+ 4616,
+ 4617,
+ 4618,
+ 4619,
+ 4620,
+ 4621,
+ 4622,
+ 4634,
+ 4635,
+ 4636,
+ 4637,
+ 4638,
+ 4639,
+ 4640,
+ 4641,
+ 4642,
+ 4643,
+ 4644,
+ 4661,
+ 4662,
+ 4663,
+ 4664,
+ 4665,
+ 4666,
+ 4667,
+ 4668,
+ 4669,
+ 4842,
+ 4843,
+ 4844,
+ 4845,
+ 4846,
+ 4847,
+ 4848,
+ 4937,
+ 4938,
+ 4939,
+ 4940,
+ 4941,
+ 4942,
+ 4943,
+ 4944,
+ 4945,
+ 4946,
+ 4947,
+ 4993,
+ 4994,
+ 4995,
+ 4996,
+ 4997,
+ 4998,
+ 4999,
+ 5000,
+ 5001,
+ 5002,
+ 5003,
+ 6574,
+ 6575,
+ 6576,
+ 6577,
+ 6578,
+ 6579,
+ 6580,
+ 6581,
+ 6582,
+ 6583,
+ 6584,
+ 6585,
+ 6586,
+ 6587,
+ 6588,
+ 6589,
+ 6590,
+ 6591,
+ 6592,
+ 6593,
+ 6594,
+ 6595,
+ 6596,
+ 6597,
+ 6598,
+ 6599,
+ 6600,
+ 6601,
+ 6602,
+ 6603,
+ 6604,
+ 6605,
+ 6606,
+ 6607,
+ 6608,
+ 6609,
+ 6610,
+ 6719,
+ 6720,
+ 6721,
+ 6722,
+ 6723,
+ 6724,
+ 6725,
+ 6726,
+ 6727,
+ 6728,
+ 6729,
+ 6730,
+ 6731,
+ 6732,
+ 6733,
+ 6734,
+ 6735,
+ 6832,
+ 6833,
+ 6834,
+ 6835,
+ 6836,
+ 6869,
+ 6870,
+ 6871,
+ 6872
+ ],
+ "rightHandIndex1": [
+ 5488,
+ 5489,
+ 5490,
+ 5491,
+ 5498,
+ 5499,
+ 5500,
+ 5501,
+ 5518,
+ 5528,
+ 5529,
+ 5584,
+ 5585,
+ 5586,
+ 5587,
+ 5588,
+ 5589,
+ 5590,
+ 5591,
+ 5592,
+ 5606,
+ 5607,
+ 5613,
+ 5615,
+ 5616,
+ 5617,
+ 5618,
+ 5619,
+ 5620,
+ 5621,
+ 5622,
+ 5623,
+ 5624,
+ 5625,
+ 5626,
+ 5627,
+ 5628,
+ 5629,
+ 5630,
+ 5638,
+ 5639,
+ 5640,
+ 5642,
+ 5647,
+ 5648,
+ 5650,
+ 5651,
+ 5665,
+ 5666,
+ 5676,
+ 5677,
+ 5678,
+ 5679,
+ 5680,
+ 5681,
+ 5693,
+ 5694,
+ 5706,
+ 5707,
+ 5708,
+ 5719,
+ 5721,
+ 5722,
+ 5723,
+ 5724,
+ 5730,
+ 5731,
+ 5733,
+ 5734,
+ 5735,
+ 5737,
+ 5738,
+ 5741,
+ 5742,
+ 5743,
+ 5744,
+ 5752,
+ 5753,
+ 5754,
+ 5755,
+ 5756,
+ 5757,
+ 5758,
+ 5759,
+ 5760,
+ 5761,
+ 5762,
+ 5763,
+ 5764,
+ 5765,
+ 5766,
+ 5767,
+ 5768,
+ 5769,
+ 5770,
+ 5771,
+ 5772,
+ 5773,
+ 5774,
+ 5775,
+ 5776,
+ 5777,
+ 5778,
+ 5779,
+ 5780,
+ 5781,
+ 5782,
+ 5783,
+ 5784,
+ 5785,
+ 5786,
+ 5787,
+ 5788,
+ 5789,
+ 5790,
+ 5791,
+ 5792,
+ 5793,
+ 5794,
+ 5795,
+ 5796,
+ 5797,
+ 5798,
+ 5799,
+ 5800,
+ 5801,
+ 5802,
+ 5803,
+ 5804,
+ 5805,
+ 5806,
+ 5807,
+ 5808,
+ 5809,
+ 5810,
+ 5811,
+ 5812,
+ 5813,
+ 5814,
+ 5815,
+ 5816,
+ 5817,
+ 5818,
+ 5819,
+ 5820,
+ 5821,
+ 5822,
+ 5823,
+ 5824,
+ 5825,
+ 5826,
+ 5827,
+ 5828,
+ 5829,
+ 5830,
+ 5831,
+ 5832,
+ 5833,
+ 5834,
+ 5835,
+ 5836,
+ 5837,
+ 5838,
+ 5839,
+ 5840,
+ 5841,
+ 5842,
+ 5843,
+ 5844,
+ 5845,
+ 5846,
+ 5847,
+ 5848,
+ 5849,
+ 5850,
+ 5851,
+ 5852,
+ 5853,
+ 5854,
+ 5855,
+ 5856,
+ 5857,
+ 5858,
+ 5859,
+ 5860,
+ 5861,
+ 5862,
+ 5863,
+ 5864,
+ 5865,
+ 5866,
+ 5867,
+ 5868,
+ 5869,
+ 5870,
+ 5871,
+ 5872,
+ 5873,
+ 5874,
+ 5875,
+ 5876,
+ 5877,
+ 5878,
+ 5879,
+ 5880,
+ 5881,
+ 5882,
+ 5883,
+ 5884,
+ 5885,
+ 5886,
+ 5887,
+ 5888,
+ 5889,
+ 5890,
+ 5891,
+ 5892,
+ 5893,
+ 5894,
+ 5895,
+ 5896,
+ 5897,
+ 5898,
+ 5899,
+ 5900,
+ 5901,
+ 5902,
+ 5903,
+ 5904,
+ 5905,
+ 5906,
+ 5907,
+ 5908,
+ 5909,
+ 5910,
+ 5911,
+ 5912,
+ 5913,
+ 5914,
+ 5915,
+ 5916,
+ 5917,
+ 5918,
+ 5919,
+ 5920,
+ 5921,
+ 5922,
+ 5923,
+ 5924,
+ 5925,
+ 5926,
+ 5927,
+ 5928,
+ 5929,
+ 5930,
+ 5931,
+ 5932,
+ 5933,
+ 5934,
+ 5935,
+ 5936,
+ 5937,
+ 5938,
+ 5939,
+ 5940,
+ 5941,
+ 5942,
+ 5943,
+ 5944,
+ 5945,
+ 5946,
+ 5947,
+ 5948,
+ 5949,
+ 5950,
+ 5951,
+ 5952,
+ 5953,
+ 5954,
+ 5955,
+ 5956,
+ 5957,
+ 5958,
+ 5959,
+ 5960,
+ 5961,
+ 5962,
+ 5963,
+ 5964,
+ 5965,
+ 5966,
+ 5967,
+ 5968,
+ 5969,
+ 5970,
+ 5971,
+ 5972,
+ 5973,
+ 5974,
+ 5975,
+ 5976,
+ 5977,
+ 5978,
+ 5979,
+ 5980,
+ 5981,
+ 5982,
+ 5983,
+ 5984,
+ 5985,
+ 5986,
+ 5987,
+ 5988,
+ 5989,
+ 5990,
+ 5991,
+ 5992,
+ 5993,
+ 5994,
+ 5995,
+ 5996,
+ 5997,
+ 5998,
+ 5999,
+ 6000,
+ 6001,
+ 6002,
+ 6003,
+ 6004,
+ 6005,
+ 6006,
+ 6007,
+ 6008,
+ 6009,
+ 6010,
+ 6011,
+ 6012,
+ 6013,
+ 6014,
+ 6015,
+ 6016,
+ 6017,
+ 6018,
+ 6019,
+ 6020,
+ 6021,
+ 6022,
+ 6023,
+ 6024,
+ 6025,
+ 6026,
+ 6027,
+ 6028,
+ 6029,
+ 6030,
+ 6031,
+ 6032,
+ 6033,
+ 6034,
+ 6035,
+ 6036,
+ 6037,
+ 6038,
+ 6039,
+ 6040,
+ 6041,
+ 6042,
+ 6043,
+ 6044,
+ 6045,
+ 6046,
+ 6047,
+ 6048,
+ 6049,
+ 6050,
+ 6051,
+ 6052,
+ 6053,
+ 6054,
+ 6055,
+ 6058,
+ 6059,
+ 6060,
+ 6061,
+ 6062,
+ 6063,
+ 6064,
+ 6065,
+ 6068,
+ 6069,
+ 6070,
+ 6071,
+ 6072,
+ 6073,
+ 6074,
+ 6075,
+ 6076,
+ 6077,
+ 6078,
+ 6079,
+ 6080,
+ 6081,
+ 6082,
+ 6083,
+ 6084,
+ 6085,
+ 6086,
+ 6087,
+ 6088,
+ 6089,
+ 6090,
+ 6091,
+ 6092,
+ 6093,
+ 6094,
+ 6095,
+ 6096,
+ 6097,
+ 6098,
+ 6099,
+ 6100,
+ 6101,
+ 6102,
+ 6103,
+ 6104,
+ 6105,
+ 6106,
+ 6107,
+ 6108,
+ 6109,
+ 6110,
+ 6111,
+ 6112,
+ 6113,
+ 6114,
+ 6115,
+ 6116,
+ 6117,
+ 6118,
+ 6119,
+ 6120,
+ 6121,
+ 6122,
+ 6123,
+ 6124,
+ 6125,
+ 6126,
+ 6127,
+ 6128,
+ 6129,
+ 6130,
+ 6131,
+ 6132,
+ 6133,
+ 6134,
+ 6135,
+ 6136,
+ 6137,
+ 6138,
+ 6139,
+ 6140,
+ 6141,
+ 6142,
+ 6143,
+ 6144,
+ 6145,
+ 6146,
+ 6147,
+ 6148,
+ 6149,
+ 6150,
+ 6151,
+ 6152,
+ 6153,
+ 6154,
+ 6155,
+ 6156,
+ 6157
+ ],
+ "leftForeArm": [
+ 1546,
+ 1547,
+ 1548,
+ 1549,
+ 1550,
+ 1551,
+ 1552,
+ 1553,
+ 1554,
+ 1555,
+ 1556,
+ 1557,
+ 1558,
+ 1559,
+ 1560,
+ 1561,
+ 1562,
+ 1563,
+ 1564,
+ 1565,
+ 1566,
+ 1567,
+ 1568,
+ 1569,
+ 1570,
+ 1571,
+ 1572,
+ 1573,
+ 1574,
+ 1575,
+ 1576,
+ 1577,
+ 1578,
+ 1579,
+ 1580,
+ 1581,
+ 1582,
+ 1583,
+ 1584,
+ 1585,
+ 1586,
+ 1587,
+ 1588,
+ 1589,
+ 1590,
+ 1591,
+ 1592,
+ 1593,
+ 1594,
+ 1595,
+ 1596,
+ 1597,
+ 1598,
+ 1599,
+ 1600,
+ 1601,
+ 1602,
+ 1603,
+ 1604,
+ 1605,
+ 1606,
+ 1607,
+ 1608,
+ 1609,
+ 1610,
+ 1611,
+ 1612,
+ 1613,
+ 1614,
+ 1615,
+ 1616,
+ 1617,
+ 1618,
+ 1620,
+ 1621,
+ 1623,
+ 1624,
+ 1625,
+ 1626,
+ 1627,
+ 1628,
+ 1629,
+ 1630,
+ 1643,
+ 1644,
+ 1646,
+ 1647,
+ 1650,
+ 1651,
+ 1654,
+ 1655,
+ 1657,
+ 1658,
+ 1659,
+ 1660,
+ 1661,
+ 1662,
+ 1663,
+ 1664,
+ 1665,
+ 1666,
+ 1685,
+ 1686,
+ 1687,
+ 1688,
+ 1689,
+ 1690,
+ 1691,
+ 1692,
+ 1693,
+ 1694,
+ 1695,
+ 1699,
+ 1700,
+ 1701,
+ 1702,
+ 1721,
+ 1722,
+ 1723,
+ 1724,
+ 1725,
+ 1726,
+ 1727,
+ 1728,
+ 1729,
+ 1730,
+ 1732,
+ 1736,
+ 1738,
+ 1741,
+ 1742,
+ 1743,
+ 1744,
+ 1750,
+ 1752,
+ 1900,
+ 1909,
+ 1910,
+ 1911,
+ 1912,
+ 1913,
+ 1914,
+ 1915,
+ 1916,
+ 1917,
+ 1918,
+ 1919,
+ 1920,
+ 1921,
+ 1922,
+ 1923,
+ 1924,
+ 1925,
+ 1926,
+ 1927,
+ 1928,
+ 1929,
+ 1930,
+ 1931,
+ 1932,
+ 1933,
+ 1934,
+ 1935,
+ 1936,
+ 1937,
+ 1938,
+ 1939,
+ 1940,
+ 1941,
+ 1942,
+ 1943,
+ 1944,
+ 1945,
+ 1946,
+ 1947,
+ 1948,
+ 1949,
+ 1950,
+ 1951,
+ 1952,
+ 1953,
+ 1954,
+ 1955,
+ 1956,
+ 1957,
+ 1958,
+ 1959,
+ 1960,
+ 1961,
+ 1962,
+ 1963,
+ 1964,
+ 1965,
+ 1966,
+ 1967,
+ 1968,
+ 1969,
+ 1970,
+ 1971,
+ 1972,
+ 1973,
+ 1974,
+ 1975,
+ 1976,
+ 1977,
+ 1978,
+ 1979,
+ 1980,
+ 2019,
+ 2059,
+ 2060,
+ 2073,
+ 2089,
+ 2098,
+ 2099,
+ 2100,
+ 2101,
+ 2102,
+ 2103,
+ 2104,
+ 2105,
+ 2106,
+ 2107,
+ 2108,
+ 2109,
+ 2110,
+ 2111,
+ 2112,
+ 2147,
+ 2148,
+ 2206,
+ 2207,
+ 2208,
+ 2209,
+ 2228,
+ 2230,
+ 2234,
+ 2235,
+ 2241,
+ 2242,
+ 2243,
+ 2244,
+ 2279,
+ 2286,
+ 2873,
+ 2874
+ ],
+ "rightForeArm": [
+ 5015,
+ 5016,
+ 5017,
+ 5018,
+ 5019,
+ 5020,
+ 5021,
+ 5022,
+ 5023,
+ 5024,
+ 5025,
+ 5026,
+ 5027,
+ 5028,
+ 5029,
+ 5030,
+ 5031,
+ 5032,
+ 5033,
+ 5034,
+ 5035,
+ 5036,
+ 5037,
+ 5038,
+ 5039,
+ 5040,
+ 5041,
+ 5042,
+ 5043,
+ 5044,
+ 5045,
+ 5046,
+ 5047,
+ 5048,
+ 5049,
+ 5050,
+ 5051,
+ 5052,
+ 5053,
+ 5054,
+ 5055,
+ 5056,
+ 5057,
+ 5058,
+ 5059,
+ 5060,
+ 5061,
+ 5062,
+ 5063,
+ 5064,
+ 5065,
+ 5066,
+ 5067,
+ 5068,
+ 5069,
+ 5070,
+ 5071,
+ 5072,
+ 5073,
+ 5074,
+ 5075,
+ 5076,
+ 5077,
+ 5078,
+ 5079,
+ 5080,
+ 5081,
+ 5082,
+ 5083,
+ 5084,
+ 5085,
+ 5086,
+ 5087,
+ 5090,
+ 5091,
+ 5092,
+ 5093,
+ 5094,
+ 5095,
+ 5096,
+ 5097,
+ 5098,
+ 5099,
+ 5112,
+ 5113,
+ 5116,
+ 5117,
+ 5120,
+ 5121,
+ 5124,
+ 5125,
+ 5126,
+ 5127,
+ 5128,
+ 5129,
+ 5130,
+ 5131,
+ 5132,
+ 5133,
+ 5134,
+ 5135,
+ 5154,
+ 5155,
+ 5156,
+ 5157,
+ 5158,
+ 5159,
+ 5160,
+ 5161,
+ 5162,
+ 5163,
+ 5164,
+ 5168,
+ 5169,
+ 5170,
+ 5171,
+ 5190,
+ 5191,
+ 5192,
+ 5193,
+ 5194,
+ 5195,
+ 5196,
+ 5197,
+ 5198,
+ 5199,
+ 5202,
+ 5205,
+ 5207,
+ 5210,
+ 5211,
+ 5212,
+ 5213,
+ 5219,
+ 5221,
+ 5361,
+ 5370,
+ 5371,
+ 5372,
+ 5373,
+ 5374,
+ 5375,
+ 5376,
+ 5377,
+ 5378,
+ 5379,
+ 5380,
+ 5381,
+ 5382,
+ 5383,
+ 5384,
+ 5385,
+ 5386,
+ 5387,
+ 5388,
+ 5389,
+ 5390,
+ 5391,
+ 5392,
+ 5393,
+ 5394,
+ 5395,
+ 5396,
+ 5397,
+ 5398,
+ 5399,
+ 5400,
+ 5401,
+ 5402,
+ 5403,
+ 5404,
+ 5405,
+ 5406,
+ 5407,
+ 5408,
+ 5409,
+ 5410,
+ 5411,
+ 5412,
+ 5413,
+ 5414,
+ 5415,
+ 5416,
+ 5417,
+ 5418,
+ 5419,
+ 5420,
+ 5421,
+ 5422,
+ 5423,
+ 5424,
+ 5425,
+ 5426,
+ 5427,
+ 5428,
+ 5429,
+ 5430,
+ 5431,
+ 5432,
+ 5433,
+ 5434,
+ 5435,
+ 5436,
+ 5437,
+ 5438,
+ 5439,
+ 5440,
+ 5441,
+ 5480,
+ 5520,
+ 5521,
+ 5534,
+ 5550,
+ 5559,
+ 5560,
+ 5561,
+ 5562,
+ 5563,
+ 5564,
+ 5565,
+ 5566,
+ 5567,
+ 5568,
+ 5569,
+ 5570,
+ 5571,
+ 5572,
+ 5573,
+ 5608,
+ 5609,
+ 5667,
+ 5668,
+ 5669,
+ 5670,
+ 5689,
+ 5691,
+ 5695,
+ 5696,
+ 5702,
+ 5703,
+ 5704,
+ 5705,
+ 5740,
+ 5747,
+ 6334,
+ 6335
+ ],
+ "neck": [
+ 148,
+ 150,
+ 151,
+ 152,
+ 153,
+ 172,
+ 174,
+ 175,
+ 201,
+ 202,
+ 204,
+ 205,
+ 206,
+ 207,
+ 208,
+ 209,
+ 210,
+ 211,
+ 212,
+ 213,
+ 214,
+ 215,
+ 216,
+ 217,
+ 218,
+ 219,
+ 222,
+ 223,
+ 224,
+ 225,
+ 256,
+ 257,
+ 284,
+ 285,
+ 295,
+ 296,
+ 297,
+ 298,
+ 299,
+ 300,
+ 301,
+ 302,
+ 303,
+ 304,
+ 305,
+ 306,
+ 307,
+ 308,
+ 309,
+ 333,
+ 334,
+ 423,
+ 424,
+ 425,
+ 426,
+ 440,
+ 441,
+ 451,
+ 452,
+ 453,
+ 460,
+ 461,
+ 571,
+ 572,
+ 824,
+ 825,
+ 826,
+ 827,
+ 828,
+ 829,
+ 1279,
+ 1280,
+ 1312,
+ 1313,
+ 1319,
+ 1320,
+ 1331,
+ 3049,
+ 3050,
+ 3057,
+ 3058,
+ 3059,
+ 3068,
+ 3164,
+ 3661,
+ 3662,
+ 3663,
+ 3664,
+ 3665,
+ 3685,
+ 3686,
+ 3687,
+ 3714,
+ 3715,
+ 3716,
+ 3717,
+ 3718,
+ 3719,
+ 3720,
+ 3721,
+ 3722,
+ 3723,
+ 3724,
+ 3725,
+ 3726,
+ 3727,
+ 3728,
+ 3729,
+ 3730,
+ 3731,
+ 3734,
+ 3735,
+ 3736,
+ 3737,
+ 3768,
+ 3769,
+ 3796,
+ 3797,
+ 3807,
+ 3808,
+ 3809,
+ 3810,
+ 3811,
+ 3812,
+ 3813,
+ 3814,
+ 3815,
+ 3816,
+ 3817,
+ 3818,
+ 3819,
+ 3839,
+ 3840,
+ 3918,
+ 3919,
+ 3920,
+ 3921,
+ 3934,
+ 3935,
+ 3942,
+ 3943,
+ 3944,
+ 3950,
+ 4060,
+ 4061,
+ 4312,
+ 4313,
+ 4314,
+ 4315,
+ 4761,
+ 4762,
+ 4792,
+ 4793,
+ 4799,
+ 4800,
+ 4807
+ ],
+ "rightToeBase": [
+ 6611,
+ 6612,
+ 6613,
+ 6614,
+ 6615,
+ 6616,
+ 6617,
+ 6618,
+ 6619,
+ 6620,
+ 6621,
+ 6622,
+ 6623,
+ 6624,
+ 6625,
+ 6626,
+ 6627,
+ 6628,
+ 6629,
+ 6630,
+ 6631,
+ 6632,
+ 6633,
+ 6634,
+ 6635,
+ 6636,
+ 6637,
+ 6638,
+ 6639,
+ 6640,
+ 6641,
+ 6642,
+ 6643,
+ 6644,
+ 6645,
+ 6646,
+ 6647,
+ 6648,
+ 6649,
+ 6650,
+ 6651,
+ 6652,
+ 6653,
+ 6654,
+ 6655,
+ 6656,
+ 6657,
+ 6658,
+ 6659,
+ 6660,
+ 6661,
+ 6662,
+ 6663,
+ 6664,
+ 6665,
+ 6666,
+ 6667,
+ 6668,
+ 6669,
+ 6670,
+ 6671,
+ 6672,
+ 6673,
+ 6674,
+ 6675,
+ 6676,
+ 6677,
+ 6678,
+ 6679,
+ 6680,
+ 6681,
+ 6682,
+ 6683,
+ 6684,
+ 6685,
+ 6686,
+ 6687,
+ 6688,
+ 6689,
+ 6690,
+ 6691,
+ 6692,
+ 6693,
+ 6694,
+ 6695,
+ 6696,
+ 6697,
+ 6698,
+ 6699,
+ 6700,
+ 6701,
+ 6702,
+ 6703,
+ 6704,
+ 6705,
+ 6706,
+ 6707,
+ 6708,
+ 6709,
+ 6710,
+ 6711,
+ 6712,
+ 6713,
+ 6714,
+ 6715,
+ 6716,
+ 6717,
+ 6718,
+ 6736,
+ 6739,
+ 6741,
+ 6743,
+ 6745,
+ 6747,
+ 6749,
+ 6750,
+ 6752,
+ 6754,
+ 6757,
+ 6758,
+ 6760,
+ 6762
+ ],
+ "spine": [
+ 616,
+ 617,
+ 630,
+ 631,
+ 632,
+ 633,
+ 654,
+ 655,
+ 656,
+ 657,
+ 662,
+ 663,
+ 664,
+ 665,
+ 720,
+ 721,
+ 765,
+ 766,
+ 767,
+ 768,
+ 796,
+ 797,
+ 798,
+ 799,
+ 889,
+ 890,
+ 916,
+ 917,
+ 918,
+ 919,
+ 921,
+ 922,
+ 923,
+ 924,
+ 925,
+ 926,
+ 1188,
+ 1189,
+ 1211,
+ 1212,
+ 1248,
+ 1249,
+ 1250,
+ 1251,
+ 1264,
+ 1265,
+ 1266,
+ 1267,
+ 1323,
+ 1324,
+ 1325,
+ 1326,
+ 1327,
+ 1328,
+ 1332,
+ 1333,
+ 1334,
+ 1335,
+ 1336,
+ 1344,
+ 1345,
+ 1481,
+ 1482,
+ 1483,
+ 1484,
+ 1485,
+ 1486,
+ 1487,
+ 1488,
+ 1489,
+ 1490,
+ 1491,
+ 1492,
+ 1493,
+ 1494,
+ 1495,
+ 1496,
+ 1767,
+ 2823,
+ 2824,
+ 2825,
+ 2826,
+ 2827,
+ 2828,
+ 2829,
+ 2830,
+ 2831,
+ 2832,
+ 2833,
+ 2834,
+ 2835,
+ 2836,
+ 2837,
+ 2838,
+ 2839,
+ 2840,
+ 2841,
+ 2842,
+ 2843,
+ 2844,
+ 2845,
+ 2847,
+ 2848,
+ 2851,
+ 3016,
+ 3017,
+ 3018,
+ 3019,
+ 3020,
+ 3023,
+ 3024,
+ 3124,
+ 3173,
+ 3476,
+ 3477,
+ 3478,
+ 3480,
+ 3500,
+ 3501,
+ 3502,
+ 3504,
+ 3509,
+ 3511,
+ 4103,
+ 4104,
+ 4118,
+ 4119,
+ 4120,
+ 4121,
+ 4142,
+ 4143,
+ 4144,
+ 4145,
+ 4150,
+ 4151,
+ 4152,
+ 4153,
+ 4208,
+ 4209,
+ 4253,
+ 4254,
+ 4255,
+ 4256,
+ 4284,
+ 4285,
+ 4286,
+ 4287,
+ 4375,
+ 4376,
+ 4402,
+ 4403,
+ 4405,
+ 4406,
+ 4407,
+ 4408,
+ 4409,
+ 4410,
+ 4411,
+ 4412,
+ 4674,
+ 4675,
+ 4694,
+ 4695,
+ 4731,
+ 4732,
+ 4733,
+ 4734,
+ 4747,
+ 4748,
+ 4749,
+ 4750,
+ 4803,
+ 4804,
+ 4805,
+ 4806,
+ 4808,
+ 4809,
+ 4810,
+ 4811,
+ 4812,
+ 4820,
+ 4821,
+ 4953,
+ 4954,
+ 4955,
+ 4956,
+ 4957,
+ 4958,
+ 4959,
+ 4960,
+ 4961,
+ 4962,
+ 4963,
+ 4964,
+ 4965,
+ 4966,
+ 4967,
+ 4968,
+ 5234,
+ 6284,
+ 6285,
+ 6286,
+ 6287,
+ 6288,
+ 6289,
+ 6290,
+ 6291,
+ 6292,
+ 6293,
+ 6294,
+ 6295,
+ 6296,
+ 6297,
+ 6298,
+ 6299,
+ 6300,
+ 6301,
+ 6302,
+ 6303,
+ 6304,
+ 6305,
+ 6306,
+ 6308,
+ 6309,
+ 6312,
+ 6472,
+ 6473,
+ 6474,
+ 6545,
+ 6874,
+ 6875,
+ 6876,
+ 6878
+ ],
+ "leftUpLeg": [
+ 833,
+ 834,
+ 838,
+ 839,
+ 847,
+ 848,
+ 849,
+ 850,
+ 851,
+ 852,
+ 853,
+ 854,
+ 870,
+ 871,
+ 872,
+ 873,
+ 874,
+ 875,
+ 876,
+ 877,
+ 878,
+ 879,
+ 880,
+ 881,
+ 897,
+ 898,
+ 899,
+ 900,
+ 901,
+ 902,
+ 903,
+ 904,
+ 905,
+ 906,
+ 907,
+ 908,
+ 909,
+ 910,
+ 911,
+ 912,
+ 913,
+ 914,
+ 915,
+ 933,
+ 934,
+ 935,
+ 936,
+ 944,
+ 945,
+ 946,
+ 947,
+ 948,
+ 949,
+ 950,
+ 951,
+ 952,
+ 953,
+ 954,
+ 955,
+ 956,
+ 957,
+ 958,
+ 959,
+ 960,
+ 961,
+ 962,
+ 963,
+ 964,
+ 965,
+ 966,
+ 967,
+ 968,
+ 969,
+ 970,
+ 971,
+ 972,
+ 973,
+ 974,
+ 975,
+ 976,
+ 977,
+ 978,
+ 979,
+ 980,
+ 981,
+ 982,
+ 983,
+ 984,
+ 985,
+ 986,
+ 987,
+ 988,
+ 989,
+ 990,
+ 991,
+ 992,
+ 993,
+ 994,
+ 995,
+ 996,
+ 997,
+ 998,
+ 999,
+ 1000,
+ 1001,
+ 1002,
+ 1003,
+ 1004,
+ 1005,
+ 1006,
+ 1007,
+ 1008,
+ 1009,
+ 1010,
+ 1011,
+ 1012,
+ 1013,
+ 1014,
+ 1015,
+ 1016,
+ 1017,
+ 1018,
+ 1019,
+ 1020,
+ 1021,
+ 1022,
+ 1023,
+ 1024,
+ 1025,
+ 1026,
+ 1027,
+ 1028,
+ 1029,
+ 1030,
+ 1031,
+ 1032,
+ 1033,
+ 1034,
+ 1035,
+ 1036,
+ 1037,
+ 1038,
+ 1039,
+ 1040,
+ 1041,
+ 1042,
+ 1043,
+ 1044,
+ 1045,
+ 1046,
+ 1137,
+ 1138,
+ 1139,
+ 1140,
+ 1141,
+ 1142,
+ 1143,
+ 1144,
+ 1145,
+ 1146,
+ 1147,
+ 1148,
+ 1159,
+ 1160,
+ 1161,
+ 1162,
+ 1163,
+ 1164,
+ 1165,
+ 1166,
+ 1167,
+ 1168,
+ 1169,
+ 1170,
+ 1171,
+ 1172,
+ 1173,
+ 1174,
+ 1184,
+ 1185,
+ 1186,
+ 1187,
+ 1221,
+ 1222,
+ 1223,
+ 1224,
+ 1225,
+ 1226,
+ 1227,
+ 1228,
+ 1229,
+ 1230,
+ 1262,
+ 1263,
+ 1274,
+ 1275,
+ 1276,
+ 1277,
+ 1321,
+ 1322,
+ 1354,
+ 1359,
+ 1360,
+ 1361,
+ 1362,
+ 1365,
+ 1366,
+ 1367,
+ 1368,
+ 1451,
+ 1452,
+ 1453,
+ 1455,
+ 1456,
+ 1457,
+ 1458,
+ 1459,
+ 1460,
+ 1461,
+ 1462,
+ 1463,
+ 1475,
+ 1477,
+ 1478,
+ 1479,
+ 1480,
+ 1498,
+ 1499,
+ 1500,
+ 1501,
+ 1511,
+ 1512,
+ 1513,
+ 1514,
+ 1516,
+ 1517,
+ 1518,
+ 1519,
+ 1520,
+ 1521,
+ 1522,
+ 1533,
+ 1534,
+ 3125,
+ 3126,
+ 3127,
+ 3128,
+ 3131,
+ 3132,
+ 3133,
+ 3134,
+ 3135,
+ 3475,
+ 3479
+ ],
+ "leftHand": [
+ 1981,
+ 1982,
+ 1983,
+ 1984,
+ 1985,
+ 1986,
+ 1987,
+ 1988,
+ 1989,
+ 1990,
+ 1991,
+ 1992,
+ 1993,
+ 1994,
+ 1995,
+ 1996,
+ 1997,
+ 1998,
+ 1999,
+ 2000,
+ 2001,
+ 2002,
+ 2003,
+ 2004,
+ 2005,
+ 2006,
+ 2007,
+ 2008,
+ 2009,
+ 2010,
+ 2011,
+ 2012,
+ 2013,
+ 2014,
+ 2015,
+ 2016,
+ 2017,
+ 2018,
+ 2019,
+ 2020,
+ 2021,
+ 2022,
+ 2023,
+ 2024,
+ 2025,
+ 2026,
+ 2031,
+ 2032,
+ 2033,
+ 2034,
+ 2035,
+ 2036,
+ 2041,
+ 2042,
+ 2043,
+ 2044,
+ 2045,
+ 2046,
+ 2047,
+ 2048,
+ 2049,
+ 2050,
+ 2051,
+ 2052,
+ 2053,
+ 2054,
+ 2055,
+ 2056,
+ 2057,
+ 2058,
+ 2059,
+ 2060,
+ 2061,
+ 2062,
+ 2063,
+ 2064,
+ 2065,
+ 2066,
+ 2069,
+ 2070,
+ 2071,
+ 2072,
+ 2073,
+ 2074,
+ 2075,
+ 2076,
+ 2077,
+ 2078,
+ 2079,
+ 2080,
+ 2081,
+ 2082,
+ 2083,
+ 2084,
+ 2085,
+ 2086,
+ 2087,
+ 2088,
+ 2089,
+ 2090,
+ 2091,
+ 2092,
+ 2093,
+ 2094,
+ 2095,
+ 2096,
+ 2097,
+ 2098,
+ 2099,
+ 2100,
+ 2101,
+ 2107,
+ 2111,
+ 2113,
+ 2114,
+ 2115,
+ 2116,
+ 2117,
+ 2118,
+ 2119,
+ 2120,
+ 2121,
+ 2122,
+ 2127,
+ 2130,
+ 2131,
+ 2132,
+ 2133,
+ 2134,
+ 2135,
+ 2136,
+ 2137,
+ 2138,
+ 2139,
+ 2140,
+ 2141,
+ 2142,
+ 2143,
+ 2144,
+ 2149,
+ 2150,
+ 2151,
+ 2152,
+ 2155,
+ 2160,
+ 2163,
+ 2164,
+ 2170,
+ 2171,
+ 2172,
+ 2173,
+ 2174,
+ 2175,
+ 2176,
+ 2177,
+ 2178,
+ 2179,
+ 2180,
+ 2182,
+ 2183,
+ 2184,
+ 2185,
+ 2188,
+ 2189,
+ 2191,
+ 2192,
+ 2193,
+ 2194,
+ 2195,
+ 2196,
+ 2197,
+ 2198,
+ 2199,
+ 2200,
+ 2201,
+ 2202,
+ 2203,
+ 2207,
+ 2209,
+ 2210,
+ 2211,
+ 2212,
+ 2213,
+ 2214,
+ 2221,
+ 2222,
+ 2223,
+ 2224,
+ 2225,
+ 2226,
+ 2227,
+ 2228,
+ 2229,
+ 2231,
+ 2234,
+ 2236,
+ 2237,
+ 2238,
+ 2239,
+ 2240,
+ 2246,
+ 2247,
+ 2248,
+ 2249,
+ 2250,
+ 2251,
+ 2252,
+ 2253,
+ 2254,
+ 2255,
+ 2256,
+ 2257,
+ 2258,
+ 2259,
+ 2260,
+ 2262,
+ 2263,
+ 2264,
+ 2265,
+ 2266,
+ 2267,
+ 2268,
+ 2269,
+ 2270,
+ 2271,
+ 2274,
+ 2275,
+ 2276,
+ 2277,
+ 2278,
+ 2279,
+ 2284,
+ 2285,
+ 2287,
+ 2288,
+ 2289,
+ 2290,
+ 2293,
+ 2595,
+ 2598,
+ 2605,
+ 2608,
+ 2697,
+ 2698,
+ 2699,
+ 2700,
+ 2701,
+ 2702,
+ 2703,
+ 2704,
+ 2705,
+ 2706,
+ 2707,
+ 2708,
+ 2709,
+ 2710,
+ 2711,
+ 2712,
+ 2713,
+ 2714,
+ 2715,
+ 2716,
+ 2717,
+ 2718,
+ 2719,
+ 2720,
+ 2721,
+ 2722,
+ 2723,
+ 2724,
+ 2725,
+ 2726,
+ 2727,
+ 2728,
+ 2729,
+ 2730,
+ 2731,
+ 2732,
+ 2733,
+ 2734,
+ 2735,
+ 2736,
+ 2737,
+ 2738,
+ 2739,
+ 2740,
+ 2741,
+ 2742,
+ 2743,
+ 2744,
+ 2745,
+ 2746,
+ 2747,
+ 2748,
+ 2749,
+ 2750,
+ 2751,
+ 2752,
+ 2753,
+ 2754,
+ 2755,
+ 2756,
+ 2757,
+ 2758,
+ 2759,
+ 2760,
+ 2761,
+ 2762,
+ 2763,
+ 2764,
+ 2765,
+ 2766,
+ 2767,
+ 2768,
+ 2769,
+ 2770,
+ 2771,
+ 2772,
+ 2773,
+ 2774,
+ 2775,
+ 2776,
+ 2777,
+ 2778
+ ],
+ "hips": [
+ 631,
+ 632,
+ 654,
+ 657,
+ 662,
+ 665,
+ 676,
+ 677,
+ 678,
+ 679,
+ 705,
+ 720,
+ 796,
+ 799,
+ 800,
+ 801,
+ 802,
+ 807,
+ 808,
+ 809,
+ 810,
+ 815,
+ 816,
+ 822,
+ 823,
+ 830,
+ 831,
+ 832,
+ 833,
+ 834,
+ 835,
+ 836,
+ 837,
+ 838,
+ 839,
+ 840,
+ 841,
+ 842,
+ 843,
+ 844,
+ 845,
+ 846,
+ 855,
+ 856,
+ 857,
+ 858,
+ 859,
+ 860,
+ 861,
+ 862,
+ 863,
+ 864,
+ 865,
+ 866,
+ 867,
+ 868,
+ 869,
+ 871,
+ 878,
+ 881,
+ 882,
+ 883,
+ 884,
+ 885,
+ 886,
+ 887,
+ 888,
+ 889,
+ 890,
+ 912,
+ 915,
+ 916,
+ 917,
+ 918,
+ 919,
+ 920,
+ 932,
+ 937,
+ 938,
+ 939,
+ 1163,
+ 1166,
+ 1203,
+ 1204,
+ 1205,
+ 1206,
+ 1207,
+ 1208,
+ 1209,
+ 1210,
+ 1246,
+ 1247,
+ 1262,
+ 1263,
+ 1276,
+ 1277,
+ 1278,
+ 1321,
+ 1336,
+ 1337,
+ 1338,
+ 1339,
+ 1353,
+ 1354,
+ 1361,
+ 1362,
+ 1363,
+ 1364,
+ 1446,
+ 1447,
+ 1448,
+ 1449,
+ 1450,
+ 1454,
+ 1476,
+ 1497,
+ 1511,
+ 1513,
+ 1514,
+ 1515,
+ 1533,
+ 1534,
+ 1539,
+ 1540,
+ 1768,
+ 1769,
+ 1779,
+ 1780,
+ 1781,
+ 1782,
+ 1783,
+ 1784,
+ 1785,
+ 1786,
+ 1787,
+ 1788,
+ 1789,
+ 1790,
+ 1791,
+ 1792,
+ 1793,
+ 1794,
+ 1795,
+ 1796,
+ 1797,
+ 1798,
+ 1799,
+ 1800,
+ 1801,
+ 1802,
+ 1803,
+ 1804,
+ 1805,
+ 1806,
+ 1807,
+ 2909,
+ 2910,
+ 2911,
+ 2912,
+ 2913,
+ 2914,
+ 2915,
+ 2916,
+ 2917,
+ 2918,
+ 2919,
+ 2920,
+ 2921,
+ 2922,
+ 2923,
+ 2924,
+ 2925,
+ 2926,
+ 2927,
+ 2928,
+ 2929,
+ 2930,
+ 3018,
+ 3019,
+ 3021,
+ 3022,
+ 3080,
+ 3081,
+ 3082,
+ 3083,
+ 3084,
+ 3085,
+ 3086,
+ 3087,
+ 3088,
+ 3089,
+ 3090,
+ 3091,
+ 3092,
+ 3093,
+ 3094,
+ 3095,
+ 3096,
+ 3097,
+ 3098,
+ 3099,
+ 3100,
+ 3101,
+ 3102,
+ 3103,
+ 3104,
+ 3105,
+ 3106,
+ 3107,
+ 3108,
+ 3109,
+ 3110,
+ 3111,
+ 3112,
+ 3113,
+ 3114,
+ 3115,
+ 3116,
+ 3117,
+ 3118,
+ 3119,
+ 3120,
+ 3121,
+ 3122,
+ 3123,
+ 3124,
+ 3128,
+ 3129,
+ 3130,
+ 3136,
+ 3137,
+ 3138,
+ 3139,
+ 3140,
+ 3141,
+ 3142,
+ 3143,
+ 3144,
+ 3145,
+ 3146,
+ 3147,
+ 3148,
+ 3149,
+ 3150,
+ 3151,
+ 3152,
+ 3153,
+ 3154,
+ 3155,
+ 3156,
+ 3157,
+ 3158,
+ 3159,
+ 3160,
+ 3170,
+ 3172,
+ 3481,
+ 3484,
+ 3500,
+ 3502,
+ 3503,
+ 3507,
+ 3510,
+ 4120,
+ 4121,
+ 4142,
+ 4143,
+ 4150,
+ 4151,
+ 4164,
+ 4165,
+ 4166,
+ 4167,
+ 4193,
+ 4208,
+ 4284,
+ 4285,
+ 4288,
+ 4289,
+ 4290,
+ 4295,
+ 4296,
+ 4297,
+ 4298,
+ 4303,
+ 4304,
+ 4310,
+ 4311,
+ 4316,
+ 4317,
+ 4318,
+ 4319,
+ 4320,
+ 4321,
+ 4322,
+ 4323,
+ 4324,
+ 4325,
+ 4326,
+ 4327,
+ 4328,
+ 4329,
+ 4330,
+ 4331,
+ 4332,
+ 4341,
+ 4342,
+ 4343,
+ 4344,
+ 4345,
+ 4346,
+ 4347,
+ 4348,
+ 4349,
+ 4350,
+ 4351,
+ 4352,
+ 4353,
+ 4354,
+ 4355,
+ 4356,
+ 4364,
+ 4365,
+ 4368,
+ 4369,
+ 4370,
+ 4371,
+ 4372,
+ 4373,
+ 4374,
+ 4375,
+ 4376,
+ 4398,
+ 4399,
+ 4402,
+ 4403,
+ 4404,
+ 4405,
+ 4406,
+ 4418,
+ 4423,
+ 4424,
+ 4425,
+ 4649,
+ 4650,
+ 4689,
+ 4690,
+ 4691,
+ 4692,
+ 4693,
+ 4729,
+ 4730,
+ 4745,
+ 4746,
+ 4759,
+ 4760,
+ 4801,
+ 4812,
+ 4813,
+ 4814,
+ 4815,
+ 4829,
+ 4836,
+ 4837,
+ 4919,
+ 4920,
+ 4921,
+ 4922,
+ 4923,
+ 4927,
+ 4969,
+ 4983,
+ 4984,
+ 4986,
+ 5004,
+ 5005,
+ 5244,
+ 5245,
+ 5246,
+ 5247,
+ 5248,
+ 5249,
+ 5250,
+ 5251,
+ 5252,
+ 5253,
+ 5254,
+ 5255,
+ 5256,
+ 5257,
+ 5258,
+ 5259,
+ 5260,
+ 5261,
+ 5262,
+ 5263,
+ 5264,
+ 5265,
+ 5266,
+ 5267,
+ 5268,
+ 6368,
+ 6369,
+ 6370,
+ 6371,
+ 6372,
+ 6373,
+ 6374,
+ 6375,
+ 6376,
+ 6377,
+ 6378,
+ 6379,
+ 6380,
+ 6381,
+ 6382,
+ 6383,
+ 6384,
+ 6385,
+ 6386,
+ 6387,
+ 6388,
+ 6389,
+ 6473,
+ 6474,
+ 6504,
+ 6505,
+ 6506,
+ 6507,
+ 6508,
+ 6509,
+ 6510,
+ 6511,
+ 6512,
+ 6513,
+ 6514,
+ 6515,
+ 6516,
+ 6517,
+ 6518,
+ 6519,
+ 6520,
+ 6521,
+ 6522,
+ 6523,
+ 6524,
+ 6525,
+ 6526,
+ 6527,
+ 6528,
+ 6529,
+ 6530,
+ 6531,
+ 6532,
+ 6533,
+ 6534,
+ 6535,
+ 6536,
+ 6537,
+ 6538,
+ 6539,
+ 6540,
+ 6541,
+ 6542,
+ 6543,
+ 6544,
+ 6545,
+ 6549,
+ 6550,
+ 6551,
+ 6557,
+ 6558,
+ 6559,
+ 6560,
+ 6561,
+ 6562,
+ 6563,
+ 6564,
+ 6565,
+ 6566,
+ 6567,
+ 6568,
+ 6569,
+ 6570,
+ 6571,
+ 6572,
+ 6573
+ ]
+}
\ No newline at end of file
diff --git a/lib/common/train_util.py b/lib/common/train_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..24af5a05f70588a65cb26658ddd3ea87430e8c44
--- /dev/null
+++ b/lib/common/train_util.py
@@ -0,0 +1,594 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import yaml
+import os.path as osp
+import torch
+import numpy as np
+import torch.nn.functional as F
+from ..dataset.mesh_util import *
+from ..net.geometry import orthogonal
+from pytorch3d.renderer.mesh import rasterize_meshes
+from .render_utils import Pytorch3dRasterizer
+from pytorch3d.structures import Meshes
+import cv2
+from PIL import Image
+from tqdm import tqdm
+import os
+from termcolor import colored
+
+
+def reshape_sample_tensor(sample_tensor, num_views):
+    if num_views == 1:
+        return sample_tensor
+    # Need to repeat sample_tensor along the batch dim num_views times
+    sample_tensor = sample_tensor.unsqueeze(dim=1)
+    sample_tensor = sample_tensor.repeat(1, num_views, 1, 1)
+    sample_tensor = sample_tensor.view(
+        sample_tensor.shape[0] * sample_tensor.shape[1],
+        sample_tensor.shape[2], sample_tensor.shape[3])
+    return sample_tensor
+
+
+def gen_mesh_eval(opt, net, cuda, data, resolution=None):
+    resolution = opt.resolution if resolution is None else resolution
+    image_tensor = data['img'].to(device=cuda)
+    calib_tensor = data['calib'].to(device=cuda)
+
+    net.filter(image_tensor)
+
+    b_min = data['b_min']
+    b_max = data['b_max']
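+    # run marching cubes over the bounding box [b_min, b_max] at the requested resolution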
+    try:
+        verts, faces, _, _ = reconstruction_faster(net,
+                                                    cuda,
+                                                    calib_tensor,
+                                                    resolution,
+                                                    b_min,
+                                                    b_max,
+                                                    use_octree=False)
+
+    except Exception as e:
+        print(e)
+        print('Cannot create marching cubes at this time.')
+        verts, faces = None, None
+    return verts, faces
+
+
+def gen_mesh(opt, net, cuda, data, save_path, resolution=None):
+    resolution = opt.resolution if resolution is None else resolution
+    image_tensor = data['img'].to(device=cuda)
+    calib_tensor = data['calib'].to(device=cuda)
+
+    net.filter(image_tensor)
+
+    b_min = data['b_min']
+    b_max = data['b_max']
+    try:
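+        # stack the de-normalized input views side by side and save a .png preview next to the mesh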
+        save_img_path = save_path[:-4] + '.png'
+        save_img_list = []
+        for v in range(image_tensor.shape[0]):
+            save_img = (np.transpose(image_tensor[v].detach().cpu().numpy(),
+                                     (1, 2, 0)) * 0.5 +
+                        0.5)[:, :, ::-1] * 255.0
+            save_img_list.append(save_img)
+        save_img = np.concatenate(save_img_list, axis=1)
+        Image.fromarray(np.uint8(save_img[:, :, ::-1])).save(save_img_path)
+
+        verts, faces, _, _ = reconstruction_faster(net, cuda, calib_tensor,
+                                                    resolution, b_min, b_max)
+        verts_tensor = torch.from_numpy(
+            verts.T).unsqueeze(0).to(device=cuda).float()
+        xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])
+        uv = xyz_tensor[:, :2, :]
+        color = net.index(image_tensor[:1], uv).detach().cpu().numpy()[0].T
+        color = color * 0.5 + 0.5
+        save_obj_mesh_with_color(save_path, verts, faces, color)
+    except Exception as e:
+        print(e)
+        print('Cannot create marching cubes at this time.')
+        verts, faces, color = None, None, None
+    return verts, faces, color
+
+
+def gen_mesh_color(opt, netG, netC, cuda, data, save_path, use_octree=True):
+ image_tensor = data['img'].to(device=cuda)
+ calib_tensor = data['calib'].to(device=cuda)
+
+ netG.filter(image_tensor)
+ netC.filter(image_tensor)
+ netC.attach(netG.get_im_feat())
+
+ b_min = data['b_min']
+ b_max = data['b_max']
+ try:
+ save_img_path = save_path[:-4] + '.png'
+ save_img_list = []
+ for v in range(image_tensor.shape[0]):
+ save_img = (np.transpose(image_tensor[v].detach().cpu().numpy(),
+ (1, 2, 0)) * 0.5 +
+ 0.5)[:, :, ::-1] * 255.0
+ save_img_list.append(save_img)
+ save_img = np.concatenate(save_img_list, axis=1)
+ Image.fromarray(np.uint8(save_img[:, :, ::-1])).save(save_img_path)
+
+ verts, faces, _, _ = reconstruction_faster(netG,
+ cuda,
+ calib_tensor,
+ opt.resolution,
+ b_min,
+ b_max,
+ use_octree=use_octree)
+
+ # Now Getting colors
+ verts_tensor = torch.from_numpy(
+ verts.T).unsqueeze(0).to(device=cuda).float()
+ verts_tensor = reshape_sample_tensor(verts_tensor, opt.num_views)
+ color = np.zeros(verts.shape)
+ interval = 10000
+ for i in range(len(color) // interval):
+ left = i * interval
+ right = i * interval + interval
+ if i == len(color) // interval - 1:
+ right = -1
+ netC.query(verts_tensor[:, :, left:right], calib_tensor)
+ rgb = netC.get_preds()[0].detach().cpu().numpy() * 0.5 + 0.5
+ color[left:right] = rgb.T
+
+ save_obj_mesh_with_color(save_path, verts, faces, color)
+ except Exception as e:
+ print(e)
+ print('Can not create marching cubes at this time.')
+ verts, faces, color = None, None, None
+ return verts, faces, color
+
+
+def adjust_learning_rate(optimizer, epoch, lr, schedule, gamma):
+ """Sets the learning rate to the initial LR decayed by schedule"""
+ if epoch in schedule:
+ lr *= gamma
+ for param_group in optimizer.param_groups:
+ param_group['lr'] = lr
+ return lr
+
+
+def compute_acc(pred, gt, thresh=0.5):
+ '''
+ return:
+ IOU, precision, and recall
+ '''
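+    # toy example (illustrative values only): pred = [0.9, 0.2, 0.7],
+    # gt = [1.0, 0.0, 0.0] -> IOU 0.5, precision 0.5, recall 1.0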
+ with torch.no_grad():
+ vol_pred = pred > thresh
+ vol_gt = gt > thresh
+
+ union = vol_pred | vol_gt
+ inter = vol_pred & vol_gt
+
+ true_pos = inter.sum().float()
+
+ union = union.sum().float()
+ if union == 0:
+ union = 1
+ vol_pred = vol_pred.sum().float()
+ if vol_pred == 0:
+ vol_pred = 1
+ vol_gt = vol_gt.sum().float()
+ if vol_gt == 0:
+ vol_gt = 1
+ return true_pos / union, true_pos / vol_pred, true_pos / vol_gt
+
+
+# def calc_metrics(opt, net, cuda, dataset, num_tests,
+# resolution=128, sampled_points=1000, use_kaolin=True):
+# if num_tests > len(dataset):
+# num_tests = len(dataset)
+# with torch.no_grad():
+# chamfer_arr, p2s_arr = [], []
+# for idx in tqdm(range(num_tests)):
+# data = dataset[idx * len(dataset) // num_tests]
+
+# verts, faces = gen_mesh_eval(opt, net, cuda, data, resolution)
+# if verts is None:
+# continue
+
+# mesh_gt = trimesh.load(data['mesh_path'])
+# mesh_gt = mesh_gt.split(only_watertight=False)
+# comp_num = [mesh.vertices.shape[0] for mesh in mesh_gt]
+# mesh_gt = mesh_gt[comp_num.index(max(comp_num))]
+
+# mesh_pred = trimesh.Trimesh(verts, faces)
+
+# gt_surface_pts, _ = trimesh.sample.sample_surface_even(
+# mesh_gt, sampled_points)
+# pred_surface_pts, _ = trimesh.sample.sample_surface_even(
+# mesh_pred, sampled_points)
+
+# if use_kaolin and has_kaolin:
+# kal_mesh_gt = kal.rep.TriangleMesh.from_tensors(
+# torch.tensor(mesh_gt.vertices).float().to(device=cuda),
+# torch.tensor(mesh_gt.faces).long().to(device=cuda))
+# kal_mesh_pred = kal.rep.TriangleMesh.from_tensors(
+# torch.tensor(mesh_pred.vertices).float().to(device=cuda),
+# torch.tensor(mesh_pred.faces).long().to(device=cuda))
+
+# kal_distance_0 = kal.metrics.mesh.point_to_surface(
+# torch.tensor(pred_surface_pts).float().to(device=cuda), kal_mesh_gt)
+# kal_distance_1 = kal.metrics.mesh.point_to_surface(
+# torch.tensor(gt_surface_pts).float().to(device=cuda), kal_mesh_pred)
+
+# dist_gt_pred = torch.sqrt(kal_distance_0).cpu().numpy()
+# dist_pred_gt = torch.sqrt(kal_distance_1).cpu().numpy()
+# else:
+# try:
+# _, dist_pred_gt, _ = trimesh.proximity.closest_point(mesh_pred, gt_surface_pts)
+# _, dist_gt_pred, _ = trimesh.proximity.closest_point(mesh_gt, pred_surface_pts)
+# except Exception as e:
+# print (e)
+# continue
+
+# chamfer_dist = 0.5 * (dist_pred_gt.mean() + dist_gt_pred.mean())
+# p2s_dist = dist_pred_gt.mean()
+
+# chamfer_arr.append(chamfer_dist)
+# p2s_arr.append(p2s_dist)
+
+# return np.average(chamfer_arr), np.average(p2s_arr)
+
+
+def calc_error(opt, net, cuda, dataset, num_tests):
+ if num_tests > len(dataset):
+ num_tests = len(dataset)
+ with torch.no_grad():
+        error_arr, IOU_arr, prec_arr, recall_arr = [], [], [], []
+ for idx in tqdm(range(num_tests)):
+ data = dataset[idx * len(dataset) // num_tests]
+ # retrieve the data
+ image_tensor = data['img'].to(device=cuda)
+ calib_tensor = data['calib'].to(device=cuda)
+ sample_tensor = data['samples'].to(device=cuda).unsqueeze(0)
+ if opt.num_views > 1:
+ sample_tensor = reshape_sample_tensor(sample_tensor,
+ opt.num_views)
+ label_tensor = data['labels'].to(device=cuda).unsqueeze(0)
+
+ res, error = net.forward(image_tensor,
+ sample_tensor,
+ calib_tensor,
+ labels=label_tensor)
+
+ IOU, prec, recall = compute_acc(res, label_tensor)
+
+ # print(
+ # '{0}/{1} | Error: {2:06f} IOU: {3:06f} prec: {4:06f} recall: {5:06f}'
+ # .format(idx, num_tests, error.item(), IOU.item(), prec.item(), recall.item()))
+            error_arr.append(error.item())
+ IOU_arr.append(IOU.item())
+ prec_arr.append(prec.item())
+ recall_arr.append(recall.item())
+
+        return np.average(error_arr), np.average(IOU_arr), np.average(
+ prec_arr), np.average(recall_arr)
+
+
+def calc_error_color(opt, netG, netC, cuda, dataset, num_tests):
+ if num_tests > len(dataset):
+ num_tests = len(dataset)
+ with torch.no_grad():
+ error_color_arr = []
+
+ for idx in tqdm(range(num_tests)):
+ data = dataset[idx * len(dataset) // num_tests]
+ # retrieve the data
+ image_tensor = data['img'].to(device=cuda)
+ calib_tensor = data['calib'].to(device=cuda)
+ color_sample_tensor = data['color_samples'].to(
+ device=cuda).unsqueeze(0)
+
+ if opt.num_views > 1:
+ color_sample_tensor = reshape_sample_tensor(
+ color_sample_tensor, opt.num_views)
+
+ rgb_tensor = data['rgbs'].to(device=cuda).unsqueeze(0)
+
+ netG.filter(image_tensor)
+ _, errorC = netC.forward(image_tensor,
+ netG.get_im_feat(),
+ color_sample_tensor,
+ calib_tensor,
+ labels=rgb_tensor)
+
+ # print('{0}/{1} | Error inout: {2:06f} | Error color: {3:06f}'
+ # .format(idx, num_tests, errorG.item(), errorC.item()))
+ error_color_arr.append(errorC.item())
+
+ return np.average(error_color_arr)
+
+
+# pytorch lightning training related functions
+
+
+def query_func(opt, netG, features, points, proj_matrix=None):
+ '''
+ - points: size of (bz, N, 3)
+ - proj_matrix: size of (bz, 4, 4)
+ return: size of (bz, 1, N)
+ '''
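+    # wraps netG.query() behind a plain (points -> preds) interface so grid
+    # evaluators (e.g. marching-cubes samplers) can call it; an identity calib
+    # is used since the points are assumed to already live in view space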
+ assert len(points) == 1
+ samples = points.repeat(opt.num_views, 1, 1)
+ samples = samples.permute(0, 2, 1) # [bz, 3, N]
+
+ # view specific query
+ if proj_matrix is not None:
+ samples = orthogonal(samples, proj_matrix)
+
+ calib_tensor = torch.stack([torch.eye(4).float()], dim=0).type_as(samples)
+
+ preds = netG.query(features=features,
+ points=samples,
+ calibs=calib_tensor,
+ regressor=netG.if_regressor)
+
+ if type(preds) is list:
+ preds = preds[0]
+
+ return preds
+
+
+def isin(ar1, ar2):
+ return (ar1[..., None] == ar2).any(-1)
+
+
+def in1d(ar1, ar2):
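+    # torch analogue of np.in1d: marks which entries of ar1 also appear in ar2,
+    # e.g. in1d(tensor([1, 2, 3]), tensor([2, 4])) -> [False, True, False]
+    # (illustrative example, assuming non-negative integer index tensors)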
+ mask = ar2.new_zeros((max(ar1.max(), ar2.max()) + 1, ), dtype=torch.bool)
+ mask[ar2.unique()] = True
+ return mask[ar1]
+
+
+def get_visibility(xy, z, faces):
+ """get the visibility of vertices
+ Args:
+ xy (torch.tensor): [N,2]
+ z (torch.tensor): [N,1]
+ faces (torch.tensor): [N,3]
+ size (int): resolution of rendered image
+ """
+
+ xyz = torch.cat((xy, -z), dim=1)
+ xyz = (xyz + 1.0) / 2.0
+ faces = faces.long()
+
+ rasterizer = Pytorch3dRasterizer(image_size=2**12)
+ meshes_screen = Meshes(verts=xyz[None, ...], faces=faces[None, ...])
+ raster_settings = rasterizer.raster_settings
+
+ pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
+ meshes_screen,
+ image_size=raster_settings.image_size,
+ blur_radius=raster_settings.blur_radius,
+ faces_per_pixel=raster_settings.faces_per_pixel,
+ bin_size=raster_settings.bin_size,
+ max_faces_per_bin=raster_settings.max_faces_per_bin,
+ perspective_correct=raster_settings.perspective_correct,
+ cull_backfaces=raster_settings.cull_backfaces,
+ )
+
+ vis_vertices_id = torch.unique(faces[torch.unique(pix_to_face), :])
+ vis_mask = torch.zeros(size=(z.shape[0], 1))
+ vis_mask[vis_vertices_id] = 1.0
+
+ # print("------------------------\n")
+ # print(f"keep points : {vis_mask.sum()/len(vis_mask)}")
+
+ return vis_mask
+
+
+def batch_mean(res, key):
+ # recursive mean for multilevel dicts
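+    # e.g. res = [{'loss': tensor(1.)}, {'loss': tensor(3.)}], key='loss'
+    # -> tensor(2.); nested lists of dicts are reduced the same way
+    # (illustrative values, not from the source)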
+ return torch.stack([
+ x[key] if isinstance(x, dict) else batch_mean(x, key) for x in res
+ ]).mean()
+
+
+def tf_log_convert(log_dict):
+ new_log_dict = log_dict.copy()
+ for k, v in log_dict.items():
+ new_log_dict[k.replace("_", "/")] = v
+ del new_log_dict[k]
+
+ return new_log_dict
+
+
+def bar_log_convert(log_dict, name=None, rot=None):
+ from decimal import Decimal
+
+ new_log_dict = {}
+
+ if name is not None:
+ new_log_dict['name'] = name[0]
+ if rot is not None:
+ new_log_dict['rot'] = rot[0]
+
+ for k, v in log_dict.items():
+ color = "yellow"
+ if 'loss' in k:
+ color = "red"
+ k = k.replace("loss", "L")
+ elif 'acc' in k:
+ color = "green"
+ k = k.replace("acc", "A")
+ elif 'iou' in k:
+ color = "green"
+ k = k.replace("iou", "I")
+ elif 'prec' in k:
+ color = "green"
+ k = k.replace("prec", "P")
+ elif 'recall' in k:
+ color = "green"
+ k = k.replace("recall", "R")
+
+ if 'lr' not in k:
+ new_log_dict[colored(k.split("_")[1],
+ color)] = colored(f"{v:.3f}", color)
+ else:
+ new_log_dict[colored(k.split("_")[1],
+ color)] = colored(f"{Decimal(str(v)):.1E}",
+ color)
+
+ if 'loss' in new_log_dict.keys():
+ del new_log_dict['loss']
+
+ return new_log_dict
+
+
+def accumulate(outputs, rot_num, split):
+
+ hparam_log_dict = {}
+
+ metrics = outputs[0].keys()
+ datasets = split.keys()
+
+ for dataset in datasets:
+ for metric in metrics:
+ keyword = f"hparam/{dataset}-{metric}"
+ if keyword not in hparam_log_dict.keys():
+ hparam_log_dict[keyword] = 0
+ for idx in range(split[dataset][0] * rot_num,
+ split[dataset][1] * rot_num):
+ hparam_log_dict[keyword] += outputs[idx][metric]
+ hparam_log_dict[keyword] /= (split[dataset][1] -
+ split[dataset][0]) * rot_num
+
+ print(colored(hparam_log_dict, "green"))
+
+ return hparam_log_dict
+
+
+def calc_error_N(outputs, targets):
+ """calculate the error of normal (IGR)
+ Args:
+ outputs (torch.tensor): [B, 3, N]
+ target (torch.tensor): [B, N, 3]
+ # manifold loss and grad_loss in IGR paper
+ grad_loss = ((nonmnfld_grad.norm(2, dim=-1) - 1) ** 2).mean()
+ normals_loss = ((mnfld_grad - normals).abs()).norm(2, dim=1).mean()
+ Returns:
+ torch.tensor: error of valid normals on the surface
+ """
+ # outputs = torch.tanh(-outputs.permute(0,2,1).reshape(-1,3))
+ outputs = -outputs.permute(0, 2, 1).reshape(-1, 1)
+ targets = targets.reshape(-1, 3)[:, 2:3]
+ with_normals = targets.sum(dim=1).abs() > 0.0
+
+ # eikonal loss
+ grad_loss = ((outputs[with_normals].norm(2, dim=-1) - 1)**2).mean()
+ # normals loss
+ normal_loss = (outputs - targets)[with_normals].abs().norm(2, dim=1).mean()
+
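+    # note: grad_loss is kept for reference but weighted by 0.0 below, so only
+    # the normal term actually contributes to the returned error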
+ return grad_loss * 0.0 + normal_loss
+
+
+def calc_knn_acc(preds, carn_verts, labels, pick_num):
+ """calculate knn accuracy
+ Args:
+ preds (torch.tensor): [B, 3, N]
+ carn_verts (torch.tensor): [SMPLX_V_num, 3]
+ labels (torch.tensor): [B, N_knn, N]
+ """
+ N_knn_full = labels.shape[1]
+ preds = preds.permute(0, 2, 1).reshape(-1, 3)
+ labels = labels.permute(0, 2, 1).reshape(-1, N_knn_full) # [BxN, num_knn]
+ labels = labels[:, :pick_num]
+
+ dist = torch.cdist(preds, carn_verts, p=2) # [BxN, SMPL_V_num]
+ knn = dist.topk(k=pick_num, dim=1, largest=False)[1] # [BxN, num_knn]
+ cat_mat = torch.sort(torch.cat((knn, labels), dim=1))[0]
+ bool_col = torch.zeros_like(cat_mat)[:, 0]
+ for i in range(pick_num * 2 - 1):
+ bool_col += cat_mat[:, i] == cat_mat[:, i + 1]
+ acc = (bool_col > 0).sum() / len(bool_col)
+
+ return acc
+
+
+def calc_acc_seg(output, target, num_multiseg):
+ from pytorch_lightning.metrics import Accuracy
+ return Accuracy()(output.reshape(-1, num_multiseg).cpu(),
+ target.flatten().cpu())
+
+
+def add_watermark(imgs, titles):
+
+ # Write some Text
+
+ font = cv2.FONT_HERSHEY_SIMPLEX
+ bottomLeftCornerOfText = (350, 50)
+ bottomRightCornerOfText = (800, 50)
+ fontScale = 1
+ fontColor = (1.0, 1.0, 1.0)
+ lineType = 2
+
+ for i in range(len(imgs)):
+
+ title = titles[i + 1]
+ cv2.putText(imgs[i], title, bottomLeftCornerOfText, font, fontScale,
+ fontColor, lineType)
+
+ if i == 0:
+ cv2.putText(imgs[i], str(titles[i][0]), bottomRightCornerOfText,
+ font, fontScale, fontColor, lineType)
+
+ result = np.concatenate(imgs, axis=0).transpose(2, 0, 1)
+
+ return result
+
+
+def make_test_gif(img_dir):
+
+ if img_dir is not None and len(os.listdir(img_dir)) > 0:
+ for dataset in os.listdir(img_dir):
+ for subject in sorted(os.listdir(osp.join(img_dir, dataset))):
+ img_lst = []
+ im1 = None
+ for file in sorted(
+ os.listdir(osp.join(img_dir, dataset, subject))):
+ if file[-3:] not in ['obj', 'gif']:
+ img_path = os.path.join(img_dir, dataset, subject,
+ file)
+                    if im1 is None:
+ im1 = Image.open(img_path)
+ else:
+ img_lst.append(Image.open(img_path))
+
+ print(os.path.join(img_dir, dataset, subject, "out.gif"))
+ im1.save(os.path.join(img_dir, dataset, subject, "out.gif"),
+ save_all=True,
+ append_images=img_lst,
+ duration=500,
+ loop=0)
+
+
+def export_cfg(logger, cfg):
+
+ cfg_export_file = osp.join(logger.save_dir, logger.name,
+ f"version_{logger.version}", "cfg.yaml")
+
+ if not osp.exists(cfg_export_file):
+ os.makedirs(osp.dirname(cfg_export_file), exist_ok=True)
+ with open(cfg_export_file, "w+") as file:
+ _ = yaml.dump(cfg, file)
diff --git a/lib/dataset/Evaluator.py b/lib/dataset/Evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..68746d1b76c6328f7e4a904bb4d6fb1f5aa1de29
--- /dev/null
+++ b/lib/dataset/Evaluator.py
@@ -0,0 +1,262 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+
+from lib.renderer.gl.normal_render import NormalRender
+from lib.dataset.mesh_util import projection
+from lib.common.render import Render
+from PIL import Image
+import numpy as np
+import torch
+from torch import nn
+import trimesh
+import os.path as osp
+from PIL import Image
+
+
+class Evaluator:
+
+ _normal_render = None
+
+ @staticmethod
+ def init_gl():
+ Evaluator._normal_render = NormalRender(width=512, height=512)
+
+ def __init__(self, device):
+ self.device = device
+ self.render = Render(size=512, device=self.device)
+ self.error_term = nn.MSELoss()
+
+ self.offset = 0.0
+ self.scale_factor = None
+
+ def set_mesh(self, result_dict, scale_factor=1.0, offset=0.0):
+
+ for key in result_dict.keys():
+ if torch.is_tensor(result_dict[key]):
+ result_dict[key] = result_dict[key].detach().cpu().numpy()
+
+ for k, v in result_dict.items():
+ setattr(self, k, v)
+
+ self.scale_factor = scale_factor
+ self.offset = offset
+
+ def _render_normal(self, mesh, deg, norms=None):
+ view_mat = np.identity(4)
+ rz = deg / 180.0 * np.pi
+ model_mat = np.identity(4)
+ model_mat[:3, :3] = self._normal_render.euler_to_rot_mat(0, rz, 0)
+ model_mat[1, 3] = self.offset
+ view_mat[2, 2] *= -1
+
+ self._normal_render.set_matrices(view_mat, model_mat)
+ if norms is None:
+ norms = mesh.vertex_normals
+ self._normal_render.set_normal_mesh(self.scale_factor * mesh.vertices,
+ mesh.faces, norms, mesh.faces)
+ self._normal_render.draw()
+ normal_img = self._normal_render.get_color()
+ return normal_img
+
+ def render_mesh_list(self, mesh_lst):
+
+ self.offset = 0.0
+ self.scale_factor = 1.0
+
+ full_list = []
+ for mesh in mesh_lst:
+ row_lst = []
+ for deg in np.arange(0, 360, 90):
+ normal = self._render_normal(mesh, deg)
+ row_lst.append(normal)
+ full_list.append(np.concatenate(row_lst, axis=1))
+
+ res_array = np.concatenate(full_list, axis=0)
+
+ return res_array
+
+ def _get_reproj_normal_error(self, deg):
+
+ tgt_normal = self._render_normal(self.tgt_mesh, deg)
+ src_normal = self._render_normal(self.src_mesh, deg)
+ error = (((src_normal[:, :, :3] -
+ tgt_normal[:, :, :3])**2).sum(axis=2).mean(axis=(0, 1)))
+
+ return error, [src_normal, tgt_normal]
+
+ def render_normal(self, verts, faces):
+
+ verts = verts[0].detach().cpu().numpy()
+ faces = faces[0].detach().cpu().numpy()
+
+ mesh_F = trimesh.Trimesh(verts * np.array([1.0, -1.0, 1.0]), faces)
+ mesh_B = trimesh.Trimesh(verts * np.array([1.0, -1.0, -1.0]), faces)
+
+ self.scale_factor = 1.0
+
+ normal_F = self._render_normal(mesh_F, 0)
+ normal_B = self._render_normal(mesh_B,
+ 0,
+ norms=mesh_B.vertex_normals *
+ np.array([-1.0, -1.0, 1.0]))
+
+ mask = normal_F[:, :, 3:4]
+ normal_F = (torch.as_tensor(2.0 * (normal_F - 0.5) * mask).permute(
+ 2, 0, 1)[:3, :, :].float().unsqueeze(0).to(self.device))
+ normal_B = (torch.as_tensor(2.0 * (normal_B - 0.5) * mask).permute(
+ 2, 0, 1)[:3, :, :].float().unsqueeze(0).to(self.device))
+
+ return {"T_normal_F": normal_F, "T_normal_B": normal_B}
+
+ def calculate_normal_consist(
+ self,
+ frontal=True,
+ back=True,
+ left=True,
+ right=True,
+ save_demo_img=None,
+ return_demo=False,
+ ):
+
+ # reproj error
+        # if save_demo_img is not None, save a visualization at the given path (e.g. "./test.png")
+ if self._normal_render is None:
+ print(
+ "In order to use normal render, "
+ "you have to call init_gl() before initialing any evaluator objects."
+ )
+ return -1
+
+ side_cnt = 0
+ total_error = 0
+ demo_list = []
+
+ if frontal:
+ side_cnt += 1
+ error, normal_lst = self._get_reproj_normal_error(0)
+ total_error += error
+ demo_list.append(np.concatenate(normal_lst, axis=0))
+ if back:
+ side_cnt += 1
+ error, normal_lst = self._get_reproj_normal_error(180)
+ total_error += error
+ demo_list.append(np.concatenate(normal_lst, axis=0))
+ if left:
+ side_cnt += 1
+ error, normal_lst = self._get_reproj_normal_error(90)
+ total_error += error
+ demo_list.append(np.concatenate(normal_lst, axis=0))
+ if right:
+ side_cnt += 1
+ error, normal_lst = self._get_reproj_normal_error(270)
+ total_error += error
+ demo_list.append(np.concatenate(normal_lst, axis=0))
+ if save_demo_img is not None:
+ res_array = np.concatenate(demo_list, axis=1)
+ res_img = Image.fromarray((res_array * 255).astype(np.uint8))
+ res_img.save(save_demo_img)
+
+ if return_demo:
+ res_array = np.concatenate(demo_list, axis=1)
+ return res_array
+ else:
+ return total_error
+
+ def space_transfer(self):
+
+ # convert from GT to SDF
+ self.verts_pr -= self.recon_size / 2.0
+ self.verts_pr /= self.recon_size / 2.0
+
+ self.verts_gt = projection(self.verts_gt, self.calib)
+ self.verts_gt[:, 1] *= -1
+
+ self.tgt_mesh = trimesh.Trimesh(self.verts_gt, self.faces_gt)
+ self.src_mesh = trimesh.Trimesh(self.verts_pr, self.faces_pr)
+
+ # (self.tgt_mesh+self.src_mesh).show()
+
+ def export_mesh(self, dir, name):
+ self.tgt_mesh.visual.vertex_colors = np.array([255, 0, 0])
+ self.src_mesh.visual.vertex_colors = np.array([0, 255, 0])
+
+ (self.tgt_mesh + self.src_mesh).export(
+ osp.join(dir, f"{name}_gt_pr.obj"))
+
+ def calculate_chamfer_p2s(self, sampled_points=1000):
+ """calculate the geometry metrics [chamfer, p2s, chamfer_H, p2s_H]
+ Args:
+ verts_gt (torch.cuda.tensor): [N, 3]
+ faces_gt (torch.cuda.tensor): [M, 3]
+ verts_pr (torch.cuda.tensor): [N', 3]
+ faces_pr (torch.cuda.tensor): [M', 3]
+ sampled_points (int, optional): use smaller number for faster testing. Defaults to 1000.
+ Returns:
+ tuple: chamfer, p2s, chamfer_H, p2s_H
+ """
+
+ gt_surface_pts, _ = trimesh.sample.sample_surface_even(
+ self.tgt_mesh, sampled_points)
+ pred_surface_pts, _ = trimesh.sample.sample_surface_even(
+ self.src_mesh, sampled_points)
+
+ _, dist_pred_gt, _ = trimesh.proximity.closest_point(
+ self.src_mesh, gt_surface_pts)
+ _, dist_gt_pred, _ = trimesh.proximity.closest_point(
+ self.tgt_mesh, pred_surface_pts)
+
+ dist_pred_gt[np.isnan(dist_pred_gt)] = 0
+ dist_gt_pred[np.isnan(dist_gt_pred)] = 0
+ chamfer_dist = 0.5 * (dist_pred_gt.mean() +
+ dist_gt_pred.mean()).item() * 100
+ p2s_dist = dist_pred_gt.mean().item() * 100
+
+ return chamfer_dist, p2s_dist
+
+ def calc_acc(self, output, target, thres=0.5, use_sdf=False):
+
+ # # remove the surface points with thres
+ # non_surf_ids = (target != thres)
+ # output = output[non_surf_ids]
+ # target = target[non_surf_ids]
+
+ with torch.no_grad():
+ output = output.masked_fill(output < thres, 0.0)
+ output = output.masked_fill(output > thres, 1.0)
+
+ if use_sdf:
+ target = target.masked_fill(target < thres, 0.0)
+ target = target.masked_fill(target > thres, 1.0)
+
+ acc = output.eq(target).float().mean()
+
+ # iou, precison, recall
+ output = output > thres
+ target = target > thres
+
+ union = output | target
+ inter = output & target
+
+ _max = torch.tensor(1.0).to(output.device)
+
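+        # clamping each count to at least 1.0 avoids division by zero when the
+        # prediction or the ground-truth volume is empty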
+ union = max(union.sum().float(), _max)
+ true_pos = max(inter.sum().float(), _max)
+ vol_pred = max(output.sum().float(), _max)
+ vol_gt = max(target.sum().float(), _max)
+
+ return acc, true_pos / union, true_pos / vol_pred, true_pos / vol_gt
\ No newline at end of file
diff --git a/lib/dataset/PIFuDataModule.py b/lib/dataset/PIFuDataModule.py
new file mode 100644
index 0000000000000000000000000000000000000000..453238cf616d14ee0034253c91f661d3a98f6319
--- /dev/null
+++ b/lib/dataset/PIFuDataModule.py
@@ -0,0 +1,71 @@
+import numpy as np
+from torch.utils.data import DataLoader
+from .PIFuDataset import PIFuDataset
+import pytorch_lightning as pl
+
+
+class PIFuDataModule(pl.LightningDataModule):
+ def __init__(self, cfg):
+ super(PIFuDataModule, self).__init__()
+ self.cfg = cfg
+ self.overfit = self.cfg.overfit
+
+ if self.overfit:
+ self.batch_size = 1
+ else:
+ self.batch_size = self.cfg.batch_size
+
+ self.data_size = {}
+
+ def prepare_data(self):
+
+ pass
+
+ @staticmethod
+ def worker_init_fn(worker_id):
+ np.random.seed(np.random.get_state()[1][0] + worker_id)
+
+ def setup(self, stage):
+
+ if stage == 'fit':
+ self.train_dataset = PIFuDataset(cfg=self.cfg, split="train")
+ self.val_dataset = PIFuDataset(cfg=self.cfg, split="val")
+ self.data_size = {'train': len(self.train_dataset),
+ 'val': len(self.val_dataset)}
+
+ if stage == 'test':
+ self.test_dataset = PIFuDataset(cfg=self.cfg, split="test")
+
+ def train_dataloader(self):
+
+ train_data_loader = DataLoader(
+ self.train_dataset,
+ batch_size=self.batch_size, shuffle=True,
+ num_workers=self.cfg.num_threads, pin_memory=True,
+ worker_init_fn=self.worker_init_fn)
+
+ return train_data_loader
+
+ def val_dataloader(self):
+
+ if self.overfit:
+ current_dataset = self.train_dataset
+ else:
+ current_dataset = self.val_dataset
+
+ val_data_loader = DataLoader(
+ current_dataset,
+ batch_size=1, shuffle=False,
+ num_workers=self.cfg.num_threads, pin_memory=True,
+ worker_init_fn=self.worker_init_fn)
+
+ return val_data_loader
+
+ def test_dataloader(self):
+
+ test_data_loader = DataLoader(
+ self.test_dataset,
+ batch_size=1, shuffle=False,
+ num_workers=self.cfg.num_threads, pin_memory=True)
+
+ return test_data_loader
\ No newline at end of file
diff --git a/lib/dataset/hoppeMesh.py b/lib/dataset/hoppeMesh.py
new file mode 100644
index 0000000000000000000000000000000000000000..924d54b0676a2b61823c884d361906c765c9d14c
--- /dev/null
+++ b/lib/dataset/hoppeMesh.py
@@ -0,0 +1,116 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+from scipy.spatial import cKDTree
+import trimesh
+
+import logging
+
+logging.getLogger("trimesh").setLevel(logging.ERROR)
+
+
+def save_obj_mesh(mesh_path, verts, faces):
+ file = open(mesh_path, 'w')
+ for v in verts:
+ file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
+ for f in faces:
+ f_plus = f + 1
+ file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
+ file.close()
+
+
+def save_obj_mesh_with_color(mesh_path, verts, faces, colors):
+ file = open(mesh_path, 'w')
+
+ for idx, v in enumerate(verts):
+ c = colors[idx]
+ file.write('v %.4f %.4f %.4f %.4f %.4f %.4f\n' %
+ (v[0], v[1], v[2], c[0], c[1], c[2]))
+ for f in faces:
+ f_plus = f + 1
+ file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
+ file.close()
+
+
+def save_ply(mesh_path, points, rgb):
+ '''
+ Save the visualization of sampling to a ply file.
+ Red points represent positive predictions.
+ Green points represent negative predictions.
+ :param mesh_path: File name to save
+ :param points: [N, 3] array of points
+ :param rgb: [N, 3] array of rgb values in the range [0~1]
+ :return:
+ '''
+ to_save = np.concatenate([points, rgb * 255], axis=-1)
+ return np.savetxt(
+ mesh_path,
+ to_save,
+ fmt='%.6f %.6f %.6f %d %d %d',
+ comments='',
+ header=(
+ 'ply\nformat ascii 1.0\nelement vertex {:d}\n' +
+ 'property float x\nproperty float y\nproperty float z\n' +
+ 'property uchar red\nproperty uchar green\nproperty uchar blue\n' +
+ 'end_header').format(points.shape[0]))
+
+
+class HoppeMesh:
+ def __init__(self, verts, faces, vert_normals, face_normals):
+ '''
+ The HoppeSDF calculates signed distance towards a predefined oriented point cloud
+ http://hhoppe.com/recon.pdf
+        For clean and high-resolution point-cloud data, this is the fastest and most accurate approximation of the SDF.
+        :param verts: [n, 3] mesh vertices
+        :param faces: [m, 3] mesh faces
+        :param vert_normals: [n, 3] per-vertex normals
+        :param face_normals: [m, 3] per-face normals
+ '''
+ self.verts = verts # [n, 3]
+ self.faces = faces # [m, 3]
+ self.vert_normals = vert_normals # [n, 3]
+ self.face_normals = face_normals # [m, 3]
+
+ self.kd_tree = cKDTree(self.verts)
+ self.len = len(self.verts)
+
+ def query(self, points):
+ dists, idx = self.kd_tree.query(points, n_jobs=1)
+        # FIXME: because the eyebrows are removed, the cKDTree around the eyebrows
+        # is not accurate, which causes a few false-inside labels here.
+ dirs = points - self.verts[idx]
+ signs = (dirs * self.vert_normals[idx]).sum(axis=1)
+ signs = (signs > 0) * 2 - 1
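+        # sign convention: a positive dot product with the nearest vertex
+        # normal means the query point lies outside (+1), otherwise inside (-1),
+        # so the returned value behaves like a Hoppe-style signed distance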
+ return signs * dists
+
+ def contains(self, points):
+
+ labels = trimesh.Trimesh(vertices=self.verts,
+ faces=self.faces).contains(points)
+ return labels
+
+ def export(self, path):
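+        # note: self.colors is not set in __init__; it is expected to be
+        # attached externally before calling export() or export_ply()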
+ if self.colors is not None:
+ save_obj_mesh_with_color(path, self.verts, self.faces,
+ self.colors[:, 0:3] / 255.0)
+ else:
+ save_obj_mesh(path, self.verts, self.faces)
+
+ def export_ply(self, path):
+ save_ply(path, self.verts, self.colors[:, 0:3] / 255.0)
+
+ def triangles(self):
+ return self.verts[self.faces] # [n, 3, 3]
\ No newline at end of file
diff --git a/lib/dataset/NormalDataset.py b/lib/dataset/NormalDataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e71cd3ed344867c80cf842c369e06cfccedf4c7
--- /dev/null
+++ b/lib/dataset/NormalDataset.py
@@ -0,0 +1,212 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os.path as osp
+import numpy as np
+from PIL import Image
+import torchvision.transforms as transforms
+
+
+class NormalDataset():
+ def __init__(self, cfg, split='train'):
+
+ self.split = split
+ self.root = cfg.root
+ self.overfit = cfg.overfit
+
+ self.opt = cfg.dataset
+ self.datasets = self.opt.types
+ self.input_size = self.opt.input_size
+ self.set_splits = self.opt.set_splits
+ self.scales = self.opt.scales
+ self.pifu = self.opt.pifu
+
+ # input data types and dimensions
+ self.in_nml = [item[0] for item in cfg.net.in_nml]
+ self.in_nml_dim = [item[1] for item in cfg.net.in_nml]
+ self.in_total = self.in_nml + ['normal_F', 'normal_B']
+ self.in_total_dim = self.in_nml_dim + [3, 3]
+
+ if self.split != 'train':
+ self.rotations = range(0, 360, 120)
+ else:
+            self.rotations = np.arange(0, 360, 360 /
+                                       self.opt.rotation_num).astype(np.int32)
+
+ self.datasets_dict = {}
+ for dataset_id, dataset in enumerate(self.datasets):
+ dataset_dir = osp.join(self.root, dataset, "smplx")
+ self.datasets_dict[dataset] = {
+ "subjects":
+ np.loadtxt(osp.join(self.root, dataset, "all.txt"), dtype=str),
+ "path":
+ dataset_dir,
+ "scale":
+ self.scales[dataset_id]
+ }
+
+ self.subject_list = self.get_subject_list(split)
+
+ # PIL to tensor
+ self.image_to_tensor = transforms.Compose([
+ transforms.Resize(self.input_size),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ # PIL to tensor
+ self.mask_to_tensor = transforms.Compose([
+ transforms.Resize(self.input_size),
+ transforms.ToTensor(),
+ transforms.Normalize((0.0, ), (1.0, ))
+ ])
+
+ def get_subject_list(self, split):
+
+ subject_list = []
+
+ for dataset in self.datasets:
+
+ if self.pifu:
+ txt = osp.join(self.root, dataset, f'{split}_pifu.txt')
+ else:
+ txt = osp.join(self.root, dataset, f'{split}.txt')
+
+ if osp.exists(txt):
+ print(f"load from {txt}")
+ subject_list += sorted(np.loadtxt(txt, dtype=str).tolist())
+
+ if self.pifu:
+ miss_pifu = sorted(
+ np.loadtxt(osp.join(self.root, dataset,
+ "miss_pifu.txt"),
+ dtype=str).tolist())
+ subject_list = [
+ subject for subject in subject_list
+ if subject not in miss_pifu
+ ]
+ subject_list = [
+ "renderpeople/" + subject for subject in subject_list
+ ]
+
+ else:
+ train_txt = osp.join(self.root, dataset, 'train.txt')
+ val_txt = osp.join(self.root, dataset, 'val.txt')
+ test_txt = osp.join(self.root, dataset, 'test.txt')
+
+ print(
+ f"generate lists of [train, val, test] \n {train_txt} \n {val_txt} \n {test_txt} \n"
+ )
+
+ split_txt = osp.join(self.root, dataset, f'{split}.txt')
+
+ subjects = self.datasets_dict[dataset]['subjects']
+ train_split = int(len(subjects) * self.set_splits[0])
+ val_split = int(
+ len(subjects) * self.set_splits[1]) + train_split
+
+ with open(train_txt, "w") as f:
+ f.write("\n".join(dataset + "/" + item
+ for item in subjects[:train_split]))
+ with open(val_txt, "w") as f:
+ f.write("\n".join(
+ dataset + "/" + item
+ for item in subjects[train_split:val_split]))
+ with open(test_txt, "w") as f:
+ f.write("\n".join(dataset + "/" + item
+ for item in subjects[val_split:]))
+
+ subject_list += sorted(
+ np.loadtxt(split_txt, dtype=str).tolist())
+
+ bug_list = sorted(
+ np.loadtxt(osp.join(self.root, 'bug.txt'), dtype=str).tolist())
+
+ subject_list = [
+ subject for subject in subject_list if (subject not in bug_list)
+ ]
+
+ return subject_list
+
+ def __len__(self):
+ return len(self.subject_list) * len(self.rotations)
+
+ def __getitem__(self, index):
+
+ # only pick the first data if overfitting
+ if self.overfit:
+ index = 0
+
+ rid = index % len(self.rotations)
+ mid = index // len(self.rotations)
+
+ rotation = self.rotations[rid]
+
+ # choose specific test sets
+ subject = self.subject_list[mid]
+
+ subject_render = "/".join(
+ [subject.split("/")[0] + "_12views",
+ subject.split("/")[1]])
+
+ # setup paths
+ data_dict = {
+ 'dataset':
+ subject.split("/")[0],
+ 'subject':
+ subject,
+ 'rotation':
+ rotation,
+ 'image_path':
+ osp.join(self.root, subject_render, 'render',
+ f'{rotation:03d}.png')
+ }
+
+ # image/normal/depth loader
+ for name, channel in zip(self.in_total, self.in_total_dim):
+
+ if name != 'image':
+ data_dict.update({
+ f'{name}_path':
+ osp.join(self.root, subject_render, name,
+ f'{rotation:03d}.png')
+ })
+ data_dict.update({
+ name:
+ self.imagepath2tensor(data_dict[f'{name}_path'],
+ channel,
+ inv='depth_B' in name)
+ })
+
+ path_keys = [
+ key for key in data_dict.keys() if '_path' in key or '_dir' in key
+ ]
+ for key in path_keys:
+ del data_dict[key]
+
+ return data_dict
+
+ def imagepath2tensor(self, path, channel=3, inv=False):
+
+ rgba = Image.open(path).convert('RGBA')
+ mask = rgba.split()[-1]
+ image = rgba.convert('RGB')
+ image = self.image_to_tensor(image)
+ mask = self.mask_to_tensor(mask)
+ image = (image * mask)[:channel]
+
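+        # (0.5 - inv) * 2.0 evaluates to +1.0 when inv=False and -1.0 when
+        # inv=True, so the normalized [-1, 1] image is kept as-is or sign-flipped
+        # (inv is enabled above for 'depth_B'-style inputs)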
+ return (image * (0.5 - inv) * 2.0).float()
\ No newline at end of file
diff --git a/lib/dataset/NormalModule.py b/lib/dataset/NormalModule.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea25725bc8d553084c63baf8900a1b3035b00f10
--- /dev/null
+++ b/lib/dataset/NormalModule.py
@@ -0,0 +1,94 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+from torch.utils.data import DataLoader
+from .NormalDataset import NormalDataset
+
+# pytorch lightning related libs
+import pytorch_lightning as pl
+
+
+class NormalModule(pl.LightningDataModule):
+ def __init__(self, cfg):
+ super(NormalModule, self).__init__()
+ self.cfg = cfg
+ self.overfit = self.cfg.overfit
+
+ if self.overfit:
+ self.batch_size = 1
+ else:
+ self.batch_size = self.cfg.batch_size
+
+ self.data_size = {}
+
+ def prepare_data(self):
+
+ pass
+
+ @staticmethod
+ def worker_init_fn(worker_id):
+ np.random.seed(np.random.get_state()[1][0] + worker_id)
+
+ def setup(self, stage):
+
+ if stage == 'fit' or stage is None:
+ self.train_dataset = NormalDataset(cfg=self.cfg, split="train")
+ self.val_dataset = NormalDataset(cfg=self.cfg, split="val")
+ self.data_size = {
+ 'train': len(self.train_dataset),
+ 'val': len(self.val_dataset)
+ }
+
+ if stage == 'test' or stage is None:
+ self.test_dataset = NormalDataset(cfg=self.cfg, split="test")
+
+ def train_dataloader(self):
+
+ train_data_loader = DataLoader(self.train_dataset,
+ batch_size=self.batch_size,
+ shuffle=not self.overfit,
+ num_workers=self.cfg.num_threads,
+ pin_memory=True,
+ worker_init_fn=self.worker_init_fn)
+
+ return train_data_loader
+
+ def val_dataloader(self):
+
+ if self.overfit:
+ current_dataset = self.train_dataset
+ else:
+ current_dataset = self.val_dataset
+
+ val_data_loader = DataLoader(current_dataset,
+ batch_size=self.batch_size,
+ shuffle=False,
+ num_workers=self.cfg.num_threads,
+ pin_memory=True)
+
+ return val_data_loader
+
+ def test_dataloader(self):
+
+ test_data_loader = DataLoader(self.test_dataset,
+ batch_size=1,
+ shuffle=False,
+ num_workers=self.cfg.num_threads,
+ pin_memory=True)
+
+ return test_data_loader
\ No newline at end of file
diff --git a/lib/dataset/PIFuDataset.py b/lib/dataset/PIFuDataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bf240c1f1a35b45c4bd38aed6c1827bdd09e56d
--- /dev/null
+++ b/lib/dataset/PIFuDataset.py
@@ -0,0 +1,589 @@
+from lib.renderer.mesh import load_fit_body
+from lib.dataset.hoppeMesh import HoppeMesh
+from lib.dataset.body_model import TetraSMPLModel
+from lib.common.render import Render
+from lib.dataset.mesh_util import SMPLX, projection, cal_sdf_batch, get_visibility
+from lib.pare.pare.utils.geometry import rotation_matrix_to_angle_axis
+from termcolor import colored
+import os.path as osp
+import numpy as np
+from PIL import Image
+import random
+import os
+import trimesh
+import torch
+from kaolin.ops.mesh import check_sign
+import torchvision.transforms as transforms
+from huggingface_hub import hf_hub_download, cached_download
+
+
+class PIFuDataset():
+ def __init__(self, cfg, split='train', vis=False):
+
+ self.split = split
+ self.root = cfg.root
+ self.bsize = cfg.batch_size
+ self.overfit = cfg.overfit
+
+ # for debug, only used in visualize_sampling3D
+ self.vis = vis
+
+ self.opt = cfg.dataset
+ self.datasets = self.opt.types
+ self.input_size = self.opt.input_size
+ self.scales = self.opt.scales
+ self.workers = cfg.num_threads
+ self.prior_type = cfg.net.prior_type
+
+ self.noise_type = self.opt.noise_type
+ self.noise_scale = self.opt.noise_scale
+
+ noise_joints = [4, 5, 7, 8, 13, 14, 16, 17, 18, 19, 20, 21]
+
+ self.noise_smpl_idx = []
+ self.noise_smplx_idx = []
+
+ for idx in noise_joints:
+ self.noise_smpl_idx.append(idx * 3)
+ self.noise_smpl_idx.append(idx * 3 + 1)
+ self.noise_smpl_idx.append(idx * 3 + 2)
+
+ self.noise_smplx_idx.append((idx-1) * 3)
+ self.noise_smplx_idx.append((idx-1) * 3 + 1)
+ self.noise_smplx_idx.append((idx-1) * 3 + 2)
+
+ self.use_sdf = cfg.sdf
+ self.sdf_clip = cfg.sdf_clip
+
+ # [(feat_name, channel_num),...]
+ self.in_geo = [item[0] for item in cfg.net.in_geo]
+ self.in_nml = [item[0] for item in cfg.net.in_nml]
+
+ self.in_geo_dim = [item[1] for item in cfg.net.in_geo]
+ self.in_nml_dim = [item[1] for item in cfg.net.in_nml]
+
+ self.in_total = self.in_geo + self.in_nml
+ self.in_total_dim = self.in_geo_dim + self.in_nml_dim
+
+ if self.split == 'train':
+ self.rotations = np.arange(
+ 0, 360, 360 / self.opt.rotation_num).astype(np.int32)
+ else:
+ self.rotations = range(0, 360, 120)
+
+ self.datasets_dict = {}
+
+ for dataset_id, dataset in enumerate(self.datasets):
+
+ mesh_dir = None
+ smplx_dir = None
+
+ dataset_dir = osp.join(self.root, dataset)
+
+ if dataset in ['thuman2']:
+ mesh_dir = osp.join(dataset_dir, "scans")
+ smplx_dir = osp.join(dataset_dir, "fits")
+ smpl_dir = osp.join(dataset_dir, "smpl")
+
+ self.datasets_dict[dataset] = {
+ "subjects": np.loadtxt(osp.join(dataset_dir, "all.txt"), dtype=str),
+ "smplx_dir": smplx_dir,
+ "smpl_dir": smpl_dir,
+ "mesh_dir": mesh_dir,
+ "scale": self.scales[dataset_id]
+ }
+
+ self.subject_list = self.get_subject_list(split)
+ self.smplx = SMPLX()
+
+ # PIL to tensor
+ self.image_to_tensor = transforms.Compose([
+ transforms.Resize(self.input_size),
+ transforms.ToTensor(),
+ transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+ ])
+
+ # PIL to tensor
+ self.mask_to_tensor = transforms.Compose([
+ transforms.Resize(self.input_size),
+ transforms.ToTensor(),
+ transforms.Normalize((0.0, ), (1.0, ))
+ ])
+
+ self.device = torch.device(f"cuda:{cfg.gpus[0]}")
+ self.render = Render(size=512, device=self.device)
+
+ def render_normal(self, verts, faces):
+
+ # render optimized mesh (normal, T_normal, image [-1,1])
+ self.render.load_meshes(verts, faces)
+ return self.render.get_rgb_image()
+
+ def get_subject_list(self, split):
+
+ subject_list = []
+
+ for dataset in self.datasets:
+
+ split_txt = osp.join(self.root, dataset, f'{split}.txt')
+
+ if osp.exists(split_txt):
+ print(f"load from {split_txt}")
+ subject_list += np.loadtxt(split_txt, dtype=str).tolist()
+ else:
+ full_txt = osp.join(self.root, dataset, 'all.txt')
+ print(f"split {full_txt} into train/val/test")
+
+ full_lst = np.loadtxt(full_txt, dtype=str)
+ full_lst = [dataset+"/"+item for item in full_lst]
+ [train_lst, test_lst, val_lst] = np.split(
+ full_lst, [500, 500+5, ])
+
+ np.savetxt(full_txt.replace(
+ "all", "train"), train_lst, fmt="%s")
+ np.savetxt(full_txt.replace("all", "test"), test_lst, fmt="%s")
+ np.savetxt(full_txt.replace("all", "val"), val_lst, fmt="%s")
+
+ print(f"load from {split_txt}")
+ subject_list += np.loadtxt(split_txt, dtype=str).tolist()
+
+ if self.split != 'test':
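+            # pad the list so its length becomes a multiple of the batch size;
+            # the duplicated leading subjects keep every training batch full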
+ subject_list += subject_list[:self.bsize -
+ len(subject_list) % self.bsize]
+ print(colored(f"total: {len(subject_list)}", "yellow"))
+ random.shuffle(subject_list)
+
+ # subject_list = ["thuman2/0008"]
+ return subject_list
+
+ def __len__(self):
+ return len(self.subject_list) * len(self.rotations)
+
+ def __getitem__(self, index):
+
+ # only pick the first data if overfitting
+ if self.overfit:
+ index = 0
+
+ rid = index % len(self.rotations)
+ mid = index // len(self.rotations)
+
+ rotation = self.rotations[rid]
+ subject = self.subject_list[mid].split("/")[1]
+ dataset = self.subject_list[mid].split("/")[0]
+ render_folder = "/".join([dataset +
+ f"_{self.opt.rotation_num}views", subject])
+
+ # setup paths
+ data_dict = {
+ 'dataset': dataset,
+ 'subject': subject,
+ 'rotation': rotation,
+ 'scale': self.datasets_dict[dataset]["scale"],
+ 'mesh_path': osp.join(self.datasets_dict[dataset]["mesh_dir"], f"{subject}/{subject}.obj"),
+ 'smplx_path': osp.join(self.datasets_dict[dataset]["smplx_dir"], f"{subject}/smplx_param.pkl"),
+ 'smpl_path': osp.join(self.datasets_dict[dataset]["smpl_dir"], f"{subject}.pkl"),
+ 'calib_path': osp.join(self.root, render_folder, 'calib', f'{rotation:03d}.txt'),
+ 'vis_path': osp.join(self.root, render_folder, 'vis', f'{rotation:03d}.pt'),
+ 'image_path': osp.join(self.root, render_folder, 'render', f'{rotation:03d}.png')
+ }
+
+ # load training data
+ data_dict.update(self.load_calib(data_dict))
+
+ # image/normal/depth loader
+ for name, channel in zip(self.in_total, self.in_total_dim):
+
+ if f'{name}_path' not in data_dict.keys():
+ data_dict.update({
+ f'{name}_path': osp.join(self.root, render_folder, name, f'{rotation:03d}.png')
+ })
+
+ # tensor update
+ data_dict.update({
+ name: self.imagepath2tensor(
+ data_dict[f'{name}_path'], channel, inv=False)
+ })
+
+ data_dict.update(self.load_mesh(data_dict))
+ data_dict.update(self.get_sampling_geo(
+ data_dict, is_valid=self.split == "val", is_sdf=self.use_sdf))
+ data_dict.update(self.load_smpl(data_dict, self.vis))
+
+ if self.prior_type == 'pamir':
+ data_dict.update(self.load_smpl_voxel(data_dict))
+
+ if (self.split != 'test') and (not self.vis):
+
+ del data_dict['verts']
+ del data_dict['faces']
+
+ if not self.vis:
+ del data_dict['mesh']
+
+ path_keys = [
+ key for key in data_dict.keys() if '_path' in key or '_dir' in key
+ ]
+ for key in path_keys:
+ del data_dict[key]
+
+ return data_dict
+
+ def imagepath2tensor(self, path, channel=3, inv=False):
+
+ rgba = Image.open(path).convert('RGBA')
+ mask = rgba.split()[-1]
+ image = rgba.convert('RGB')
+ image = self.image_to_tensor(image)
+ mask = self.mask_to_tensor(mask)
+ image = (image * mask)[:channel]
+
+ return (image * (0.5 - inv) * 2.0).float()
+
+ def load_calib(self, data_dict):
+ calib_data = np.loadtxt(data_dict['calib_path'], dtype=float)
+ extrinsic = calib_data[:4, :4]
+ intrinsic = calib_data[4:8, :4]
+ calib_mat = np.matmul(intrinsic, extrinsic)
+ calib_mat = torch.from_numpy(calib_mat).float()
+ return {'calib': calib_mat}
+
+ def load_mesh(self, data_dict):
+ mesh_path = data_dict['mesh_path']
+ scale = data_dict['scale']
+
+ mesh_ori = trimesh.load(mesh_path,
+ skip_materials=True,
+ process=False,
+ maintain_order=True)
+ verts = mesh_ori.vertices * scale
+ faces = mesh_ori.faces
+
+ vert_normals = np.array(mesh_ori.vertex_normals)
+ face_normals = np.array(mesh_ori.face_normals)
+
+ mesh = HoppeMesh(verts, faces, vert_normals, face_normals)
+
+ return {
+ 'mesh': mesh,
+ 'verts': torch.as_tensor(mesh.verts).float(),
+ 'faces': torch.as_tensor(mesh.faces).long()
+ }
+
+ def add_noise(self,
+ beta_num,
+ smpl_pose,
+ smpl_betas,
+ noise_type,
+ noise_scale,
+ type,
+ hashcode):
+
+ np.random.seed(hashcode)
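+        # seeding with a per-(subject, rotation) hashcode makes the injected
+        # SMPL pose/shape noise deterministic for the same sample across epochs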
+
+ if type == 'smplx':
+ noise_idx = self.noise_smplx_idx
+ else:
+ noise_idx = self.noise_smpl_idx
+
+ if 'beta' in noise_type and noise_scale[noise_type.index("beta")] > 0.0:
+ smpl_betas += (np.random.rand(beta_num) -
+ 0.5) * 2.0 * noise_scale[noise_type.index("beta")]
+ smpl_betas = smpl_betas.astype(np.float32)
+
+ if 'pose' in noise_type and noise_scale[noise_type.index("pose")] > 0.0:
+ smpl_pose[noise_idx] += (
+ np.random.rand(len(noise_idx)) -
+ 0.5) * 2.0 * np.pi * noise_scale[noise_type.index("pose")]
+ smpl_pose = smpl_pose.astype(np.float32)
+ if type == 'smplx':
+ return torch.as_tensor(smpl_pose[None, ...]), torch.as_tensor(smpl_betas[None, ...])
+ else:
+ return smpl_pose, smpl_betas
+
+ def compute_smpl_verts(self, data_dict, noise_type=None, noise_scale=None):
+
+ dataset = data_dict['dataset']
+ smplx_dict = {}
+
+ smplx_param = np.load(data_dict['smplx_path'], allow_pickle=True)
+ smplx_pose = smplx_param["body_pose"] # [1,63]
+ smplx_betas = smplx_param["betas"] # [1,10]
+ smplx_pose, smplx_betas = self.add_noise(
+ smplx_betas.shape[1],
+ smplx_pose[0],
+ smplx_betas[0],
+ noise_type,
+ noise_scale,
+ type='smplx',
+ hashcode=(hash(f"{data_dict['subject']}_{data_dict['rotation']}")) % (10**8))
+
+ smplx_out, _ = load_fit_body(fitted_path=data_dict['smplx_path'],
+ scale=self.datasets_dict[dataset]['scale'],
+ smpl_type='smplx',
+ smpl_gender='male',
+ noise_dict=dict(betas=smplx_betas, body_pose=smplx_pose))
+
+ smplx_dict.update({"type": "smplx",
+ "gender": 'male',
+ "body_pose": torch.as_tensor(smplx_pose),
+ "betas": torch.as_tensor(smplx_betas)})
+
+ return smplx_out.vertices, smplx_dict
+
+ def compute_voxel_verts(self,
+ data_dict,
+ noise_type=None,
+ noise_scale=None):
+
+ smpl_param = np.load(data_dict['smpl_path'], allow_pickle=True)
+ smplx_param = np.load(data_dict['smplx_path'], allow_pickle=True)
+
+ smpl_pose = rotation_matrix_to_angle_axis(
+ torch.as_tensor(smpl_param['full_pose'][0])).numpy()
+ smpl_betas = smpl_param["betas"]
+
+ smpl_path = cached_download(osp.join(self.smplx.model_dir, "smpl/SMPL_MALE.pkl"), use_auth_token=os.environ['ICON'])
+ tetra_path = cached_download(osp.join(self.smplx.tedra_dir,
+ "tetra_male_adult_smpl.npz"), use_auth_token=os.environ['ICON'])
+
+ smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult')
+
+ smpl_pose, smpl_betas = self.add_noise(
+ smpl_model.beta_shape[0],
+ smpl_pose.flatten(),
+ smpl_betas[0],
+ noise_type,
+ noise_scale,
+ type='smpl',
+ hashcode=(hash(f"{data_dict['subject']}_{data_dict['rotation']}")) % (10**8))
+
+ smpl_model.set_params(pose=smpl_pose.reshape(-1, 3),
+ beta=smpl_betas,
+ trans=smpl_param["transl"])
+
+ verts = (np.concatenate([smpl_model.verts, smpl_model.verts_added],
+ axis=0) * smplx_param["scale"] + smplx_param["translation"]
+ ) * self.datasets_dict[data_dict['dataset']]['scale']
+ faces = np.loadtxt(cached_download(osp.join(self.smplx.tedra_dir, "tetrahedrons_male_adult.txt"), use_auth_token=os.environ['ICON']),
+ dtype=np.int32) - 1
+
+ pad_v_num = int(8000 - verts.shape[0])
+ pad_f_num = int(25100 - faces.shape[0])
+
+ verts = np.pad(verts, ((0, pad_v_num), (0, 0)),
+ mode='constant',
+ constant_values=0.0).astype(np.float32)
+ faces = np.pad(faces, ((0, pad_f_num), (0, 0)),
+ mode='constant',
+ constant_values=0.0).astype(np.int32)
+
+
+ return verts, faces, pad_v_num, pad_f_num
+
+ def load_smpl(self, data_dict, vis=False):
+
+ smplx_verts, smplx_dict = self.compute_smpl_verts(
+ data_dict, self.noise_type,
+ self.noise_scale) # compute using smpl model
+
+ smplx_verts = projection(smplx_verts, data_dict['calib']).float()
+ smplx_faces = torch.as_tensor(self.smplx.faces).long()
+ smplx_vis = torch.load(data_dict['vis_path']).float()
+ smplx_cmap = torch.as_tensor(
+ np.load(self.smplx.cmap_vert_path)).float()
+
+ # get smpl_signs
+ query_points = projection(data_dict['samples_geo'],
+ data_dict['calib']).float()
+
+ pts_signs = 2.0 * (check_sign(smplx_verts.unsqueeze(0),
+ smplx_faces,
+ query_points.unsqueeze(0)).float() - 0.5).squeeze(0)
+
+ return_dict = {
+ 'smpl_verts': smplx_verts,
+ 'smpl_faces': smplx_faces,
+ 'smpl_vis': smplx_vis,
+ 'smpl_cmap': smplx_cmap,
+ 'pts_signs': pts_signs
+ }
+ if smplx_dict is not None:
+ return_dict.update(smplx_dict)
+
+ if vis:
+
+ (xy, z) = torch.as_tensor(smplx_verts).to(
+ self.device).split([2, 1], dim=1)
+ smplx_vis = get_visibility(xy, z, torch.as_tensor(
+ smplx_faces).to(self.device).long())
+
+ T_normal_F, T_normal_B = self.render_normal(
+ (smplx_verts*torch.tensor([1.0, -1.0, 1.0])).to(self.device),
+ smplx_faces.to(self.device))
+
+ return_dict.update({"T_normal_F": T_normal_F.squeeze(0),
+ "T_normal_B": T_normal_B.squeeze(0)})
+ query_points = projection(data_dict['samples_geo'],
+ data_dict['calib']).float()
+
+ smplx_sdf, smplx_norm, smplx_cmap, smplx_vis = cal_sdf_batch(
+ smplx_verts.unsqueeze(0).to(self.device),
+ smplx_faces.unsqueeze(0).to(self.device),
+ smplx_cmap.unsqueeze(0).to(self.device),
+ smplx_vis.unsqueeze(0).to(self.device),
+ query_points.unsqueeze(0).contiguous().to(self.device))
+
+ return_dict.update({
+ 'smpl_feat':
+ torch.cat(
+ (smplx_sdf[0].detach().cpu(),
+ smplx_cmap[0].detach().cpu(),
+ smplx_norm[0].detach().cpu(),
+ smplx_vis[0].detach().cpu()),
+ dim=1)
+ })
+
+ return return_dict
+
+ def load_smpl_voxel(self, data_dict):
+
+ smpl_verts, smpl_faces, pad_v_num, pad_f_num = self.compute_voxel_verts(
+ data_dict, self.noise_type,
+ self.noise_scale) # compute using smpl model
+ smpl_verts = projection(smpl_verts, data_dict['calib'])
+
+ smpl_verts *= 0.5
+
+ return {
+ 'voxel_verts': smpl_verts,
+ 'voxel_faces': smpl_faces,
+ 'pad_v_num': pad_v_num,
+ 'pad_f_num': pad_f_num
+ }
+
+ def get_sampling_geo(self, data_dict, is_valid=False, is_sdf=False):
+
+ mesh = data_dict['mesh']
+ calib = data_dict['calib']
+
+ # Samples are around the true surface with an offset
+ n_samples_surface = 4 * self.opt.num_sample_geo
+ vert_ids = np.arange(mesh.verts.shape[0])
+ thickness_sample_ratio = np.ones_like(vert_ids).astype(np.float32)
+
+ thickness_sample_ratio /= thickness_sample_ratio.sum()
+
+ samples_surface_ids = np.random.choice(vert_ids,
+ n_samples_surface,
+ replace=True,
+ p=thickness_sample_ratio)
+
+ samples_normal_ids = np.random.choice(vert_ids,
+ self.opt.num_sample_geo // 2,
+ replace=False,
+ p=thickness_sample_ratio)
+
+ surf_samples = mesh.verts[samples_normal_ids, :]
+ surf_normals = mesh.vert_normals[samples_normal_ids, :]
+
+ samples_surface = mesh.verts[samples_surface_ids, :]
+
+ # Sampling offsets are random noise with constant scale (15cm - 20cm)
+ offset = np.random.normal(scale=self.opt.sigma_geo,
+ size=(n_samples_surface, 1))
+ samples_surface += mesh.vert_normals[samples_surface_ids, :] * offset
+
+ # Uniform samples in [-1, 1]
+ calib_inv = np.linalg.inv(calib)
+ n_samples_space = self.opt.num_sample_geo // 4
+ samples_space_img = 2.0 * np.random.rand(n_samples_space, 3) - 1.0
+ samples_space = projection(samples_space_img, calib_inv)
+
+ # z-ray direction samples
+ if self.opt.zray_type and not is_valid:
+ n_samples_rayz = self.opt.ray_sample_num
+ samples_surface_cube = projection(samples_surface, calib)
+ samples_surface_cube_repeat = np.repeat(samples_surface_cube,
+ n_samples_rayz,
+ axis=0)
+
+ thickness_repeat = np.repeat(0.5 *
+ np.ones_like(samples_surface_ids),
+ n_samples_rayz,
+ axis=0)
+
+ noise_repeat = np.random.normal(scale=0.40,
+ size=(n_samples_surface *
+ n_samples_rayz, ))
+ samples_surface_cube_repeat[:,
+ -1] += thickness_repeat * noise_repeat
+ samples_surface_rayz = projection(samples_surface_cube_repeat,
+ calib_inv)
+
+ samples = np.concatenate(
+ [samples_surface, samples_space, samples_surface_rayz], 0)
+ else:
+ samples = np.concatenate([samples_surface, samples_space], 0)
+
+ np.random.shuffle(samples)
+
+ # labels: in->1.0; out->0.0.
+ if is_sdf:
+ sdfs = mesh.get_sdf(samples)
+ inside_samples = samples[sdfs < 0]
+ outside_samples = samples[sdfs >= 0]
+
+ inside_sdfs = sdfs[sdfs < 0]
+ outside_sdfs = sdfs[sdfs >= 0]
+ else:
+ inside = mesh.contains(samples)
+ inside_samples = samples[inside >= 0.5]
+ outside_samples = samples[inside < 0.5]
+
+ nin = inside_samples.shape[0]
+
+ if nin > self.opt.num_sample_geo // 2:
+ inside_samples = inside_samples[:self.opt.num_sample_geo // 2]
+ outside_samples = outside_samples[:self.opt.num_sample_geo // 2]
+ if is_sdf:
+ inside_sdfs = inside_sdfs[:self.opt.num_sample_geo // 2]
+ outside_sdfs = outside_sdfs[:self.opt.num_sample_geo // 2]
+ else:
+ outside_samples = outside_samples[:(self.opt.num_sample_geo - nin)]
+ if is_sdf:
+ outside_sdfs = outside_sdfs[:(self.opt.num_sample_geo - nin)]
+
+ if is_sdf:
+ samples = np.concatenate(
+ [inside_samples, outside_samples, surf_samples], 0)
+
+ labels = np.concatenate([
+ inside_sdfs, outside_sdfs, 0.0 * np.ones(surf_samples.shape[0])
+ ])
+
+ normals = np.zeros_like(samples)
+ normals[-self.opt.num_sample_geo // 2:, :] = surf_normals
+
+ # convert sdf from [-14, 130] to [0, 1]
+ # outside: 0, inside: 1
+ # Note: Marching cubes is defined on occupancy space (inside=1.0, outside=0.0)
+
+ labels = -labels.clip(min=-self.sdf_clip, max=self.sdf_clip)
+ labels += self.sdf_clip
+ labels /= (self.sdf_clip * 2)
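+            # illustrative mapping (assuming sdf_clip = 0.05): sdf = -0.05 -> 1.0,
+            # sdf = 0.0 -> 0.5, sdf = +0.05 -> 0.0, i.e. inside saturates at 1
+            # and outside at 0, matching the occupancy convention noted above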
+
+ else:
+ samples = np.concatenate([inside_samples, outside_samples])
+ labels = np.concatenate([
+ np.ones(inside_samples.shape[0]),
+ np.zeros(outside_samples.shape[0])
+ ])
+
+ normals = np.zeros_like(samples)
+
+ samples = torch.from_numpy(samples).float()
+ labels = torch.from_numpy(labels).float()
+ normals = torch.from_numpy(normals).float()
+
+ return {'samples_geo': samples, 'labels_geo': labels}
\ No newline at end of file
diff --git a/lib/dataset/TestDataset.py b/lib/dataset/TestDataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ea83cc7f96edfec1eae31f660c23d204a653e38
--- /dev/null
+++ b/lib/dataset/TestDataset.py
@@ -0,0 +1,254 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import os
+
+import lib.smplx as smplx
+from lib.pymaf.utils.geometry import rotation_matrix_to_angle_axis, batch_rodrigues
+from lib.pymaf.utils.imutils import process_image
+from lib.pymaf.core import path_config
+from lib.pymaf.models import pymaf_net
+from lib.common.config import cfg
+from lib.common.render import Render
+from lib.dataset.body_model import TetraSMPLModel
+from lib.dataset.mesh_util import get_visibility, SMPLX
+import os.path as osp
+import torch
+import numpy as np
+import random
+from termcolor import colored
+from PIL import ImageFile
+from huggingface_hub import cached_download
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+
+
+class TestDataset():
+ def __init__(self, cfg, device):
+
+ random.seed(1993)
+
+ self.image_path = cfg['image_path']
+ self.seg_dir = cfg['seg_dir']
+ self.has_det = cfg['has_det']
+ self.hps_type = cfg['hps_type']
+ self.smpl_type = 'smpl' if cfg['hps_type'] != 'pixie' else 'smplx'
+ self.smpl_gender = 'neutral'
+
+ self.device = device
+
+ self.subject_list = [self.image_path]
+
+ # smpl related
+ self.smpl_data = SMPLX()
+
+ self.get_smpl_model = lambda smpl_type, smpl_gender: smplx.create(
+ model_path=self.smpl_data.model_dir,
+ gender=smpl_gender,
+ model_type=smpl_type,
+ ext='npz')
+
+ # Load SMPL model
+ self.smpl_model = self.get_smpl_model(
+ self.smpl_type, self.smpl_gender).to(self.device)
+ self.faces = self.smpl_model.faces
+
+ self.hps = pymaf_net(path_config.SMPL_MEAN_PARAMS,
+ pretrained=True).to(self.device)
+ self.hps.load_state_dict(torch.load(
+ path_config.CHECKPOINT_FILE)['model'],
+ strict=True)
+ self.hps.eval()
+
+ print(colored(f"Using {self.hps_type} as HPS Estimator\n", "green"))
+
+ self.render = Render(size=512, device=device)
+
+ def __len__(self):
+ return len(self.subject_list)
+
+ def compute_vis_cmap(self, smpl_verts, smpl_faces):
+
+ (xy, z) = torch.as_tensor(smpl_verts).split([2, 1], dim=1)
+ smpl_vis = get_visibility(xy, -z, torch.as_tensor(smpl_faces).long())
+ if self.smpl_type == 'smpl':
+ smplx_ind = self.smpl_data.smpl2smplx(np.arange(smpl_vis.shape[0]))
+ else:
+ smplx_ind = np.arange(smpl_vis.shape[0])
+ smpl_cmap = self.smpl_data.get_smpl_mat(smplx_ind)
+
+ return {
+ 'smpl_vis': smpl_vis.unsqueeze(0).to(self.device),
+ 'smpl_cmap': smpl_cmap.unsqueeze(0).to(self.device),
+ 'smpl_verts': smpl_verts.unsqueeze(0)
+ }
+
+ def compute_voxel_verts(self, body_pose, global_orient, betas, trans,
+ scale):
+
+ smpl_path = cached_download(osp.join(self.smpl_data.model_dir, "smpl/SMPL_NEUTRAL.pkl"), use_auth_token=os.environ['ICON'])
+ tetra_path = cached_download(osp.join(self.smpl_data.tedra_dir,
+ 'tetra_neutral_adult_smpl.npz'), use_auth_token=os.environ['ICON'])
+ smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult')
+
+ pose = torch.cat([global_orient[0], body_pose[0]], dim=0)
+ smpl_model.set_params(rotation_matrix_to_angle_axis(pose),
+ beta=betas[0])
+
+ verts = np.concatenate(
+ [smpl_model.verts, smpl_model.verts_added],
+ axis=0) * scale.item() + trans.detach().cpu().numpy()
+ faces = np.loadtxt(cached_download(osp.join(self.smpl_data.tedra_dir,
+ 'tetrahedrons_neutral_adult.txt'), use_auth_token=os.environ['ICON']),
+ dtype=np.int32) - 1
+
+ pad_v_num = int(8000 - verts.shape[0])
+ pad_f_num = int(25100 - faces.shape[0])
+
+ verts = np.pad(verts, ((0, pad_v_num), (0, 0)),
+ mode='constant',
+ constant_values=0.0).astype(np.float32) * 0.5
+ faces = np.pad(faces, ((0, pad_f_num), (0, 0)),
+ mode='constant',
+ constant_values=0.0).astype(np.int32)
+
+ verts[:, 2] *= -1.0
+
+ voxel_dict = {
+ 'voxel_verts':
+ torch.from_numpy(verts).to(self.device).unsqueeze(0).float(),
+ 'voxel_faces':
+ torch.from_numpy(faces).to(self.device).unsqueeze(0).long(),
+ 'pad_v_num':
+ torch.tensor(pad_v_num).to(self.device).unsqueeze(0).long(),
+ 'pad_f_num':
+ torch.tensor(pad_f_num).to(self.device).unsqueeze(0).long()
+ }
+
+ return voxel_dict
+
+ def __getitem__(self, index):
+
+ img_path = self.subject_list[index]
+ img_name = img_path.split("/")[-1].rsplit(".", 1)[0]
+
+ if self.seg_dir is None:
+ img_icon, img_hps, img_ori, img_mask, uncrop_param = process_image(
+ img_path, self.hps_type, 512, self.device)
+
+ data_dict = {
+ 'name': img_name,
+ 'image': img_icon.to(self.device).unsqueeze(0),
+ 'ori_image': img_ori,
+ 'mask': img_mask,
+ 'uncrop_param': uncrop_param
+ }
+
+ else:
+ img_icon, img_hps, img_ori, img_mask, uncrop_param, segmentations = process_image(
+ img_path, self.hps_type, 512, self.device,
+ seg_path=os.path.join(self.seg_dir, f'{img_name}.json'))
+ data_dict = {
+ 'name': img_name,
+ 'image': img_icon.to(self.device).unsqueeze(0),
+ 'ori_image': img_ori,
+ 'mask': img_mask,
+ 'uncrop_param': uncrop_param,
+ 'segmentations': segmentations
+ }
+
+ with torch.no_grad():
+ # import ipdb; ipdb.set_trace()
+ preds_dict = self.hps.forward(img_hps)
+
+ data_dict['smpl_faces'] = torch.Tensor(
+ self.faces.astype(np.int16)).long().unsqueeze(0).to(
+ self.device)
+
+ if self.hps_type == 'pymaf':
+ output = preds_dict['smpl_out'][-1]
+ scale, tranX, tranY = output['theta'][0, :3]
+ data_dict['betas'] = output['pred_shape']
+ data_dict['body_pose'] = output['rotmat'][:, 1:]
+ data_dict['global_orient'] = output['rotmat'][:, 0:1]
+ data_dict['smpl_verts'] = output['verts']
+
+ elif self.hps_type == 'pare':
+ data_dict['body_pose'] = preds_dict['pred_pose'][:, 1:]
+ data_dict['global_orient'] = preds_dict['pred_pose'][:, 0:1]
+ data_dict['betas'] = preds_dict['pred_shape']
+ data_dict['smpl_verts'] = preds_dict['smpl_vertices']
+ scale, tranX, tranY = preds_dict['pred_cam'][0, :3]
+
+ elif self.hps_type == 'pixie':
+ data_dict.update(preds_dict)
+ data_dict['body_pose'] = preds_dict['body_pose']
+ data_dict['global_orient'] = preds_dict['global_pose']
+ data_dict['betas'] = preds_dict['shape']
+ data_dict['smpl_verts'] = preds_dict['vertices']
+ scale, tranX, tranY = preds_dict['cam'][0, :3]
+
+ elif self.hps_type == 'hybrik':
+ data_dict['body_pose'] = preds_dict['pred_theta_mats'][:, 1:]
+ data_dict['global_orient'] = preds_dict['pred_theta_mats'][:, [0]]
+ data_dict['betas'] = preds_dict['pred_shape']
+ data_dict['smpl_verts'] = preds_dict['pred_vertices']
+ scale, tranX, tranY = preds_dict['pred_camera'][0, :3]
+ scale = scale * 2
+
+ elif self.hps_type == 'bev':
+ data_dict['betas'] = torch.from_numpy(preds_dict['smpl_betas'])[
+ [0], :10].to(self.device).float()
+ pred_thetas = batch_rodrigues(torch.from_numpy(
+ preds_dict['smpl_thetas'][0]).reshape(-1, 3)).float()
+ data_dict['body_pose'] = pred_thetas[1:][None].to(self.device)
+ data_dict['global_orient'] = pred_thetas[[0]][None].to(self.device)
+ data_dict['smpl_verts'] = torch.from_numpy(
+ preds_dict['verts'][[0]]).to(self.device).float()
+ tranX = preds_dict['cam_trans'][0, 0]
+ tranY = preds_dict['cam'][0, 1] + 0.28
+ scale = preds_dict['cam'][0, 0] * 1.1
+
+ data_dict['scale'] = scale
+ data_dict['trans'] = torch.tensor(
+ [tranX, tranY, 0.0]).to(self.device).float()
+
+ # data_dict info (key-shape):
+ # scale, tranX, tranY - tensor.float
+ # betas - [1,10] / [1, 200]
+ # body_pose - [1, 23, 3, 3] / [1, 21, 3, 3]
+ # global_orient - [1, 1, 3, 3]
+ # smpl_verts - [1, 6890, 3] / [1, 10475, 3]
+
+ # from rot_mat to rot_6d for better optimization
+ N_body = data_dict["body_pose"].shape[1]
+ data_dict["body_pose"] = data_dict["body_pose"][:, :, :, :2].reshape(1, N_body,-1)
+ data_dict["global_orient"] = data_dict["global_orient"][:, :, :, :2].reshape(1, 1,-1)
+
+ return data_dict
+
+ def render_normal(self, verts, faces):
+
+ # render optimized mesh (normal, T_normal, image [-1,1])
+ self.render.load_meshes(verts, faces)
+ return self.render.get_rgb_image()
+
+ def render_depth(self, verts, faces):
+
+ # render optimized mesh (normal, T_normal, image [-1,1])
+ self.render.load_meshes(verts, faces)
+ return self.render.get_depth_map(cam_ids=[0, 2])
\ No newline at end of file
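A minimal sketch of driving the TestDataset above, assuming the ICON Hugging Face token is exported in the environment and using one of the bundled example images; the cfg keys are the ones read in __init__:

    import torch
    from lib.dataset.TestDataset import TestDataset

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cfg_test = {
        'image_path': './examples/22097467bffc92d4a5c4246f7d4edb75.png',
        'seg_dir': None,       # no segmentation JSONs -> first branch of __getitem__
        'has_det': True,
        'hps_type': 'pymaf',   # the estimator whose weights are loaded in __init__
    }
    dataset = TestDataset(cfg_test, device)
    data = dataset[0]          # runs the HPS forward pass on the cropped image
    print(data['name'], data['image'].shape, data['body_pose'].shape)   # body_pose is [1, N_body, 6] after the rot6d conversion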
diff --git a/lib/dataset/__init__.py b/lib/dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/dataset/body_model.py b/lib/dataset/body_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f46fe7db22cdd61f372094c6d89bf12b5a8d14e
--- /dev/null
+++ b/lib/dataset/body_model.py
@@ -0,0 +1,454 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+import pickle
+import torch
+import os
+
+
+class SMPLModel():
+ def __init__(self, model_path, age):
+ """
+ SMPL model.
+ Parameter:
+ ---------
+ model_path: Path to the SMPL model parameters, pre-processed by
+ `preprocess.py`.
+ """
+ with open(model_path, 'rb') as f:
+ params = pickle.load(f, encoding='latin1')
+
+ self.J_regressor = params['J_regressor']
+ self.weights = np.asarray(params['weights'])
+ self.posedirs = np.asarray(params['posedirs'])
+ self.v_template = np.asarray(params['v_template'])
+ self.shapedirs = np.asarray(params['shapedirs'])
+ self.faces = np.asarray(params['f'])
+ self.kintree_table = np.asarray(params['kintree_table'])
+
+ self.pose_shape = [24, 3]
+ self.beta_shape = [10]
+ self.trans_shape = [3]
+
+ if age == 'kid':
+ v_template_smil = np.load(
+ os.path.join(os.path.dirname(model_path),
+ "smpl/smpl_kid_template.npy"))
+ v_template_smil -= np.mean(v_template_smil, axis=0)
+ v_template_diff = np.expand_dims(v_template_smil - self.v_template,
+ axis=2)
+ self.shapedirs = np.concatenate(
+ (self.shapedirs[:, :, :self.beta_shape[0]], v_template_diff),
+ axis=2)
+ self.beta_shape[0] += 1
+
+ id_to_col = {
+ self.kintree_table[1, i]: i
+ for i in range(self.kintree_table.shape[1])
+ }
+ self.parent = {
+ i: id_to_col[self.kintree_table[0, i]]
+ for i in range(1, self.kintree_table.shape[1])
+ }
+
+ self.pose = np.zeros(self.pose_shape)
+ self.beta = np.zeros(self.beta_shape)
+ self.trans = np.zeros(self.trans_shape)
+
+ self.verts = None
+ self.J = None
+ self.R = None
+ self.G = None
+
+ self.update()
+
+ def set_params(self, pose=None, beta=None, trans=None):
+ """
+        Set pose, shape, and/or translation parameters of the SMPL model. Vertices of
+        the model will be updated and returned.
+        Parameters:
+        ---------
+        pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
+        relative to parent joint. For the root joint it is the global orientation.
+        Represented in axis-angle format.
+        beta: Parameter for model shape. A vector of shape [10]. Coefficients for
+        PCA components. Only 10 components were released by MPI.
+ trans: Global translation of shape [3].
+ Return:
+ ------
+ Updated vertices.
+ """
+ if pose is not None:
+ self.pose = pose
+ if beta is not None:
+ self.beta = beta
+ if trans is not None:
+ self.trans = trans
+ self.update()
+ return self.verts
+
+ def update(self):
+ """
+ Called automatically when parameters are updated.
+ """
+ # how beta affect body shape
+ v_shaped = self.shapedirs.dot(self.beta) + self.v_template
+ # joints location
+ self.J = self.J_regressor.dot(v_shaped)
+ pose_cube = self.pose.reshape((-1, 1, 3))
+ # rotation matrix for each joint
+ self.R = self.rodrigues(pose_cube)
+ I_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
+ (self.R.shape[0] - 1, 3, 3))
+ lrotmin = (self.R[1:] - I_cube).ravel()
+ # how pose affect body shape in zero pose
+ v_posed = v_shaped + self.posedirs.dot(lrotmin)
+ # world transformation of each joint
+ G = np.empty((self.kintree_table.shape[1], 4, 4))
+ G[0] = self.with_zeros(
+ np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
+ for i in range(1, self.kintree_table.shape[1]):
+ G[i] = G[self.parent[i]].dot(
+ self.with_zeros(
+ np.hstack([
+ self.R[i],
+ ((self.J[i, :] - self.J[self.parent[i], :]).reshape(
+ [3, 1]))
+ ])))
+ # remove the transformation due to the rest pose
+ G = G - self.pack(
+ np.matmul(
+ G,
+ np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])))
+ # transformation of each vertex
+ T = np.tensordot(self.weights, G, axes=[[1], [0]])
+ rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
+ v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1,
+ 4])[:, :3]
+ self.verts = v + self.trans.reshape([1, 3])
+ self.G = G
+
+ def rodrigues(self, r):
+ """
+        Rodrigues' rotation formula that turns an axis-angle vector into a rotation
+        matrix in a batched manner.
+ Parameter:
+ ----------
+ r: Axis-angle rotation vector of shape [batch_size, 1, 3].
+ Return:
+ -------
+ Rotation matrix of shape [batch_size, 3, 3].
+ """
+ theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
+ # avoid zero divide
+ theta = np.maximum(theta, np.finfo(np.float64).tiny)
+ r_hat = r / theta
+ cos = np.cos(theta)
+ z_stick = np.zeros(theta.shape[0])
+ m = np.dstack([
+ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
+ -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
+ ]).reshape([-1, 3, 3])
+ i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
+ [theta.shape[0], 3, 3])
+ A = np.transpose(r_hat, axes=[0, 2, 1])
+ B = r_hat
+ dot = np.matmul(A, B)
+ R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
+ return R
+
+ def with_zeros(self, x):
+ """
+ Append a [0, 0, 0, 1] vector to a [3, 4] matrix.
+ Parameter:
+ ---------
+ x: Matrix to be appended.
+ Return:
+ ------
+ Matrix after appending of shape [4,4]
+ """
+ return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
+
+ def pack(self, x):
+ """
+ Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
+ manner.
+ Parameter:
+ ----------
+ x: Matrices to be appended of shape [batch_size, 4, 1]
+ Return:
+ ------
+ Matrix of shape [batch_size, 4, 4] after appending.
+ """
+ return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
+
+ def save_to_obj(self, path):
+ """
+ Save the SMPL model into .obj file.
+ Parameter:
+ ---------
+ path: Path to save.
+ """
+ with open(path, 'w') as fp:
+ for v in self.verts:
+ fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
+ for f in self.faces + 1:
+ fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
+
+
+class TetraSMPLModel():
+ def __init__(self,
+ model_path,
+ model_addition_path,
+ age='adult',
+ v_template=None):
+ """
+ SMPL model.
+ Parameter:
+ ---------
+ model_path: Path to the SMPL model parameters, pre-processed by
+ `preprocess.py`.
+ """
+ with open(model_path, 'rb') as f:
+ params = pickle.load(f, encoding='latin1')
+
+ self.J_regressor = params['J_regressor']
+ self.weights = np.asarray(params['weights'])
+ self.posedirs = np.asarray(params['posedirs'])
+
+ if v_template is not None:
+ self.v_template = v_template
+ else:
+ self.v_template = np.asarray(params['v_template'])
+
+ self.shapedirs = np.asarray(params['shapedirs'])
+ self.faces = np.asarray(params['f'])
+ self.kintree_table = np.asarray(params['kintree_table'])
+
+ params_added = np.load(model_addition_path)
+ self.v_template_added = params_added['v_template_added']
+ self.weights_added = params_added['weights_added']
+ self.shapedirs_added = params_added['shapedirs_added']
+ self.posedirs_added = params_added['posedirs_added']
+ self.tetrahedrons = params_added['tetrahedrons']
+
+ id_to_col = {
+ self.kintree_table[1, i]: i
+ for i in range(self.kintree_table.shape[1])
+ }
+ self.parent = {
+ i: id_to_col[self.kintree_table[0, i]]
+ for i in range(1, self.kintree_table.shape[1])
+ }
+
+ self.pose_shape = [24, 3]
+ self.beta_shape = [10]
+ self.trans_shape = [3]
+
+ if age == 'kid':
+ v_template_smil = np.load(
+ os.path.join(os.path.dirname(model_path),
+ "smpl/smpl_kid_template.npy"))
+ v_template_smil -= np.mean(v_template_smil, axis=0)
+ v_template_diff = np.expand_dims(v_template_smil - self.v_template,
+ axis=2)
+ self.shapedirs = np.concatenate(
+ (self.shapedirs[:, :, :self.beta_shape[0]], v_template_diff),
+ axis=2)
+ self.beta_shape[0] += 1
+
+ self.pose = np.zeros(self.pose_shape)
+ self.beta = np.zeros(self.beta_shape)
+ self.trans = np.zeros(self.trans_shape)
+
+ self.verts = None
+ self.verts_added = None
+ self.J = None
+ self.R = None
+ self.G = None
+
+ self.update()
+
+ def set_params(self, pose=None, beta=None, trans=None):
+ """
+        Set pose, shape, and/or translation parameters of the SMPL model. Vertices of
+        the model will be updated and returned.
+        Parameters:
+        ---------
+        pose: Also known as 'theta', a [24,3] matrix indicating child joint rotation
+        relative to parent joint. For the root joint it is the global orientation.
+        Represented in axis-angle format.
+        beta: Parameter for model shape. A vector of shape [10]. Coefficients for
+        PCA components. Only 10 components were released by MPI.
+ trans: Global translation of shape [3].
+ Return:
+ ------
+ Updated vertices.
+ """
+
+ if torch.is_tensor(pose):
+ pose = pose.detach().cpu().numpy()
+ if torch.is_tensor(beta):
+ beta = beta.detach().cpu().numpy()
+
+ if pose is not None:
+ self.pose = pose
+ if beta is not None:
+ self.beta = beta
+ if trans is not None:
+ self.trans = trans
+ self.update()
+ return self.verts
+
+ def update(self):
+ """
+ Called automatically when parameters are updated.
+ """
+ # how beta affect body shape
+ v_shaped = self.shapedirs.dot(self.beta) + self.v_template
+ v_shaped_added = self.shapedirs_added.dot(
+ self.beta) + self.v_template_added
+ # joints location
+ self.J = self.J_regressor.dot(v_shaped)
+ pose_cube = self.pose.reshape((-1, 1, 3))
+ # rotation matrix for each joint
+ self.R = self.rodrigues(pose_cube)
+ I_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
+ (self.R.shape[0] - 1, 3, 3))
+ lrotmin = (self.R[1:] - I_cube).ravel()
+ # how pose affect body shape in zero pose
+ v_posed = v_shaped + self.posedirs.dot(lrotmin)
+ v_posed_added = v_shaped_added + self.posedirs_added.dot(lrotmin)
+ # world transformation of each joint
+ G = np.empty((self.kintree_table.shape[1], 4, 4))
+ G[0] = self.with_zeros(
+ np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
+ for i in range(1, self.kintree_table.shape[1]):
+ G[i] = G[self.parent[i]].dot(
+ self.with_zeros(
+ np.hstack([
+ self.R[i],
+ ((self.J[i, :] - self.J[self.parent[i], :]).reshape(
+ [3, 1]))
+ ])))
+ # remove the transformation due to the rest pose
+ G = G - self.pack(
+ np.matmul(
+ G,
+ np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])))
+ self.G = G
+ # transformation of each vertex
+ T = np.tensordot(self.weights, G, axes=[[1], [0]])
+ rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
+ v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1,
+ 4])[:, :3]
+ self.verts = v + self.trans.reshape([1, 3])
+ T_added = np.tensordot(self.weights_added, G, axes=[[1], [0]])
+ rest_shape_added_h = np.hstack(
+ (v_posed_added, np.ones([v_posed_added.shape[0], 1])))
+ v_added = np.matmul(T_added,
+ rest_shape_added_h.reshape([-1, 4,
+ 1])).reshape([-1, 4
+ ])[:, :3]
+ self.verts_added = v_added + self.trans.reshape([1, 3])
+
+ def rodrigues(self, r):
+ """
+        Rodrigues' rotation formula that turns an axis-angle vector into a rotation
+        matrix in a batched manner.
+ Parameter:
+ ----------
+ r: Axis-angle rotation vector of shape [batch_size, 1, 3].
+ Return:
+ -------
+ Rotation matrix of shape [batch_size, 3, 3].
+ """
+ theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)
+ # avoid zero divide
+ theta = np.maximum(theta, np.finfo(np.float64).tiny)
+ r_hat = r / theta
+ cos = np.cos(theta)
+ z_stick = np.zeros(theta.shape[0])
+ m = np.dstack([
+ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
+ -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
+ ]).reshape([-1, 3, 3])
+ i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0),
+ [theta.shape[0], 3, 3])
+ A = np.transpose(r_hat, axes=[0, 2, 1])
+ B = r_hat
+ dot = np.matmul(A, B)
+ R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
+ return R
+
+ def with_zeros(self, x):
+ """
+ Append a [0, 0, 0, 1] vector to a [3, 4] matrix.
+ Parameter:
+ ---------
+ x: Matrix to be appended.
+ Return:
+ ------
+ Matrix after appending of shape [4,4]
+ """
+ return np.vstack((x, np.array([[0.0, 0.0, 0.0, 1.0]])))
+
+ def pack(self, x):
+ """
+ Append zero matrices of shape [4, 3] to vectors of [4, 1] shape in a batched
+ manner.
+ Parameter:
+ ----------
+ x: Matrices to be appended of shape [batch_size, 4, 1]
+ Return:
+ ------
+ Matrix of shape [batch_size, 4, 4] after appending.
+ """
+ return np.dstack((np.zeros((x.shape[0], 4, 3)), x))
+
+ def save_mesh_to_obj(self, path):
+ """
+ Save the SMPL model into .obj file.
+ Parameter:
+ ---------
+ path: Path to save.
+ """
+ with open(path, 'w') as fp:
+ for v in self.verts:
+ fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
+ for f in self.faces + 1:
+ fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
+
+ def save_tetrahedron_to_obj(self, path):
+ """
+ Save the tetrahedron SMPL model into .obj file.
+ Parameter:
+ ---------
+ path: Path to save.
+ """
+
+ with open(path, 'w') as fp:
+ for v in self.verts:
+ fp.write('v %f %f %f 1 0 0\n' % (v[0], v[1], v[2]))
+ for va in self.verts_added:
+ fp.write('v %f %f %f 0 0 1\n' % (va[0], va[1], va[2]))
+ for t in self.tetrahedrons + 1:
+ fp.write('f %d %d %d\n' % (t[0], t[2], t[1]))
+ fp.write('f %d %d %d\n' % (t[0], t[3], t[2]))
+ fp.write('f %d %d %d\n' % (t[0], t[1], t[3]))
+ fp.write('f %d %d %d\n' % (t[1], t[2], t[3]))
\ No newline at end of file
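body_model.py above is plain NumPy, so it can be exercised on its own; a hypothetical rest-pose example (the pickle path is a placeholder, not shipped in this diff):

    import numpy as np
    from lib.dataset.body_model import SMPLModel

    model = SMPLModel('./data/smpl_related/models/smpl/SMPL_NEUTRAL.pkl', age='adult')  # placeholder path
    verts = model.set_params(pose=np.zeros([24, 3]),   # per-joint axis-angle (rest pose)
                             beta=np.zeros([10]),      # PCA shape coefficients
                             trans=np.zeros([3]))
    print(verts.shape)                                 # (6890, 3) for the released SMPL template
    model.save_to_obj('./debug/smpl_rest.obj')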
diff --git a/lib/dataset/mesh_util.py b/lib/dataset/mesh_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..6109d53323630252adbae092482dae1d26c331be
--- /dev/null
+++ b/lib/dataset/mesh_util.py
@@ -0,0 +1,905 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import numpy as np
+import cv2
+import pymeshlab
+import torch
+import torchvision
+import trimesh
+from pytorch3d.io import load_obj
+from termcolor import colored
+from scipy.spatial import cKDTree
+
+from pytorch3d.structures import Meshes
+import torch.nn.functional as F
+
+import os
+from lib.pymaf.utils.imutils import uncrop
+from lib.common.render_utils import Pytorch3dRasterizer, face_vertices
+
+from pytorch3d.renderer.mesh import rasterize_meshes
+from PIL import Image, ImageFont, ImageDraw
+from kaolin.ops.mesh import check_sign
+from kaolin.metrics.trianglemesh import point_to_mesh_distance
+
+from pytorch3d.loss import (
+ mesh_laplacian_smoothing,
+ mesh_normal_consistency
+)
+
+from huggingface_hub import hf_hub_download, hf_hub_url, cached_download
+
+def rot6d_to_rotmat(x):
+ """Convert 6D rotation representation to 3x3 rotation matrix.
+ Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
+ Input:
+ (B,6) Batch of 6-D rotation representations
+ Output:
+ (B,3,3) Batch of corresponding rotation matrices
+ """
+ x = x.view(-1, 3, 2)
+ a1 = x[:, :, 0]
+ a2 = x[:, :, 1]
+ b1 = F.normalize(a1)
+ b2 = F.normalize(a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1)
+ b3 = torch.cross(b1, b2)
+ return torch.stack((b1, b2, b3), dim=-1)
+
+
+def tensor2variable(tensor, device):
+ # [1,23,3,3]
+ return torch.tensor(tensor, device=device, requires_grad=True)
+
+
+def normal_loss(vec1, vec2):
+
+ # vec1_mask = vec1.sum(dim=1) != 0.0
+ # vec2_mask = vec2.sum(dim=1) != 0.0
+ # union_mask = vec1_mask * vec2_mask
+ vec_sim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)(vec1, vec2)
+ # vec_diff = ((vec_sim-1.0)**2)[union_mask].mean()
+ vec_diff = ((vec_sim-1.0)**2).mean()
+
+ return vec_diff
+
+
+class GMoF(torch.nn.Module):
+ def __init__(self, rho=1):
+ super(GMoF, self).__init__()
+ self.rho = rho
+
+ def extra_repr(self):
+ return 'rho = {}'.format(self.rho)
+
+ def forward(self, residual):
+ dist = torch.div(residual, residual + self.rho ** 2)
+ return self.rho ** 2 * dist
+
+
+def mesh_edge_loss(meshes, target_length: float = 0.0):
+ """
+ Computes mesh edge length regularization loss averaged across all meshes
+ in a batch. Each mesh contributes equally to the final loss, regardless of
+ the number of edges per mesh in the batch by weighting each mesh with the
+ inverse number of edges. For example, if mesh 3 (out of N) has only E=4
+ edges, then the loss for each edge in mesh 3 should be multiplied by 1/E to
+ contribute to the final loss.
+ Args:
+ meshes: Meshes object with a batch of meshes.
+ target_length: Resting value for the edge length.
+ Returns:
+ loss: Average loss across the batch. Returns 0 if meshes contains
+ no meshes or all empty meshes.
+ """
+ if meshes.isempty():
+ return torch.tensor(
+ [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
+ )
+
+ N = len(meshes)
+ edges_packed = meshes.edges_packed() # (sum(E_n), 3)
+ verts_packed = meshes.verts_packed() # (sum(V_n), 3)
+ edge_to_mesh_idx = meshes.edges_packed_to_mesh_idx() # (sum(E_n), )
+ num_edges_per_mesh = meshes.num_edges_per_mesh() # N
+
+ # Determine the weight for each edge based on the number of edges in the
+ # mesh it corresponds to.
+ # TODO (nikhilar) Find a faster way of computing the weights for each edge
+ # as this is currently a bottleneck for meshes with a large number of faces.
+ weights = num_edges_per_mesh.gather(0, edge_to_mesh_idx)
+ weights = 1.0 / weights.float()
+
+ verts_edges = verts_packed[edges_packed]
+ v0, v1 = verts_edges.unbind(1)
+ loss = ((v0 - v1).norm(dim=1, p=2) - target_length) ** 2.0
+ loss_vertex = loss * weights
+ # loss_outlier = torch.topk(loss, 100)[0].mean()
+ # loss_all = (loss_vertex.sum() + loss_outlier.mean()) / N
+ loss_all = loss_vertex.sum() / N
+
+ return loss_all
+
+
+def remesh(obj_path, perc, device):
+
+ ms = pymeshlab.MeshSet()
+ ms.load_new_mesh(obj_path)
+ ms.laplacian_smooth()
+ ms.remeshing_isotropic_explicit_remeshing(
+ targetlen=pymeshlab.Percentage(perc), adaptive=True)
+ ms.save_current_mesh(obj_path.replace("recon", "remesh"))
+ polished_mesh = trimesh.load_mesh(obj_path.replace("recon", "remesh"))
+ verts_pr = torch.tensor(polished_mesh.vertices).float().unsqueeze(0).to(device)
+ faces_pr = torch.tensor(polished_mesh.faces).long().unsqueeze(0).to(device)
+
+ return verts_pr, faces_pr
+
+
+def possion(mesh, obj_path):
+
+ mesh.export(obj_path)
+ ms = pymeshlab.MeshSet()
+ ms.load_new_mesh(obj_path)
+ ms.surface_reconstruction_screened_poisson(depth=10)
+ ms.set_current_mesh(1)
+ ms.save_current_mesh(obj_path)
+
+ return trimesh.load(obj_path)
+
+
+def get_mask(tensor, dim):
+
+ mask = torch.abs(tensor).sum(dim=dim, keepdims=True) > 0.0
+ mask = mask.type_as(tensor)
+
+ return mask
+
+
+def blend_rgb_norm(rgb, norm, mask):
+
+ # [0,0,0] or [127,127,127] should be marked as mask
+ final = rgb * (1-mask) + norm * (mask)
+
+ return final.astype(np.uint8)
+
+
+def unwrap(image, data):
+
+ img_uncrop = uncrop(np.array(Image.fromarray(image).resize(data['uncrop_param']['box_shape'][:2])),
+ data['uncrop_param']['center'],
+ data['uncrop_param']['scale'],
+ data['uncrop_param']['crop_shape'])
+
+ img_orig = cv2.warpAffine(img_uncrop,
+ np.linalg.inv(data['uncrop_param']['M'])[:2, :],
+ data['uncrop_param']['ori_shape'][::-1][1:],
+ flags=cv2.INTER_CUBIC)
+
+ return img_orig
+
+
+# Losses to smooth / regularize the mesh shape
+def update_mesh_shape_prior_losses(mesh, losses):
+
+ # and (b) the edge length of the predicted mesh
+ losses["edge"]['value'] = mesh_edge_loss(mesh)
+ # mesh normal consistency
+ losses["nc"]['value'] = mesh_normal_consistency(mesh)
+ # mesh laplacian smoothing
+ losses["laplacian"]['value'] = mesh_laplacian_smoothing(
+ mesh, method="uniform")
+
+
+def rename(old_dict, old_name, new_name):
+ new_dict = {}
+ for key, value in zip(old_dict.keys(), old_dict.values()):
+ new_key = key if key != old_name else new_name
+ new_dict[new_key] = old_dict[key]
+ return new_dict
+
+
+def load_checkpoint(model, cfg):
+
+ model_dict = model.state_dict()
+ main_dict = {}
+ normal_dict = {}
+
+ device = torch.device(f"cuda:{cfg['test_gpus'][0]}")
+
+ main_dict = torch.load(cached_download(cfg.resume_path, use_auth_token=os.environ['ICON']),
+ map_location=device)['state_dict']
+
+ main_dict = {
+ k: v
+ for k, v in main_dict.items()
+ if k in model_dict and v.shape == model_dict[k].shape and (
+ 'reconEngine' not in k) and ("normal_filter" not in k) and (
+ 'voxelization' not in k)
+ }
+ print(colored(f"Resume MLP weights from {cfg.resume_path}", 'green'))
+
+ normal_dict = torch.load(cached_download(cfg.normal_path, use_auth_token=os.environ['ICON']),
+ map_location=device)['state_dict']
+
+ for key in normal_dict.keys():
+ normal_dict = rename(normal_dict, key,
+ key.replace("netG", "netG.normal_filter"))
+
+ normal_dict = {
+ k: v
+ for k, v in normal_dict.items()
+ if k in model_dict and v.shape == model_dict[k].shape
+ }
+ print(colored(f"Resume normal model from {cfg.normal_path}", 'green'))
+
+ model_dict.update(main_dict)
+ model_dict.update(normal_dict)
+ model.load_state_dict(model_dict)
+
+ model.netG = model.netG.to(device)
+ model.reconEngine = model.reconEngine.to(device)
+
+ model.netG.training = False
+ model.netG.eval()
+
+ del main_dict
+ del normal_dict
+ del model_dict
+
+ return model
+
+
+def read_smpl_constants(folder):
+ """Load smpl vertex code"""
+ smpl_vtx_std = np.loadtxt(cached_download(os.path.join(folder, 'vertices.txt'), use_auth_token=os.environ['ICON']))
+ min_x = np.min(smpl_vtx_std[:, 0])
+ max_x = np.max(smpl_vtx_std[:, 0])
+ min_y = np.min(smpl_vtx_std[:, 1])
+ max_y = np.max(smpl_vtx_std[:, 1])
+ min_z = np.min(smpl_vtx_std[:, 2])
+ max_z = np.max(smpl_vtx_std[:, 2])
+
+ smpl_vtx_std[:, 0] = (smpl_vtx_std[:, 0] - min_x) / (max_x - min_x)
+ smpl_vtx_std[:, 1] = (smpl_vtx_std[:, 1] - min_y) / (max_y - min_y)
+ smpl_vtx_std[:, 2] = (smpl_vtx_std[:, 2] - min_z) / (max_z - min_z)
+ smpl_vertex_code = np.float32(np.copy(smpl_vtx_std))
+ """Load smpl faces & tetrahedrons"""
+ smpl_faces = np.loadtxt(cached_download(os.path.join(folder, 'faces.txt'), use_auth_token=os.environ['ICON']),
+ dtype=np.int32) - 1
+ smpl_face_code = (smpl_vertex_code[smpl_faces[:, 0]] +
+ smpl_vertex_code[smpl_faces[:, 1]] +
+ smpl_vertex_code[smpl_faces[:, 2]]) / 3.0
+ smpl_tetras = np.loadtxt(cached_download(os.path.join(folder, 'tetrahedrons.txt'), use_auth_token=os.environ['ICON']),
+ dtype=np.int32) - 1
+
+ return smpl_vertex_code, smpl_face_code, smpl_faces, smpl_tetras
+
+
+def feat_select(feat, select):
+
+ # feat [B, featx2, N]
+ # select [B, 1, N]
+ # return [B, feat, N]
+
+ dim = feat.shape[1] // 2
+ idx = torch.tile((1-select), (1, dim, 1))*dim + \
+ torch.arange(0, dim).unsqueeze(0).unsqueeze(2).type_as(select)
+ feat_select = torch.gather(feat, 1, idx.long())
+
+ return feat_select
+
+
+def get_visibility(xy, z, faces):
+ """get the visibility of vertices
+ Args:
+ xy (torch.tensor): [N,2]
+ z (torch.tensor): [N,1]
+ faces (torch.tensor): [N,3]
+        (the rasterizer internally renders at a fixed 2**12 resolution)
+ """
+
+ xyz = torch.cat((xy, -z), dim=1)
+ xyz = (xyz + 1.0) / 2.0
+ faces = faces.long()
+
+ rasterizer = Pytorch3dRasterizer(image_size=2**12)
+ meshes_screen = Meshes(verts=xyz[None, ...], faces=faces[None, ...])
+ raster_settings = rasterizer.raster_settings
+
+ pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
+ meshes_screen,
+ image_size=raster_settings.image_size,
+ blur_radius=raster_settings.blur_radius,
+ faces_per_pixel=raster_settings.faces_per_pixel,
+ bin_size=raster_settings.bin_size,
+ max_faces_per_bin=raster_settings.max_faces_per_bin,
+ perspective_correct=raster_settings.perspective_correct,
+ cull_backfaces=raster_settings.cull_backfaces,
+ )
+
+ vis_vertices_id = torch.unique(faces[torch.unique(pix_to_face), :])
+ vis_mask = torch.zeros(size=(z.shape[0], 1))
+ vis_mask[vis_vertices_id] = 1.0
+
+ # print("------------------------\n")
+ # print(f"keep points : {vis_mask.sum()/len(vis_mask)}")
+
+ return vis_mask
+
+
+def barycentric_coordinates_of_projection(points, vertices):
+ ''' https://github.com/MPI-IS/mesh/blob/master/mesh/geometry/barycentric_coordinates_of_projection.py
+ '''
+ """Given a point, gives projected coords of that point to a triangle
+ in barycentric coordinates.
+ See
+ **Heidrich**, Computing the Barycentric Coordinates of a Projected Point, JGT 05
+ at http://www.cs.ubc.ca/~heidrich/Papers/JGT.05.pdf
+
+ :param p: point to project. [B, 3]
+ :param v0: first vertex of triangles. [B, 3]
+ :returns: barycentric coordinates of ``p``'s projection in triangle defined by ``q``, ``u``, ``v``
+ vectorized so ``p``, ``q``, ``u``, ``v`` can all be ``3xN``
+ """
+ #(p, q, u, v)
+ v0, v1, v2 = vertices[:, 0], vertices[:, 1], vertices[:, 2]
+ p = points
+
+ q = v0
+ u = v1 - v0
+ v = v2 - v0
+ n = torch.cross(u, v)
+ s = torch.sum(n * n, dim=1)
+ # If the triangle edges are collinear, cross-product is zero,
+ # which makes "s" 0, which gives us divide by zero. So we
+    # make the arbitrary choice to set s to a small epsilon (1e-6 here),
+    # the closest thing to zero
+ s[s == 0] = 1e-6
+ oneOver4ASquared = 1.0 / s
+ w = p - q
+ b2 = torch.sum(torch.cross(u, w) * n, dim=1) * oneOver4ASquared
+ b1 = torch.sum(torch.cross(w, v) * n, dim=1) * oneOver4ASquared
+ weights = torch.stack((1 - b1 - b2, b1, b2), dim=-1)
+    # check barycentric weights
+ # p_n = v0*weights[:,0:1] + v1*weights[:,1:2] + v2*weights[:,2:3]
+ return weights
+
+
+def cal_sdf_batch(verts, faces, cmaps, vis, points):
+
+ # verts [B, N_vert, 3]
+ # faces [B, N_face, 3]
+ # triangles [B, N_face, 3, 3]
+ # points [B, N_point, 3]
+ # cmaps [B, N_vert, 3]
+
+ Bsize = points.shape[0]
+
+ normals = Meshes(verts, faces).verts_normals_padded()
+
+ triangles = face_vertices(verts, faces)
+ normals = face_vertices(normals, faces)
+ cmaps = face_vertices(cmaps, faces)
+ vis = face_vertices(vis, faces)
+
+ residues, pts_ind, _ = point_to_mesh_distance(points, triangles)
+ closest_triangles = torch.gather(
+ triangles, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, 3)).view(-1, 3, 3)
+ closest_normals = torch.gather(
+ normals, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, 3)).view(-1, 3, 3)
+ closest_cmaps = torch.gather(
+ cmaps, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, 3)).view(-1, 3, 3)
+ closest_vis = torch.gather(
+ vis, 1, pts_ind[:, :, None, None].expand(-1, -1, 3, 1)).view(-1, 3, 1)
+ bary_weights = barycentric_coordinates_of_projection(
+ points.view(-1, 3), closest_triangles)
+
+ pts_cmap = (closest_cmaps*bary_weights[:, :, None]).sum(1).unsqueeze(0).clamp_(min=0.0, max=1.0)
+ pts_vis = (closest_vis*bary_weights[:,
+ :, None]).sum(1).unsqueeze(0).ge(1e-1)
+ pts_norm = (closest_normals*bary_weights[:, :, None]).sum(
+ 1).unsqueeze(0) * torch.tensor([-1.0, 1.0, -1.0]).type_as(normals)
+ pts_norm = F.normalize(pts_norm, dim=2)
+ pts_dist = torch.sqrt(residues) / torch.sqrt(torch.tensor(3))
+
+ pts_signs = 2.0 * (check_sign(verts, faces[0], points).float() - 0.5)
+ pts_sdf = (pts_dist * pts_signs).unsqueeze(-1)
+
+ return pts_sdf.view(Bsize, -1, 1), pts_norm.view(Bsize, -1, 3), pts_cmap.view(Bsize, -1, 3), pts_vis.view(Bsize, -1, 1)
+
+
+def orthogonal(points, calibrations, transforms=None):
+ '''
+ Compute the orthogonal projections of 3D points into the image plane by given projection matrix
+ :param points: [B, 3, N] Tensor of 3D points
+ :param calibrations: [B, 3, 4] Tensor of projection matrix
+ :param transforms: [B, 2, 3] Tensor of image transform matrix
+ :return: xyz: [B, 3, N] Tensor of xyz coordinates in the image plane
+ '''
+ rot = calibrations[:, :3, :3]
+ trans = calibrations[:, :3, 3:4]
+ pts = torch.baddbmm(trans, rot, points) # [B, 3, N]
+ if transforms is not None:
+ scale = transforms[:2, :2]
+ shift = transforms[:2, 2:3]
+ pts[:, :2, :] = torch.baddbmm(shift, scale, pts[:, :2, :])
+ return pts
+
+
+def projection(points, calib, format='numpy'):
+ if format == 'tensor':
+ return torch.mm(calib[:3, :3], points.T).T + calib[:3, 3]
+ else:
+ return np.matmul(calib[:3, :3], points.T).T + calib[:3, 3]
+
+
+def load_calib(calib_path):
+ calib_data = np.loadtxt(calib_path, dtype=float)
+ extrinsic = calib_data[:4, :4]
+ intrinsic = calib_data[4:8, :4]
+ calib_mat = np.matmul(intrinsic, extrinsic)
+ calib_mat = torch.from_numpy(calib_mat).float()
+ return calib_mat
+
+
+def load_obj_mesh_for_Hoppe(mesh_file):
+ vertex_data = []
+ face_data = []
+
+ if isinstance(mesh_file, str):
+ f = open(mesh_file, "r")
+ else:
+ f = mesh_file
+ for line in f:
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ if line.startswith('#'):
+ continue
+ values = line.split()
+ if not values:
+ continue
+
+ if values[0] == 'v':
+ v = list(map(float, values[1:4]))
+ vertex_data.append(v)
+
+ elif values[0] == 'f':
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[0]),
+ [values[3], values[4], values[1]]))
+ face_data.append(f)
+ # tri mesh
+ else:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+
+ vertices = np.array(vertex_data)
+ faces = np.array(face_data)
+ faces[faces > 0] -= 1
+
+ normals, _ = compute_normal(vertices, faces)
+
+ return vertices, normals, faces
+
+
+def load_obj_mesh_with_color(mesh_file):
+ vertex_data = []
+ color_data = []
+ face_data = []
+
+ if isinstance(mesh_file, str):
+ f = open(mesh_file, "r")
+ else:
+ f = mesh_file
+ for line in f:
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ if line.startswith('#'):
+ continue
+ values = line.split()
+ if not values:
+ continue
+
+ if values[0] == 'v':
+ v = list(map(float, values[1:4]))
+ vertex_data.append(v)
+ c = list(map(float, values[4:7]))
+ color_data.append(c)
+
+ elif values[0] == 'f':
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[0]),
+ [values[3], values[4], values[1]]))
+ face_data.append(f)
+ # tri mesh
+ else:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+
+ vertices = np.array(vertex_data)
+ colors = np.array(color_data)
+ faces = np.array(face_data)
+ faces[faces > 0] -= 1
+
+ return vertices, colors, faces
+
+
+def load_obj_mesh(mesh_file, with_normal=False, with_texture=False):
+ vertex_data = []
+ norm_data = []
+ uv_data = []
+
+ face_data = []
+ face_norm_data = []
+ face_uv_data = []
+
+ if isinstance(mesh_file, str):
+ f = open(mesh_file, "r")
+ else:
+ f = mesh_file
+ for line in f:
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ if line.startswith('#'):
+ continue
+ values = line.split()
+ if not values:
+ continue
+
+ if values[0] == 'v':
+ v = list(map(float, values[1:4]))
+ vertex_data.append(v)
+ elif values[0] == 'vn':
+ vn = list(map(float, values[1:4]))
+ norm_data.append(vn)
+ elif values[0] == 'vt':
+ vt = list(map(float, values[1:3]))
+ uv_data.append(vt)
+
+ elif values[0] == 'f':
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[0]),
+ [values[3], values[4], values[1]]))
+ face_data.append(f)
+ # tri mesh
+ else:
+ f = list(map(lambda x: int(x.split('/')[0]), values[1:4]))
+ face_data.append(f)
+
+ # deal with texture
+ if len(values[1].split('/')) >= 2:
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[1]), values[1:4]))
+ face_uv_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[1]),
+ [values[3], values[4], values[1]]))
+ face_uv_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[1]) != 0:
+ f = list(map(lambda x: int(x.split('/')[1]), values[1:4]))
+ face_uv_data.append(f)
+ # deal with normal
+ if len(values[1].split('/')) == 3:
+ # quad mesh
+ if len(values) > 4:
+ f = list(map(lambda x: int(x.split('/')[2]), values[1:4]))
+ face_norm_data.append(f)
+ f = list(
+ map(lambda x: int(x.split('/')[2]),
+ [values[3], values[4], values[1]]))
+ face_norm_data.append(f)
+ # tri mesh
+ elif len(values[1].split('/')[2]) != 0:
+ f = list(map(lambda x: int(x.split('/')[2]), values[1:4]))
+ face_norm_data.append(f)
+
+ vertices = np.array(vertex_data)
+ faces = np.array(face_data)
+ faces[faces > 0] -= 1
+
+ if with_texture and with_normal:
+ uvs = np.array(uv_data)
+ face_uvs = np.array(face_uv_data)
+ face_uvs[face_uvs > 0] -= 1
+ norms = np.array(norm_data)
+ if norms.shape[0] == 0:
+ norms, _ = compute_normal(vertices, faces)
+ face_normals = faces
+ else:
+ norms = normalize_v3(norms)
+ face_normals = np.array(face_norm_data)
+ face_normals[face_normals > 0] -= 1
+ return vertices, faces, norms, face_normals, uvs, face_uvs
+
+ if with_texture:
+ uvs = np.array(uv_data)
+ face_uvs = np.array(face_uv_data) - 1
+ return vertices, faces, uvs, face_uvs
+
+ if with_normal:
+ norms = np.array(norm_data)
+ norms = normalize_v3(norms)
+ face_normals = np.array(face_norm_data) - 1
+ return vertices, faces, norms, face_normals
+
+ return vertices, faces
+
+
+def normalize_v3(arr):
+ ''' Normalize a numpy array of 3 component vectors shape=(n,3) '''
+ lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2)
+ eps = 0.00000001
+ lens[lens < eps] = eps
+ arr[:, 0] /= lens
+ arr[:, 1] /= lens
+ arr[:, 2] /= lens
+ return arr
+
+
+def compute_normal(vertices, faces):
+ # Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal
+ vert_norms = np.zeros(vertices.shape, dtype=vertices.dtype)
+ # Create an indexed view into the vertex array using the array of three indices for triangles
+ tris = vertices[faces]
+ # Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle
+ face_norms = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])
+    # face_norms is now an array of per-triangle normals. The length of each normal depends on the vertices,
+    # so we need to normalize these so that the next step weights each normal equally.
+ normalize_v3(face_norms)
+ # now we have a normalized array of normals, one per triangle, i.e., per triangle normals.
+ # But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle,
+ # the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards.
+ # The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array
+ vert_norms[faces[:, 0]] += face_norms
+ vert_norms[faces[:, 1]] += face_norms
+ vert_norms[faces[:, 2]] += face_norms
+ normalize_v3(vert_norms)
+
+ return vert_norms, face_norms
+
+
+def save_obj_mesh(mesh_path, verts, faces):
+ file = open(mesh_path, 'w')
+ for v in verts:
+ file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
+ for f in faces:
+ f_plus = f + 1
+ file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
+ file.close()
+
+
+def save_obj_mesh_with_color(mesh_path, verts, faces, colors):
+ file = open(mesh_path, 'w')
+
+ for idx, v in enumerate(verts):
+ c = colors[idx]
+ file.write('v %.4f %.4f %.4f %.4f %.4f %.4f\n' %
+ (v[0], v[1], v[2], c[0], c[1], c[2]))
+ for f in faces:
+ f_plus = f + 1
+ file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2]))
+ file.close()
+
+
+def calculate_mIoU(outputs, labels):
+
+ SMOOTH = 1e-6
+
+ outputs = outputs.int()
+ labels = labels.int()
+
+ intersection = (
+ outputs
+ & labels).float().sum() # Will be zero if Truth=0 or Prediction=0
+    union = (outputs | labels).float().sum()  # Will be zero if both are 0
+
+    iou = (intersection + SMOOTH) / (union + SMOOTH
+                                     )  # We smooth our division to avoid 0/0
+
+    thresholded = torch.clamp(
+        20 * (iou - 0.5), 0,
+        10).ceil() / 10  # This is equivalent to comparing with thresholds
+
+ return thresholded.mean().detach().cpu().numpy(
+ ) # Or thresholded.mean() if you are interested in average across the batch
+
+
+def mask_filter(mask, number=1000):
+ """only keep {number} True items within a mask
+ Args:
+ mask (bool array): [N, ]
+ number (int, optional): total True item. Defaults to 1000.
+ """
+ true_ids = np.where(mask)[0]
+ keep_ids = np.random.choice(true_ids, size=number)
+ filter_mask = np.isin(np.arange(len(mask)), keep_ids)
+
+ return filter_mask
+
+
+def query_mesh(path):
+
+ verts, faces_idx, _ = load_obj(path)
+
+ return verts, faces_idx.verts_idx
+
+
+def add_alpha(colors, alpha=0.7):
+
+ colors_pad = np.pad(colors, ((0, 0), (0, 1)),
+ mode='constant',
+ constant_values=alpha)
+
+ return colors_pad
+
+
+def get_optim_grid_image(per_loop_lst, loss=None, nrow=4, type='smpl'):
+
+ font_path = os.path.join(os.path.dirname(__file__), "tbfo.ttf")
+ font = ImageFont.truetype(font_path, 30)
+ grid_img = torchvision.utils.make_grid(torch.cat(per_loop_lst, dim=0),
+ nrow=nrow)
+ grid_img = Image.fromarray(
+ ((grid_img.permute(1, 2, 0).detach().cpu().numpy() + 1.0) * 0.5 *
+ 255.0).astype(np.uint8))
+
+ # add text
+ draw = ImageDraw.Draw(grid_img)
+ grid_size = 512
+ if loss is not None:
+ draw.text((10, 5), f"error: {loss:.3f}", (255, 0, 0), font=font)
+
+ if type == 'smpl':
+ for col_id, col_txt in enumerate(
+ ['image', 'smpl-norm(render)', 'cloth-norm(pred)', 'diff-norm', 'diff-mask']):
+ draw.text((10+(col_id*grid_size), 5),
+ col_txt, (255, 0, 0), font=font)
+ elif type == 'cloth':
+ for col_id, col_txt in enumerate(
+ ['image', 'cloth-norm(recon)', 'cloth-norm(pred)', 'diff-norm']):
+ draw.text((10+(col_id*grid_size), 5),
+ col_txt, (255, 0, 0), font=font)
+ for col_id, col_txt in enumerate(
+ ['0', '90', '180', '270']):
+ draw.text((10+(col_id*grid_size), grid_size*2+5),
+ col_txt, (255, 0, 0), font=font)
+ else:
+ print(f"{type} should be 'smpl' or 'cloth'")
+
+ grid_img = grid_img.resize((grid_img.size[0], grid_img.size[1]),
+ Image.ANTIALIAS)
+
+ return grid_img
+
+
+def clean_mesh(verts, faces):
+
+ device = verts.device
+
+ mesh_lst = trimesh.Trimesh(verts.detach().cpu().numpy(),
+ faces.detach().cpu().numpy())
+ mesh_lst = mesh_lst.split(only_watertight=False)
+ comp_num = [mesh.vertices.shape[0] for mesh in mesh_lst]
+ mesh_clean = mesh_lst[comp_num.index(max(comp_num))]
+
+ final_verts = torch.as_tensor(mesh_clean.vertices).float().to(device)
+ final_faces = torch.as_tensor(mesh_clean.faces).int().to(device)
+
+ return final_verts, final_faces
+
+
+def merge_mesh(verts_A, faces_A, verts_B, faces_B, color=False):
+
+ sep_mesh = trimesh.Trimesh(np.concatenate([verts_A, verts_B], axis=0),
+ np.concatenate(
+ [faces_A, faces_B + faces_A.max() + 1],
+ axis=0),
+ maintain_order=True,
+ process=False)
+ if color:
+ colors = np.ones_like(sep_mesh.vertices)
+ colors[:verts_A.shape[0]] *= np.array([255.0, 0.0, 0.0])
+ colors[verts_A.shape[0]:] *= np.array([0.0, 255.0, 0.0])
+ sep_mesh.visual.vertex_colors = colors
+
+ # union_mesh = trimesh.boolean.union([trimesh.Trimesh(verts_A, faces_A),
+ # trimesh.Trimesh(verts_B, faces_B)], engine='blender')
+
+ return sep_mesh
+
+
+def mesh_move(mesh_lst, step, scale=1.0):
+
+ trans = np.array([1.0, 0.0, 0.0]) * step
+
+ resize_matrix = trimesh.transformations.scale_and_translate(
+ scale=(scale), translate=trans)
+
+ results = []
+
+ for mesh in mesh_lst:
+ mesh.apply_transform(resize_matrix)
+ results.append(mesh)
+
+ return results
+
+
+class SMPLX():
+ def __init__(self):
+
+ REPO_ID = "Yuliang/SMPL"
+
+ self.smpl_verts_path = hf_hub_download(REPO_ID, filename='smpl_data/smpl_verts.npy', use_auth_token=os.environ['ICON'])
+ self.smplx_verts_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_verts.npy', use_auth_token=os.environ['ICON'])
+ self.faces_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_faces.npy', use_auth_token=os.environ['ICON'])
+ self.cmap_vert_path = hf_hub_download(REPO_ID, filename='smpl_data/smplx_cmap.npy', use_auth_token=os.environ['ICON'])
+
+ self.faces = np.load(self.faces_path)
+ self.verts = np.load(self.smplx_verts_path)
+ self.smpl_verts = np.load(self.smpl_verts_path)
+
+ self.model_dir = hf_hub_url(REPO_ID, filename='models')
+ self.tedra_dir = hf_hub_url(REPO_ID, filename='tedra_data')
+
+ def get_smpl_mat(self, vert_ids):
+
+ mat = torch.as_tensor(np.load(self.cmap_vert_path)).float()
+ return mat[vert_ids, :]
+
+ def smpl2smplx(self, vert_ids=None):
+ """convert vert_ids in smpl to vert_ids in smplx
+ Args:
+ vert_ids ([int.array]): [n, knn_num]
+ """
+ smplx_tree = cKDTree(self.verts, leafsize=1)
+ _, ind = smplx_tree.query(self.smpl_verts, k=1) # ind: [smpl_num, 1]
+
+ if vert_ids is not None:
+ smplx_vert_ids = ind[vert_ids]
+ else:
+ smplx_vert_ids = ind
+
+ return smplx_vert_ids
+
+ def smplx2smpl(self, vert_ids=None):
+ """convert vert_ids in smplx to vert_ids in smpl
+ Args:
+ vert_ids ([int.array]): [n, knn_num]
+ """
+ smpl_tree = cKDTree(self.smpl_verts, leafsize=1)
+ _, ind = smpl_tree.query(self.verts, k=1) # ind: [smplx_num, 1]
+ if vert_ids is not None:
+ smpl_vert_ids = ind[vert_ids]
+ else:
+ smpl_vert_ids = ind
+
+ return smpl_vert_ids
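The 6D pose layout produced in TestDataset.__getitem__ (rotmat[..., :2] flattened row-wise) is exactly what rot6d_to_rotmat above inverts; a small consistency sketch:

    import torch
    from lib.dataset.mesh_util import rot6d_to_rotmat

    rotmat = torch.eye(3)[None, None].repeat(1, 23, 1, 1)    # [1, 23, 3, 3]
    pose6d = rotmat[:, :, :, :2].reshape(1, 23, -1)          # [1, 23, 6], as in TestDataset
    recovered = rot6d_to_rotmat(pose6d.reshape(-1, 6)).view(1, 23, 3, 3)
    assert torch.allclose(recovered, rotmat, atol=1e-6)      # exact round trip for valid rotations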
diff --git a/lib/dataset/tbfo.ttf b/lib/dataset/tbfo.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..6cc76fcd568a5a42edd71272a19b15214de0b0d5
Binary files /dev/null and b/lib/dataset/tbfo.ttf differ
diff --git a/lib/net/BasePIFuNet.py b/lib/net/BasePIFuNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..674cd61e958f16db78823005630fcb9e4bb80657
--- /dev/null
+++ b/lib/net/BasePIFuNet.py
@@ -0,0 +1,84 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import torch.nn as nn
+import pytorch_lightning as pl
+
+from .geometry import index, orthogonal, perspective
+
+
+class BasePIFuNet(pl.LightningModule):
+ def __init__(
+ self,
+ projection_mode='orthogonal',
+ error_term=nn.MSELoss(),
+ ):
+ """
+ :param projection_mode:
+ Either orthogonal or perspective.
+ It will call the corresponding function for projection.
+ :param error_term:
+ nn Loss between the predicted [B, Res, N] and the label [B, Res, N]
+ """
+ super(BasePIFuNet, self).__init__()
+ self.name = 'base'
+
+ self.error_term = error_term
+
+ self.index = index
+ self.projection = orthogonal if projection_mode == 'orthogonal' else perspective
+
+ def forward(self, points, images, calibs, transforms=None):
+ '''
+ :param points: [B, 3, N] world space coordinates of points
+ :param images: [B, C, H, W] input images
+ :param calibs: [B, 3, 4] calibration matrices for each image
+ :param transforms: Optional [B, 2, 3] image space coordinate transforms
+ :return: [B, Res, N] predictions for each point
+ '''
+ features = self.filter(images)
+ preds = self.query(features, points, calibs, transforms)
+ return preds
+
+ def filter(self, images):
+ '''
+ Filter the input images
+ store all intermediate features.
+ :param images: [B, C, H, W] input images
+ '''
+ return None
+
+ def query(self, features, points, calibs, transforms=None):
+ '''
+ Given 3D points, query the network predictions for each point.
+ Image features should be pre-computed before this call.
+ store all intermediate features.
+ query() function may behave differently during training/testing.
+ :param points: [B, 3, N] world space coordinates of points
+ :param calibs: [B, 3, 4] calibration matrices for each image
+ :param transforms: Optional [B, 2, 3] image space coordinate transforms
+ :param labels: Optional [B, Res, N] gt labeling
+ :return: [B, Res, N] predictions for each point
+ '''
+ return None
+
+ def get_error(self, preds, labels):
+ '''
+ Get the network loss from the last query
+ :return: loss term
+ '''
+ return self.error_term(preds, labels)
\ No newline at end of file
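A hypothetical minimal subclass illustrating the filter/query contract that BasePIFuNet expects; layer choices and names here are illustrative only, not the project's actual networks:

    import torch
    import torch.nn as nn
    from lib.net.BasePIFuNet import BasePIFuNet

    class ToyPIFuNet(BasePIFuNet):
        def __init__(self):
            super().__init__(projection_mode='orthogonal')
            self.backbone = nn.Conv2d(3, 8, 3, padding=1)

        def filter(self, images):
            return self.backbone(images)                        # [B, 8, H, W]

        def query(self, features, points, calibs, transforms=None):
            xyz = self.projection(points, calibs, transforms)   # [B, 3, N]
            feat = self.index(features, xyz[:, :2, :])          # [B, 8, N]
            return feat.mean(dim=1, keepdim=True)               # [B, 1, N] prediction

    net = ToyPIFuNet()
    preds = net(torch.rand(2, 3, 64, 64),           # images
                torch.rand(2, 3, 50),               # points
                torch.eye(3, 4).expand(2, 3, 4))    # calibs
    print(preds.shape)                              # torch.Size([2, 1, 50])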
diff --git a/lib/net/VE.py b/lib/net/VE.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfcef12c42cf935db00b26a114c3d3f0642de5a8
--- /dev/null
+++ b/lib/net/VE.py
@@ -0,0 +1,183 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+
+import torch.nn as nn
+import pytorch_lightning as pl
+
+
+class BaseNetwork(pl.LightningModule):
+ def __init__(self):
+ super(BaseNetwork, self).__init__()
+
+ def init_weights(self, init_type='xavier', gain=0.02):
+ '''
+ initializes network's weights
+ init_type: normal | xavier | kaiming | orthogonal
+ https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
+ '''
+ def init_func(m):
+ classname = m.__class__.__name__
+ if hasattr(m, 'weight') and (classname.find('Conv') != -1
+ or classname.find('Linear') != -1):
+ if init_type == 'normal':
+ nn.init.normal_(m.weight.data, 0.0, gain)
+ elif init_type == 'xavier':
+ nn.init.xavier_normal_(m.weight.data, gain=gain)
+ elif init_type == 'kaiming':
+ nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
+ elif init_type == 'orthogonal':
+ nn.init.orthogonal_(m.weight.data, gain=gain)
+
+ if hasattr(m, 'bias') and m.bias is not None:
+ nn.init.constant_(m.bias.data, 0.0)
+
+ elif classname.find('BatchNorm2d') != -1:
+ nn.init.normal_(m.weight.data, 1.0, gain)
+ nn.init.constant_(m.bias.data, 0.0)
+
+ self.apply(init_func)
+
+
+class Residual3D(BaseNetwork):
+ def __init__(self, numIn, numOut):
+ super(Residual3D, self).__init__()
+ self.numIn = numIn
+ self.numOut = numOut
+ self.with_bias = True
+ # self.bn = nn.GroupNorm(4, self.numIn)
+ self.bn = nn.BatchNorm3d(self.numIn)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv1 = nn.Conv3d(self.numIn,
+ self.numOut,
+ bias=self.with_bias,
+ kernel_size=3,
+ stride=1,
+ padding=2,
+ dilation=2)
+ # self.bn1 = nn.GroupNorm(4, self.numOut)
+ self.bn1 = nn.BatchNorm3d(self.numOut)
+ self.conv2 = nn.Conv3d(self.numOut,
+ self.numOut,
+ bias=self.with_bias,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+ # self.bn2 = nn.GroupNorm(4, self.numOut)
+ self.bn2 = nn.BatchNorm3d(self.numOut)
+ self.conv3 = nn.Conv3d(self.numOut,
+ self.numOut,
+ bias=self.with_bias,
+ kernel_size=3,
+ stride=1,
+ padding=1)
+
+ if self.numIn != self.numOut:
+ self.conv4 = nn.Conv3d(self.numIn,
+ self.numOut,
+ bias=self.with_bias,
+ kernel_size=1)
+ self.init_weights()
+
+ def forward(self, x):
+ residual = x
+ # out = self.bn(x)
+ # out = self.relu(out)
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+ out = self.bn2(out)
+ # out = self.conv3(out)
+ # out = self.relu(out)
+
+ if self.numIn != self.numOut:
+ residual = self.conv4(x)
+
+ return out + residual
+
+
+class VolumeEncoder(BaseNetwork):
+    """CycleGAN Encoder"""
+
+ def __init__(self, num_in=3, num_out=32, num_stacks=2):
+ super(VolumeEncoder, self).__init__()
+ self.num_in = num_in
+ self.num_out = num_out
+ self.num_inter = 8
+ self.num_stacks = num_stacks
+ self.with_bias = True
+
+ self.relu = nn.ReLU(inplace=True)
+ self.conv1 = nn.Conv3d(self.num_in,
+ self.num_inter,
+ bias=self.with_bias,
+ kernel_size=5,
+ stride=2,
+ padding=4,
+ dilation=2)
+ # self.bn1 = nn.GroupNorm(4, self.num_inter)
+ self.bn1 = nn.BatchNorm3d(self.num_inter)
+ self.conv2 = nn.Conv3d(self.num_inter,
+ self.num_out,
+ bias=self.with_bias,
+ kernel_size=5,
+ stride=2,
+ padding=4,
+ dilation=2)
+ # self.bn2 = nn.GroupNorm(4, self.num_out)
+ self.bn2 = nn.BatchNorm3d(self.num_out)
+
+ self.conv_out1 = nn.Conv3d(self.num_out,
+ self.num_out,
+ bias=self.with_bias,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ dilation=1)
+ self.conv_out2 = nn.Conv3d(self.num_out,
+ self.num_out,
+ bias=self.with_bias,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ dilation=1)
+
+ for idx in range(self.num_stacks):
+ self.add_module("res" + str(idx),
+ Residual3D(self.num_out, self.num_out))
+
+ self.init_weights()
+
+ def forward(self, x, intermediate_output=True):
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out_lst = []
+ for idx in range(self.num_stacks):
+ out = self._modules["res" + str(idx)](out)
+ out_lst.append(out)
+
+ if intermediate_output:
+ return out_lst
+ else:
+ return [out_lst[-1]]
\ No newline at end of file
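A quick shape sketch for VolumeEncoder; the 32-cubed input resolution is only illustrative (each stride-2 conv roughly halves the grid):

    import torch
    from lib.net.VE import VolumeEncoder

    ve = VolumeEncoder(num_in=3, num_out=32, num_stacks=2)
    vox = torch.randn(1, 3, 32, 32, 32)            # e.g. a padded voxelized-SMPL feature volume
    feats = ve(vox, intermediate_output=True)      # one tensor per residual stack
    print([tuple(f.shape) for f in feats])         # [(1, 32, 8, 8, 8), (1, 32, 8, 8, 8)]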
diff --git a/lib/net/__init__.py b/lib/net/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d300c0941f58c2172bab5dcdadb309e80d3c2b4d
--- /dev/null
+++ b/lib/net/__init__.py
@@ -0,0 +1,4 @@
+from .BasePIFuNet import BasePIFuNet
+from .HGPIFuNet import HGPIFuNet
+from .NormalNet import NormalNet
+from .VE import VolumeEncoder
\ No newline at end of file
diff --git a/lib/net/geometry.py b/lib/net/geometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..27958c565cc933ad9114b078094727231c97d49d
--- /dev/null
+++ b/lib/net/geometry.py
@@ -0,0 +1,82 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+import torch
+
+
+def index(feat, uv):
+ '''
+ :param feat: [B, C, H, W] image features
+ :param uv: [B, 2, N] uv coordinates in the image plane, range [0, 1]
+ :return: [B, C, N] image features at the uv coordinates
+ '''
+ uv = uv.transpose(1, 2) # [B, N, 2]
+
+ (B, N, _) = uv.shape
+ C = feat.shape[1]
+
+ if uv.shape[-1] == 3:
+ # uv = uv[:,:,[2,1,0]]
+ # uv = uv * torch.tensor([1.0,-1.0,1.0]).type_as(uv)[None,None,...]
+ uv = uv.unsqueeze(2).unsqueeze(3) # [B, N, 1, 1, 3]
+ else:
+ uv = uv.unsqueeze(2) # [B, N, 1, 2]
+
+ # NOTE: for newer PyTorch, training results may degrade due to an implementation change in F.grid_sample;
+ # for old versions, simply remove the align_corners argument.
+ samples = torch.nn.functional.grid_sample(
+ feat, uv, align_corners=True) # [B, C, N, 1]
+ return samples.view(B, C, N) # [B, C, N]
+
+
+def orthogonal(points, calibrations, transforms=None):
+ '''
+ Compute the orthogonal projections of 3D points into the image plane by given projection matrix
+ :param points: [B, 3, N] Tensor of 3D points
+ :param calibrations: [B, 3, 4] Tensor of projection matrix
+ :param transforms: [B, 2, 3] Tensor of image transform matrix
+ :return: xyz: [B, 3, N] Tensor of xyz coordinates in the image plane
+ '''
+ rot = calibrations[:, :3, :3]
+ trans = calibrations[:, :3, 3:4]
+ pts = torch.baddbmm(trans, rot, points) # [B, 3, N]
+ if transforms is not None:
+ scale = transforms[:, :2, :2]
+ shift = transforms[:, :2, 2:3]
+ pts[:, :2, :] = torch.baddbmm(shift, scale, pts[:, :2, :])
+ return pts
+
+
+def perspective(points, calibrations, transforms=None):
+ '''
+ Compute the perspective projections of 3D points into the image plane by given projection matrix
+ :param points: [Bx3xN] Tensor of 3D points
+ :param calibrations: [Bx3x4] Tensor of projection matrix
+ :param transforms: [Bx2x3] Tensor of image transform matrix
+ :return: xy: [Bx2xN] Tensor of xy coordinates in the image plane
+ '''
+ rot = calibrations[:, :3, :3]
+ trans = calibrations[:, :3, 3:4]
+ homo = torch.baddbmm(trans, rot, points) # [B, 3, N]
+ xy = homo[:, :2, :] / homo[:, 2:3, :]
+ if transforms is not None:
+ scale = transforms[:, :2, :2]
+ shift = transforms[:, :2, 2:3]
+ xy = torch.baddbmm(shift, scale, xy)
+
+ xyz = torch.cat([xy, homo[:, 2:3, :]], 1)
+ return xyz
\ No newline at end of file
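
`orthogonal` applies the 3x4 calibration as `rot @ points + trans` via `baddbmm`; `perspective` additionally divides by depth. A small sanity-check sketch (identity calibration, so the points project to themselves), assuming the repo root is on `sys.path`:

```python
import torch
from lib.net.geometry import orthogonal

B, N = 2, 5
points = torch.randn(B, 3, N)
calib = torch.eye(4)[:3].unsqueeze(0).repeat(B, 1, 1)   # [B, 3, 4]: rot = I, trans = 0
xyz = orthogonal(points, calib)
assert torch.allclose(xyz, points)
```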
diff --git a/lib/net/FBNet.py b/lib/net/FBNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..dab9b7cae8869cb0d907cbb5cfac79b5cb016644
--- /dev/null
+++ b/lib/net/FBNet.py
@@ -0,0 +1,384 @@
+'''
+Copyright (C) 2019 NVIDIA Corporation. Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu.
+BSD License. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
+DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+'''
+import torch
+import torch.nn as nn
+import functools
+import numpy as np
+import pytorch_lightning as pl
+
+
+###############################################################################
+# Functions
+###############################################################################
+def weights_init(m):
+ classname = m.__class__.__name__
+ if classname.find('Conv') != -1:
+ m.weight.data.normal_(0.0, 0.02)
+ elif classname.find('BatchNorm2d') != -1:
+ m.weight.data.normal_(1.0, 0.02)
+ m.bias.data.fill_(0)
+
+
+def get_norm_layer(norm_type='instance'):
+ if norm_type == 'batch':
+ norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
+ elif norm_type == 'instance':
+ norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
+ else:
+ raise NotImplementedError('normalization layer [%s] is not found' %
+ norm_type)
+ return norm_layer
+
+
+def define_G(input_nc,
+ output_nc,
+ ngf,
+ netG,
+ n_downsample_global=3,
+ n_blocks_global=9,
+ n_local_enhancers=1,
+ n_blocks_local=3,
+ norm='instance',
+ gpu_ids=[],
+ last_op=nn.Tanh()):
+ norm_layer = get_norm_layer(norm_type=norm)
+ if netG == 'global':
+ netG = GlobalGenerator(input_nc,
+ output_nc,
+ ngf,
+ n_downsample_global,
+ n_blocks_global,
+ norm_layer,
+ last_op=last_op)
+ elif netG == 'local':
+ netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global,
+ n_blocks_global, n_local_enhancers,
+ n_blocks_local, norm_layer)
+ elif netG == 'encoder':
+ netG = Encoder(input_nc, output_nc, ngf, n_downsample_global,
+ norm_layer)
+ else:
+ raise NotImplementedError('generator not implemented!')
+ # print(netG)
+ if len(gpu_ids) > 0:
+ assert (torch.cuda.is_available())
+ device = torch.device(f"cuda:{gpu_ids[0]}")
+ netG = netG.to(device)
+ netG.apply(weights_init)
+ return netG
+
+
+def print_network(net):
+ if isinstance(net, list):
+ net = net[0]
+ num_params = 0
+ for param in net.parameters():
+ num_params += param.numel()
+ print(net)
+ print('Total number of parameters: %d' % num_params)
+
+
+##############################################################################
+# Generator
+##############################################################################
+class LocalEnhancer(pl.LightningModule):
+ def __init__(self,
+ input_nc,
+ output_nc,
+ ngf=32,
+ n_downsample_global=3,
+ n_blocks_global=9,
+ n_local_enhancers=1,
+ n_blocks_local=3,
+ norm_layer=nn.BatchNorm2d,
+ padding_type='reflect'):
+ super(LocalEnhancer, self).__init__()
+ self.n_local_enhancers = n_local_enhancers
+
+ ###### global generator model #####
+ ngf_global = ngf * (2**n_local_enhancers)
+ model_global = GlobalGenerator(input_nc, output_nc, ngf_global,
+ n_downsample_global, n_blocks_global,
+ norm_layer).model
+ model_global = [model_global[i] for i in range(len(model_global) - 3)
+ ] # get rid of final convolution layers
+ self.model = nn.Sequential(*model_global)
+
+ ###### local enhancer layers #####
+ for n in range(1, n_local_enhancers + 1):
+ # downsample
+ ngf_global = ngf * (2**(n_local_enhancers - n))
+ model_downsample = [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
+ norm_layer(ngf_global),
+ nn.ReLU(True),
+ nn.Conv2d(ngf_global,
+ ngf_global * 2,
+ kernel_size=3,
+ stride=2,
+ padding=1),
+ norm_layer(ngf_global * 2),
+ nn.ReLU(True)
+ ]
+ # residual blocks
+ model_upsample = []
+ for i in range(n_blocks_local):
+ model_upsample += [
+ ResnetBlock(ngf_global * 2,
+ padding_type=padding_type,
+ norm_layer=norm_layer)
+ ]
+
+ # upsample
+ model_upsample += [
+ nn.ConvTranspose2d(ngf_global * 2,
+ ngf_global,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1),
+ norm_layer(ngf_global),
+ nn.ReLU(True)
+ ]
+
+ # final convolution
+ if n == n_local_enhancers:
+ model_upsample += [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
+ nn.Tanh()
+ ]
+
+ setattr(self, 'model' + str(n) + '_1',
+ nn.Sequential(*model_downsample))
+ setattr(self, 'model' + str(n) + '_2',
+ nn.Sequential(*model_upsample))
+
+ self.downsample = nn.AvgPool2d(3,
+ stride=2,
+ padding=[1, 1],
+ count_include_pad=False)
+
+ def forward(self, input):
+ # create input pyramid
+ input_downsampled = [input]
+ for i in range(self.n_local_enhancers):
+ input_downsampled.append(self.downsample(input_downsampled[-1]))
+
+ # output at coarsest level
+ output_prev = self.model(input_downsampled[-1])
+ # build up one layer at a time
+ for n_local_enhancers in range(1, self.n_local_enhancers + 1):
+ model_downsample = getattr(self,
+ 'model' + str(n_local_enhancers) + '_1')
+ model_upsample = getattr(self,
+ 'model' + str(n_local_enhancers) + '_2')
+ input_i = input_downsampled[self.n_local_enhancers -
+ n_local_enhancers]
+ output_prev = model_upsample(
+ model_downsample(input_i) + output_prev)
+ return output_prev
+
+
+class GlobalGenerator(pl.LightningModule):
+ def __init__(self,
+ input_nc,
+ output_nc,
+ ngf=64,
+ n_downsampling=3,
+ n_blocks=9,
+ norm_layer=nn.BatchNorm2d,
+ padding_type='reflect',
+ last_op=nn.Tanh()):
+ assert (n_blocks >= 0)
+ super(GlobalGenerator, self).__init__()
+ activation = nn.ReLU(True)
+
+ model = [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
+ norm_layer(ngf), activation
+ ]
+ # downsample
+ for i in range(n_downsampling):
+ mult = 2**i
+ model += [
+ nn.Conv2d(ngf * mult,
+ ngf * mult * 2,
+ kernel_size=3,
+ stride=2,
+ padding=1),
+ norm_layer(ngf * mult * 2), activation
+ ]
+
+ # resnet blocks
+ mult = 2**n_downsampling
+ for i in range(n_blocks):
+ model += [
+ ResnetBlock(ngf * mult,
+ padding_type=padding_type,
+ activation=activation,
+ norm_layer=norm_layer)
+ ]
+
+ # upsample
+ for i in range(n_downsampling):
+ mult = 2**(n_downsampling - i)
+ model += [
+ nn.ConvTranspose2d(ngf * mult,
+ int(ngf * mult / 2),
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1),
+ norm_layer(int(ngf * mult / 2)), activation
+ ]
+ model += [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)
+ ]
+ if last_op is not None:
+ model += [last_op]
+ self.model = nn.Sequential(*model)
+
+ def forward(self, input):
+ return self.model(input)
+
+
+# Define a resnet block
+class ResnetBlock(pl.LightningModule):
+ def __init__(self,
+ dim,
+ padding_type,
+ norm_layer,
+ activation=nn.ReLU(True),
+ use_dropout=False):
+ super(ResnetBlock, self).__init__()
+ self.conv_block = self.build_conv_block(dim, padding_type, norm_layer,
+ activation, use_dropout)
+
+ def build_conv_block(self, dim, padding_type, norm_layer, activation,
+ use_dropout):
+ conv_block = []
+ p = 0
+ if padding_type == 'reflect':
+ conv_block += [nn.ReflectionPad2d(1)]
+ elif padding_type == 'replicate':
+ conv_block += [nn.ReplicationPad2d(1)]
+ elif padding_type == 'zero':
+ p = 1
+ else:
+ raise NotImplementedError('padding [%s] is not implemented' %
+ padding_type)
+
+ conv_block += [
+ nn.Conv2d(dim, dim, kernel_size=3, padding=p),
+ norm_layer(dim), activation
+ ]
+ if use_dropout:
+ conv_block += [nn.Dropout(0.5)]
+
+ p = 0
+ if padding_type == 'reflect':
+ conv_block += [nn.ReflectionPad2d(1)]
+ elif padding_type == 'replicate':
+ conv_block += [nn.ReplicationPad2d(1)]
+ elif padding_type == 'zero':
+ p = 1
+ else:
+ raise NotImplementedError('padding [%s] is not implemented' %
+ padding_type)
+ conv_block += [
+ nn.Conv2d(dim, dim, kernel_size=3, padding=p),
+ norm_layer(dim)
+ ]
+
+ return nn.Sequential(*conv_block)
+
+ def forward(self, x):
+ out = x + self.conv_block(x)
+ return out
+
+
+class Encoder(pl.LightningModule):
+ def __init__(self,
+ input_nc,
+ output_nc,
+ ngf=32,
+ n_downsampling=4,
+ norm_layer=nn.BatchNorm2d):
+ super(Encoder, self).__init__()
+ self.output_nc = output_nc
+
+ model = [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
+ norm_layer(ngf),
+ nn.ReLU(True)
+ ]
+ # downsample
+ for i in range(n_downsampling):
+ mult = 2**i
+ model += [
+ nn.Conv2d(ngf * mult,
+ ngf * mult * 2,
+ kernel_size=3,
+ stride=2,
+ padding=1),
+ norm_layer(ngf * mult * 2),
+ nn.ReLU(True)
+ ]
+
+ # upsample
+ for i in range(n_downsampling):
+ mult = 2**(n_downsampling - i)
+ model += [
+ nn.ConvTranspose2d(ngf * mult,
+ int(ngf * mult / 2),
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ output_padding=1),
+ norm_layer(int(ngf * mult / 2)),
+ nn.ReLU(True)
+ ]
+
+ model += [
+ nn.ReflectionPad2d(3),
+ nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
+ nn.Tanh()
+ ]
+ self.model = nn.Sequential(*model)
+
+ def forward(self, input, inst):
+ outputs = self.model(input)
+
+ # instance-wise average pooling
+ outputs_mean = outputs.clone()
+ inst_list = np.unique(inst.cpu().numpy().astype(int))
+ for i in inst_list:
+ for b in range(input.size()[0]):
+ indices = (inst[b:b + 1] == int(i)).nonzero() # n x 4
+ for j in range(self.output_nc):
+ output_ins = outputs[indices[:, 0] + b, indices[:, 1] + j,
+ indices[:, 2], indices[:, 3]]
+ mean_feat = torch.mean(output_ins).expand_as(output_ins)
+ outputs_mean[indices[:, 0] + b, indices[:, 1] + j,
+ indices[:, 2], indices[:, 3]] = mean_feat
+ return outputs_mean
\ No newline at end of file
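
`define_G` with `netG='global'` builds the pix2pixHD-style `GlobalGenerator` that NormalNet uses below. A sketch with an assumed 6-channel input for illustration (the actual channel count is derived from `cfg.net.in_nml`):

```python
import torch
from lib.net.FBNet import define_G

netF = define_G(input_nc=6, output_nc=3, ngf=64, netG="global",
                n_downsample_global=4, n_blocks_global=9, norm="instance")
x = torch.randn(1, 6, 512, 512)
print(netF(x).shape)    # torch.Size([1, 3, 512, 512]), values in [-1, 1] via Tanh
```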
diff --git a/lib/net/HGFilters.py b/lib/net/HGFilters.py
new file mode 100644
index 0000000000000000000000000000000000000000..46b977807608861add77085f4ed48bf3585e928f
--- /dev/null
+++ b/lib/net/HGFilters.py
@@ -0,0 +1,197 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from lib.net.net_util import *
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class HourGlass(nn.Module):
+ def __init__(self, num_modules, depth, num_features, opt):
+ super(HourGlass, self).__init__()
+ self.num_modules = num_modules
+ self.depth = depth
+ self.features = num_features
+ self.opt = opt
+
+ self._generate_network(self.depth)
+
+ def _generate_network(self, level):
+ self.add_module('b1_' + str(level),
+ ConvBlock(self.features, self.features, self.opt))
+
+ self.add_module('b2_' + str(level),
+ ConvBlock(self.features, self.features, self.opt))
+
+ if level > 1:
+ self._generate_network(level - 1)
+ else:
+ self.add_module('b2_plus_' + str(level),
+ ConvBlock(self.features, self.features, self.opt))
+
+ self.add_module('b3_' + str(level),
+ ConvBlock(self.features, self.features, self.opt))
+
+ def _forward(self, level, inp):
+ # Upper branch
+ up1 = inp
+ up1 = self._modules['b1_' + str(level)](up1)
+
+ # Lower branch
+ low1 = F.avg_pool2d(inp, 2, stride=2)
+ low1 = self._modules['b2_' + str(level)](low1)
+
+ if level > 1:
+ low2 = self._forward(level - 1, low1)
+ else:
+ low2 = low1
+ low2 = self._modules['b2_plus_' + str(level)](low2)
+
+ low3 = low2
+ low3 = self._modules['b3_' + str(level)](low3)
+
+ # NOTE: for newer PyTorch (1.3~), it seems that training results are degraded due to implementation diff in F.grid_sample
+ # if the pretrained model behaves weirdly, switch with the commented line.
+ # NOTE: I also found that "bicubic" works better.
+ up2 = F.interpolate(low3,
+ scale_factor=2,
+ mode='bicubic',
+ align_corners=True)
+ # up2 = F.interpolate(low3, scale_factor=2, mode='nearest')
+
+ return up1 + up2
+
+ def forward(self, x):
+ return self._forward(self.depth, x)
+
+
+class HGFilter(nn.Module):
+ def __init__(self, opt, num_modules, in_dim):
+ super(HGFilter, self).__init__()
+ self.num_modules = num_modules
+
+ self.opt = opt
+ [k, s, d, p] = self.opt.conv1
+
+ # self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3)
+ self.conv1 = nn.Conv2d(in_dim,
+ 64,
+ kernel_size=k,
+ stride=s,
+ dilation=d,
+ padding=p)
+
+ if self.opt.norm == 'batch':
+ self.bn1 = nn.BatchNorm2d(64)
+ elif self.opt.norm == 'group':
+ self.bn1 = nn.GroupNorm(32, 64)
+
+ if self.opt.hg_down == 'conv64':
+ self.conv2 = ConvBlock(64, 64, self.opt)
+ self.down_conv2 = nn.Conv2d(64,
+ 128,
+ kernel_size=3,
+ stride=2,
+ padding=1)
+ elif self.opt.hg_down == 'conv128':
+ self.conv2 = ConvBlock(64, 128, self.opt)
+ self.down_conv2 = nn.Conv2d(128,
+ 128,
+ kernel_size=3,
+ stride=2,
+ padding=1)
+ elif self.opt.hg_down == 'ave_pool':
+ self.conv2 = ConvBlock(64, 128, self.opt)
+ else:
+ raise NameError('Unknown Fan Filter setting!')
+
+ self.conv3 = ConvBlock(128, 128, self.opt)
+ self.conv4 = ConvBlock(128, 256, self.opt)
+
+ # Stacking part
+ for hg_module in range(self.num_modules):
+ self.add_module('m' + str(hg_module),
+ HourGlass(1, opt.num_hourglass, 256, self.opt))
+
+ self.add_module('top_m_' + str(hg_module),
+ ConvBlock(256, 256, self.opt))
+ self.add_module(
+ 'conv_last' + str(hg_module),
+ nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
+ if self.opt.norm == 'batch':
+ self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
+ elif self.opt.norm == 'group':
+ self.add_module('bn_end' + str(hg_module),
+ nn.GroupNorm(32, 256))
+
+ self.add_module(
+ 'l' + str(hg_module),
+ nn.Conv2d(256,
+ opt.hourglass_dim,
+ kernel_size=1,
+ stride=1,
+ padding=0))
+
+ if hg_module < self.num_modules - 1:
+ self.add_module(
+ 'bl' + str(hg_module),
+ nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
+ self.add_module(
+ 'al' + str(hg_module),
+ nn.Conv2d(opt.hourglass_dim,
+ 256,
+ kernel_size=1,
+ stride=1,
+ padding=0))
+
+ def forward(self, x):
+ x = F.relu(self.bn1(self.conv1(x)), True)
+ tmpx = x
+ if self.opt.hg_down == 'ave_pool':
+ x = F.avg_pool2d(self.conv2(x), 2, stride=2)
+ elif self.opt.hg_down in ['conv64', 'conv128']:
+ x = self.conv2(x)
+ x = self.down_conv2(x)
+ else:
+ raise NameError('Unknown Fan Filter setting!')
+
+ x = self.conv3(x)
+ x = self.conv4(x)
+
+ previous = x
+
+ outputs = []
+ for i in range(self.num_modules):
+ hg = self._modules['m' + str(i)](previous)
+
+ ll = hg
+ ll = self._modules['top_m_' + str(i)](ll)
+
+ ll = F.relu(
+ self._modules['bn_end' + str(i)](
+ self._modules['conv_last' + str(i)](ll)), True)
+
+ # Predict heatmaps
+ tmp_out = self._modules['l' + str(i)](ll)
+ outputs.append(tmp_out)
+
+ if i < self.num_modules - 1:
+ ll = self._modules['bl' + str(i)](ll)
+ tmp_out_ = self._modules['al' + str(i)](tmp_out)
+ previous = previous + ll + tmp_out_
+
+ return outputs
\ No newline at end of file
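
`HGFilter` reads its convolution and normalization settings from an `opt` namespace (`conv1`, `conv3x3`, `norm`, `hg_down`, `num_hourglass`, `hourglass_dim`). A sketch with hand-picked, illustrative values rather than the project's config defaults:

```python
from types import SimpleNamespace
import torch
from lib.net.HGFilters import HGFilter

opt = SimpleNamespace(conv1=[7, 2, 1, 3],   # [kernel, stride, dilation, padding]
                      conv3x3=[3, 1, 1, 1],
                      norm="group",
                      hg_down="ave_pool",
                      num_hourglass=2,
                      hourglass_dim=256)
net = HGFilter(opt, num_modules=2, in_dim=6)
feats = net(torch.randn(1, 6, 512, 512))    # one output per stacked hourglass
print(len(feats), feats[-1].shape)          # 2 torch.Size([1, 256, 128, 128])
```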
diff --git a/lib/net/HGPIFuNet.py b/lib/net/HGPIFuNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e8f00fe1011a6293e0f37ed977d92e9fd1791ef
--- /dev/null
+++ b/lib/net/HGPIFuNet.py
@@ -0,0 +1,401 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from lib.net.voxelize import Voxelization
+from lib.dataset.mesh_util import cal_sdf_batch, feat_select, read_smpl_constants
+from lib.net.NormalNet import NormalNet
+from lib.net.MLP import MLP
+from lib.dataset.mesh_util import SMPLX
+from lib.net.VE import VolumeEncoder
+from lib.net.HGFilters import *
+from termcolor import colored
+from lib.net.BasePIFuNet import BasePIFuNet
+import torch.nn as nn
+import torch
+
+
+maskout = False
+
+
+class HGPIFuNet(BasePIFuNet):
+ '''
+ HGPIFuNet uses stacked hourglass networks as the image filter.
+ It does the following:
+ 1. Compute the image feature stacks and store them in self.im_feat_list;
+ self.im_feat_list[-1] is the last stack (output stack).
+ 2. Apply the calibration to project query points into the image plane.
+ 3. If training, index into every intermediate stack;
+ if testing, index into the last stack only.
+ 4. Classify each point (occupancy).
+ 5. During training, the error is computed over all stacks.
+ '''
+
+ def __init__(self,
+ cfg,
+ projection_mode='orthogonal',
+ error_term=nn.MSELoss()):
+
+ super(HGPIFuNet, self).__init__(projection_mode=projection_mode,
+ error_term=error_term)
+
+ self.l1_loss = nn.SmoothL1Loss()
+ self.opt = cfg.net
+ self.root = cfg.root
+ self.overfit = cfg.overfit
+
+ channels_IF = self.opt.mlp_dim
+
+ self.use_filter = self.opt.use_filter
+ self.prior_type = self.opt.prior_type
+ self.smpl_feats = self.opt.smpl_feats
+
+ self.smpl_dim = self.opt.smpl_dim
+ self.voxel_dim = self.opt.voxel_dim
+ self.hourglass_dim = self.opt.hourglass_dim
+ self.sdf_clip = cfg.sdf_clip / 100.0
+
+ self.in_geo = [item[0] for item in self.opt.in_geo]
+ self.in_nml = [item[0] for item in self.opt.in_nml]
+
+ self.in_geo_dim = sum([item[1] for item in self.opt.in_geo])
+ self.in_nml_dim = sum([item[1] for item in self.opt.in_nml])
+
+ self.in_total = self.in_geo + self.in_nml
+ self.smpl_feat_dict = None
+ self.smplx_data = SMPLX()
+
+ if self.prior_type == 'icon':
+ if 'image' in self.in_geo:
+ self.channels_filter = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 6, 7, 8]]
+ else:
+ self.channels_filter = [[0, 1, 2], [3, 4, 5]]
+
+ else:
+ if 'image' in self.in_geo:
+ self.channels_filter = [[0, 1, 2, 3, 4, 5, 6, 7, 8]]
+ else:
+ self.channels_filter = [[0, 1, 2, 3, 4, 5]]
+
+ channels_IF[0] = self.hourglass_dim if self.use_filter else len(
+ self.channels_filter[0])
+
+ if self.prior_type == 'icon' and 'vis' not in self.smpl_feats:
+ if self.use_filter:
+ channels_IF[0] += self.hourglass_dim
+ else:
+ channels_IF[0] += len(self.channels_filter[0])
+
+ if self.prior_type == 'icon':
+ channels_IF[0] += self.smpl_dim
+ elif self.prior_type == 'pamir':
+ channels_IF[0] += self.voxel_dim
+ smpl_vertex_code, smpl_face_code, smpl_faces, smpl_tetras = read_smpl_constants(
+ self.smplx_data.tedra_dir)
+ self.voxelization = Voxelization(
+ smpl_vertex_code,
+ smpl_face_code,
+ smpl_faces,
+ smpl_tetras,
+ volume_res=128,
+ sigma=0.05,
+ smooth_kernel_size=7,
+ batch_size=cfg.batch_size,
+ device=torch.device(f"cuda:{cfg.gpus[0]}"))
+ self.ve = VolumeEncoder(3, self.voxel_dim, self.opt.num_stack)
+ else:
+ channels_IF[0] += 1
+
+ self.icon_keys = ["smpl_verts", "smpl_faces", "smpl_vis", "smpl_cmap"]
+ self.pamir_keys = [
+ "voxel_verts", "voxel_faces", "pad_v_num", "pad_f_num"
+ ]
+
+ self.if_regressor = MLP(
+ filter_channels=channels_IF,
+ name='if',
+ res_layers=self.opt.res_layers,
+ norm=self.opt.norm_mlp,
+ last_op=nn.Sigmoid() if not cfg.test_mode else None)
+
+ # network
+ if self.use_filter:
+ if self.opt.gtype == "HGPIFuNet":
+ self.F_filter = HGFilter(self.opt, self.opt.num_stack,
+ len(self.channels_filter[0]))
+ else:
+ print(
+ colored(f"Backbone {self.opt.gtype} is unimplemented",
+ 'green'))
+
+ summary_log = f"{self.prior_type.upper()}:\n" + \
+ f"w/ Global Image Encoder: {self.use_filter}\n" + \
+ f"Image Features used by MLP: {self.in_geo}\n"
+
+ if self.prior_type == "icon":
+ summary_log += f"Geometry Features used by MLP: {self.smpl_feats}\n"
+ summary_log += f"Dim of Image Features (local): 6\n"
+ summary_log += f"Dim of Geometry Features (ICON): {self.smpl_dim}\n"
+ elif self.prior_type == "pamir":
+ summary_log += f"Dim of Image Features (global): {self.hourglass_dim}\n"
+ summary_log += f"Dim of Geometry Features (PaMIR): {self.voxel_dim}\n"
+ else:
+ summary_log += f"Dim of Image Features (global): {self.hourglass_dim}\n"
+ summary_log += f"Dim of Geometry Features (PIFu): 1 (z-value)\n"
+
+ summary_log += f"Dim of MLP's first layer: {channels_IF[0]}\n"
+
+ print(colored(summary_log, "yellow"))
+
+ self.normal_filter = NormalNet(cfg)
+ init_net(self)
+
+ def get_normal(self, in_tensor_dict):
+
+ # insert normal features
+ if (not self.training) and (not self.overfit):
+ # print(colored("infer normal","blue"))
+ with torch.no_grad():
+ feat_lst = []
+ if "image" in self.in_geo:
+ feat_lst.append(
+ in_tensor_dict['image']) # [1, 3, 512, 512]
+ if 'normal_F' in self.in_geo and 'normal_B' in self.in_geo:
+ if 'normal_F' not in in_tensor_dict.keys(
+ ) or 'normal_B' not in in_tensor_dict.keys():
+ (nmlF, nmlB) = self.normal_filter(in_tensor_dict)
+ else:
+ nmlF = in_tensor_dict['normal_F']
+ nmlB = in_tensor_dict['normal_B']
+ feat_lst.append(nmlF) # [1, 3, 512, 512]
+ feat_lst.append(nmlB) # [1, 3, 512, 512]
+ in_filter = torch.cat(feat_lst, dim=1)
+
+ else:
+ in_filter = torch.cat([in_tensor_dict[key] for key in self.in_geo],
+ dim=1)
+
+ return in_filter
+
+ def get_mask(self, in_filter, size=128):
+
+ mask = F.interpolate(in_filter[:, self.channels_filter[0]],
+ size=(size, size),
+ mode="bilinear",
+ align_corners=True).abs().sum(dim=1,
+ keepdim=True) != 0.0
+
+ return mask
+
+ def filter(self, in_tensor_dict, return_inter=False):
+ '''
+ Filter the input images
+ store all intermediate features.
+ :param images: [B, C, H, W] input images
+ '''
+
+ in_filter = self.get_normal(in_tensor_dict)
+
+ features_G = []
+
+ if self.prior_type == 'icon':
+ if self.use_filter:
+ features_F = self.F_filter(in_filter[:,
+ self.channels_filter[0]]
+ ) # [(B,hg_dim,128,128) * 4]
+ features_B = self.F_filter(in_filter[:,
+ self.channels_filter[1]]
+ ) # [(B,hg_dim,128,128) * 4]
+ else:
+ features_F = [in_filter[:, self.channels_filter[0]]]
+ features_B = [in_filter[:, self.channels_filter[1]]]
+ for idx in range(len(features_F)):
+ features_G.append(
+ torch.cat([features_F[idx], features_B[idx]], dim=1))
+ else:
+ if self.use_filter:
+ features_G = self.F_filter(in_filter[:,
+ self.channels_filter[0]])
+ else:
+ features_G = [in_filter[:, self.channels_filter[0]]]
+
+ if self.prior_type == 'icon':
+ self.smpl_feat_dict = {
+ k: in_tensor_dict[k]
+ for k in self.icon_keys
+ }
+ elif self.prior_type == "pamir":
+ self.smpl_feat_dict = {
+ k: in_tensor_dict[k]
+ for k in self.pamir_keys
+ }
+ else:
+ pass
+ # print(colored("use z rather than icon or pamir", "green"))
+
+ # If it is not in training, only produce the last im_feat
+ if not self.training:
+ features_out = [features_G[-1]]
+ else:
+ features_out = features_G
+
+ if maskout:
+ features_out_mask = []
+ for feat in features_out:
+ features_out_mask.append(
+ feat * self.get_mask(in_filter, size=feat.shape[2]))
+ features_out = features_out_mask
+
+ if return_inter:
+ return features_out, in_filter
+ else:
+ return features_out
+
+ def query(self, features, points, calibs, transforms=None, regressor=None):
+
+ xyz = self.projection(points, calibs, transforms)
+
+ (xy, z) = xyz.split([2, 1], dim=1)
+
+ in_cube = (xyz > -1.0) & (xyz < 1.0)
+ in_cube = in_cube.all(dim=1, keepdim=True).detach().float()
+
+ preds_list = []
+
+ if self.prior_type == 'icon':
+
+ # smpl_verts [B, N_vert, 3]
+ # smpl_faces [B, N_face, 3]
+ # points [B, 3, N]
+
+ smpl_sdf, smpl_norm, smpl_cmap, smpl_vis = cal_sdf_batch(
+ self.smpl_feat_dict['smpl_verts'],
+ self.smpl_feat_dict['smpl_faces'],
+ self.smpl_feat_dict['smpl_cmap'],
+ self.smpl_feat_dict['smpl_vis'],
+ xyz.permute(0, 2, 1).contiguous())
+
+ # smpl_sdf [B, N, 1]
+ # smpl_norm [B, N, 3]
+ # smpl_cmap [B, N, 3]
+ # smpl_vis [B, N, 1]
+
+ feat_lst = [smpl_sdf]
+ if 'cmap' in self.smpl_feats:
+ feat_lst.append(smpl_cmap)
+ if 'norm' in self.smpl_feats:
+ feat_lst.append(smpl_norm)
+ if 'vis' in self.smpl_feats:
+ feat_lst.append(smpl_vis)
+
+ smpl_feat = torch.cat(feat_lst, dim=2).permute(0, 2, 1)
+ vol_feats = features
+
+ elif self.prior_type == "pamir":
+
+ voxel_verts = self.smpl_feat_dict[
+ 'voxel_verts'][:, :-self.smpl_feat_dict['pad_v_num'][0], :]
+ voxel_faces = self.smpl_feat_dict[
+ 'voxel_faces'][:, :-self.smpl_feat_dict['pad_f_num'][0], :]
+
+ self.voxelization.update_param(
+ batch_size=voxel_faces.shape[0],
+ smpl_tetra=voxel_faces[0].detach().cpu().numpy())
+ vol = self.voxelization(voxel_verts) # vol ~ [0,1]
+ vol_feats = self.ve(vol, intermediate_output=self.training)
+ else:
+ vol_feats = features
+
+ for im_feat, vol_feat in zip(features, vol_feats):
+
+ # [B, Feat_i + z, N]
+ # normal feature choice by smpl_vis
+ if self.prior_type == 'icon':
+ if 'vis' in self.smpl_feats:
+ point_local_feat = feat_select(self.index(im_feat, xy),
+ smpl_feat[:, [-1], :])
+ if maskout:
+ normal_mask = torch.tile(
+ point_local_feat.sum(dim=1, keepdims=True) == 0.0,
+ (1, smpl_feat.shape[1], 1))
+ normal_mask[:, 1:, :] = False
+ smpl_feat[normal_mask] = -1.0
+ point_feat_list = [point_local_feat, smpl_feat[:, :-1, :]]
+ else:
+ point_local_feat = self.index(im_feat, xy)
+ point_feat_list = [point_local_feat, smpl_feat[:, :, :]]
+
+ elif self.prior_type == 'pamir':
+ # im_feat [B, hg_dim, 128, 128]
+ # vol_feat [B, vol_dim, 32, 32, 32]
+ point_feat_list = [
+ self.index(im_feat, xy),
+ self.index(vol_feat, xyz)
+ ]
+
+ else:
+ point_feat_list = [self.index(im_feat, xy), z]
+
+ point_feat = torch.cat(point_feat_list, 1)
+
+ # out of image plane is always set to 0
+ preds = regressor(point_feat)
+ preds = in_cube * preds
+
+ preds_list.append(preds)
+
+ return preds_list
+
+ def get_error(self, preds_if_list, labels):
+ """calculate the prediction error averaged over all stacks
+ Args:
+ preds_if_list (list): list of torch.tensor [B, 1, N]
+ labels (torch.tensor): [B, 1, N]
+ Returns:
+ torch.tensor: error
+ """
+ error_if = 0
+
+ for pred_id in range(len(preds_if_list)):
+ pred_if = preds_if_list[pred_id]
+ error_if += self.error_term(pred_if, labels)
+
+ error_if /= len(preds_if_list)
+
+ return error_if
+
+ def forward(self, in_tensor_dict):
+ """
+ sample_tensor [B, 3, N]
+ calib_tensor [B, 4, 4]
+ label_tensor [B, 1, N]
+ smpl_feat_tensor [B, 59, N]
+ """
+
+ sample_tensor = in_tensor_dict['sample']
+ calib_tensor = in_tensor_dict['calib']
+ label_tensor = in_tensor_dict['label']
+
+ in_feat = self.filter(in_tensor_dict)
+
+ preds_if_list = self.query(in_feat,
+ sample_tensor,
+ calib_tensor,
+ regressor=self.if_regressor)
+
+ error = self.get_error(preds_if_list, label_tensor)
+
+ return preds_if_list[-1], error
\ No newline at end of file
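
At inference time the intended flow is filter-once, query-many: `filter()` produces the image feature stacks, and `query()` projects 3D points with the calibration, gathers per-point image and SMPL features, and runs the MLP. A heavily simplified sketch; `net` and `in_tensor_dict` are assumed to exist (an `HGPIFuNet` built from a full config, and a dict already holding the image, normal, calibration and SMPL tensors):

```python
import torch

with torch.no_grad():
    feats = net.filter(in_tensor_dict)             # list of image feature stacks
    points = torch.rand(1, 3, 8000) * 2.0 - 1.0    # query points in [-1, 1]^3
    preds = net.query(feats, points, in_tensor_dict["calib"],
                      regressor=net.if_regressor)
    occupancy = preds[-1]                          # [B, 1, N], in [0, 1] via Sigmoid
```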
diff --git a/lib/net/MLP.py b/lib/net/MLP.py
new file mode 100644
index 0000000000000000000000000000000000000000..de9628f7ab35de92e9241cf09a12f33aa96b82a7
--- /dev/null
+++ b/lib/net/MLP.py
@@ -0,0 +1,72 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+
+import torch
+import torch.nn as nn
+import pytorch_lightning as pl
+
+
+class MLP(pl.LightningModule):
+ def __init__(self,
+ filter_channels,
+ name=None,
+ res_layers=[],
+ norm='group',
+ last_op=None):
+
+ super(MLP, self).__init__()
+
+ self.filters = nn.ModuleList()
+ self.norms = nn.ModuleList()
+ self.res_layers = res_layers
+ self.norm = norm
+ self.last_op = last_op
+ self.name = name
+ self.activate = nn.LeakyReLU(inplace=True)
+
+ for l in range(0, len(filter_channels) - 1):
+ if l in self.res_layers:
+ self.filters.append(
+ nn.Conv1d(filter_channels[l] + filter_channels[0],
+ filter_channels[l + 1], 1))
+ else:
+ self.filters.append(
+ nn.Conv1d(filter_channels[l], filter_channels[l + 1], 1))
+
+ if l != len(filter_channels) - 2:
+ if norm == 'group':
+ self.norms.append(nn.GroupNorm(32, filter_channels[l + 1]))
+ elif norm == 'batch':
+ self.norms.append(nn.BatchNorm1d(filter_channels[l + 1]))
+ elif norm == 'instance':
+ self.norms.append(nn.InstanceNorm1d(filter_channels[l +
+ 1]))
+ elif norm == 'weight':
+ self.filters[l] = nn.utils.weight_norm(self.filters[l],
+ name='weight')
+ # print(self.filters[l].weight_g.size(),
+ # self.filters[l].weight_v.size())
+
+ def forward(self, feature):
+ '''
+ feature may include multiple view inputs
+ args:
+ feature: [B, C_in, N]
+ return:
+ [B, C_out, N] prediction
+ '''
+ y = feature
+ tmpy = feature
+
+ for i, f in enumerate(self.filters):
+
+ y = f(y if i not in self.res_layers else torch.cat([y, tmpy], 1))
+ if i != len(self.filters) - 1:
+ if self.norm not in ['batch', 'group', 'instance']:
+ y = self.activate(y)
+ else:
+ y = self.activate(self.norms[i](y))
+
+ if self.last_op is not None:
+ y = self.last_op(y)
+
+ return y
\ No newline at end of file
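
The MLP is applied point-wise via 1x1 `Conv1d` layers; layer indices listed in `res_layers` get the original input features concatenated back in (a skip connection). A sketch with illustrative channel sizes, not the project's configured `mlp_dim`:

```python
import torch
from lib.net.MLP import MLP

mlp = MLP(filter_channels=[13, 512, 256, 128, 1],
          res_layers=[2], norm="group", last_op=torch.nn.Sigmoid())
feat = torch.randn(4, 13, 1000)     # [B, C_in, N] per-point features
print(mlp(feat).shape)              # torch.Size([4, 1, 1000])
```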
diff --git a/lib/net/NormalNet.py b/lib/net/NormalNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..022cde1050fe29d007ab1a5d8913a74d1af81e38
--- /dev/null
+++ b/lib/net/NormalNet.py
@@ -0,0 +1,121 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from lib.net.FBNet import define_G
+from lib.net.net_util import init_net, VGGLoss
+from lib.net.HGFilters import *
+from lib.net.BasePIFuNet import BasePIFuNet
+import torch
+import torch.nn as nn
+
+
+class NormalNet(BasePIFuNet):
+ '''
+ NormalNet predicts front and back normal maps.
+ It uses two image-to-image translation generators (netF and netB, built via
+ define_G) applied to the input channels listed in cfg.net.in_nml, and masks
+ the predictions with the input image silhouette.
+ '''
+
+ def __init__(self, cfg, error_term=nn.SmoothL1Loss()):
+
+ super(NormalNet, self).__init__(error_term=error_term)
+
+ self.l1_loss = nn.SmoothL1Loss()
+
+ self.opt = cfg.net
+
+ if self.training:
+ self.vgg_loss = [VGGLoss()]
+
+ self.in_nmlF = [
+ item[0] for item in self.opt.in_nml
+ if '_F' in item[0] or item[0] == 'image'
+ ]
+ self.in_nmlB = [
+ item[0] for item in self.opt.in_nml
+ if '_B' in item[0] or item[0] == 'image'
+ ]
+ self.in_nmlF_dim = sum([
+ item[1] for item in self.opt.in_nml
+ if '_F' in item[0] or item[0] == 'image'
+ ])
+ self.in_nmlB_dim = sum([
+ item[1] for item in self.opt.in_nml
+ if '_B' in item[0] or item[0] == 'image'
+ ])
+
+ self.netF = define_G(self.in_nmlF_dim, 3, 64, "global", 4, 9, 1, 3,
+ "instance")
+ self.netB = define_G(self.in_nmlB_dim, 3, 64, "global", 4, 9, 1, 3,
+ "instance")
+
+ init_net(self)
+
+ def forward(self, in_tensor):
+
+ inF_list = []
+ inB_list = []
+
+ for name in self.in_nmlF:
+ inF_list.append(in_tensor[name])
+ for name in self.in_nmlB:
+ inB_list.append(in_tensor[name])
+
+ nmlF = self.netF(torch.cat(inF_list, dim=1))
+ nmlB = self.netB(torch.cat(inB_list, dim=1))
+
+ # ||normal|| == 1
+ nmlF = nmlF / torch.norm(nmlF, dim=1, keepdim=True)
+ nmlB = nmlB / torch.norm(nmlB, dim=1, keepdim=True)
+
+ # output: float_arr [-1,1] with [B, C, H, W]
+
+ mask = (in_tensor['image'].abs().sum(dim=1, keepdim=True) !=
+ 0.0).detach().float()
+
+ nmlF = nmlF * mask
+ nmlB = nmlB * mask
+
+ return nmlF, nmlB
+
+ def get_norm_error(self, prd_F, prd_B, tgt):
+ """calculate the normal map loss
+ Args:
+ prd_F (torch.tensor): [B, 3, 512, 512] predicted front normals
+ prd_B (torch.tensor): [B, 3, 512, 512] predicted back normals
+ tgt (dict): ground-truth normals under 'normal_F' and 'normal_B'
+ """
+
+ tgt_F, tgt_B = tgt['normal_F'], tgt['normal_B']
+
+ l1_F_loss = self.l1_loss(prd_F, tgt_F)
+ l1_B_loss = self.l1_loss(prd_B, tgt_B)
+
+ with torch.no_grad():
+ vgg_F_loss = self.vgg_loss[0](prd_F, tgt_F)
+ vgg_B_loss = self.vgg_loss[0](prd_B, tgt_B)
+
+ total_loss = [
+ 5.0 * l1_F_loss + vgg_F_loss, 5.0 * l1_B_loss + vgg_B_loss
+ ]
+
+ return total_loss
\ No newline at end of file
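
The per-pixel normalization above needs `keepdim=True` so the `[B, 1, H, W]` norm broadcasts over the channel dimension; a tiny stand-alone illustration:

```python
import torch

nml = torch.randn(2, 3, 4, 4)
unit = nml / torch.norm(nml, dim=1, keepdim=True)   # [2, 3, 4, 4] / [2, 1, 4, 4]
print(torch.norm(unit, dim=1).mean())               # ~1.0
```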
diff --git a/lib/net/local_affine.py b/lib/net/local_affine.py
new file mode 100644
index 0000000000000000000000000000000000000000..5399842502c8b1adf8f60cf53e38b6daa7f35068
--- /dev/null
+++ b/lib/net/local_affine.py
@@ -0,0 +1,57 @@
+# Copyright 2021 by Haozhe Wu, Tsinghua University, Department of Computer Science and Technology.
+# All rights reserved.
+# This file is part of the pytorch-nicp,
+# and is released under the "MIT License Agreement". Please see the LICENSE
+# file that should have been included as part of this package.
+
+import torch
+import torch.nn as nn
+import torch.sparse as sp
+
+# reference: https://github.com/wuhaozhe/pytorch-nicp
+class LocalAffine(nn.Module):
+ def __init__(self, num_points, batch_size=1, edges=None):
+ '''
+ specify the number of points, the number of points should be constant across the batch
+ and the edges torch.Longtensor() with shape N * 2
+ the local affine operator supports batch operation
+ batch size must be constant
+ add additional pooling on top of w matrix
+ '''
+ super(LocalAffine, self).__init__()
+ self.A = nn.Parameter(torch.eye(3).unsqueeze(
+ 0).unsqueeze(0).repeat(batch_size, num_points, 1, 1))
+ self.b = nn.Parameter(torch.zeros(3).unsqueeze(0).unsqueeze(
+ 0).unsqueeze(3).repeat(batch_size, num_points, 1, 1))
+ self.edges = edges
+ self.num_points = num_points
+
+ def stiffness(self):
+ '''
+ calculate the stiffness of the local affine transformation
+ (the Frobenius norm has an infinite gradient when w is the zero matrix)
+ '''
+ if self.edges is None:
+ raise Exception("edges cannot be None when computing stiffness")
+ idx1 = self.edges[:, 0]
+ idx2 = self.edges[:, 1]
+ affine_weight = torch.cat((self.A, self.b), dim=3)
+ w1 = torch.index_select(affine_weight, dim=1, index=idx1)
+ w2 = torch.index_select(affine_weight, dim=1, index=idx2)
+ w_diff = (w1 - w2) ** 2
+ w_rigid = (torch.linalg.det(self.A) - 1.0) ** 2
+ return w_diff, w_rigid
+
+ def forward(self, x, return_stiff=False):
+ '''
+ x should have shape of B * N * 3
+ '''
+ x = x.unsqueeze(3)
+ out_x = torch.matmul(self.A, x)
+ out_x = out_x + self.b
+ out_x.squeeze_(3)
+ if return_stiff:
+ stiffness, rigid = self.stiffness()
+ return out_x, stiffness, rigid
+ else:
+ return out_x
\ No newline at end of file
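
`LocalAffine` learns one 3x3 matrix `A` and translation `b` per vertex; `stiffness()` penalizes differences between the affine parameters of vertices joined by an edge, plus a deviation-from-rigidity term on `det(A)`. A toy-sized sketch (vertex and edge counts are illustrative):

```python
import torch
from lib.net.local_affine import LocalAffine

verts = torch.rand(1, 5, 3)                              # B=1, N=5 vertices
edges = torch.tensor([[0, 1], [1, 2], [2, 3], [3, 4]])   # 4 mesh edges
model = LocalAffine(num_points=5, batch_size=1, edges=edges)
out, stiff, rigid = model(verts, return_stiff=True)
print(out.shape, stiff.shape, rigid.shape)
# torch.Size([1, 5, 3]) torch.Size([1, 4, 3, 4]) torch.Size([1, 5])
```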
diff --git a/lib/net/net_util.py b/lib/net/net_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b794eaa3995d32e098299ea3a2f7a4893c8d3d4
--- /dev/null
+++ b/lib/net/net_util.py
@@ -0,0 +1,323 @@
+
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from torchvision import models
+import torch
+from torch.nn import init
+import torch.nn as nn
+import torch.nn.functional as F
+import functools
+from torch.autograd import grad
+
+
+def gradient(inputs, outputs):
+ d_points = torch.ones_like(outputs,
+ requires_grad=False,
+ device=outputs.device)
+ points_grad = grad(outputs=outputs,
+ inputs=inputs,
+ grad_outputs=d_points,
+ create_graph=True,
+ retain_graph=True,
+ only_inputs=True,
+ allow_unused=True)[0]
+ return points_grad
+
+
+# def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
+# "3x3 convolution with padding"
+# return nn.Conv2d(in_planes, out_planes, kernel_size=3,
+# stride=strd, padding=padding, bias=bias)
+
+
+def conv3x3(in_planes,
+ out_planes,
+ kernel=3,
+ strd=1,
+ dilation=1,
+ padding=1,
+ bias=False):
+ "3x3 convolution with padding"
+ return nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=kernel,
+ dilation=dilation,
+ stride=strd,
+ padding=padding,
+ bias=bias)
+
+
+def conv1x1(in_planes, out_planes, stride=1):
+ """1x1 convolution"""
+ return nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=1,
+ stride=stride,
+ bias=False)
+
+
+def init_weights(net, init_type='normal', init_gain=0.02):
+ """Initialize network weights.
+ Parameters:
+ net (network) -- network to be initialized
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+ init_gain (float) -- scaling factor for normal, xavier and orthogonal.
+ We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
+ work better for some applications. Feel free to try yourself.
+ """
+ def init_func(m): # define the initialization function
+ classname = m.__class__.__name__
+ if hasattr(m, 'weight') and (classname.find('Conv') != -1
+ or classname.find('Linear') != -1):
+ if init_type == 'normal':
+ init.normal_(m.weight.data, 0.0, init_gain)
+ elif init_type == 'xavier':
+ init.xavier_normal_(m.weight.data, gain=init_gain)
+ elif init_type == 'kaiming':
+ init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
+ elif init_type == 'orthogonal':
+ init.orthogonal_(m.weight.data, gain=init_gain)
+ else:
+ raise NotImplementedError(
+ 'initialization method [%s] is not implemented' %
+ init_type)
+ if hasattr(m, 'bias') and m.bias is not None:
+ init.constant_(m.bias.data, 0.0)
+ elif classname.find(
+ 'BatchNorm2d'
+ ) != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
+ init.normal_(m.weight.data, 1.0, init_gain)
+ init.constant_(m.bias.data, 0.0)
+
+ # print('initialize network with %s' % init_type)
+ net.apply(init_func) # apply the initialization function
+
+
+def init_net(net, init_type='xavier', init_gain=0.02, gpu_ids=[]):
+ """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
+ Parameters:
+ net (network) -- the network to be initialized
+ init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
+ gain (float) -- scaling factor for normal, xavier and orthogonal.
+ gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
+ Return an initialized network.
+ """
+ if len(gpu_ids) > 0:
+ assert (torch.cuda.is_available())
+ net = torch.nn.DataParallel(net) # multi-GPUs
+ init_weights(net, init_type, init_gain=init_gain)
+ return net
+
+
+def imageSpaceRotation(xy, rot):
+ '''
+ args:
+ xy: (B, 2, N) input
+ rot: (B, 2) x,y axis rotation angles
+ rotation center will be always image center (other rotation center can be represented by additional z translation)
+ '''
+ disp = rot.unsqueeze(2).sin().expand_as(xy)
+ return (disp * xy).sum(dim=1)
+
+
+def cal_gradient_penalty(netD,
+ real_data,
+ fake_data,
+ device,
+ type='mixed',
+ constant=1.0,
+ lambda_gp=10.0):
+ """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
+ Arguments:
+ netD (network) -- discriminator network
+ real_data (tensor array) -- real images
+ fake_data (tensor array) -- generated images from the generator
+ device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
+ type (str) -- if we mix real and fake data or not [real | fake | mixed].
+ constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
+ lambda_gp (float) -- weight for this loss
+ Returns the gradient penalty loss
+ """
+ if lambda_gp > 0.0:
+ # either use real images, fake images, or a linear interpolation of two.
+ if type == 'real':
+ interpolatesv = real_data
+ elif type == 'fake':
+ interpolatesv = fake_data
+ elif type == 'mixed':
+ alpha = torch.rand(real_data.shape[0], 1)
+ alpha = alpha.expand(
+ real_data.shape[0],
+ real_data.nelement() //
+ real_data.shape[0]).contiguous().view(*real_data.shape)
+ alpha = alpha.to(device)
+ interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
+ else:
+ raise NotImplementedError('{} not implemented'.format(type))
+ interpolatesv.requires_grad_(True)
+ disc_interpolates = netD(interpolatesv)
+ gradients = torch.autograd.grad(
+ outputs=disc_interpolates,
+ inputs=interpolatesv,
+ grad_outputs=torch.ones(disc_interpolates.size()).to(device),
+ create_graph=True,
+ retain_graph=True,
+ only_inputs=True)
+ gradients = gradients[0].view(real_data.size(0), -1) # flat the data
+ gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) **
+ 2).mean() * lambda_gp # added eps
+ return gradient_penalty, gradients
+ else:
+ return 0.0, None
+
+
+def get_norm_layer(norm_type='instance'):
+ """Return a normalization layer
+ Parameters:
+ norm_type (str) -- the name of the normalization layer: batch | instance | none
+ For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+ For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+ """
+ if norm_type == 'batch':
+ norm_layer = functools.partial(nn.BatchNorm2d,
+ affine=True,
+ track_running_stats=True)
+ elif norm_type == 'instance':
+ norm_layer = functools.partial(nn.InstanceNorm2d,
+ affine=False,
+ track_running_stats=False)
+ elif norm_type == 'group':
+ norm_layer = functools.partial(nn.GroupNorm, 32)
+ elif norm_type == 'none':
+ norm_layer = None
+ else:
+ raise NotImplementedError('normalization layer [%s] is not found' %
+ norm_type)
+ return norm_layer
+
+
+class Flatten(nn.Module):
+ def forward(self, input):
+ return input.view(input.size(0), -1)
+
+
+class ConvBlock(nn.Module):
+ def __init__(self, in_planes, out_planes, opt):
+ super(ConvBlock, self).__init__()
+ [k, s, d, p] = opt.conv3x3
+ self.conv1 = conv3x3(in_planes, int(out_planes / 2), k, s, d, p)
+ self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4), k, s, d,
+ p)
+ self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4), k, s, d,
+ p)
+
+ if opt.norm == 'batch':
+ self.bn1 = nn.BatchNorm2d(in_planes)
+ self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
+ self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
+ self.bn4 = nn.BatchNorm2d(in_planes)
+ elif opt.norm == 'group':
+ self.bn1 = nn.GroupNorm(32, in_planes)
+ self.bn2 = nn.GroupNorm(32, int(out_planes / 2))
+ self.bn3 = nn.GroupNorm(32, int(out_planes / 4))
+ self.bn4 = nn.GroupNorm(32, in_planes)
+
+ if in_planes != out_planes:
+ self.downsample = nn.Sequential(
+ self.bn4,
+ nn.ReLU(True),
+ nn.Conv2d(in_planes,
+ out_planes,
+ kernel_size=1,
+ stride=1,
+ bias=False),
+ )
+ else:
+ self.downsample = None
+
+ def forward(self, x):
+ residual = x
+
+ out1 = self.bn1(x)
+ out1 = F.relu(out1, True)
+ out1 = self.conv1(out1)
+
+ out2 = self.bn2(out1)
+ out2 = F.relu(out2, True)
+ out2 = self.conv2(out2)
+
+ out3 = self.bn3(out2)
+ out3 = F.relu(out3, True)
+ out3 = self.conv3(out3)
+
+ out3 = torch.cat((out1, out2, out3), 1)
+
+ if self.downsample is not None:
+ residual = self.downsample(residual)
+
+ out3 += residual
+
+ return out3
+
+
+class Vgg19(torch.nn.Module):
+ def __init__(self, requires_grad=False):
+ super(Vgg19, self).__init__()
+ vgg_pretrained_features = models.vgg19(pretrained=True).features
+ self.slice1 = torch.nn.Sequential()
+ self.slice2 = torch.nn.Sequential()
+ self.slice3 = torch.nn.Sequential()
+ self.slice4 = torch.nn.Sequential()
+ self.slice5 = torch.nn.Sequential()
+ for x in range(2):
+ self.slice1.add_module(str(x), vgg_pretrained_features[x])
+ for x in range(2, 7):
+ self.slice2.add_module(str(x), vgg_pretrained_features[x])
+ for x in range(7, 12):
+ self.slice3.add_module(str(x), vgg_pretrained_features[x])
+ for x in range(12, 21):
+ self.slice4.add_module(str(x), vgg_pretrained_features[x])
+ for x in range(21, 30):
+ self.slice5.add_module(str(x), vgg_pretrained_features[x])
+ if not requires_grad:
+ for param in self.parameters():
+ param.requires_grad = False
+
+ def forward(self, X):
+ h_relu1 = self.slice1(X)
+ h_relu2 = self.slice2(h_relu1)
+ h_relu3 = self.slice3(h_relu2)
+ h_relu4 = self.slice4(h_relu3)
+ h_relu5 = self.slice5(h_relu4)
+ out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
+ return out
+
+
+class VGGLoss(nn.Module):
+ def __init__(self):
+ super(VGGLoss, self).__init__()
+ self.vgg = Vgg19()
+ self.criterion = nn.L1Loss()
+ self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
+
+ def forward(self, x, y):
+ x_vgg, y_vgg = self.vgg(x), self.vgg(y)
+ loss = 0
+ for i in range(len(x_vgg)):
+ loss += self.weights[i] * self.criterion(x_vgg[i],
+ y_vgg[i].detach())
+ return loss
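
`VGGLoss` compares the two images at five VGG19 feature depths, with deeper layers weighted more heavily. A short sketch; instantiating it downloads torchvision's pretrained VGG19 weights on first use:

```python
import torch
from lib.net.net_util import VGGLoss

criterion = VGGLoss()
pred = torch.rand(1, 3, 256, 256)
target = torch.rand(1, 3, 256, 256)
print(criterion(pred, target))      # scalar perceptual loss
```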
diff --git a/lib/net/voxelize.py b/lib/net/voxelize.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf2936bd646693a2af1bffac766020057119f6d5
--- /dev/null
+++ b/lib/net/voxelize.py
@@ -0,0 +1,184 @@
+from __future__ import division, print_function
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from torch.autograd import Function
+
+import voxelize_cuda
+
+
+class VoxelizationFunction(Function):
+ """
+ Definition of differentiable voxelization function
+ Currently implemented only for cuda Tensors
+ """
+ @staticmethod
+ def forward(ctx, smpl_vertices, smpl_face_center, smpl_face_normal,
+ smpl_vertex_code, smpl_face_code, smpl_tetrahedrons,
+ volume_res, sigma, smooth_kernel_size):
+ """
+ forward pass
+ Output format: (batch_size, z_dims, y_dims, x_dims, channel_num)
+ """
+ assert (smpl_vertices.size()[1] == smpl_vertex_code.size()[1])
+ assert (smpl_face_center.size()[1] == smpl_face_normal.size()[1])
+ assert (smpl_face_center.size()[1] == smpl_face_code.size()[1])
+ ctx.batch_size = smpl_vertices.size()[0]
+ ctx.volume_res = volume_res
+ ctx.sigma = sigma
+ ctx.smooth_kernel_size = smooth_kernel_size
+ ctx.smpl_vertex_num = smpl_vertices.size()[1]
+ ctx.device = smpl_vertices.device
+
+ smpl_vertices = smpl_vertices.contiguous()
+ smpl_face_center = smpl_face_center.contiguous()
+ smpl_face_normal = smpl_face_normal.contiguous()
+ smpl_vertex_code = smpl_vertex_code.contiguous()
+ smpl_face_code = smpl_face_code.contiguous()
+ smpl_tetrahedrons = smpl_tetrahedrons.contiguous()
+
+ occ_volume = torch.cuda.FloatTensor(ctx.batch_size, ctx.volume_res,
+ ctx.volume_res,
+ ctx.volume_res).fill_(0.0)
+ semantic_volume = torch.cuda.FloatTensor(ctx.batch_size,
+ ctx.volume_res,
+ ctx.volume_res,
+ ctx.volume_res, 3).fill_(0.0)
+ weight_sum_volume = torch.cuda.FloatTensor(ctx.batch_size,
+ ctx.volume_res,
+ ctx.volume_res,
+ ctx.volume_res).fill_(1e-3)
+
+ # occ_volume [B, volume_res, volume_res, volume_res]
+ # semantic_volume [B, volume_res, volume_res, volume_res, 3]
+ # weight_sum_volume [B, volume_res, volume_res, volume_res]
+
+ occ_volume, semantic_volume, weight_sum_volume = voxelize_cuda.forward_semantic_voxelization(
+ smpl_vertices, smpl_vertex_code, smpl_tetrahedrons, occ_volume,
+ semantic_volume, weight_sum_volume, sigma)
+
+ return semantic_volume
+
+
+class Voxelization(nn.Module):
+ """
+ Wrapper around the autograd function VoxelizationFunction
+ """
+
+ def __init__(self, smpl_vertex_code, smpl_face_code, smpl_face_indices,
+ smpl_tetraderon_indices, volume_res, sigma,
+ smooth_kernel_size, batch_size, device):
+ super(Voxelization, self).__init__()
+ assert (len(smpl_face_indices.shape) == 2)
+ assert (len(smpl_tetraderon_indices.shape) == 2)
+ assert (smpl_face_indices.shape[1] == 3)
+ assert (smpl_tetraderon_indices.shape[1] == 4)
+
+ self.volume_res = volume_res
+ self.sigma = sigma
+ self.smooth_kernel_size = smooth_kernel_size
+ self.batch_size = batch_size
+ self.device = device
+
+ self.smpl_vertex_code = smpl_vertex_code
+ self.smpl_face_code = smpl_face_code
+ self.smpl_face_indices = smpl_face_indices
+ self.smpl_tetraderon_indices = smpl_tetraderon_indices
+
+ def update_param(self, batch_size, smpl_tetra):
+
+ self.batch_size = batch_size
+ self.smpl_tetraderon_indices = smpl_tetra
+
+ smpl_vertex_code_batch = np.tile(self.smpl_vertex_code,
+ (self.batch_size, 1, 1))
+ smpl_face_code_batch = np.tile(self.smpl_face_code,
+ (self.batch_size, 1, 1))
+ smpl_face_indices_batch = np.tile(self.smpl_face_indices,
+ (self.batch_size, 1, 1))
+ smpl_tetraderon_indices_batch = np.tile(self.smpl_tetraderon_indices,
+ (self.batch_size, 1, 1))
+
+ smpl_vertex_code_batch = torch.from_numpy(
+ smpl_vertex_code_batch).contiguous().to(self.device)
+ smpl_face_code_batch = torch.from_numpy(
+ smpl_face_code_batch).contiguous().to(self.device)
+ smpl_face_indices_batch = torch.from_numpy(
+ smpl_face_indices_batch).contiguous().to(self.device)
+ smpl_tetraderon_indices_batch = torch.from_numpy(
+ smpl_tetraderon_indices_batch).contiguous().to(self.device)
+
+ self.register_buffer('smpl_vertex_code_batch', smpl_vertex_code_batch)
+ self.register_buffer('smpl_face_code_batch', smpl_face_code_batch)
+ self.register_buffer('smpl_face_indices_batch',
+ smpl_face_indices_batch)
+ self.register_buffer('smpl_tetraderon_indices_batch',
+ smpl_tetraderon_indices_batch)
+
+ def forward(self, smpl_vertices):
+ """
+ Generate semantic volumes from SMPL vertices
+ """
+ assert (smpl_vertices.size()[0] == self.batch_size)
+ self.check_input(smpl_vertices)
+ smpl_faces = self.vertices_to_faces(smpl_vertices)
+ smpl_tetrahedrons = self.vertices_to_tetrahedrons(smpl_vertices)
+ smpl_face_center = self.calc_face_centers(smpl_faces)
+ smpl_face_normal = self.calc_face_normals(smpl_faces)
+ smpl_surface_vertex_num = self.smpl_vertex_code_batch.size()[1]
+ smpl_vertices_surface = smpl_vertices[:, :smpl_surface_vertex_num, :]
+ vol = VoxelizationFunction.apply(smpl_vertices_surface,
+ smpl_face_center, smpl_face_normal,
+ self.smpl_vertex_code_batch,
+ self.smpl_face_code_batch,
+ smpl_tetrahedrons, self.volume_res,
+ self.sigma, self.smooth_kernel_size)
+ return vol.permute((0, 4, 1, 2, 3)) # (bzyxc --> bcdhw)
+
+ def vertices_to_faces(self, vertices):
+ assert (vertices.ndimension() == 3)
+ bs, nv = vertices.shape[:2]
+ device = vertices.device
+ face = self.smpl_face_indices_batch + (
+ torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
+ vertices_ = vertices.reshape((bs * nv, 3))
+ return vertices_[face.long()]
+
+ def vertices_to_tetrahedrons(self, vertices):
+ assert (vertices.ndimension() == 3)
+ bs, nv = vertices.shape[:2]
+ device = vertices.device
+ tets = self.smpl_tetraderon_indices_batch + (
+ torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
+ vertices_ = vertices.reshape((bs * nv, 3))
+ return vertices_[tets.long()]
+
+ def calc_face_centers(self, face_verts):
+ assert len(face_verts.shape) == 4
+ assert face_verts.shape[2] == 3
+ assert face_verts.shape[3] == 3
+ bs, nf = face_verts.shape[:2]
+ face_centers = (face_verts[:, :, 0, :] + face_verts[:, :, 1, :] +
+ face_verts[:, :, 2, :]) / 3.0
+ face_centers = face_centers.reshape((bs, nf, 3))
+ return face_centers
+
+ def calc_face_normals(self, face_verts):
+ assert len(face_verts.shape) == 4
+ assert face_verts.shape[2] == 3
+ assert face_verts.shape[3] == 3
+ bs, nf = face_verts.shape[:2]
+ face_verts = face_verts.reshape((bs * nf, 3, 3))
+ v10 = face_verts[:, 0] - face_verts[:, 1]
+ v12 = face_verts[:, 2] - face_verts[:, 1]
+ normals = F.normalize(torch.cross(v10, v12), eps=1e-5)
+ normals = normals.reshape((bs, nf, 3))
+ return normals
+
+ def check_input(self, x):
+ if not x.is_cuda:
+ raise TypeError('Voxelization module supports only cuda tensors')
+ if x.type() != 'torch.cuda.FloatTensor':
+ raise TypeError(
+ 'Voxelization module supports only float32 tensors')
\ No newline at end of file
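
The voxelization kernel itself needs the compiled `voxelize_cuda` extension and a CUDA device, but the CPU-side geometry helpers can be exercised in isolation. A sketch with dummy SMPL constants, assuming the extension is installed so the module import succeeds:

```python
import numpy as np
import torch
from lib.net.voxelize import Voxelization

voxelizer = Voxelization(np.zeros((1, 3)), np.zeros((1, 3)),
                         np.zeros((1, 3), dtype=np.int32),   # dummy face indices
                         np.zeros((1, 4), dtype=np.int32),   # dummy tetra indices
                         volume_res=128, sigma=0.05, smooth_kernel_size=7,
                         batch_size=1, device=torch.device("cpu"))
tri = torch.tensor([[[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]]])  # [B, F, 3, 3]
print(voxelizer.calc_face_centers(tri))   # tensor([[[0.3333, 0.3333, 0.0000]]])
print(voxelizer.calc_face_normals(tri))   # tensor([[[ 0.,  0., -1.]]])
```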
diff --git a/lib/smplx/.gitignore b/lib/smplx/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f1bf708c5b86d8e13496eba43dae45dcfec7bcc8
--- /dev/null
+++ b/lib/smplx/.gitignore
@@ -0,0 +1,114 @@
+#### joe made this: http://goel.io/joe
+
+#####=== Python ===#####
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+models/
+output/
+outputs/
+transfer_data/
+torch-trust-ncg/
+build/
\ No newline at end of file
diff --git a/lib /smplx / __init__.py b/lib /smplx / __init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bd031b5cd03d61226709fc324b66b8403e6f08b
--- /dev/null
+++ b/lib /smplx / __init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from .body_models import (
+ create,
+ SMPL,
+ SMPLH,
+ SMPLX,
+ MANO,
+ FLAME,
+ build_layer,
+ SMPLLayer,
+ SMPLHLayer,
+ SMPLXLayer,
+ MANOLayer,
+ FLAMELayer,
+)
\ No newline at end of file
diff --git a/lib /smplx / vertex_ids.py b/lib /smplx / vertex_ids.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c77e31f290bad38c2815208c645e0211be09df2
--- /dev/null
+++ b/lib /smplx / vertex_ids.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import division
+
+# Joint name to vertex mapping. SMPL/SMPL-H/SMPL-X vertices that correspond to
+# MSCOCO and OpenPose joints
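+# Example (hypothetical usage): with SMPL-X vertices of shape (B, 10475, 3),
+# the nose keypoint can be read as vertices[:, vertex_ids['smplx']['nose'], :].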
+vertex_ids = {
+ 'smplh': {
+ 'nose': 332,
+ 'reye': 6260,
+ 'leye': 2800,
+ 'rear': 4071,
+ 'lear': 583,
+ 'rthumb': 6191,
+ 'rindex': 5782,
+ 'rmiddle': 5905,
+ 'rring': 6016,
+ 'rpinky': 6133,
+ 'lthumb': 2746,
+ 'lindex': 2319,
+ 'lmiddle': 2445,
+ 'lring': 2556,
+ 'lpinky': 2673,
+ 'LBigToe': 3216,
+ 'LSmallToe': 3226,
+ 'LHeel': 3387,
+ 'RBigToe': 6617,
+ 'RSmallToe': 6624,
+ 'RHeel': 6787
+ },
+ 'smplx': {
+ 'nose': 9120,
+ 'reye': 9929,
+ 'leye': 9448,
+ 'rear': 616,
+ 'lear': 6,
+ 'rthumb': 8079,
+ 'rindex': 7669,
+ 'rmiddle': 7794,
+ 'rring': 7905,
+ 'rpinky': 8022,
+ 'lthumb': 5361,
+ 'lindex': 4933,
+ 'lmiddle': 5058,
+ 'lring': 5169,
+ 'lpinky': 5286,
+ 'LBigToe': 5770,
+ 'LSmallToe': 5780,
+ 'LHeel': 8846,
+ 'RBigToe': 8463,
+ 'RSmallToe': 8474,
+ 'RHeel': 8635
+ },
+ 'mano': {
+ 'thumb': 744,
+ 'index': 320,
+ 'middle': 443,
+ 'ring': 554,
+ 'pinky': 671,
+ }
+}
\ No newline at end of file
diff --git a/lib /smplx /README.md b/lib /smplx /README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ef5d84a7abdab40b9a2f7602f9111d2c40288411
--- /dev/null
+++ b/lib /smplx /README.md
@@ -0,0 +1,32 @@
+
+## Citation
+Depending on which model you load for your project (SMPL-X, SMPL+H, or SMPL), please cite the most relevant work below, listed in the same order:
+
+@inproceedings{SMPL-X:2019,
+ title = {Expressive Body Capture: 3D Hands, Face, and Body from a Single Image},
+ author = {Pavlakos, Georgios and Choutas, Vasileios and Ghorbani, Nima and Bolkart, Timo and Osman, Ahmed A. A. and Tzionas, Dimitrios and Black, Michael J.},
+ booktitle = {Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR)},
+ year = {2019}
+}
+@article{MANO:SIGGRAPHASIA:2017,
+ title = {Embodied Hands: Modeling and Capturing Hands and Bodies Together},
+ author = {Romero, Javier and Tzionas, Dimitrios and Black, Michael J.},
+ journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)},
+ volume = {36},
+ number = {6},
+ series = {245:1--245:17},
+ month = nov,
+ year = {2017},
+ month_numeric = {11}
+ }
+@article{SMPL:2015,
+ author = {Loper, Matthew and Mahmood, Naureen and Romero, Javier and Pons-Moll, Gerard and Black, Michael J.},
+ title = {{SMPL}: A Skinned Multi-Person Linear Model},
+ journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH Asia)},
+ month = oct,
+ number = {6},
+ pages = {248:1--248:16},
+ publisher = {ACM},
+ volume = {34},
+ year = {2015}
+}
diff --git a/lib /smplx /body_models.py b/lib /smplx /body_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0896a7368ce7368d500d7333585fabe00193cc8
--- /dev/null
+++ b/lib /smplx /body_models.py
@@ -0,0 +1,2392 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from typing import Optional, Dict, Union
+import os
+import os.path as osp
+import pickle
+
+import numpy as np
+from termcolor import colored
+
+import torch
+import torch.nn as nn
+from collections import namedtuple
+from huggingface_hub import cached_download
+
+import logging
+logging.getLogger("smplx").setLevel(logging.ERROR)
+
+from .lbs import (
+ lbs, vertices2landmarks, find_dynamic_lmk_idx_and_bcoords)
+
+from .vertex_ids import vertex_ids as VERTEX_IDS
+from .utils import (
+ Struct, to_np, to_tensor, Tensor, Array,
+ SMPLOutput,
+ SMPLHOutput,
+ SMPLXOutput,
+ MANOOutput,
+ FLAMEOutput,
+ find_joint_kin_chain)
+from .vertex_joint_selector import VertexJointSelector
+
+ModelOutput = namedtuple('ModelOutput',
+ ['vertices', 'joints', 'full_pose', 'betas',
+ 'global_orient',
+ 'body_pose', 'expression',
+ 'left_hand_pose', 'right_hand_pose',
+ 'jaw_pose'])
+ModelOutput.__new__.__defaults__ = (None,) * len(ModelOutput._fields)
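+# The line above makes every field of ModelOutput optional (defaulting to
+# None), so partial outputs can be constructed.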
+
+class SMPL(nn.Module):
+
+ NUM_JOINTS = 23
+ NUM_BODY_JOINTS = 23
+ SHAPE_SPACE_DIM = 300
+
+ def __init__(
+ self, model_path: str,
+ kid_template_path: str = '',
+ data_struct: Optional[Struct] = None,
+ create_betas: bool = True,
+ betas: Optional[Tensor] = None,
+ num_betas: int = 10,
+ create_global_orient: bool = True,
+ global_orient: Optional[Tensor] = None,
+ create_body_pose: bool = True,
+ body_pose: Optional[Tensor] = None,
+ create_transl: bool = True,
+ transl: Optional[Tensor] = None,
+ dtype=torch.float32,
+ batch_size: int = 1,
+ joint_mapper=None,
+ gender: str = 'neutral',
+ age: str = 'adult',
+ vertex_ids: Dict[str, int] = None,
+ v_template: Optional[Union[Tensor, Array]] = None,
+ v_personal: Optional[Union[Tensor, Array]] = None,
+ **kwargs
+ ) -> None:
+ ''' SMPL model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+        data_struct: Struct
+ A struct object. If given, then the parameters of the model are
+ read from the object. Otherwise, the model tries to read the
+ parameters from the given `model_path`. (default = None)
+ create_global_orient: bool, optional
+ Flag for creating a member variable for the global orientation
+ of the body. (default = True)
+ global_orient: torch.tensor, optional, Bx3
+ The default value for the global orientation variable.
+ (default = None)
+ create_body_pose: bool, optional
+ Flag for creating a member variable for the pose of the body.
+ (default = True)
+ body_pose: torch.tensor, optional, Bx(Body Joints * 3)
+ The default value for the body pose variable.
+ (default = None)
+ num_betas: int, optional
+ Number of shape components to use
+ (default = 10).
+ create_betas: bool, optional
+ Flag for creating a member variable for the shape space
+ (default = True).
+ betas: torch.tensor, optional, Bx10
+ The default value for the shape member variable.
+ (default = None)
+ create_transl: bool, optional
+ Flag for creating a member variable for the translation
+ of the body. (default = True)
+ transl: torch.tensor, optional, Bx3
+ The default value for the transl variable.
+ (default = None)
+ dtype: torch.dtype, optional
+ The data type for the created variables
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ joint_mapper: object, optional
+ An object that re-maps the joints. Useful if one wants to
+ re-order the SMPL joints to some other convention (e.g. MSCOCO)
+ (default = None)
+ gender: str, optional
+ Which gender to load
+ vertex_ids: dict, optional
+ A dictionary containing the indices of the extra vertices that
+ will be selected
+ '''
+
+ self.gender = gender
+ self.age = age
+
+ if data_struct is None:
+ model_fn = 'SMPL_{}.{ext}'.format(gender.upper(), ext='pkl')
+ smpl_path = cached_download(os.path.join(model_path, model_fn), use_auth_token=os.environ['ICON'])
+
+ with open(smpl_path, 'rb') as smpl_file:
+ data_struct = Struct(**pickle.load(smpl_file,
+ encoding='latin1'))
+
+ super(SMPL, self).__init__()
+ self.batch_size = batch_size
+ shapedirs = data_struct.shapedirs
+ if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM):
+ # print(f'WARNING: You are using a {self.name()} model, with only'
+ # ' 10 shape coefficients.')
+ num_betas = min(num_betas, 10)
+ else:
+ num_betas = min(num_betas, self.SHAPE_SPACE_DIM)
+
+ if self.age=='kid':
+ v_template_smil = np.load(kid_template_path)
+ v_template_smil -= np.mean(v_template_smil, axis=0)
+ v_template_diff = np.expand_dims(v_template_smil - data_struct.v_template, axis=2)
+ shapedirs = np.concatenate((shapedirs[:, :, :num_betas], v_template_diff), axis=2)
+ num_betas = num_betas + 1
+
+ self._num_betas = num_betas
+ shapedirs = shapedirs[:, :, :num_betas]
+ # The shape components
+ self.register_buffer(
+ 'shapedirs',
+ to_tensor(to_np(shapedirs), dtype=dtype))
+
+ if vertex_ids is None:
+ # SMPL and SMPL-H share the same topology, so any extra joints can
+ # be drawn from the same place
+ vertex_ids = VERTEX_IDS['smplh']
+
+ self.dtype = dtype
+
+ self.joint_mapper = joint_mapper
+
+ self.vertex_joint_selector = VertexJointSelector(
+ vertex_ids=vertex_ids, **kwargs)
+
+ self.faces = data_struct.f
+ self.register_buffer('faces_tensor',
+ to_tensor(to_np(self.faces, dtype=np.int64),
+ dtype=torch.long))
+
+ if create_betas:
+ if betas is None:
+ default_betas = torch.zeros(
+ [batch_size, self.num_betas], dtype=dtype)
+ else:
+ if torch.is_tensor(betas):
+ default_betas = betas.clone().detach()
+ else:
+ default_betas = torch.tensor(betas, dtype=dtype)
+
+ self.register_parameter(
+ 'betas', nn.Parameter(default_betas, requires_grad=True))
+
+ # The tensor that contains the global rotation of the model
+ # It is separated from the pose of the joints in case we wish to
+ # optimize only over one of them
+ if create_global_orient:
+ if global_orient is None:
+ default_global_orient = torch.zeros(
+ [batch_size, 3], dtype=dtype)
+ else:
+ if torch.is_tensor(global_orient):
+ default_global_orient = global_orient.clone().detach()
+ else:
+ default_global_orient = torch.tensor(
+ global_orient, dtype=dtype)
+
+ global_orient = nn.Parameter(default_global_orient,
+ requires_grad=True)
+ self.register_parameter('global_orient', global_orient)
+
+ if create_body_pose:
+ if body_pose is None:
+ default_body_pose = torch.zeros(
+ [batch_size, self.NUM_BODY_JOINTS * 3], dtype=dtype)
+ else:
+ if torch.is_tensor(body_pose):
+ default_body_pose = body_pose.clone().detach()
+ else:
+ default_body_pose = torch.tensor(body_pose,
+ dtype=dtype)
+ self.register_parameter(
+ 'body_pose',
+ nn.Parameter(default_body_pose, requires_grad=True))
+
+ if create_transl:
+ if transl is None:
+ default_transl = torch.zeros([batch_size, 3],
+ dtype=dtype,
+ requires_grad=True)
+ else:
+ default_transl = torch.tensor(transl, dtype=dtype)
+ self.register_parameter(
+ 'transl', nn.Parameter(default_transl, requires_grad=True))
+
+ if v_template is None:
+ v_template = data_struct.v_template
+
+ if not torch.is_tensor(v_template):
+ v_template = to_tensor(to_np(v_template), dtype=dtype)
+
+ if v_personal is not None:
+ v_personal = to_tensor(to_np(v_personal), dtype=dtype)
+ v_template += v_personal
+
+ # The vertices of the template model
+ self.register_buffer('v_template', v_template)
+
+ j_regressor = to_tensor(to_np(
+ data_struct.J_regressor), dtype=dtype)
+ self.register_buffer('J_regressor', j_regressor)
+
+ # Pose blend shape basis: 6890 x 3 x 207, reshaped to 6890*3 x 207
+ num_pose_basis = data_struct.posedirs.shape[-1]
+ # 207 x 20670
+ posedirs = np.reshape(data_struct.posedirs, [-1, num_pose_basis]).T
+ self.register_buffer('posedirs',
+ to_tensor(to_np(posedirs), dtype=dtype))
+
+ # indices of parents for each joints
+ parents = to_tensor(to_np(data_struct.kintree_table[0])).long()
+ parents[0] = -1
+ self.register_buffer('parents', parents)
+
+ self.register_buffer(
+ 'lbs_weights', to_tensor(to_np(data_struct.weights), dtype=dtype))
+
+ @property
+ def num_betas(self):
+ return self._num_betas
+
+ @property
+ def num_expression_coeffs(self):
+ return 0
+
+ def create_mean_pose(self, data_struct) -> Tensor:
+ pass
+
+ def name(self) -> str:
+ return 'SMPL'
+
+ @torch.no_grad()
+ def reset_params(self, **params_dict) -> None:
+ for param_name, param in self.named_parameters():
+ if param_name in params_dict:
+ param[:] = torch.tensor(params_dict[param_name])
+ else:
+ param.fill_(0)
+
+ def get_num_verts(self) -> int:
+ return self.v_template.shape[0]
+
+ def get_num_faces(self) -> int:
+ return self.faces.shape[0]
+
+ def extra_repr(self) -> str:
+ msg = [
+ f'Gender: {self.gender.upper()}',
+ f'Number of joints: {self.J_regressor.shape[0]}',
+ f'Betas: {self.num_betas}',
+ ]
+ return '\n'.join(msg)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts=True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> SMPLOutput:
+ ''' Forward pass for the SMPL model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+ with an external model. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+ If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ body_pose: torch.tensor, optional, shape Bx(J*3)
+ If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted by some external model.
+ It should be a tensor that contains joint rotations in
+ axis-angle format. (default=None)
+ transl: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable `transl` and use it
+            instead. For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+ Returns the full axis-angle pose vector (default=False)
+ Returns
+ -------
+ '''
+ # If no shape and pose parameters are passed along, then use the
+ # ones from the module
+ global_orient = (global_orient if global_orient is not None else
+ self.global_orient)
+ body_pose = body_pose if body_pose is not None else self.body_pose
+ betas = betas if betas is not None else self.betas
+
+ apply_trans = transl is not None or hasattr(self, 'transl')
+ if transl is None and hasattr(self, 'transl'):
+ transl = self.transl
+
+ full_pose = torch.cat([global_orient, body_pose], dim=1)
+
+ batch_size = max(betas.shape[0], global_orient.shape[0],
+ body_pose.shape[0])
+
+ if betas.shape[0] != batch_size:
+ num_repeats = int(batch_size / betas.shape[0])
+ betas = betas.expand(num_repeats, -1)
+
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=pose2rot)
+
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Map the joints to the current dataset
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if apply_trans:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLOutput(vertices=vertices if return_verts else None,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ joints=joints,
+ betas=betas,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
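+# A minimal usage sketch for the SMPL module above (hypothetical paths and
+# shapes; model_path is resolved through cached_download in __init__):
+#   smpl = SMPL(model_path='<SMPL model folder>', batch_size=1)
+#   out = smpl(betas=torch.zeros(1, 10),           # shape coefficients
+#              global_orient=torch.zeros(1, 3),    # root orientation, axis-angle
+#              body_pose=torch.zeros(1, 23 * 3))   # 23 body joints, axis-angle
+#   vertices, joints = out.vertices, out.joints    # (1, 6890, 3), (1, J, 3)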
+
+class SMPLLayer(SMPL):
+ def __init__(
+ self,
+ *args,
+ **kwargs
+ ) -> None:
+ # Just create a SMPL module without any member variables
+ super(SMPLLayer, self).__init__(
+ create_body_pose=False,
+ create_betas=False,
+ create_global_orient=False,
+ create_transl=False,
+ *args,
+ **kwargs,
+ )
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts=True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> SMPLOutput:
+ ''' Forward pass for the SMPL model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3x3
+ Global rotation of the body. Useful if someone wishes to
+            predict this with an external model. It is expected to be in
+ rotation matrix format. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+            Shape parameters. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ body_pose: torch.tensor, optional, shape BxJx3x3
+            Body pose. For example, it can be used if the pose of the body
+            joints is predicted by some external model.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ transl: torch.tensor, optional, shape Bx3
+ Translation vector of the body.
+            For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+            Returns the full pose in rotation matrix format (default=False)
+ Returns
+ -------
+ '''
+ model_vars = [betas, global_orient, body_pose, transl]
+ batch_size = 1
+ for var in model_vars:
+ if var is None:
+ continue
+ batch_size = max(batch_size, len(var))
+ device, dtype = self.shapedirs.device, self.shapedirs.dtype
+ if global_orient is None:
+ global_orient = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if body_pose is None:
+ body_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(
+ batch_size, self.NUM_BODY_JOINTS, -1, -1).contiguous()
+ if betas is None:
+ betas = torch.zeros([batch_size, self.num_betas],
+ dtype=dtype, device=device)
+ if transl is None:
+ transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+ full_pose = torch.cat(
+ [global_orient.reshape(-1, 1, 3, 3),
+ body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3)],
+ dim=1)
+
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights,
+ pose2rot=False)
+
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Map the joints to the current dataset
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if transl is not None:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLOutput(vertices=vertices if return_verts else None,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ joints=joints,
+ betas=betas,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
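+# Unlike SMPL, SMPLLayer registers no pose/shape parameters and expects all
+# inputs per call, with rotations given as 3x3 matrices, e.g. (hypothetical):
+#   layer = SMPLLayer(model_path='<SMPL model folder>')
+#   out = layer(global_orient=torch.eye(3)[None, None],      # (1, 1, 3, 3)
+#               body_pose=torch.eye(3).expand(1, 23, 3, 3))  # identity pose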
+
+class SMPLH(SMPL):
+
+ # The hand joints are replaced by MANO
+ NUM_BODY_JOINTS = SMPL.NUM_JOINTS - 2
+ NUM_HAND_JOINTS = 15
+ NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS
+
+ def __init__(
+ self, model_path,
+ kid_template_path: str = '',
+ data_struct: Optional[Struct] = None,
+ create_left_hand_pose: bool = True,
+ left_hand_pose: Optional[Tensor] = None,
+ create_right_hand_pose: bool = True,
+ right_hand_pose: Optional[Tensor] = None,
+ use_pca: bool = True,
+ num_pca_comps: int = 6,
+ flat_hand_mean: bool = False,
+ batch_size: int = 1,
+ gender: str = 'neutral',
+ age: str = 'adult',
+ dtype=torch.float32,
+ vertex_ids=None,
+ use_compressed: bool = True,
+ ext: str = 'pkl',
+ **kwargs
+ ) -> None:
+ ''' SMPLH model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+        data_struct: Struct
+ A struct object. If given, then the parameters of the model are
+ read from the object. Otherwise, the model tries to read the
+ parameters from the given `model_path`. (default = None)
+ create_left_hand_pose: bool, optional
+ Flag for creating a member variable for the pose of the left
+ hand. (default = True)
+ left_hand_pose: torch.tensor, optional, BxP
+ The default value for the left hand pose member variable.
+ (default = None)
+ create_right_hand_pose: bool, optional
+ Flag for creating a member variable for the pose of the right
+ hand. (default = True)
+ right_hand_pose: torch.tensor, optional, BxP
+ The default value for the right hand pose member variable.
+ (default = None)
+ num_pca_comps: int, optional
+ The number of PCA components to use for each hand.
+ (default = 6)
+ flat_hand_mean: bool, optional
+            If True, the mean pose of the hand is the flat (open) hand;
+            otherwise the mean hand pose provided with the model data is used.
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ gender: str, optional
+ Which gender to load
+ dtype: torch.dtype, optional
+ The data type for the created variables
+ vertex_ids: dict, optional
+ A dictionary containing the indices of the extra vertices that
+ will be selected
+ '''
+
+ self.num_pca_comps = num_pca_comps
+ # If no data structure is passed, then load the data from the given
+ # model folder
+ if data_struct is None:
+ # Load the model
+ if osp.isdir(model_path):
+ model_fn = 'SMPLH_{}.{ext}'.format(gender.upper(), ext=ext)
+ smplh_path = os.path.join(model_path, model_fn)
+ else:
+ smplh_path = model_path
+ assert osp.exists(smplh_path), 'Path {} does not exist!'.format(
+ smplh_path)
+
+ if ext == 'pkl':
+ with open(smplh_path, 'rb') as smplh_file:
+ model_data = pickle.load(smplh_file, encoding='latin1')
+ elif ext == 'npz':
+ model_data = np.load(smplh_path, allow_pickle=True)
+ else:
+ raise ValueError('Unknown extension: {}'.format(ext))
+ data_struct = Struct(**model_data)
+
+ if vertex_ids is None:
+ vertex_ids = VERTEX_IDS['smplh']
+
+ super(SMPLH, self).__init__(
+ model_path=model_path,
+ kid_template_path=kid_template_path,
+ data_struct=data_struct,
+ batch_size=batch_size, vertex_ids=vertex_ids, gender=gender, age=age,
+ use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs)
+
+ self.use_pca = use_pca
+ self.num_pca_comps = num_pca_comps
+ self.flat_hand_mean = flat_hand_mean
+
+ left_hand_components = data_struct.hands_componentsl[:num_pca_comps]
+ right_hand_components = data_struct.hands_componentsr[:num_pca_comps]
+
+ self.np_left_hand_components = left_hand_components
+ self.np_right_hand_components = right_hand_components
+ if self.use_pca:
+ self.register_buffer(
+ 'left_hand_components',
+ torch.tensor(left_hand_components, dtype=dtype))
+ self.register_buffer(
+ 'right_hand_components',
+ torch.tensor(right_hand_components, dtype=dtype))
+
+ if self.flat_hand_mean:
+ left_hand_mean = np.zeros_like(data_struct.hands_meanl)
+ else:
+ left_hand_mean = data_struct.hands_meanl
+
+ if self.flat_hand_mean:
+ right_hand_mean = np.zeros_like(data_struct.hands_meanr)
+ else:
+ right_hand_mean = data_struct.hands_meanr
+
+ self.register_buffer('left_hand_mean',
+ to_tensor(left_hand_mean, dtype=self.dtype))
+ self.register_buffer('right_hand_mean',
+ to_tensor(right_hand_mean, dtype=self.dtype))
+
+ # Create the buffers for the pose of the left hand
+ hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS
+ if create_left_hand_pose:
+ if left_hand_pose is None:
+ default_lhand_pose = torch.zeros([batch_size, hand_pose_dim],
+ dtype=dtype)
+ else:
+ default_lhand_pose = torch.tensor(left_hand_pose, dtype=dtype)
+
+ left_hand_pose_param = nn.Parameter(default_lhand_pose,
+ requires_grad=True)
+ self.register_parameter('left_hand_pose',
+ left_hand_pose_param)
+
+ if create_right_hand_pose:
+ if right_hand_pose is None:
+ default_rhand_pose = torch.zeros([batch_size, hand_pose_dim],
+ dtype=dtype)
+ else:
+ default_rhand_pose = torch.tensor(right_hand_pose, dtype=dtype)
+
+ right_hand_pose_param = nn.Parameter(default_rhand_pose,
+ requires_grad=True)
+ self.register_parameter('right_hand_pose',
+ right_hand_pose_param)
+
+ # Create the buffer for the mean pose.
+ pose_mean_tensor = self.create_mean_pose(
+ data_struct, flat_hand_mean=flat_hand_mean)
+ if not torch.is_tensor(pose_mean_tensor):
+ pose_mean_tensor = torch.tensor(pose_mean_tensor, dtype=dtype)
+ self.register_buffer('pose_mean', pose_mean_tensor)
+
+ def create_mean_pose(self, data_struct, flat_hand_mean=False):
+ # Create the array for the mean pose. If flat_hand is false, then use
+ # the mean that is given by the data, rather than the flat open hand
+ global_orient_mean = torch.zeros([3], dtype=self.dtype)
+ body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],
+ dtype=self.dtype)
+
+ pose_mean = torch.cat([global_orient_mean, body_pose_mean,
+ self.left_hand_mean,
+ self.right_hand_mean], dim=0)
+ return pose_mean
+
+ def name(self) -> str:
+ return 'SMPL+H'
+
+ def extra_repr(self):
+ msg = super(SMPLH, self).extra_repr()
+ msg = [msg]
+ if self.use_pca:
+ msg.append(f'Number of PCA components: {self.num_pca_comps}')
+ msg.append(f'Flat hand mean: {self.flat_hand_mean}')
+ return '\n'.join(msg)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ left_hand_pose: Optional[Tensor] = None,
+ right_hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> SMPLHOutput:
+ '''
+ '''
+
+ # If no shape and pose parameters are passed along, then use the
+ # ones from the module
+ global_orient = (global_orient if global_orient is not None else
+ self.global_orient)
+ body_pose = body_pose if body_pose is not None else self.body_pose
+ betas = betas if betas is not None else self.betas
+ left_hand_pose = (left_hand_pose if left_hand_pose is not None else
+ self.left_hand_pose)
+ right_hand_pose = (right_hand_pose if right_hand_pose is not None else
+ self.right_hand_pose)
+
+ apply_trans = transl is not None or hasattr(self, 'transl')
+ if transl is None:
+ if hasattr(self, 'transl'):
+ transl = self.transl
+
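+        # When use_pca is set, hand poses are given as PCA coefficients and the
+        # einsum below ('bi,ij->bj') projects them onto the stored hand
+        # components, recovering a 45-dim axis-angle pose per hand (15 x 3).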
+ if self.use_pca:
+ left_hand_pose = torch.einsum(
+ 'bi,ij->bj', [left_hand_pose, self.left_hand_components])
+ right_hand_pose = torch.einsum(
+ 'bi,ij->bj', [right_hand_pose, self.right_hand_components])
+
+ full_pose = torch.cat([global_orient, body_pose,
+ left_hand_pose,
+ right_hand_pose], dim=1)
+
+ full_pose += self.pose_mean
+
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=pose2rot)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if apply_trans:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLHOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ left_hand_pose=left_hand_pose,
+ right_hand_pose=right_hand_pose,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
+
+class SMPLHLayer(SMPLH):
+
+ def __init__(
+ self, *args, **kwargs
+ ) -> None:
+ ''' SMPL+H as a layer model constructor
+ '''
+ super(SMPLHLayer, self).__init__(
+ create_global_orient=False,
+ create_body_pose=False,
+ create_left_hand_pose=False,
+ create_right_hand_pose=False,
+ create_betas=False,
+ create_transl=False,
+ *args,
+ **kwargs)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ left_hand_pose: Optional[Tensor] = None,
+ right_hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> SMPLHOutput:
+ ''' Forward pass for the SMPL+H model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3x3
+ Global rotation of the body. Useful if someone wishes to
+            predict this with an external model. It is expected to be in
+ rotation matrix format. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+            Shape parameters. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ body_pose: torch.tensor, optional, shape BxJx3x3
+ If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted by some external model.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ left_hand_pose: torch.tensor, optional, shape Bx15x3x3
+ If given, contains the pose of the left hand.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ right_hand_pose: torch.tensor, optional, shape Bx15x3x3
+ If given, contains the pose of the right hand.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ transl: torch.tensor, optional, shape Bx3
+ Translation vector of the body.
+            For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+            Returns the full pose in rotation matrix format (default=False)
+ Returns
+ -------
+ '''
+ model_vars = [betas, global_orient, body_pose, transl, left_hand_pose,
+ right_hand_pose]
+ batch_size = 1
+ for var in model_vars:
+ if var is None:
+ continue
+ batch_size = max(batch_size, len(var))
+ device, dtype = self.shapedirs.device, self.shapedirs.dtype
+ if global_orient is None:
+ global_orient = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if body_pose is None:
+ body_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 21, -1, -1).contiguous()
+ if left_hand_pose is None:
+ left_hand_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous()
+ if right_hand_pose is None:
+ right_hand_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous()
+ if betas is None:
+ betas = torch.zeros([batch_size, self.num_betas],
+ dtype=dtype, device=device)
+ if transl is None:
+ transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+ # Concatenate all pose vectors
+ full_pose = torch.cat(
+ [global_orient.reshape(-1, 1, 3, 3),
+ body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3),
+ left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3),
+ right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3)],
+ dim=1)
+
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=False)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if transl is not None:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLHOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ left_hand_pose=left_hand_pose,
+ right_hand_pose=right_hand_pose,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
+
+class SMPLX(SMPLH):
+ '''
+ SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters
+ trained jointly for the face, hands and body.
+ SMPL-X uses standard vertex based linear blend skinning with learned
+ corrective blend shapes, has N=10475 vertices and K=54 joints,
+ which includes joints for the neck, jaw, eyeballs and fingers.
+ '''
+
+ NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS # 21
+ NUM_HAND_JOINTS = 15
+ NUM_FACE_JOINTS = 3
+ NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS
+ EXPRESSION_SPACE_DIM = 100
+ NECK_IDX = 12
+
+ def __init__(
+ self, model_path: str,
+ kid_template_path: str = '',
+ num_expression_coeffs: int = 10,
+ create_expression: bool = True,
+ expression: Optional[Tensor] = None,
+ create_jaw_pose: bool = True,
+ jaw_pose: Optional[Tensor] = None,
+ create_leye_pose: bool = True,
+ leye_pose: Optional[Tensor] = None,
+ create_reye_pose=True,
+ reye_pose: Optional[Tensor] = None,
+ use_face_contour: bool = False,
+ batch_size: int = 1,
+ gender: str = 'neutral',
+ age: str = 'adult',
+ dtype=torch.float32,
+ ext: str = 'npz',
+ **kwargs
+ ) -> None:
+ ''' SMPLX model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+ num_expression_coeffs: int, optional
+ Number of expression components to use
+ (default = 10).
+ create_expression: bool, optional
+ Flag for creating a member variable for the expression space
+ (default = True).
+ expression: torch.tensor, optional, Bx10
+ The default value for the expression member variable.
+ (default = None)
+ create_jaw_pose: bool, optional
+ Flag for creating a member variable for the jaw pose.
+            (default = True)
+ jaw_pose: torch.tensor, optional, Bx3
+ The default value for the jaw pose variable.
+ (default = None)
+ create_leye_pose: bool, optional
+ Flag for creating a member variable for the left eye pose.
+            (default = True)
+        leye_pose: torch.tensor, optional, Bx3
+ The default value for the left eye pose variable.
+ (default = None)
+ create_reye_pose: bool, optional
+ Flag for creating a member variable for the right eye pose.
+            (default = True)
+        reye_pose: torch.tensor, optional, Bx3
+ The default value for the right eye pose variable.
+ (default = None)
+ use_face_contour: bool, optional
+ Whether to compute the keypoints that form the facial contour
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ gender: str, optional
+ Which gender to load
+ dtype: torch.dtype
+ The data type for the created variables
+ '''
+
+ # Load the model
+ if osp.isdir(model_path):
+ model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)
+ smplx_path = os.path.join(model_path, model_fn)
+ else:
+ smplx_path = model_path
+ assert osp.exists(smplx_path), 'Path {} does not exist!'.format(
+ smplx_path)
+
+ if ext == 'pkl':
+ with open(smplx_path, 'rb') as smplx_file:
+ model_data = pickle.load(smplx_file, encoding='latin1')
+ elif ext == 'npz':
+ model_data = np.load(smplx_path, allow_pickle=True)
+ else:
+ raise ValueError('Unknown extension: {}'.format(ext))
+
+ # print(colored(f"Use SMPL-X: {smplx_path}", "green"))
+
+ data_struct = Struct(**model_data)
+
+ super(SMPLX, self).__init__(
+ model_path=model_path,
+ kid_template_path=kid_template_path,
+ data_struct=data_struct,
+ dtype=dtype,
+ batch_size=batch_size,
+ vertex_ids=VERTEX_IDS['smplx'],
+ gender=gender, age=age, ext=ext,
+ **kwargs)
+
+ lmk_faces_idx = data_struct.lmk_faces_idx
+ self.register_buffer('lmk_faces_idx',
+ torch.tensor(lmk_faces_idx, dtype=torch.long))
+ lmk_bary_coords = data_struct.lmk_bary_coords
+ self.register_buffer('lmk_bary_coords',
+ torch.tensor(lmk_bary_coords, dtype=dtype))
+
+ self.use_face_contour = use_face_contour
+ if self.use_face_contour:
+ dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx
+ dynamic_lmk_faces_idx = torch.tensor(
+ dynamic_lmk_faces_idx,
+ dtype=torch.long)
+ self.register_buffer('dynamic_lmk_faces_idx',
+ dynamic_lmk_faces_idx)
+
+ dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords
+ dynamic_lmk_bary_coords = torch.tensor(
+ dynamic_lmk_bary_coords, dtype=dtype)
+ self.register_buffer('dynamic_lmk_bary_coords',
+ dynamic_lmk_bary_coords)
+
+ neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)
+ self.register_buffer(
+ 'neck_kin_chain',
+ torch.tensor(neck_kin_chain, dtype=torch.long))
+
+ if create_jaw_pose:
+ if jaw_pose is None:
+ default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)
+ jaw_pose_param = nn.Parameter(default_jaw_pose,
+ requires_grad=True)
+ self.register_parameter('jaw_pose', jaw_pose_param)
+
+ if create_leye_pose:
+ if leye_pose is None:
+ default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_leye_pose = torch.tensor(leye_pose, dtype=dtype)
+ leye_pose_param = nn.Parameter(default_leye_pose,
+ requires_grad=True)
+ self.register_parameter('leye_pose', leye_pose_param)
+
+ if create_reye_pose:
+ if reye_pose is None:
+ default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_reye_pose = torch.tensor(reye_pose, dtype=dtype)
+ reye_pose_param = nn.Parameter(default_reye_pose,
+ requires_grad=True)
+ self.register_parameter('reye_pose', reye_pose_param)
+
+ shapedirs = data_struct.shapedirs
+ if len(shapedirs.shape) < 3:
+ shapedirs = shapedirs[:, :, None]
+ if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +
+ self.EXPRESSION_SPACE_DIM):
+ # print(f'WARNING: You are using a {self.name()} model, with only'
+ # ' 10 shape and 10 expression coefficients.')
+ expr_start_idx = 10
+ expr_end_idx = 20
+ num_expression_coeffs = min(num_expression_coeffs, 10)
+ else:
+ expr_start_idx = self.SHAPE_SPACE_DIM
+ expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs
+ num_expression_coeffs = min(
+ num_expression_coeffs, self.EXPRESSION_SPACE_DIM)
+
+ self._num_expression_coeffs = num_expression_coeffs
+
+ expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]
+ self.register_buffer(
+ 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))
+
+ if create_expression:
+ if expression is None:
+ default_expression = torch.zeros(
+ [batch_size, self.num_expression_coeffs], dtype=dtype)
+ else:
+ default_expression = torch.tensor(expression, dtype=dtype)
+ expression_param = nn.Parameter(default_expression,
+ requires_grad=True)
+ self.register_parameter('expression', expression_param)
+
+ def name(self) -> str:
+ return 'SMPL-X'
+
+ @property
+ def num_expression_coeffs(self):
+ return self._num_expression_coeffs
+
+ def create_mean_pose(self, data_struct, flat_hand_mean=False):
+ # Create the array for the mean pose. If flat_hand is false, then use
+ # the mean that is given by the data, rather than the flat open hand
+ global_orient_mean = torch.zeros([3], dtype=self.dtype)
+ body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],
+ dtype=self.dtype)
+ jaw_pose_mean = torch.zeros([3], dtype=self.dtype)
+ leye_pose_mean = torch.zeros([3], dtype=self.dtype)
+ reye_pose_mean = torch.zeros([3], dtype=self.dtype)
+
+ pose_mean = np.concatenate([global_orient_mean, body_pose_mean,
+ jaw_pose_mean,
+ leye_pose_mean, reye_pose_mean,
+ self.left_hand_mean, self.right_hand_mean],
+ axis=0)
+
+ return pose_mean
+
+ def extra_repr(self):
+ msg = super(SMPLX, self).extra_repr()
+ msg = [
+ msg,
+ f'Number of Expression Coefficients: {self.num_expression_coeffs}'
+ ]
+ return '\n'.join(msg)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ left_hand_pose: Optional[Tensor] = None,
+ right_hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ expression: Optional[Tensor] = None,
+ jaw_pose: Optional[Tensor] = None,
+ leye_pose: Optional[Tensor] = None,
+ reye_pose: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ return_joint_transformation: bool = False,
+ return_vertex_transformation: bool = False,
+ **kwargs
+ ) -> SMPLXOutput:
+ '''
+ Forward pass for the SMPLX model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+ with an external model. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+ If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ expression: torch.tensor, optional, shape BxN_e
+ If given, ignore the member variable `expression` and use it
+            instead. For example, it can be used if expression parameters
+ `expression` are predicted from some external model.
+ body_pose: torch.tensor, optional, shape Bx(J*3)
+ If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted by some external model.
+ It should be a tensor that contains joint rotations in
+ axis-angle format. (default=None)
+ left_hand_pose: torch.tensor, optional, shape BxP
+ If given, ignore the member variable `left_hand_pose` and
+ use this instead. It should either contain PCA coefficients or
+ joint rotations in axis-angle format.
+ right_hand_pose: torch.tensor, optional, shape BxP
+ If given, ignore the member variable `right_hand_pose` and
+ use this instead. It should either contain PCA coefficients or
+ joint rotations in axis-angle format.
+ jaw_pose: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable `jaw_pose` and
+            use this instead. It should contain joint rotations in
+ axis-angle format.
+ transl: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable `transl` and use it
+            instead. For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+ Returns the full axis-angle pose vector (default=False)
+ Returns
+ -------
+        output: SMPLXOutput
+            A data class that contains the posed vertices and joints
+ '''
+
+ # If no shape and pose parameters are passed along, then use the
+ # ones from the module
+ global_orient = (global_orient if global_orient is not None else
+ self.global_orient)
+ body_pose = body_pose if body_pose is not None else self.body_pose
+ betas = betas if betas is not None else self.betas
+
+ left_hand_pose = (left_hand_pose if left_hand_pose is not None else
+ self.left_hand_pose)
+ right_hand_pose = (right_hand_pose if right_hand_pose is not None else
+ self.right_hand_pose)
+ jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose
+ leye_pose = leye_pose if leye_pose is not None else self.leye_pose
+ reye_pose = reye_pose if reye_pose is not None else self.reye_pose
+ expression = expression if expression is not None else self.expression
+
+ apply_trans = transl is not None or hasattr(self, 'transl')
+ if transl is None:
+ if hasattr(self, 'transl'):
+ transl = self.transl
+
+ if self.use_pca:
+ left_hand_pose = torch.einsum('bi,ij->bj', [left_hand_pose, self.left_hand_components])
+ right_hand_pose = torch.einsum(
+ 'bi,ij->bj', [right_hand_pose, self.right_hand_components])
+
+ full_pose = torch.cat([global_orient, body_pose,
+ jaw_pose, leye_pose, reye_pose,
+ left_hand_pose,
+ right_hand_pose], dim=1)
+
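+        # Axis-angle layout: 1 global + 21 body + 1 jaw + 2 eye + 2 x 15 hand
+        # joints = 55 joints, i.e. a (B, 165) tensor before adding pose_mean.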
+ # Add the mean pose of the model. Does not affect the body, only the
+ # hands when flat_hand_mean == False
+ full_pose += self.pose_mean
+
+ batch_size = max(betas.shape[0], global_orient.shape[0],
+ body_pose.shape[0])
+ # Concatenate the shape and expression coefficients
+ scale = int(batch_size / betas.shape[0])
+ if scale > 1:
+ betas = betas.expand(scale, -1)
+ shape_components = torch.cat([betas, expression], dim=-1)
+
+ shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+ if return_joint_transformation or return_vertex_transformation:
+ vertices, joints, joint_transformation, vertex_transformation = lbs(shape_components, full_pose, self.v_template,
+ shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=pose2rot, return_transformation=True
+ )
+ else:
+ vertices, joints = lbs(shape_components, full_pose, self.v_template,
+ shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=pose2rot,
+ )
+
+ lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+ dim=0).expand(batch_size, -1).contiguous()
+ lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+            batch_size, 1, 1)
+ if self.use_face_contour:
+ lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+ vertices, full_pose, self.dynamic_lmk_faces_idx,
+ self.dynamic_lmk_bary_coords,
+ self.neck_kin_chain,
+ pose2rot=True,
+ )
+ dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+
+ lmk_faces_idx = torch.cat([lmk_faces_idx,
+ dyn_lmk_faces_idx], 1)
+ lmk_bary_coords = torch.cat(
+ [lmk_bary_coords.expand(batch_size, -1, -1),
+ dyn_lmk_bary_coords], 1)
+
+ landmarks = vertices2landmarks(vertices, self.faces_tensor,
+ lmk_faces_idx,
+ lmk_bary_coords)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Add the landmarks to the joints
+ joints = torch.cat([joints, landmarks], dim=1)
+ # Map the joints to the current dataset
+
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+ if apply_trans:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLXOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ expression=expression,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ left_hand_pose=left_hand_pose,
+ right_hand_pose=right_hand_pose,
+ jaw_pose=jaw_pose,
+ full_pose=full_pose if return_full_pose else None,
+ joint_transformation=joint_transformation if return_joint_transformation else None,
+ vertex_transformation=vertex_transformation if return_vertex_transformation else None)
+ return output
+
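+# A minimal usage sketch for the SMPLX module above (hypothetical paths and
+# shapes):
+#   smplx_model = SMPLX(model_path='<SMPL-X model folder>')
+#   out = smplx_model(betas=torch.zeros(1, 10),
+#                     expression=torch.zeros(1, 10),
+#                     body_pose=torch.zeros(1, 21 * 3),    # axis-angle
+#                     jaw_pose=torch.zeros(1, 3))
+#   vertices = out.vertices                                # (1, 10475, 3)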
+
+class SMPLXLayer(SMPLX):
+ def __init__(
+ self,
+ *args,
+ **kwargs
+ ) -> None:
+ # Just create a SMPLX module without any member variables
+ super(SMPLXLayer, self).__init__(
+ create_global_orient=False,
+ create_body_pose=False,
+ create_left_hand_pose=False,
+ create_right_hand_pose=False,
+ create_jaw_pose=False,
+ create_leye_pose=False,
+ create_reye_pose=False,
+ create_betas=False,
+ create_expression=False,
+ create_transl=False,
+ *args, **kwargs,
+ )
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ body_pose: Optional[Tensor] = None,
+ left_hand_pose: Optional[Tensor] = None,
+ right_hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ expression: Optional[Tensor] = None,
+ jaw_pose: Optional[Tensor] = None,
+ leye_pose: Optional[Tensor] = None,
+ reye_pose: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ **kwargs
+ ) -> SMPLXOutput:
+ '''
+ Forward pass for the SMPLX model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3x3
+ If given, ignore the member variable and use it as the global
+            rotation of the body. Useful if someone wishes to predict this
+ with an external model. It is expected to be in rotation matrix
+ format. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+ If given, ignore the member variable `betas` and use it
+            instead. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ expression: torch.tensor, optional, shape BxN_e
+ Expression coefficients.
+            For example, it can be used if expression parameters
+ `expression` are predicted from some external model.
+ body_pose: torch.tensor, optional, shape BxJx3x3
+ If given, ignore the member variable `body_pose` and use it
+            instead. For example, it can be used if the pose of the body
+            joints is predicted by some external model.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ left_hand_pose: torch.tensor, optional, shape Bx15x3x3
+ If given, contains the pose of the left hand.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ right_hand_pose: torch.tensor, optional, shape Bx15x3x3
+ If given, contains the pose of the right hand.
+ It should be a tensor that contains joint rotations in
+ rotation matrix format. (default=None)
+ jaw_pose: torch.tensor, optional, shape Bx3x3
+            Jaw pose. It should contain joint rotations in
+ rotation matrix format.
+ transl: torch.tensor, optional, shape Bx3
+ Translation vector of the body.
+            For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+ Returns the full pose vector (default=False)
+ Returns
+ -------
+ output: ModelOutput
+ A data class that contains the posed vertices and joints
+ '''
+ device, dtype = self.shapedirs.device, self.shapedirs.dtype
+
+ model_vars = [betas, global_orient, body_pose, transl,
+ expression, left_hand_pose, right_hand_pose, jaw_pose]
+ batch_size = 1
+ for var in model_vars:
+ if var is None:
+ continue
+ batch_size = max(batch_size, len(var))
+
+ if global_orient is None:
+ global_orient = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if body_pose is None:
+ body_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(
+ batch_size, self.NUM_BODY_JOINTS, -1, -1).contiguous()
+ if left_hand_pose is None:
+ left_hand_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous()
+ if right_hand_pose is None:
+ right_hand_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous()
+ if jaw_pose is None:
+ jaw_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if leye_pose is None:
+ leye_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if reye_pose is None:
+ reye_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if expression is None:
+ expression = torch.zeros([batch_size, self.num_expression_coeffs],
+ dtype=dtype, device=device)
+ if betas is None:
+ betas = torch.zeros([batch_size, self.num_betas],
+ dtype=dtype, device=device)
+ if transl is None:
+ transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+ # Concatenate all pose vectors
+ full_pose = torch.cat(
+ [global_orient.reshape(-1, 1, 3, 3),
+ body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3, 3),
+ jaw_pose.reshape(-1, 1, 3, 3),
+ leye_pose.reshape(-1, 1, 3, 3),
+ reye_pose.reshape(-1, 1, 3, 3),
+ left_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3),
+ right_hand_pose.reshape(-1, self.NUM_HAND_JOINTS, 3, 3)],
+ dim=1)
+ shape_components = torch.cat([betas, expression], dim=-1)
+
+ shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+ vertices, joints = lbs(shape_components, full_pose, self.v_template,
+ shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights,
+ pose2rot=False,
+ )
+
+ lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+ dim=0).expand(batch_size, -1).contiguous()
+ lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+ batch_size, 1, 1)
+ if self.use_face_contour:
+ lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+ vertices, full_pose,
+ self.dynamic_lmk_faces_idx,
+ self.dynamic_lmk_bary_coords,
+ self.neck_kin_chain,
+ pose2rot=False,
+ )
+ dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+
+ lmk_faces_idx = torch.cat([lmk_faces_idx, dyn_lmk_faces_idx], 1)
+ lmk_bary_coords = torch.cat(
+ [lmk_bary_coords.expand(batch_size, -1, -1),
+ dyn_lmk_bary_coords], 1)
+
+ landmarks = vertices2landmarks(vertices, self.faces_tensor,
+ lmk_faces_idx,
+ lmk_bary_coords)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Add the landmarks to the joints
+ joints = torch.cat([joints, landmarks], dim=1)
+ # Map the joints to the current dataset
+
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+ if transl is not None:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = SMPLXOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ expression=expression,
+ global_orient=global_orient,
+ body_pose=body_pose,
+ left_hand_pose=left_hand_pose,
+ right_hand_pose=right_hand_pose,
+ jaw_pose=jaw_pose,
+ transl=transl,
+ full_pose=full_pose if return_full_pose else None)
+ return output
+
+
+class MANO(SMPL):
+ # The hand joints are replaced by MANO
+ NUM_BODY_JOINTS = 1
+ NUM_HAND_JOINTS = 15
+ NUM_JOINTS = NUM_BODY_JOINTS + NUM_HAND_JOINTS
+
+ def __init__(
+ self,
+ model_path: str,
+ is_rhand: bool = True,
+ data_struct: Optional[Struct] = None,
+ create_hand_pose: bool = True,
+ hand_pose: Optional[Tensor] = None,
+ use_pca: bool = True,
+ num_pca_comps: int = 6,
+ flat_hand_mean: bool = False,
+ batch_size: int = 1,
+ dtype=torch.float32,
+ vertex_ids=None,
+ use_compressed: bool = True,
+ ext: str = 'pkl',
+ **kwargs
+ ) -> None:
+ ''' MANO model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+        data_struct: Struct
+ A struct object. If given, then the parameters of the model are
+ read from the object. Otherwise, the model tries to read the
+ parameters from the given `model_path`. (default = None)
+ create_hand_pose: bool, optional
+            Flag for creating a member variable for the pose of the
+ hand. (default = True)
+ hand_pose: torch.tensor, optional, BxP
+            The default value for the hand pose member variable.
+ (default = None)
+ num_pca_comps: int, optional
+ The number of PCA components to use for each hand.
+ (default = 6)
+ flat_hand_mean: bool, optional
+ If False, the mean hand pose from the model data is used; if
+ True, the mean is a flat, open hand. (default = False)
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ dtype: torch.dtype, optional
+ The data type for the created variables
+ vertex_ids: dict, optional
+ A dictionary containing the indices of the extra vertices that
+ will be selected
+ '''
+
+ self.num_pca_comps = num_pca_comps
+ self.is_rhand = is_rhand
+ # If no data structure is passed, then load the data from the given
+ # model folder
+ if data_struct is None:
+ # Load the model
+ if osp.isdir(model_path):
+ model_fn = 'MANO_{}.{ext}'.format(
+ 'RIGHT' if is_rhand else 'LEFT', ext=ext)
+ mano_path = os.path.join(model_path, model_fn)
+ else:
+ mano_path = model_path
+ self.is_rhand = 'RIGHT' in os.path.basename(model_path)
+ assert osp.exists(mano_path), 'Path {} does not exist!'.format(
+ mano_path)
+
+ if ext == 'pkl':
+ with open(mano_path, 'rb') as mano_file:
+ model_data = pickle.load(mano_file, encoding='latin1')
+ elif ext == 'npz':
+ model_data = np.load(mano_path, allow_pickle=True)
+ else:
+ raise ValueError('Unknown extension: {}'.format(ext))
+ data_struct = Struct(**model_data)
+
+ if vertex_ids is None:
+ vertex_ids = VERTEX_IDS['smplh']
+
+ super(MANO, self).__init__(
+ model_path=model_path, data_struct=data_struct,
+ batch_size=batch_size, vertex_ids=vertex_ids,
+ use_compressed=use_compressed, dtype=dtype, ext=ext, **kwargs)
+
+ # add only MANO tips to the extra joints
+ self.vertex_joint_selector.extra_joints_idxs = to_tensor(
+ list(VERTEX_IDS['mano'].values()), dtype=torch.long)
+
+ self.use_pca = use_pca
+ self.num_pca_comps = num_pca_comps
+ if self.num_pca_comps == 45:
+ self.use_pca = False
+ self.flat_hand_mean = flat_hand_mean
+
+ hand_components = data_struct.hands_components[:num_pca_comps]
+
+ self.np_hand_components = hand_components
+
+ if self.use_pca:
+ self.register_buffer(
+ 'hand_components',
+ torch.tensor(hand_components, dtype=dtype))
+
+ if self.flat_hand_mean:
+ hand_mean = np.zeros_like(data_struct.hands_mean)
+ else:
+ hand_mean = data_struct.hands_mean
+
+ self.register_buffer('hand_mean',
+ to_tensor(hand_mean, dtype=self.dtype))
+
+ # Create the buffers for the pose of the left hand
+ hand_pose_dim = num_pca_comps if use_pca else 3 * self.NUM_HAND_JOINTS
+ if create_hand_pose:
+ if hand_pose is None:
+ default_hand_pose = torch.zeros([batch_size, hand_pose_dim],
+ dtype=dtype)
+ else:
+ default_hand_pose = torch.tensor(hand_pose, dtype=dtype)
+
+ hand_pose_param = nn.Parameter(default_hand_pose,
+ requires_grad=True)
+ self.register_parameter('hand_pose',
+ hand_pose_param)
+
+ # Create the buffer for the mean pose.
+ pose_mean = self.create_mean_pose(
+ data_struct, flat_hand_mean=flat_hand_mean)
+ pose_mean_tensor = pose_mean.clone().to(dtype)
+ # pose_mean_tensor = torch.tensor(pose_mean, dtype=dtype)
+ self.register_buffer('pose_mean', pose_mean_tensor)
+
+ def name(self) -> str:
+ return 'MANO'
+
+ def create_mean_pose(self, data_struct, flat_hand_mean=False):
+ # Create the array for the mean pose. If flat_hand_mean is False, use
+ # the mean given by the model data rather than the flat, open hand
+ global_orient_mean = torch.zeros([3], dtype=self.dtype)
+ pose_mean = torch.cat([global_orient_mean, self.hand_mean], dim=0)
+ return pose_mean
+
+ def extra_repr(self):
+ msg = [super(MANO, self).extra_repr()]
+ if self.use_pca:
+ msg.append(f'Number of PCA components: {self.num_pca_comps}')
+ msg.append(f'Flat hand mean: {self.flat_hand_mean}')
+ return '\n'.join(msg)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ **kwargs
+ ) -> MANOOutput:
+ ''' Forward pass for the MANO model
+ '''
+ # If no shape and pose parameters are passed along, then use the
+ # ones from the module
+ global_orient = (global_orient if global_orient is not None else
+ self.global_orient)
+ betas = betas if betas is not None else self.betas
+ hand_pose = (hand_pose if hand_pose is not None else
+ self.hand_pose)
+
+ apply_trans = transl is not None or hasattr(self, 'transl')
+ if transl is None:
+ if hasattr(self, 'transl'):
+ transl = self.transl
+
+ if self.use_pca:
+ hand_pose = torch.einsum(
+ 'bi,ij->bj', [hand_pose, self.hand_components])
+
+ full_pose = torch.cat([global_orient, hand_pose], dim=1)
+ full_pose += self.pose_mean
+
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=True,
+ )
+
+ # # Add pre-selected extra joints that might be needed
+ # joints = self.vertex_joint_selector(vertices, joints)
+
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if apply_trans:
+ joints = joints + transl.unsqueeze(dim=1)
+ vertices = vertices + transl.unsqueeze(dim=1)
+
+ output = MANOOutput(vertices=vertices if return_verts else None,
+ joints=joints if return_verts else None,
+ betas=betas,
+ global_orient=global_orient,
+ hand_pose=hand_pose,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
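+# Editor's note: a hedged usage sketch for the MANO class above; the model path
+# is a placeholder (MANO_RIGHT.pkl must be present). With use_pca=True the
+# hand_pose argument carries num_pca_comps PCA coefficients, which the forward
+# pass expands to the 45-dim axis-angle pose via the hand_components einsum.
+#
+#   import torch
+#   mano = MANO(model_path='./models/mano', is_rhand=True,
+#               use_pca=True, num_pca_comps=6)
+#   out = mano(global_orient=torch.zeros(1, 3),
+#              hand_pose=0.1 * torch.randn(1, 6),
+#              betas=torch.zeros(1, 10))
+#   # out.vertices: (1, 778, 3); out.hand_pose holds the full 45-dim pose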
+
+class MANOLayer(MANO):
+ def __init__(self, *args, **kwargs) -> None:
+ ''' MANO as a layer model constructor
+ '''
+ super(MANOLayer, self).__init__(
+ create_global_orient=False,
+ create_hand_pose=False,
+ create_betas=False,
+ create_transl=False,
+ *args, **kwargs)
+
+ def name(self) -> str:
+ return 'MANO'
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ hand_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ **kwargs
+ ) -> MANOOutput:
+ ''' Forward pass for the MANO model
+ '''
+ device, dtype = self.shapedirs.device, self.shapedirs.dtype
+ if global_orient is None:
+ batch_size = 1
+ global_orient = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ else:
+ batch_size = global_orient.shape[0]
+ if hand_pose is None:
+ hand_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 15, -1, -1).contiguous()
+ if betas is None:
+ betas = torch.zeros(
+ [batch_size, self.num_betas], dtype=dtype, device=device)
+ if transl is None:
+ transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+ full_pose = torch.cat([global_orient, hand_pose], dim=1)
+ vertices, joints = lbs(betas, full_pose, self.v_template,
+ self.shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=False)
+
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints)
+
+ if transl is not None:
+ joints = joints + transl.unsqueeze(dim=1)
+ vertices = vertices + transl.unsqueeze(dim=1)
+
+ output = MANOOutput(
+ vertices=vertices if return_verts else None,
+ joints=joints if return_verts else None,
+ betas=betas,
+ global_orient=global_orient,
+ hand_pose=hand_pose,
+ full_pose=full_pose if return_full_pose else None)
+
+ return output
+
+
+class FLAME(SMPL):
+ NUM_JOINTS = 5
+ SHAPE_SPACE_DIM = 300
+ EXPRESSION_SPACE_DIM = 100
+ NECK_IDX = 0
+
+ def __init__(
+ self,
+ model_path: str,
+ data_struct=None,
+ num_expression_coeffs=10,
+ create_expression: bool = True,
+ expression: Optional[Tensor] = None,
+ create_neck_pose: bool = True,
+ neck_pose: Optional[Tensor] = None,
+ create_jaw_pose: bool = True,
+ jaw_pose: Optional[Tensor] = None,
+ create_leye_pose: bool = True,
+ leye_pose: Optional[Tensor] = None,
+ create_reye_pose: bool = True,
+ reye_pose: Optional[Tensor] = None,
+ use_face_contour: bool = False,
+ batch_size: int = 1,
+ gender: str = 'neutral',
+ dtype: torch.dtype = torch.float32,
+ ext='pkl',
+ **kwargs
+ ) -> None:
+ ''' FLAME model constructor
+ Parameters
+ ----------
+ model_path: str
+ The path to the folder or to the file where the model
+ parameters are stored
+ num_expression_coeffs: int, optional
+ Number of expression components to use
+ (default = 10).
+ create_expression: bool, optional
+ Flag for creating a member variable for the expression space
+ (default = True).
+ expression: torch.tensor, optional, Bx10
+ The default value for the expression member variable.
+ (default = None)
+ create_neck_pose: bool, optional
+ Flag for creating a member variable for the neck pose.
+ (default = True)
+ neck_pose: torch.tensor, optional, Bx3
+ The default value for the neck pose variable.
+ (default = None)
+ create_jaw_pose: bool, optional
+ Flag for creating a member variable for the jaw pose.
+ (default = True)
+ jaw_pose: torch.tensor, optional, Bx3
+ The default value for the jaw pose variable.
+ (default = None)
+ create_leye_pose: bool, optional
+ Flag for creating a member variable for the left eye pose.
+ (default = True)
+ leye_pose: torch.tensor, optional, Bx3
+ The default value for the left eye pose variable.
+ (default = None)
+ create_reye_pose: bool, optional
+ Flag for creating a member variable for the right eye pose.
+ (default = True)
+ reye_pose: torch.tensor, optional, Bx3
+ The default value for the right eye pose variable.
+ (default = None)
+ use_face_contour: bool, optional
+ Whether to compute the keypoints that form the facial contour
+ batch_size: int, optional
+ The batch size used for creating the member variables
+ gender: str, optional
+ Which gender to load
+ dtype: torch.dtype
+ The data type for the created variables
+ '''
+ model_fn = f'FLAME_{gender.upper()}.{ext}'
+ flame_path = os.path.join(model_path, model_fn)
+ assert osp.exists(flame_path), 'Path {} does not exist!'.format(
+ flame_path)
+ if ext == 'npz':
+ file_data = np.load(flame_path, allow_pickle=True)
+ elif ext == 'pkl':
+ with open(flame_path, 'rb') as smpl_file:
+ file_data = pickle.load(smpl_file, encoding='latin1')
+ else:
+ raise ValueError('Unknown extension: {}'.format(ext))
+ data_struct = Struct(**file_data)
+
+ super(FLAME, self).__init__(
+ model_path=model_path,
+ data_struct=data_struct,
+ dtype=dtype,
+ batch_size=batch_size,
+ gender=gender,
+ ext=ext,
+ **kwargs)
+
+ self.use_face_contour = use_face_contour
+
+ self.vertex_joint_selector.extra_joints_idxs = to_tensor(
+ [], dtype=torch.long)
+
+ if create_neck_pose:
+ if neck_pose is None:
+ default_neck_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_neck_pose = torch.tensor(neck_pose, dtype=dtype)
+ neck_pose_param = nn.Parameter(
+ default_neck_pose, requires_grad=True)
+ self.register_parameter('neck_pose', neck_pose_param)
+
+ if create_jaw_pose:
+ if jaw_pose is None:
+ default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)
+ jaw_pose_param = nn.Parameter(default_jaw_pose,
+ requires_grad=True)
+ self.register_parameter('jaw_pose', jaw_pose_param)
+
+ if create_leye_pose:
+ if leye_pose is None:
+ default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_leye_pose = torch.tensor(leye_pose, dtype=dtype)
+ leye_pose_param = nn.Parameter(default_leye_pose,
+ requires_grad=True)
+ self.register_parameter('leye_pose', leye_pose_param)
+
+ if create_reye_pose:
+ if reye_pose is None:
+ default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)
+ else:
+ default_reye_pose = torch.tensor(reye_pose, dtype=dtype)
+ reye_pose_param = nn.Parameter(default_reye_pose,
+ requires_grad=True)
+ self.register_parameter('reye_pose', reye_pose_param)
+
+ shapedirs = data_struct.shapedirs
+ if len(shapedirs.shape) < 3:
+ shapedirs = shapedirs[:, :, None]
+ if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +
+ self.EXPRESSION_SPACE_DIM):
+ # print(f'WARNING: You are using a {self.name()} model, with only'
+ # ' 10 shape and 10 expression coefficients.')
+ expr_start_idx = 10
+ expr_end_idx = 20
+ num_expression_coeffs = min(num_expression_coeffs, 10)
+ else:
+ expr_start_idx = self.SHAPE_SPACE_DIM
+ expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs
+ num_expression_coeffs = min(
+ num_expression_coeffs, self.EXPRESSION_SPACE_DIM)
+
+ self._num_expression_coeffs = num_expression_coeffs
+
+ expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]
+ self.register_buffer(
+ 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))
+
+ if create_expression:
+ if expression is None:
+ default_expression = torch.zeros(
+ [batch_size, self.num_expression_coeffs], dtype=dtype)
+ else:
+ default_expression = torch.tensor(expression, dtype=dtype)
+ expression_param = nn.Parameter(default_expression,
+ requires_grad=True)
+ self.register_parameter('expression', expression_param)
+
+ # The pickle file that contains the barycentric coordinates for
+ # regressing the landmarks
+ landmark_bcoord_filename = osp.join(
+ model_path, 'flame_static_embedding.pkl')
+
+ with open(landmark_bcoord_filename, 'rb') as fp:
+ landmarks_data = pickle.load(fp, encoding='latin1')
+
+ lmk_faces_idx = landmarks_data['lmk_face_idx'].astype(np.int64)
+ self.register_buffer('lmk_faces_idx',
+ torch.tensor(lmk_faces_idx, dtype=torch.long))
+ lmk_bary_coords = landmarks_data['lmk_b_coords']
+ self.register_buffer('lmk_bary_coords',
+ torch.tensor(lmk_bary_coords, dtype=dtype))
+ if self.use_face_contour:
+ face_contour_path = os.path.join(
+ model_path, 'flame_dynamic_embedding.npy')
+ contour_embeddings = np.load(face_contour_path,
+ allow_pickle=True,
+ encoding='latin1')[()]
+
+ dynamic_lmk_faces_idx = np.array(
+ contour_embeddings['lmk_face_idx'], dtype=np.int64)
+ dynamic_lmk_faces_idx = torch.tensor(
+ dynamic_lmk_faces_idx,
+ dtype=torch.long)
+ self.register_buffer('dynamic_lmk_faces_idx',
+ dynamic_lmk_faces_idx)
+
+ dynamic_lmk_b_coords = torch.tensor(
+ contour_embeddings['lmk_b_coords'], dtype=dtype)
+ self.register_buffer(
+ 'dynamic_lmk_bary_coords', dynamic_lmk_b_coords)
+
+ neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)
+ self.register_buffer(
+ 'neck_kin_chain',
+ torch.tensor(neck_kin_chain, dtype=torch.long))
+
+ @property
+ def num_expression_coeffs(self):
+ return self._num_expression_coeffs
+
+ def name(self) -> str:
+ return 'FLAME'
+
+ def extra_repr(self):
+ msg = [
+ super(FLAME, self).extra_repr(),
+ f'Number of Expression Coefficients: {self.num_expression_coeffs}',
+ f'Use face contour: {self.use_face_contour}',
+ ]
+ return '\n'.join(msg)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ neck_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ expression: Optional[Tensor] = None,
+ jaw_pose: Optional[Tensor] = None,
+ leye_pose: Optional[Tensor] = None,
+ reye_pose: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> FLAMEOutput:
+ '''
+ Forward pass for the FLAME model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable and use it as the global
+ rotation of the body. Useful if someone wishes to predict this
+ with an external model. (default=None)
+ betas: torch.tensor, optional, shape Bx10
+ If given, ignore the member variable `betas` and use it
+ instead. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ expression: torch.tensor, optional, shape Bx10
+ If given, ignore the member variable `expression` and use it
+ instead. For example, it can be used if expression parameters
+ `expression` are predicted from some external model.
+ jaw_pose: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable `jaw_pose` and
+ use this instead. It should contain joint rotations in
+ axis-angle format.
+ transl: torch.tensor, optional, shape Bx3
+ If given, ignore the member variable `transl` and use it
+ instead. For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+ Returns the full axis-angle pose vector (default=False)
+ Returns
+ -------
+ output: ModelOutput
+ A named tuple of type `ModelOutput`
+ '''
+
+ # If no shape and pose parameters are passed along, then use the
+ # ones from the module
+ global_orient = (global_orient if global_orient is not None else
+ self.global_orient)
+ jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose
+ neck_pose = neck_pose if neck_pose is not None else self.neck_pose
+
+ leye_pose = leye_pose if leye_pose is not None else self.leye_pose
+ reye_pose = reye_pose if reye_pose is not None else self.reye_pose
+
+ betas = betas if betas is not None else self.betas
+ expression = expression if expression is not None else self.expression
+
+ apply_trans = transl is not None or hasattr(self, 'transl')
+ if transl is None:
+ if hasattr(self, 'transl'):
+ transl = self.transl
+
+ full_pose = torch.cat(
+ [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1)
+
+ batch_size = max(betas.shape[0], global_orient.shape[0],
+ jaw_pose.shape[0])
+ # Concatenate the shape and expression coefficients
+ scale = int(batch_size / betas.shape[0])
+ if scale > 1:
+ betas = betas.expand(scale, -1)
+ shape_components = torch.cat([betas, expression], dim=-1)
+ shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+ vertices, joints = lbs(shape_components, full_pose, self.v_template,
+ shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=pose2rot,
+ )
+
+ lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+ dim=0).expand(batch_size, -1).contiguous()
+ lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+ batch_size, 1, 1)
+ if self.use_face_contour:
+ lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+ vertices, full_pose, self.dynamic_lmk_faces_idx,
+ self.dynamic_lmk_bary_coords,
+ self.neck_kin_chain,
+ pose2rot=True,
+ )
+ dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+ lmk_faces_idx = torch.cat([lmk_faces_idx,
+ dyn_lmk_faces_idx], 1)
+ lmk_bary_coords = torch.cat(
+ [lmk_bary_coords.expand(batch_size, -1, -1),
+ dyn_lmk_bary_coords], 1)
+
+ landmarks = vertices2landmarks(vertices, self.faces_tensor,
+ lmk_faces_idx,
+ lmk_bary_coords)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Add the landmarks to the joints
+ joints = torch.cat([joints, landmarks], dim=1)
+
+ # Map the joints to the current dataset
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+ if apply_trans:
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = FLAMEOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ expression=expression,
+ global_orient=global_orient,
+ neck_pose=neck_pose,
+ jaw_pose=jaw_pose,
+ full_pose=full_pose if return_full_pose else None)
+ return output
+
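+# Editor's note: a hedged usage sketch for the FLAME class above. The folder is
+# a placeholder and must also contain flame_static_embedding.pkl (and
+# flame_dynamic_embedding.npy when use_face_contour=True).
+#
+#   import torch
+#   flame = FLAME(model_path='./models/flame', gender='neutral',
+#                 num_expression_coeffs=10, use_face_contour=False)
+#   out = flame(expression=0.3 * torch.randn(1, 10),
+#               jaw_pose=torch.tensor([[0.2, 0.0, 0.0]]))
+#   # out.vertices: (1, 5023, 3); out.joints appends the 51 static landmarks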
+
+class FLAMELayer(FLAME):
+ def __init__(self, *args, **kwargs) -> None:
+ ''' FLAME as a layer model constructor '''
+ super(FLAMELayer, self).__init__(
+ create_betas=False,
+ create_expression=False,
+ create_global_orient=False,
+ create_neck_pose=False,
+ create_jaw_pose=False,
+ create_leye_pose=False,
+ create_reye_pose=False,
+ *args,
+ **kwargs)
+
+ def forward(
+ self,
+ betas: Optional[Tensor] = None,
+ global_orient: Optional[Tensor] = None,
+ neck_pose: Optional[Tensor] = None,
+ transl: Optional[Tensor] = None,
+ expression: Optional[Tensor] = None,
+ jaw_pose: Optional[Tensor] = None,
+ leye_pose: Optional[Tensor] = None,
+ reye_pose: Optional[Tensor] = None,
+ return_verts: bool = True,
+ return_full_pose: bool = False,
+ pose2rot: bool = True,
+ **kwargs
+ ) -> FLAMEOutput:
+ '''
+ Forward pass for the FLAME model
+ Parameters
+ ----------
+ global_orient: torch.tensor, optional, shape Bx3x3
+ Global rotation of the body. Useful if someone wishes to
+ predict this with an external model. It is expected to be in
+ rotation matrix format. (default=None)
+ betas: torch.tensor, optional, shape BxN_b
+ Shape parameters. For example, it can be used if shape parameters
+ `betas` are predicted from some external model.
+ (default=None)
+ expression: torch.tensor, optional, shape BxN_e
+ If given, ignore the member variable `expression` and use it
+ instead. For example, it can be used if expression parameters
+ `expression` are predicted from some external model.
+ jaw_pose: torch.tensor, optional, shape Bx3x3
+ Jaw pose. It should contain joint rotations in
+ rotation matrix format.
+ transl: torch.tensor, optional, shape Bx3
+ Translation vector of the body.
+ For example, it can be used if the translation
+ `transl` is predicted from some external model.
+ (default=None)
+ return_verts: bool, optional
+ Return the vertices. (default=True)
+ return_full_pose: bool, optional
+ Returns the full axis-angle pose vector (default=False)
+ Returns
+ -------
+ output: ModelOutput
+ A named tuple of type `ModelOutput`
+ '''
+ device, dtype = self.shapedirs.device, self.shapedirs.dtype
+ if global_orient is None:
+ batch_size = 1
+ global_orient = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ else:
+ batch_size = global_orient.shape[0]
+ if neck_pose is None:
+ neck_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, 1, -1, -1).contiguous()
+ if jaw_pose is None:
+ jaw_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if leye_pose is None:
+ leye_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if reye_pose is None:
+ reye_pose = torch.eye(3, device=device, dtype=dtype).view(
+ 1, 1, 3, 3).expand(batch_size, -1, -1, -1).contiguous()
+ if betas is None:
+ betas = torch.zeros([batch_size, self.num_betas],
+ dtype=dtype, device=device)
+ if expression is None:
+ expression = torch.zeros([batch_size, self.num_expression_coeffs],
+ dtype=dtype, device=device)
+ if transl is None:
+ transl = torch.zeros([batch_size, 3], dtype=dtype, device=device)
+
+ full_pose = torch.cat(
+ [global_orient, neck_pose, jaw_pose, leye_pose, reye_pose], dim=1)
+
+ shape_components = torch.cat([betas, expression], dim=-1)
+ shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)
+
+ vertices, joints = lbs(shape_components, full_pose, self.v_template,
+ shapedirs, self.posedirs,
+ self.J_regressor, self.parents,
+ self.lbs_weights, pose2rot=False,
+ )
+
+ lmk_faces_idx = self.lmk_faces_idx.unsqueeze(
+ dim=0).expand(batch_size, -1).contiguous()
+ lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(
+ batch_size, 1, 1)
+ if self.use_face_contour:
+ lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(
+ vertices, full_pose, self.dynamic_lmk_faces_idx,
+ self.dynamic_lmk_bary_coords,
+ self.neck_kin_chain,
+ pose2rot=False,
+ )
+ dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords
+ lmk_faces_idx = torch.cat([lmk_faces_idx,
+ dyn_lmk_faces_idx], 1)
+ lmk_bary_coords = torch.cat(
+ [lmk_bary_coords.expand(batch_size, -1, -1),
+ dyn_lmk_bary_coords], 1)
+
+ landmarks = vertices2landmarks(vertices, self.faces_tensor,
+ lmk_faces_idx,
+ lmk_bary_coords)
+
+ # Add any extra joints that might be needed
+ joints = self.vertex_joint_selector(vertices, joints)
+ # Add the landmarks to the joints
+ joints = torch.cat([joints, landmarks], dim=1)
+
+ # Map the joints to the current dataset
+ if self.joint_mapper is not None:
+ joints = self.joint_mapper(joints=joints, vertices=vertices)
+
+ joints += transl.unsqueeze(dim=1)
+ vertices += transl.unsqueeze(dim=1)
+
+ output = FLAMEOutput(vertices=vertices if return_verts else None,
+ joints=joints,
+ betas=betas,
+ expression=expression,
+ global_orient=global_orient,
+ neck_pose=neck_pose,
+ jaw_pose=jaw_pose,
+ full_pose=full_pose if return_full_pose else None)
+ return output
+
+
+def build_layer(
+ model_path: str,
+ model_type: str = 'smpl',
+ **kwargs
+) -> Union[SMPLLayer, SMPLHLayer, SMPLXLayer, MANOLayer, FLAMELayer]:
+ ''' Method for creating a model from a path and a model type
+ Parameters
+ ----------
+ model_path: str
+ Either the path to the model you wish to load or a folder,
+ where each subfolder contains the different model types, i.e.:
+ model_path:
+ |
+ |-- smpl
+ |-- SMPL_FEMALE
+ |-- SMPL_NEUTRAL
+ |-- SMPL_MALE
+ |-- smplh
+ |-- SMPLH_FEMALE
+ |-- SMPLH_MALE
+ |-- smplx
+ |-- SMPLX_FEMALE
+ |-- SMPLX_NEUTRAL
+ |-- SMPLX_MALE
+ |-- mano
+ |-- MANO_RIGHT
+ |-- MANO_LEFT
+ |-- flame
+ |-- FLAME_FEMALE
+ |-- FLAME_MALE
+ |-- FLAME_NEUTRAL
+ model_type: str, optional
+ When model_path is a folder, then this parameter specifies the
+ type of model to be loaded
+ **kwargs: dict
+ Keyword arguments
+ Returns
+ -------
+ body_model: nn.Module
+ The PyTorch module that implements the corresponding body model
+ Raises
+ ------
+ ValueError: In case the model type is not one of SMPL, SMPLH,
+ SMPLX, MANO or FLAME
+ '''
+
+ if osp.isdir(model_path):
+ model_path = os.path.join(model_path, model_type)
+ else:
+ model_type = osp.basename(model_path).split('_')[0].lower()
+
+ if model_type.lower() == 'smpl':
+ return SMPLLayer(model_path, **kwargs)
+ elif model_type.lower() == 'smplh':
+ return SMPLHLayer(model_path, **kwargs)
+ elif model_type.lower() == 'smplx':
+ return SMPLXLayer(model_path, **kwargs)
+ elif 'mano' in model_type.lower():
+ return MANOLayer(model_path, **kwargs)
+ elif 'flame' in model_type.lower():
+ return FLAMELayer(model_path, **kwargs)
+ else:
+ raise ValueError(f'Unknown model type {model_type}, exiting!')
+
+
+def create(
+ model_path: str,
+ model_type: str = 'smpl',
+ **kwargs
+) -> Union[SMPL, SMPLH, SMPLX, MANO, FLAME]:
+ ''' Method for creating a model from a path and a model type
+ Parameters
+ ----------
+ model_path: str
+ Either the path to the model you wish to load or a folder,
+ where each subfolder contains the different model types, i.e.:
+ model_path:
+ |
+ |-- smpl
+ |-- SMPL_FEMALE
+ |-- SMPL_NEUTRAL
+ |-- SMPL_MALE
+ |-- smplh
+ |-- SMPLH_FEMALE
+ |-- SMPLH_MALE
+ |-- smplx
+ |-- SMPLX_FEMALE
+ |-- SMPLX_NEUTRAL
+ |-- SMPLX_MALE
+ |-- mano
+ |-- MANO_RIGHT
+ |-- MANO_LEFT
+ model_type: str, optional
+ When model_path is a folder, then this parameter specifies the
+ type of model to be loaded
+ **kwargs: dict
+ Keyword arguments
+ Returns
+ -------
+ body_model: nn.Module
+ The PyTorch module that implements the corresponding body model
+ Raises
+ ------
+ ValueError: In case the model type is not one of SMPL, SMPLH,
+ SMPLX, MANO or FLAME
+ '''
+
+ if osp.isdir(model_path):
+ model_path = os.path.join(model_path, model_type)
+ else:
+ model_type = osp.basename(model_path).split('_')[0].lower()
+
+ if model_type.lower() == 'smpl':
+ return SMPL(model_path, **kwargs)
+ elif model_type.lower() == 'smplh':
+ return SMPLH(model_path, **kwargs)
+ elif model_type.lower() == 'smplx':
+ return SMPLX(model_path, **kwargs)
+ elif 'mano' in model_type.lower():
+ return MANO(model_path, **kwargs)
+ elif 'flame' in model_type.lower():
+ return FLAME(model_path, **kwargs)
+ else:
+ raise ValueError(f'Unknown model type {model_type}, exiting!')
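+
+# Editor's note: a hedged usage sketch for the two factory functions above. The
+# ./models folder is a placeholder laid out as in the docstrings (one subfolder
+# per model type).
+#
+#   import torch
+#   model = create('./models', model_type='smplx', gender='neutral')
+#   layer = build_layer('./models', model_type='smplx')
+#   out = model(betas=torch.zeros(1, model.num_betas), return_verts=True)
+#   print(type(out).__name__, out.vertices.shape)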
diff --git a/lib /smplx /joint_names.py b/lib /smplx /joint_names.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1ee746bb1bb28e9b966eaf5c5f7c87ba26ac42
--- /dev/null
+++ b/lib /smplx /joint_names.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+JOINT_NAMES = [
+ 'pelvis',
+ 'left_hip',
+ 'right_hip',
+ 'spine1',
+ 'left_knee',
+ 'right_knee',
+ 'spine2',
+ 'left_ankle',
+ 'right_ankle',
+ 'spine3',
+ 'left_foot',
+ 'right_foot',
+ 'neck',
+ 'left_collar',
+ 'right_collar',
+ 'head',
+ 'left_shoulder',
+ 'right_shoulder',
+ 'left_elbow',
+ 'right_elbow',
+ 'left_wrist',
+ 'right_wrist',
+ 'jaw',
+ 'left_eye_smplhf',
+ 'right_eye_smplhf',
+ 'left_index1',
+ 'left_index2',
+ 'left_index3',
+ 'left_middle1',
+ 'left_middle2',
+ 'left_middle3',
+ 'left_pinky1',
+ 'left_pinky2',
+ 'left_pinky3',
+ 'left_ring1',
+ 'left_ring2',
+ 'left_ring3',
+ 'left_thumb1',
+ 'left_thumb2',
+ 'left_thumb3',
+ 'right_index1',
+ 'right_index2',
+ 'right_index3',
+ 'right_middle1',
+ 'right_middle2',
+ 'right_middle3',
+ 'right_pinky1',
+ 'right_pinky2',
+ 'right_pinky3',
+ 'right_ring1',
+ 'right_ring2',
+ 'right_ring3',
+ 'right_thumb1',
+ 'right_thumb2',
+ 'right_thumb3',
+ 'nose',
+ 'right_eye',
+ 'left_eye',
+ 'right_ear',
+ 'left_ear',
+ 'left_big_toe',
+ 'left_small_toe',
+ 'left_heel',
+ 'right_big_toe',
+ 'right_small_toe',
+ 'right_heel',
+ 'left_thumb',
+ 'left_index',
+ 'left_middle',
+ 'left_ring',
+ 'left_pinky',
+ 'right_thumb',
+ 'right_index',
+ 'right_middle',
+ 'right_ring',
+ 'right_pinky',
+ 'right_eye_brow1',
+ 'right_eye_brow2',
+ 'right_eye_brow3',
+ 'right_eye_brow4',
+ 'right_eye_brow5',
+ 'left_eye_brow5',
+ 'left_eye_brow4',
+ 'left_eye_brow3',
+ 'left_eye_brow2',
+ 'left_eye_brow1',
+ 'nose1',
+ 'nose2',
+ 'nose3',
+ 'nose4',
+ 'right_nose_2',
+ 'right_nose_1',
+ 'nose_middle',
+ 'left_nose_1',
+ 'left_nose_2',
+ 'right_eye1',
+ 'right_eye2',
+ 'right_eye3',
+ 'right_eye4',
+ 'right_eye5',
+ 'right_eye6',
+ 'left_eye4',
+ 'left_eye3',
+ 'left_eye2',
+ 'left_eye1',
+ 'left_eye6',
+ 'left_eye5',
+ 'right_mouth_1',
+ 'right_mouth_2',
+ 'right_mouth_3',
+ 'mouth_top',
+ 'left_mouth_3',
+ 'left_mouth_2',
+ 'left_mouth_1',
+ 'left_mouth_5', # 59 in OpenPose output
+ 'left_mouth_4', # 58 in OpenPose output
+ 'mouth_bottom',
+ 'right_mouth_4',
+ 'right_mouth_5',
+ 'right_lip_1',
+ 'right_lip_2',
+ 'lip_top',
+ 'left_lip_2',
+ 'left_lip_1',
+ 'left_lip_3',
+ 'lip_bottom',
+ 'right_lip_3',
+ # Face contour
+ 'right_contour_1',
+ 'right_contour_2',
+ 'right_contour_3',
+ 'right_contour_4',
+ 'right_contour_5',
+ 'right_contour_6',
+ 'right_contour_7',
+ 'right_contour_8',
+ 'contour_middle',
+ 'left_contour_8',
+ 'left_contour_7',
+ 'left_contour_6',
+ 'left_contour_5',
+ 'left_contour_4',
+ 'left_contour_3',
+ 'left_contour_2',
+ 'left_contour_1',
+]
\ No newline at end of file
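+# Editor's note: a hedged sketch of how this list is typically consumed, namely
+# as a name-to-index map for the joint/landmark array produced by SMPL-X:
+#
+#   JOINT_IDS = {name: idx for idx, name in enumerate(JOINT_NAMES)}
+#   JOINT_IDS['left_wrist']   # 20
+#   JOINT_IDS['nose']         # 55, first of the extra face/foot/fingertip keypoints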
diff --git a/lib /smplx /lbs.py b/lib /smplx /lbs.py
new file mode 100644
index 0000000000000000000000000000000000000000..bab88d266c7078d7a341b7dcd9afec15b89fce77
--- /dev/null
+++ b/lib /smplx /lbs.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+from typing import Tuple, List, Optional
+import numpy as np
+
+import torch
+import torch.nn.functional as F
+
+from .utils import rot_mat_to_euler, Tensor
+
+
+def find_dynamic_lmk_idx_and_bcoords(
+ vertices: Tensor,
+ pose: Tensor,
+ dynamic_lmk_faces_idx: Tensor,
+ dynamic_lmk_b_coords: Tensor,
+ neck_kin_chain: List[int],
+ pose2rot: bool = True,
+) -> Tuple[Tensor, Tensor]:
+ ''' Compute the faces, barycentric coordinates for the dynamic landmarks
+ To do so, we first compute the rotation of the neck around the y-axis
+ and then use a pre-computed look-up table to find the faces and the
+ barycentric coordinates that will be used.
+ Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
+ for providing the original TensorFlow implementation and for the LUT.
+ Parameters
+ ----------
+ vertices: torch.tensor BxVx3, dtype = torch.float32
+ The tensor of input vertices
+ pose: torch.tensor Bx(Jx3), dtype = torch.float32
+ The current pose of the body model
+ dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
+ The look-up table from neck rotation to faces
+ dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
+ The look-up table from neck rotation to barycentric coordinates
+ neck_kin_chain: list
+ A python list that contains the indices of the joints that form the
+ kinematic chain of the neck.
+ dtype: torch.dtype, optional
+ Returns
+ -------
+ dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
+ A tensor of size BxL that contains the indices of the faces that
+ will be used to compute the current dynamic landmarks.
+ dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
+ A tensor of size BxLx3 that contains the barycentric coordinates
+ that will be used to compute the current dynamic landmarks.
+ '''
+
+ dtype = vertices.dtype
+ batch_size = vertices.shape[0]
+
+ if pose2rot:
+ aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
+ neck_kin_chain)
+ rot_mats = batch_rodrigues(
+ aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3)
+ else:
+ rot_mats = torch.index_select(
+ pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain)
+
+ rel_rot_mat = torch.eye(
+ 3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat(
+ batch_size, 1, 1)
+ for idx in range(len(neck_kin_chain)):
+ rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
+
+ y_rot_angle = torch.round(
+ torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
+ max=39)).to(dtype=torch.long)
+ neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
+ mask = y_rot_angle.lt(-39).to(dtype=torch.long)
+ neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
+ y_rot_angle = (neg_mask * neg_vals +
+ (1 - neg_mask) * y_rot_angle)
+
+ dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
+ 0, y_rot_angle)
+ dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
+ 0, y_rot_angle)
+
+ return dyn_lmk_faces_idx, dyn_lmk_b_coords
+
+
+def vertices2landmarks(
+ vertices: Tensor,
+ faces: Tensor,
+ lmk_faces_idx: Tensor,
+ lmk_bary_coords: Tensor
+) -> Tensor:
+ ''' Calculates landmarks by barycentric interpolation
+ Parameters
+ ----------
+ vertices: torch.tensor BxVx3, dtype = torch.float32
+ The tensor of input vertices
+ faces: torch.tensor Fx3, dtype = torch.long
+ The faces of the mesh
+ lmk_faces_idx: torch.tensor L, dtype = torch.long
+ The tensor with the indices of the faces used to calculate the
+ landmarks.
+ lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
+ The tensor of barycentric coordinates that are used to interpolate
+ the landmarks
+ Returns
+ -------
+ landmarks: torch.tensor BxLx3, dtype = torch.float32
+ The coordinates of the landmarks for each mesh in the batch
+ '''
+ # Extract the indices of the vertices for each face
+ # BxLx3
+ batch_size, num_verts = vertices.shape[:2]
+ device = vertices.device
+
+ lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
+ batch_size, -1, 3)
+
+ lmk_faces += torch.arange(
+ batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
+
+ lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
+ batch_size, -1, 3, 3)
+
+ landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
+ return landmarks
+
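+# Editor's note: a tiny hedged example of the barycentric interpolation above on
+# a toy 4-vertex mesh (all values below are made up for illustration):
+#
+#   import torch
+#   verts = torch.rand(2, 4, 3)                    # batch of 2 meshes
+#   faces = torch.tensor([[0, 1, 2], [1, 2, 3]])   # 2 triangles
+#   lmk_faces_idx = torch.tensor([[0], [1]])       # one landmark face per mesh
+#   lmk_bary = torch.full((2, 1, 3), 1.0 / 3.0)    # barycenter of each face
+#   lmks = vertices2landmarks(verts, faces, lmk_faces_idx, lmk_bary)
+#   # lmks: (2, 1, 3), the centroid of the selected triangle in each mesh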
+
+def lbs(
+ betas: Tensor,
+ pose: Tensor,
+ v_template: Tensor,
+ shapedirs: Tensor,
+ posedirs: Tensor,
+ J_regressor: Tensor,
+ parents: Tensor,
+ lbs_weights: Tensor,
+ pose2rot: bool = True,
+ return_transformation: bool = False,
+) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
+ ''' Performs Linear Blend Skinning with the given shape and pose parameters
+ Parameters
+ ----------
+ betas : torch.tensor BxNB
+ The tensor of shape parameters
+ pose : torch.tensor Bx(J + 1) * 3
+ The pose parameters in axis-angle format
+ v_template : torch.tensor Vx3
+ The template mesh that will be deformed
+ shapedirs : torch.tensor Vx3xNB
+ The tensor of PCA shape displacements
+ posedirs : torch.tensor Px(V * 3)
+ The tensor of pose blend shape displacements
+ J_regressor : torch.tensor JxV
+ The regressor array that is used to calculate the joints from
+ the position of the vertices
+ parents: torch.tensor J
+ The array that describes the kinematic tree for the model
+ lbs_weights: torch.tensor V x (J + 1)
+ The linear blend skinning weights that represent how much the
+ rotation matrix of each part affects each vertex
+ pose2rot: bool, optional
+ Flag on whether to convert the input pose tensor to rotation
+ matrices. The default value is True. If False, then the pose tensor
+ should already contain rotation matrices and have a size of
+ Bx(J + 1)x9
+ dtype: torch.dtype, optional
+ Returns
+ -------
+ verts: torch.tensor BxVx3
+ The vertices of the mesh after applying the shape and pose
+ displacements.
+ joints: torch.tensor BxJx3
+ The joints of the model
+ '''
+
+ batch_size = max(betas.shape[0], pose.shape[0])
+ device, dtype = betas.device, betas.dtype
+
+ # Add shape contribution
+ v_shaped = v_template + blend_shapes(betas, shapedirs)
+
+ # Get the joints
+ # NxJx3 array
+ J = vertices2joints(J_regressor, v_shaped)
+
+ # 3. Add pose blend shapes
+ # N x J x 3 x 3
+ ident = torch.eye(3, dtype=dtype, device=device)
+ if pose2rot:
+ rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
+ [batch_size, -1, 3, 3])
+
+ pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
+ # (N x P) x (P, V * 3) -> N x V x 3
+ pose_offsets = torch.matmul(
+ pose_feature, posedirs).view(batch_size, -1, 3)
+ else:
+ pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
+ rot_mats = pose.view(batch_size, -1, 3, 3)
+
+ pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
+ posedirs).view(batch_size, -1, 3)
+
+ v_posed = pose_offsets + v_shaped
+ # 4. Get the global joint location
+ J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
+
+ # 5. Do skinning:
+ # W is N x V x (J + 1)
+ W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
+ # (N x V x (J + 1)) x (N x (J + 1) x 16)
+ num_joints = J_regressor.shape[0]
+ T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
+ .view(batch_size, -1, 4, 4)
+
+ homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
+ dtype=dtype, device=device)
+ v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
+ v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
+
+ verts = v_homo[:, :, :3, 0]
+
+ if return_transformation:
+ return verts, J_transformed, A, T
+
+ return verts, J_transformed
+
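+# Editor's note: a hedged shape check for lbs() on a toy 2-joint, 8-vertex
+# "model" built from random data (shapedirs is Vx3xNB, posedirs is
+# ((J-1)*9)x(V*3), J_regressor is JxV, lbs_weights is VxJ):
+#
+#   import torch
+#   B, V, J, NB = 1, 8, 2, 3
+#   betas = torch.zeros(B, NB)
+#   pose = torch.zeros(B, J * 3)                  # axis-angle: root + 1 child
+#   v_template = torch.rand(V, 3)
+#   shapedirs = 0.01 * torch.rand(V, 3, NB)
+#   posedirs = 0.01 * torch.rand((J - 1) * 9, V * 3)
+#   J_regressor = torch.rand(J, V); J_regressor /= J_regressor.sum(1, keepdim=True)
+#   lbs_weights = torch.rand(V, J); lbs_weights /= lbs_weights.sum(1, keepdim=True)
+#   parents = torch.tensor([-1, 0])
+#   verts, joints = lbs(betas, pose, v_template, shapedirs, posedirs,
+#                       J_regressor, parents, lbs_weights)
+#   # verts: (1, 8, 3), joints: (1, 2, 3)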
+
+def vertices2joints(J_regressor: Tensor, vertices: Tensor) -> Tensor:
+ ''' Calculates the 3D joint locations from the vertices
+ Parameters
+ ----------
+ J_regressor : torch.tensor JxV
+ The regressor array that is used to calculate the joints from the
+ position of the vertices
+ vertices : torch.tensor BxVx3
+ The tensor of mesh vertices
+ Returns
+ -------
+ torch.tensor BxJx3
+ The location of the joints
+ '''
+
+ return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
+
+
+def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor:
+ ''' Calculates the per vertex displacement due to the blend shapes
+ Parameters
+ ----------
+ betas : torch.tensor Bx(num_betas)
+ Blend shape coefficients
+ shape_disps: torch.tensor Vx3x(num_betas)
+ Blend shapes
+ Returns
+ -------
+ torch.tensor BxVx3
+ The per-vertex displacement due to shape deformation
+ '''
+
+ # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
+ # i.e. Multiply each shape displacement by its corresponding beta and
+ # then sum them.
+ blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
+ return blend_shape
+
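+# Editor's note: a hedged numeric check of the einsum above; each beta scales
+# its displacement field and the contributions are summed:
+#
+#   import torch
+#   betas = torch.tensor([[1.0, 2.0]])    # B=1, two shape coefficients
+#   disps = torch.rand(5, 3, 2)           # V=5 vertices
+#   out = blend_shapes(betas, disps)      # (1, 5, 3)
+#   assert torch.allclose(out[0], 1.0 * disps[..., 0] + 2.0 * disps[..., 1])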
+
+def batch_rodrigues(
+ rot_vecs: Tensor,
+ epsilon: float = 1e-8,
+) -> Tensor:
+ ''' Calculates the rotation matrices for a batch of rotation vectors
+ Parameters
+ ----------
+ rot_vecs: torch.tensor Nx3
+ array of N axis-angle vectors
+ Returns
+ -------
+ R: torch.tensor Nx3x3
+ The rotation matrices for the given axis-angle parameters
+ '''
+
+ batch_size = rot_vecs.shape[0]
+ device, dtype = rot_vecs.device, rot_vecs.dtype
+
+ angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
+ rot_dir = rot_vecs / angle
+
+ cos = torch.unsqueeze(torch.cos(angle), dim=1)
+ sin = torch.unsqueeze(torch.sin(angle), dim=1)
+
+ # Bx1 arrays
+ rx, ry, rz = torch.split(rot_dir, 1, dim=1)
+
+ zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
+ K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
+ .view((batch_size, 3, 3))
+
+ ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
+ rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
+ return rot_mat
+
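+# Editor's note: a hedged sanity check for the Rodrigues formula above; a
+# 90-degree rotation about the z-axis should map the x-axis onto the y-axis:
+#
+#   import math, torch
+#   R = batch_rodrigues(torch.tensor([[0.0, 0.0, math.pi / 2]]))   # (1, 3, 3)
+#   assert torch.allclose(R[0] @ torch.tensor([1.0, 0.0, 0.0]),
+#                         torch.tensor([0.0, 1.0, 0.0]), atol=1e-6)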
+
+def transform_mat(R: Tensor, t: Tensor) -> Tensor:
+ ''' Creates a batch of transformation matrices
+ Args:
+ - R: Bx3x3 array of a batch of rotation matrices
+ - t: Bx3x1 array of a batch of translation vectors
+ Returns:
+ - T: Bx4x4 Transformation matrix
+ '''
+ # No padding left or right, only add an extra row
+ return torch.cat([F.pad(R, [0, 0, 0, 1]),
+ F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
+
+
+def batch_rigid_transform(
+ rot_mats: Tensor,
+ joints: Tensor,
+ parents: Tensor,
+ dtype=torch.float32
+) -> Tuple[Tensor, Tensor]:
+ """
+ Applies a batch of rigid transformations to the joints
+ Parameters
+ ----------
+ rot_mats : torch.tensor BxNx3x3
+ Tensor of rotation matrices
+ joints : torch.tensor BxNx3
+ Locations of joints
+ parents : torch.tensor BxN
+ The kinematic tree of each object
+ dtype : torch.dtype, optional:
+ The data type of the created tensors, the default is torch.float32
+ Returns
+ -------
+ posed_joints : torch.tensor BxNx3
+ The locations of the joints after applying the pose rotations
+ rel_transforms : torch.tensor BxNx4x4
+ The relative (with respect to the root joint) rigid transformations
+ for all the joints
+ """
+
+ joints = torch.unsqueeze(joints, dim=-1)
+
+ rel_joints = joints.clone()
+ rel_joints[:, 1:] -= joints[:, parents[1:]]
+
+ transforms_mat = transform_mat(
+ rot_mats.reshape(-1, 3, 3),
+ rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
+
+ transform_chain = [transforms_mat[:, 0]]
+ for i in range(1, parents.shape[0]):
+ # Subtract the joint location at the rest pose
+ # No need for rotation, since it's identity when at rest
+ curr_res = torch.matmul(transform_chain[parents[i]],
+ transforms_mat[:, i])
+ transform_chain.append(curr_res)
+
+ transforms = torch.stack(transform_chain, dim=1)
+
+ # The last column of the transformations contains the posed joints
+ posed_joints = transforms[:, :, :3, 3]
+
+ joints_homogen = F.pad(joints, [0, 0, 0, 1])
+
+ rel_transforms = transforms - F.pad(
+ torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
+
+ return posed_joints, rel_transforms
\ No newline at end of file
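+# Editor's note: a hedged two-joint example for batch_rigid_transform above;
+# with identity rotations the posed joints stay at their rest-pose locations:
+#
+#   import torch
+#   R = torch.eye(3).view(1, 1, 3, 3).repeat(1, 2, 1, 1)       # (B=1, N=2, 3, 3)
+#   J = torch.tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])      # root, child at x=1
+#   parents = torch.tensor([-1, 0])
+#   posed, rel_T = batch_rigid_transform(R, J, parents)
+#   assert torch.allclose(posed, J)       # rel_T: (1, 2, 4, 4) relative transforms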
diff --git a/lib /smplx /utils.py b/lib /smplx /utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd0c7cdec8b4eadace59a3644767c5e27750de48
--- /dev/null
+++ b/lib /smplx /utils.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from typing import NewType, Union, Optional
+from dataclasses import dataclass, asdict, fields
+import numpy as np
+import torch
+
+Tensor = NewType('Tensor', torch.Tensor)
+Array = NewType('Array', np.ndarray)
+
+
+@dataclass
+class ModelOutput:
+ vertices: Optional[Tensor] = None
+ joints: Optional[Tensor] = None
+ full_pose: Optional[Tensor] = None
+ global_orient: Optional[Tensor] = None
+ transl: Optional[Tensor] = None
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def get(self, key, default=None):
+ return getattr(self, key, default)
+
+ def __iter__(self):
+ return self.keys()
+
+ def keys(self):
+ keys = [t.name for t in fields(self)]
+ return iter(keys)
+
+ def values(self):
+ values = [getattr(self, t.name) for t in fields(self)]
+ return iter(values)
+
+ def items(self):
+ data = [(t.name, getattr(self, t.name)) for t in fields(self)]
+ return iter(data)
+
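+# Editor's note: a hedged sketch of the dict-like access provided by the methods
+# above (the same interface is inherited by every *Output dataclass below):
+#
+#   import torch
+#   out = ModelOutput(vertices=torch.rand(1, 10, 3))
+#   out['vertices'].shape        # torch.Size([1, 10, 3])
+#   out.get('joints') is None    # True, unset fields default to None
+#   list(out.keys())             # ['vertices', 'joints', 'full_pose', 'global_orient', 'transl']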
+
+@dataclass
+class SMPLOutput(ModelOutput):
+ betas: Optional[Tensor] = None
+ body_pose: Optional[Tensor] = None
+
+
+@dataclass
+class SMPLHOutput(SMPLOutput):
+ left_hand_pose: Optional[Tensor] = None
+ right_hand_pose: Optional[Tensor] = None
+ transl: Optional[Tensor] = None
+
+
+@dataclass
+class SMPLXOutput(SMPLHOutput):
+ expression: Optional[Tensor] = None
+ jaw_pose: Optional[Tensor] = None
+ joint_transformation: Optional[Tensor] = None
+ vertex_transformation: Optional[Tensor] = None
+
+
+@dataclass
+class MANOOutput(ModelOutput):
+ betas: Optional[Tensor] = None
+ hand_pose: Optional[Tensor] = None
+
+
+@dataclass
+class FLAMEOutput(ModelOutput):
+ betas: Optional[Tensor] = None
+ expression: Optional[Tensor] = None
+ jaw_pose: Optional[Tensor] = None
+ neck_pose: Optional[Tensor] = None
+
+
+def find_joint_kin_chain(joint_id, kinematic_tree):
+ kin_chain = []
+ curr_idx = joint_id
+ while curr_idx != -1:
+ kin_chain.append(curr_idx)
+ curr_idx = kinematic_tree[curr_idx]
+ return kin_chain
+
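+# Editor's note: a hedged toy example; the function walks from a joint up to the
+# root of a kinematic tree (kinematic_tree[i] is the parent of joint i, -1 marks
+# the root):
+#
+#   parents = [-1, 0, 0, 1, 2]
+#   find_joint_kin_chain(4, parents)   # [4, 2, 0]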
+
+def to_tensor(
+ array: Union[Array, Tensor], dtype=torch.float32
+) -> Tensor:
+ if torch.is_tensor(array):
+ return array
+ else:
+ return torch.tensor(array, dtype=dtype)
+
+
+class Struct(object):
+ def __init__(self, **kwargs):
+ for key, val in kwargs.items():
+ setattr(self, key, val)
+
+
+def to_np(array, dtype=np.float32):
+ if 'scipy.sparse' in str(type(array)):
+ array = array.todense()
+ return np.array(array, dtype=dtype)
+
+
+def rot_mat_to_euler(rot_mats):
+ # Extracts the rotation angle around the y-axis from a rotation matrix
+ # Careful with extreme cases of Euler angles like [0.0, pi, 0.0]
+
+ sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
+ rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
+ return torch.atan2(-rot_mats[:, 2, 0], sy)
\ No newline at end of file
diff --git a/lib /smplx /vertex_joint_selector.py b/lib /smplx /vertex_joint_selector.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b8298bd5e087731f86c1c699703b5219e046c5c
--- /dev/null
+++ b/lib /smplx /vertex_joint_selector.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+# holder of all proprietary rights on this computer program.
+# You can only use this computer program if you have closed
+# a license agreement with MPG or you get the right to use the computer
+# program from someone who is authorized to grant you that right.
+# Any use of the computer program without a valid license is prohibited and
+# liable to prosecution.
+#
+# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+# for Intelligent Systems. All rights reserved.
+#
+# Contact: ps-license@tuebingen.mpg.de
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+import numpy as np
+
+import torch
+import torch.nn as nn
+
+from .utils import to_tensor
+
+
+class VertexJointSelector(nn.Module):
+
+ def __init__(self, vertex_ids=None,
+ use_hands=True,
+ use_feet_keypoints=True, **kwargs):
+ super(VertexJointSelector, self).__init__()
+
+ extra_joints_idxs = []
+
+ face_keyp_idxs = np.array([
+ vertex_ids['nose'],
+ vertex_ids['reye'],
+ vertex_ids['leye'],
+ vertex_ids['rear'],
+ vertex_ids['lear']], dtype=np.int64)
+
+ extra_joints_idxs = np.concatenate([extra_joints_idxs,
+ face_keyp_idxs])
+
+ if use_feet_keypoints:
+ feet_keyp_idxs = np.array([vertex_ids['LBigToe'],
+ vertex_ids['LSmallToe'],
+ vertex_ids['LHeel'],
+ vertex_ids['RBigToe'],
+ vertex_ids['RSmallToe'],
+ vertex_ids['RHeel']], dtype=np.int32)
+
+ extra_joints_idxs = np.concatenate(
+ [extra_joints_idxs, feet_keyp_idxs])
+
+ if use_hands:
+ self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
+
+ tips_idxs = []
+ for hand_id in ['l', 'r']:
+ for tip_name in self.tip_names:
+ tips_idxs.append(vertex_ids[hand_id + tip_name])
+
+ extra_joints_idxs = np.concatenate(
+ [extra_joints_idxs, tips_idxs])
+
+ self.register_buffer('extra_joints_idxs',
+ to_tensor(extra_joints_idxs, dtype=torch.long))
+
+ def forward(self, vertices, joints):
+ extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)
+ joints = torch.cat([joints, extra_joints], dim=1)
+
+ return joints
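+
+# Editor's note: a hedged usage sketch. The vertex_ids mapping is assumed to come
+# from the package's vertex id tables (body_models.py passes VERTEX_IDS['smplh']);
+# the vertices/joints below are random placeholders on the SMPL topology.
+#
+#   import torch
+#   selector = VertexJointSelector(vertex_ids=VERTEX_IDS['smplh'],
+#                                  use_hands=True, use_feet_keypoints=True)
+#   joints = selector(torch.rand(1, 6890, 3), torch.rand(1, 52, 3))
+#   # joints: (1, 73, 3) = 52 input joints + 5 face, 6 feet and 10 fingertip vertices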
diff --git a/packages.txt b/packages.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7530dcb319e1cfca414a6c89b589b19585c01718
--- /dev/null
+++ b/packages.txt
@@ -0,0 +1,12 @@
+libgl1
+freeglut3-dev
+unzip
+ffmpeg
+libsm6
+libxext6
+python
+libgl1-mesa-dri
+build-essential
+libturbojpeg0
+libegl1-mesa
+libgbm1
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b387e5acdf9e4fc15d818918e8afea17f7c6abf
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,46 @@
+pip>=21.2.4
+numpy==1.22.4
+matplotlib==3.5.0
+Pillow==9.2.0
+PyOpenGL
+PyOpenGL_accelerate
+PyYAML>=6.0
+yacs>=0.1.8
+scikit-image==0.19.1
+termcolor
+tqdm
+trimesh==3.9.35
+flatten_dict==0.4.2
+jpeg4py
+shapely==1.7.1
+rtree==0.9.7
+pytorch_lightning==1.2.5
+PyMCubes
+opencv-python
+opencv_contrib_python
+scikit-learn
+protobuf==3.20.0
+pymeshlab
+iopath
+fvcore
+chumpy
+git+https://github.com/YuliangXiu/rembg.git
+--extra-index-url https://download.pytorch.org/whl/cu113
+-f https://download.pytorch.org/whl/cu113/torch_stable.html
+torch==1.11.0+cu113
+torchvision==0.12.0+cu113
+loguru==0.5.3
+vedo==2020.4.2
+gdown==4.6.0
+kornia
+open3d
+tinyobjloader==2.0.0rc7
+cython==0.29.20
+ipykernel==5.3.4
+ipywidgets==7.6.5
+dataclasses>=0.6
+https://download.is.tue.mpg.de/icon/colab/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl
+https://download.is.tue.mpg.de/icon/colab/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl
+https://download.is.tue.mpg.de/icon/colab/voxelize_cuda-0.0.0-cp38-cp38-linux_x86_64.whl
\ No newline at end of file