diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..24f980e0115c7057f2e57b235e98894f3932c564 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.task filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e0926aef1e996760784d1e51965517ce493b825c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+files
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4c40f4d72a5436ca581bebae28761450df9e4b1
--- /dev/null
+++ b/app.py
@@ -0,0 +1,185 @@
+import spaces
+import gradio as gr
+
+
+'''
+
+'''
+from gradio_utils import clear_old_files,read_file
+from face_mesh_spinning import process_face_mesh_spinning
+from mp_estimate import mean_std_label,estimate_horizontal,estimate_vertical,estimate_horizontal_points,estimate_vertical_points
+
+def process_images(image,draw_type,center_scaleup,animation_direction,
+ z_multiply,inner_eyes,inner_mouth,
+ progress=gr.Progress(track_tqdm=True)):
+
+ clear_old_files()
+
+    if image is None:
+        raise gr.Error("Need an input image")
+
+ result,face_landmarker_result,rotated_points = process_face_mesh_spinning(image,draw_type,center_scaleup,animation_direction,z_multiply,inner_eyes,inner_mouth)
+
+ return result
+
+
+css="""
+#col-left {
+ margin: 0 auto;
+ max-width: 640px;
+}
+#col-right {
+ margin: 0 auto;
+ max-width: 640px;
+}
+.grid-container {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap:10px
+}
+
+.image {
+ width: 128px;
+ height: 128px;
+ object-fit: cover;
+}
+
+.text {
+ font-size: 16px;
+}
+"""
+
+from glibvision.cv2_utils import pil_to_bgr_image,copy_image
+from mp_utils import extract_landmark,get_pixel_cordinate
+import numpy as np
+# TODO move mp_util
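+# Detect landmarks once, and when double_check is True shift a copy of the image so the
+# nose tip (center_index, 4 by default) sits at the image center, run detection again on
+# the centered copy, and return both results.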
+def extract_landmark_double_check(numpy_image,double_check=True,center_index=4,extract_matrix=True):#4 is nose-tip
+ mp_image,face_landmarker_result = extract_landmark(numpy_image,"face_landmarker.task",0,0,extract_matrix)
+ h,w = numpy_image.shape[:2]
+ second_mp_image,first_landmarker_result = None,None
+ numpy_view = mp_image.numpy_view()
+ if double_check:
+ root_cordinate = get_pixel_cordinate(face_landmarker_result.face_landmarks,center_index,w,h)
+ diff_center_x = int(w/2 - root_cordinate[0])
+ diff_center_y = int(h/2 - root_cordinate[1])
+ base = np.zeros_like(numpy_view)
+ copy_image(base,numpy_view,diff_center_x,diff_center_y)
+ first_landmarker_result = face_landmarker_result
+ second_mp_image,face_landmarker_result = extract_landmark(base,"face_landmarker.task",0,0,extract_matrix)
+ return mp_image,face_landmarker_result,second_mp_image,first_landmarker_result
+
+#css=css,
+
+from scipy.spatial.transform import Rotation as R
+def calculate_angle(image,double_check,ignore_x,order):
+ cv2_base_image = pil_to_bgr_image(image)
+ mp_image,face_landmarker_result,_,_ = extract_landmark_double_check(cv2_base_image,double_check)
+ if len(face_landmarker_result.facial_transformation_matrixes)>0:
+ transformation_matrix=face_landmarker_result.facial_transformation_matrixes[0]
+
+ rotation_matrix, translation_vector = transformation_matrix[:3, :3],transformation_matrix[:3, 3]
+
+ r = R.from_matrix(rotation_matrix)
+ euler_angles = r.as_euler(order, degrees=True)
+ label = f"Mediapipe Euler yxz: {euler_angles}"
+ if ignore_x:
+ euler_angles[1]=0
+
+ result = [label,0,0,0]
+ for i,ch in enumerate(order.lower()):
+ if ch == "x":
+ result[1] = -euler_angles[i]
+ elif ch == "y":
+ result[2] = euler_angles[i]
+ elif ch == "z":
+ result[3] = euler_angles[i]
+
+ return result
+ return label,-euler_angles[1],euler_angles[0],euler_angles[2]
+ return "",0,0,0
+
+def change_animation(animation):
+ if animation:
+ return gr.Column(visible=True),gr.Column(visible=False)
+ else:
+ return gr.Column(visible=False),gr.Column(visible=True)
+with gr.Blocks(css=css, elem_id="demo-container") as demo:
+ with gr.Column():
+ gr.HTML(read_file("demo_header.html"))
+ gr.HTML(read_file("demo_tools.html"))
+ with gr.Row():
+ with gr.Column():
+ image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB',elem_id="image_upload", type="pil", label="Image")
+
+ with gr.Row(elem_id="prompt-container", equal_height=False):
+ with gr.Row():
+ btn = gr.Button("Rotate Mesh", elem_id="run_button",variant="primary")
+
+
+
+ with gr.Accordion(label="Advanced Settings", open=True):
+
+                    draw_type = gr.Radio(label="Draw type",choices=["Dot","Line","Line+Fill","Image"],value="Line",info="Image animation takes over 60 seconds and renders only a limited number of frames")
+ with gr.Row( equal_height=True):
+ inner_eyes=gr.Checkbox(label="Inner Eyes",value=True)
+ inner_mouth=gr.Checkbox(label="Inner Mouth",value=True)
+ with gr.Row( equal_height=True):
+
+                        center_scaleup = gr.Checkbox(label="ScaleUp/Fit",value=True,info="Center is the nose tip; zoomed-in faces usually become smaller")
+ z_multiply = gr.Slider(info="Nose height",
+ label="Depth-Multiply",
+ minimum=0.1,
+ maximum=1.5,
+ step=0.01,
+ value=0.8)
+ animation_column = gr.Column(visible=True)
+ with animation_column:
+ with gr.Row( equal_height=True):
+ animation_direction = gr.Radio(label="Animation Direction",choices=["X","Y","Z"],value="Y")
+
+
+
+
+
+
+ with gr.Column():
+ result_image = gr.Image(height=760,label="Result", elem_id="output-animation",image_mode='RGBA')
+
+
+
+
+
+ btn.click(fn=process_images, inputs=[image,draw_type,center_scaleup,animation_direction,
+ z_multiply,inner_eyes,inner_mouth,
+ ],outputs=[result_image,
+
+ ] ,api_name='infer')
+
+ example_images = [
+ ["examples/02316230.jpg","examples/02316230.webp"],
+ ["examples/00003245_00.jpg","examples/00003245_00.webp"],
+ ["examples/00827009.jpg","examples/00827009.webp"],
+ ["examples/00002062.jpg","examples/00002062.webp"],
+ ["examples/00824008.jpg","examples/00824008.webp"],
+ ["examples/00825000.jpg","examples/00825000.webp"],
+ ["examples/00826007.jpg","examples/00826007.webp"],
+ ["examples/00824006.jpg","examples/00824006.webp"],
+
+ ["examples/00002200.jpg","examples/00002200.webp"],
+ ["examples/00005259.jpg","examples/00005259.webp"],
+ ["examples/00018022.jpg","examples/00018022.webp"],
+ ["examples/img-above.jpg","examples/img-above.webp"],
+ ["examples/00100265.jpg","examples/00100265.webp"],
+ ["examples/00039259.jpg","examples/00039259.webp"],
+
+ ]
+ example1=gr.Examples(
+ examples = example_images,label="Image",
+ inputs=[image,result_image],examples_per_page=8
+ )
+
+ gr.HTML(read_file("demo_footer.html"))
+
+ if __name__ == "__main__":
+ demo.launch()
diff --git a/demo_footer.html b/demo_footer.html
new file mode 100644
index 0000000000000000000000000000000000000000..18fa5a3fd35b808f80817788fcf3bed61d4cb1e4
--- /dev/null
+++ b/demo_footer.html
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/demo_header.html b/demo_header.html
new file mode 100644
index 0000000000000000000000000000000000000000..540aef00b0ffbb29d7aa742c8f63ab457084b49b
--- /dev/null
+++ b/demo_header.html
@@ -0,0 +1,15 @@
+
+
+ Mediapipe Head 2D-Spinning
+
+
+
+
diff --git a/demo_tools.html b/demo_tools.html
new file mode 100644
index 0000000000000000000000000000000000000000..0d895e7e5afccd95b86abe9589a54252cad690bb
--- /dev/null
+++ b/demo_tools.html
@@ -0,0 +1,7 @@
+
diff --git a/examples/00002062.jpg b/examples/00002062.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5c1a25eec25c70fbf32c5ba648d96d426dcc2a66
Binary files /dev/null and b/examples/00002062.jpg differ
diff --git a/examples/00002062.webp b/examples/00002062.webp
new file mode 100644
index 0000000000000000000000000000000000000000..0714335ddc2522363c87046e9c03cbc40df0b894
Binary files /dev/null and b/examples/00002062.webp differ
diff --git a/examples/00002200.jpg b/examples/00002200.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..203bb17853086093e1e62c3d7993f34ef0357d3a
Binary files /dev/null and b/examples/00002200.jpg differ
diff --git a/examples/00002200.webp b/examples/00002200.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b5e8102c3b411da649ea1892ee7104f4b09a7d54
Binary files /dev/null and b/examples/00002200.webp differ
diff --git a/examples/00003245_00.jpg b/examples/00003245_00.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d73a1a0635134917c8e55c8796ede9450585a292
Binary files /dev/null and b/examples/00003245_00.jpg differ
diff --git a/examples/00003245_00.webp b/examples/00003245_00.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8641553face2a84e838d14c2e3a9adc8ba69c856
Binary files /dev/null and b/examples/00003245_00.webp differ
diff --git a/examples/00005259.jpg b/examples/00005259.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c513f7ea77aad2e9afb03c22c47326f08cd536ec
Binary files /dev/null and b/examples/00005259.jpg differ
diff --git a/examples/00005259.webp b/examples/00005259.webp
new file mode 100644
index 0000000000000000000000000000000000000000..10a96138119c28c6aa13a42b5d72bb6d45e69bea
Binary files /dev/null and b/examples/00005259.webp differ
diff --git a/examples/00018022.jpg b/examples/00018022.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4b61e2c87f54bbda6ce60f35f0f316090e8e77a4
Binary files /dev/null and b/examples/00018022.jpg differ
diff --git a/examples/00018022.webp b/examples/00018022.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a8cab9a7e9b12b8c21576953a9157d8d8a4a49d0
Binary files /dev/null and b/examples/00018022.webp differ
diff --git a/examples/00039259.jpg b/examples/00039259.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fc4096c8e56e18cc6ececc7148ec2aff7473efe6
Binary files /dev/null and b/examples/00039259.jpg differ
diff --git a/examples/00039259.webp b/examples/00039259.webp
new file mode 100644
index 0000000000000000000000000000000000000000..27809c4a8c29d27899a80cd94bed21752104b224
Binary files /dev/null and b/examples/00039259.webp differ
diff --git a/examples/00100265.jpg b/examples/00100265.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3348569ae2e2ada4531a81eb110c8fcd4a1227ef
Binary files /dev/null and b/examples/00100265.jpg differ
diff --git a/examples/00100265.webp b/examples/00100265.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7eba5923aa9f389271ae38392e9a8de1431a9f73
Binary files /dev/null and b/examples/00100265.webp differ
diff --git a/examples/00824006.jpg b/examples/00824006.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ba3f2faa710c1e37dd00ebd2e3af849ce1fb1fb6
Binary files /dev/null and b/examples/00824006.jpg differ
diff --git a/examples/00824006.webp b/examples/00824006.webp
new file mode 100644
index 0000000000000000000000000000000000000000..28f19b6bcbe0023ccf37a4f672cac7c7e3e1e4c7
Binary files /dev/null and b/examples/00824006.webp differ
diff --git a/examples/00824008.jpg b/examples/00824008.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2cc8a520e3a5463493d64f81bb50336e699d0f06
Binary files /dev/null and b/examples/00824008.jpg differ
diff --git a/examples/00824008.webp b/examples/00824008.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1014dd340da7161fa2857b4e66b28ebdd2aa74cb
Binary files /dev/null and b/examples/00824008.webp differ
diff --git a/examples/00825000.jpg b/examples/00825000.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c01e2de2f95acf46fc5e3f77f0ae346819255ef7
Binary files /dev/null and b/examples/00825000.jpg differ
diff --git a/examples/00825000.webp b/examples/00825000.webp
new file mode 100644
index 0000000000000000000000000000000000000000..34c2fe26ab5567f0c84f4889b933bf95fe843db0
Binary files /dev/null and b/examples/00825000.webp differ
diff --git a/examples/00826007.jpg b/examples/00826007.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3fe263ebaa95980bcac9fbd743452aa1fb504fac
Binary files /dev/null and b/examples/00826007.jpg differ
diff --git a/examples/00826007.webp b/examples/00826007.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d70c9c8474411f17adcde561ae346e4feb5721f4
Binary files /dev/null and b/examples/00826007.webp differ
diff --git a/examples/00827009.jpg b/examples/00827009.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..45ca5b71fc873dc306df54c4295ee0ab4d6a7bc8
Binary files /dev/null and b/examples/00827009.jpg differ
diff --git a/examples/00827009.webp b/examples/00827009.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3959cbbf29e85bf59cb0595e830d390c72921980
Binary files /dev/null and b/examples/00827009.webp differ
diff --git a/examples/00828003.jpg b/examples/00828003.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..413ec3b1548362f9f9fff4c68648034b417c99a7
Binary files /dev/null and b/examples/00828003.jpg differ
diff --git a/examples/02316230.jpg b/examples/02316230.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8c4ffb3d264317e65869d321b5082437b035e3c0
Binary files /dev/null and b/examples/02316230.jpg differ
diff --git a/examples/02316230.webp b/examples/02316230.webp
new file mode 100644
index 0000000000000000000000000000000000000000..8414fd783eafb6635ce5440f726cddaa0a89b572
Binary files /dev/null and b/examples/02316230.webp differ
diff --git a/examples/_00039259.webp b/examples/_00039259.webp
new file mode 100644
index 0000000000000000000000000000000000000000..810c52b3c2df389fcee64d7fab7760759d464f4c
Binary files /dev/null and b/examples/_00039259.webp differ
diff --git a/examples/img-above.jpg b/examples/img-above.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8fc1a498edec46054e07e94eb82b98773af29d74
Binary files /dev/null and b/examples/img-above.jpg differ
diff --git a/examples/img-above.webp b/examples/img-above.webp
new file mode 100644
index 0000000000000000000000000000000000000000..f9dcbe5f4804fdd59721119f7b5c7b14335396b4
Binary files /dev/null and b/examples/img-above.webp differ
diff --git a/face_landmarker.task b/face_landmarker.task
new file mode 100644
index 0000000000000000000000000000000000000000..fedb14de6d2b6708a56c04ae259783e23404c1aa
--- /dev/null
+++ b/face_landmarker.task
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64184e229b263107bc2b804c6625db1341ff2bb731874b0bcc2fe6544e0bc9ff
+size 3758596
diff --git a/face_landmarker.task.txt b/face_landmarker.task.txt
new file mode 100644
index 0000000000000000000000000000000000000000..98b01a966fd7b5060ee9b3f519fa767fced7d248
--- /dev/null
+++ b/face_landmarker.task.txt
@@ -0,0 +1,8 @@
+Face landmark detection
+https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
+
+model card page is
+https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
+
+license is Apache2.0
+https://www.apache.org/licenses/LICENSE-2.0.html
\ No newline at end of file
diff --git a/face_mesh_spinning.py b/face_mesh_spinning.py
new file mode 100644
index 0000000000000000000000000000000000000000..465321d1415f9b982980e1ddf9a64883d97c7cde
--- /dev/null
+++ b/face_mesh_spinning.py
@@ -0,0 +1,393 @@
+import subprocess
+from PIL import Image,ImageOps,ImageDraw,ImageFilter
+import json
+import os
+import time
+import io
+from mp_utils import get_pixel_cordinate_list,extract_landmark,get_pixel_cordinate,get_normalized_xyz
+from glibvision.draw_utils import points_to_box,box_to_xy,plus_point,calculate_distance
+
+import numpy as np
+from glibvision.pil_utils import fill_points,create_color_image,draw_box
+
+import glibvision.pil_utils
+
+from gradio_utils import save_image,save_buffer,clear_old_files ,read_file
+
+
+import math
+import mp_triangles
+
+
+from glibvision.cv2_utils import create_color_image as cv2_create_color_image,copy_image,pil_to_bgr_image
+import cv2
+#TODO move to CV2
+
+# I'm not sure whether this is fast
+def apply_affine_transformation_to_triangle_add(src_tri, dst_tri, src_img, dst_img):
+ src_tri_np = np.float32(src_tri)
+ dst_tri_np = np.float32(dst_tri)
+
+ h_dst, w_dst = dst_img.shape[:2]
+
+ M = cv2.getAffineTransform(src_tri_np, dst_tri_np)
+
+ dst_mask = np.zeros((h_dst, w_dst), dtype=np.uint8)
+ cv2.fillPoly(dst_mask, [np.int32(dst_tri)], 255)
+
+ transformed = cv2.warpAffine(src_img, M, (w_dst, h_dst))
+
+ transformed = transformed * (dst_mask[:, :, np.newaxis] / 255).astype(np.uint8)
+ dst_background = dst_img * (1 - (dst_mask[:, :, np.newaxis] / 255)).astype(np.uint8)
+ dst_img = transformed + dst_background
+
+ return dst_img
+
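+# NOTE: the definition below replaces the version above when the module is loaded.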
+def apply_affine_transformation_to_triangle_add(src_tri, dst_tri, src_img, dst_img):
+ src_tri_np = np.float32(src_tri)
+ dst_tri_np = np.float32(dst_tri)
+
+    assert src_tri_np.shape == (3, 2), f"src_tri_np has an invalid shape {src_tri_np.shape}"
+    assert dst_tri_np.shape == (3, 2), f"dst_tri_np has an invalid shape {dst_tri_np.shape}"
+
+
+    # compute the affine transformation matrix
+ M = cv2.getAffineTransform(src_tri_np, dst_tri_np)
+
+    # image sizes
+ h_src, w_src = src_img.shape[:2]
+ h_dst, w_dst = dst_img.shape[:2]
+
+    # build a mask that cuts the triangle region out of the source image
+ #src_mask = np.zeros((h_src, w_src), dtype=np.uint8)
+ #cv2.fillPoly(src_mask, [np.int32(src_tri)], 255)
+
+    # Not extracting only the triangle region of the source image with a mask
+ src_triangle = src_img #cv2.bitwise_and(src_img, src_img, mask=src_mask)
+
+    # warp the source triangle region to the destination image size using the transform matrix
+
+ transformed = cv2.warpAffine(src_triangle, M, (w_dst, h_dst))
+ #print(f"dst_img={dst_img.shape}")
+ #print(f"transformed={transformed.shape}")
+    # build the mask for the warped triangle
+ dst_mask = np.zeros((h_dst, w_dst), dtype=np.uint8)
+ cv2.fillPoly(dst_mask, [np.int32(dst_tri)], 255)
+ transformed = cv2.bitwise_and(transformed, transformed, mask=dst_mask)
+
+    # invert the destination mask so the target area can be cleared
+ dst_mask_inv = cv2.bitwise_not(dst_mask)
+
+    # clear the masked area of the destination image
+ dst_background = cv2.bitwise_and(dst_img, dst_img, mask=dst_mask_inv)
+
+    # composite the warped source triangle with the destination background
+ dst_img = cv2.add(dst_background, transformed)
+
+ return dst_img
+
+# TODO move PIL
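+# Assemble a list of PIL frames into an animated WebP and return the encoded bytes.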
+def process_create_webp(images,duration=100, loop=0,quality=85):
+ frames = []
+ for image_file in images:
+ frames.append(image_file)
+
+ output_buffer = io.BytesIO()
+ frames[0].save(output_buffer,
+ save_all=True,
+ append_images=frames[1:],
+ duration=duration,
+ loop=loop,
+ format='WebP',
+ quality=quality
+ )
+
+ return output_buffer.getvalue()
+# TODO move numpy
+def rotate_point_euler(point, angles,order="xyz"):
+ """
+    Rotate a 3D point using Euler angles.
+
+    Args:
+        point: 3D point to rotate (x, y, z)
+        angles: rotation angles about each axis (rx, ry, rz) in radians
+        order: rotation order such as "xyz"
+
+    Returns:
+        Rotated 3D point (x', y', z')
+ """
+
+ rx, ry, rz = angles
+ point = np.array(point)
+
+    # rotation about the X axis
+ Rx = np.array([
+ [1, 0, 0],
+ [0, np.cos(rx), -np.sin(rx)],
+ [0, np.sin(rx), np.cos(rx)]
+ ])
+
+    # rotation about the Y axis
+ Ry = np.array([
+ [np.cos(ry), 0, np.sin(ry)],
+ [0, 1, 0],
+ [-np.sin(ry), 0, np.cos(ry)]
+ ])
+
+    # rotation about the Z axis
+ Rz = np.array([
+ [np.cos(rz), -np.sin(rz), 0],
+ [np.sin(rz), np.cos(rz), 0],
+ [0, 0, 1]
+ ])
+
+    # compose the rotation matrices according to the requested order
+ order = order.lower()
+ if order == "xyz":
+ R = Rx @ Ry @ Rz
+ elif order == "xzy":
+ R = Rx @ Rz @ Ry
+ elif order == "yxz":
+ R = Ry @ Rx @ Rz
+ elif order == "yzx":
+ R = Ry @ Rz @ Rx
+ elif order == "zxy":
+ R = Rz @ Rx @ Ry
+ else:
+ R = Rz @ Ry @ Rx
+
+
+
+    # apply the rotation to the point
+ rotated_point = R @ point
+
+ return rotated_point
+
+
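+# Main pipeline: detect MediaPipe face landmarks, recenter the 3D points on the nose tip,
+# rotate them frame by frame around the chosen axis, render each frame as dots, wireframe
+# triangles, filled triangles or a warped texture, and pack the frames into an animated WebP.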
+def process_face_mesh_spinning(image,draw_type,center_scaleup,animation_direction,z_multiply=0.8,inner_eyes=False,inner_mouth=False):
+ animation = True
+ offset_x = 0
+ offset_y = 0
+    # used when center_scaleup is True: scale is 0.45 (half size 0.5 minus a margin) divided by the larger of the nose-tip-to-top and nose-tip-to-bottom distances
+ scale_up = 1.0
+
+ face_landmarker_result = None
+
+
+    if image is None:# the app no longer passes a None image; in still-image mode this would cause problems
+ # Box for no Image Case
+ image_width = 512
+ image_height = 512
+ #image = create_color_image(image_width,image_height,(0,0,0))
+ points = [(-0.25,-0.25,0),(0.25,-0.25,0),
+ (0.25,0.25,0),(-0.25,0.25,0)
+ ]
+ normalized_center_point = [0.5,0.5]
+ else:
+ image_width = image.width
+ image_height = image.height
+
+
+
+
+ mp_image,face_landmarker_result = extract_landmark(image,"face_landmarker.task",0,0,True)
+
+ def rotate_image():
+ return None,face_landmarker_result,None
+
+ #return rotate_image()
+ # cordinate eyes
+ # cordinate all
+ landmark_points = [get_normalized_xyz(face_landmarker_result.face_landmarks,i) for i in range(0,468)]
+ # do centering
+ normalized_center_point = landmark_points[4]
+ normalized_top_point = landmark_points[10]
+ normalized_bottom_point = landmark_points[152]
+
+
+ offset_x = normalized_center_point[0]
+ offset_y = normalized_center_point[1]
+ offset_z = normalized_center_point[2]
+
+ #need aspect?
+ points = [[point[0]-offset_x,point[1]-offset_y,point[2]*z_multiply] for point in landmark_points]
+
+
+ # split xy-cordinate and z-depth
+ def split_points_xy_z(points,width,height,center_x,center_y):
+ xys = []
+ zs = []
+ for point in points:
+ xys.append(
+ [
+ point[0]*width*scale_up+center_x,
+ point[1]*height*scale_up+center_y
+ ]
+ )
+ zs.append(point[2])
+ return xys,zs
+
+ def draw_grid_in_center(draw,cx,cy,grid_size,grid_color,width=1,draw_horizontal=True,draw_vertical=True):
+ w = image.width
+ h = image.height
+ x_minus_divide = cx//grid_size
+ x_plus_divide = (w -cx)//grid_size
+ y_minus_divide = cy//grid_size
+        y_plus_divide = (h -cy)//grid_size
+ for i in range(-x_minus_divide,x_plus_divide+1):
+ draw.line([(cx+i*grid_size,0),(cx+i*grid_size,h)],fill=grid_color,width=width)
+ for i in range(-y_minus_divide,y_plus_divide+1):
+ draw.line([(0,cy+i*grid_size),(w,cy+i*grid_size)],fill=grid_color,width=width)
+
+ def draw_grid(image,cx=512,cy=512,first_color=(255,0,0)):
+ w = image.width
+ h = image.height
+ second_grid_size=100
+ second_color = (128,128,128)
+ draw = ImageDraw.Draw(image)
+ draw_grid_in_center(draw,cx,cy,20,(100,100,100))
+ draw_grid_in_center(draw,cx,cy,100,(192,192,192))
+
+
+
+ draw.line([(cx,0),(cx,image.height)],fill=first_color)
+ draw.line([(0,cy),(image.width,cy)],fill=first_color)
+
+ def create_triangle_image(points,width,height,center_x,center_y,line_color=(255,255,255),fill_color=None):
+
+ cordinates,angled_depth = split_points_xy_z(points,width,height,center_x,center_y)
+
+ img = create_color_image(width,height,(0,0,0))
+ draw = ImageDraw.Draw(img)
+ triangles = mp_triangles.get_triangles_copy(True,inner_eyes,inner_eyes,inner_mouth)
+
+ triangles.sort(key=lambda triangle: sum(angled_depth[index] for index in triangle) / len(triangle)
+ ,reverse=True)
+ for triangle in triangles:
+ triangle_cordinates = [cordinates[index] for index in triangle]
+ glibvision.pil_utils.image_draw_points(draw,triangle_cordinates,line_color,fill_color)
+
+
+ return img
+
+ def create_texture_image(image,origin_points,angled_points,width,height,center_x,center_y,line_color=(255,255,255),fill_color=None):
+ cv2_image = pil_to_bgr_image(image)
+ #print(f"shape={cv2_image.shape}")
+ #cv2.imwrite("tmp.jpg",cv2_image)
+ original_cordinates = []
+ cordinates,angled_depth = split_points_xy_z(angled_points,width,height,center_x,center_y)
+ # original point need offset
+ for point in origin_points:
+ original_cordinates.append(
+ [
+ (point[0]+offset_x)*width,
+ (point[1]+offset_y)*height
+ ]
+ )
+ if cv2_image.shape[2]==3:
+ cv2_bg_img = cv2_create_color_image(cv2_image,(0,0,0))
+ else:
+ cv2_bg_img = cv2_create_color_image(cv2_image,(0,0,0,0))
+
+ triangles = mp_triangles.get_triangles_copy(True,inner_eyes,inner_eyes,inner_mouth)
+
+ triangles.sort(key=lambda triangle: sum(angled_depth[index] for index in triangle) / len(triangle)
+ ,reverse=True)
+
+ for triangle in triangles:
+ triangle_cordinates = [cordinates[index] for index in triangle]
+ origin_triangle_cordinates = [original_cordinates[index] for index in triangle]
+
+ cv2_bg_img=apply_affine_transformation_to_triangle_add(origin_triangle_cordinates,triangle_cordinates,cv2_image,cv2_bg_img)
+
+        if cv2_bg_img.shape[2] == 4:
+            img = Image.fromarray(cv2.cvtColor(cv2_bg_img, cv2.COLOR_BGRA2RGBA))
+        else:
+            img = Image.fromarray(cv2.cvtColor(cv2_bg_img, cv2.COLOR_BGR2RGB))
+
+ return img
+
+ def create_point_image(points,width,height,center_x,center_y):
+ cordinates,_ = split_points_xy_z(points,width,height,center_x,center_y)
+ img = create_color_image(width,height,(0,0,0))
+ glibvision.pil_utils.draw_points(img,cordinates,None,None,3,(255,0,0),3)
+
+ return img
+
+ def angled_points(points,angles,order="xyz"):
+ angled_cordinates = []
+ for point in points:
+ rotated_np_point = rotate_point_euler(point,angles,order)
+ angled_cordinates.append(
+ [
+ rotated_np_point[0],
+ rotated_np_point[1],rotated_np_point[2]
+ ]
+ )
+ return angled_cordinates
+
+
+ frames = []
+
+
+ #frames.append(create_point_image(points))
+ frame_duration=100
+ start_angle=0
+ end_angle=360
+ step_angle=10
+
+ if draw_type == "Image":
+ start_angle=-90
+ end_angle=90
+ step_angle=30
+
+ if not animation:
+ start_angle=0
+ end_angle=0
+ step_angle=360
+ if image == None:
+ draw_type="Dot"
+
+
+ if center_scaleup and image!=None:
+ top_distance = calculate_distance(normalized_center_point,normalized_top_point)
+ bottom_distance = calculate_distance(normalized_center_point,normalized_bottom_point)
+ distance = top_distance if top_distance>bottom_distance else bottom_distance
+        #small_size = image_width if image_width<image_height else image_height
+        scale_up = 0.45/distance
+        if image_height>image_width:
+            scale_up *= image_width/image_height
+ #print(scale_up)
+ face_center_x = int(0.5* image_width)#half
+ face_center_y = int(0.5* image_height)
+ else:
+ scale_up = 1.0
+ face_center_x = int(normalized_center_point[0]* image_width)
+ face_center_y = int(normalized_center_point[1]* image_height)
+
+
+
+ rotated_points = None
+
+ if animation:
+ for i in range(start_angle,end_angle,step_angle):
+ if animation_direction == "X":
+ angles = [math.radians(i),0,0]
+ elif animation_direction == "Y":
+ angles = [0,math.radians(i),0]
+ else:
+ angles = [0,0,math.radians(i)]
+
+ if draw_type == "Dot":
+ frames.append(create_point_image(angled_points(points,angles),image_width,image_height,face_center_x,face_center_y))
+ elif draw_type == "Line":
+ frames.append(create_triangle_image(angled_points(points,angles),image_width,image_height,face_center_x,face_center_y))
+ elif draw_type == "Line+Fill":
+ frames.append(create_triangle_image(angled_points(points,angles),image_width,image_height,face_center_x,face_center_y,(128,128,128),(200,200,200)))
+ elif draw_type == "Image":
+ frame_duration=500
+ frames.append(create_texture_image(image,points,angled_points(points,angles),image_width,image_height,face_center_x,face_center_y))
+ webp = process_create_webp(frames,frame_duration)
+ path = save_buffer(webp)
+
+
+
+
+ return path,face_landmarker_result,rotated_points
\ No newline at end of file
diff --git a/glibvision/common_utils.py b/glibvision/common_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4939ada2978cb935fcae32957083236de6977d32
--- /dev/null
+++ b/glibvision/common_utils.py
@@ -0,0 +1,112 @@
+import os
+def check_exists_files(files,dirs,exit_on_error=True):
+ if files is not None:
+ if isinstance(files, str):
+ files = [files]
+ for file in files:
+ if not os.path.isfile(file):
+ print(f"File {file} not found")
+ if exit_on_error:
+ exit(1)
+ else:
+ return 1
+ if dirs is not None:
+ if isinstance(dirs, str):
+ dirs = [dirs]
+ for dir in dirs:
+ if not os.path.isdir(dir):
+ print(f"Dir {dir} not found")
+ if exit_on_error:
+ exit(1)
+ else:
+ return 1
+ return 0
+
+image_extensions =[".jpg"]
+
+def add_name_suffix(file_name,suffix,replace_suffix=False):
+ if not suffix.startswith("_"):#force add
+ suffix="_"+suffix
+
+ name,ext = os.path.splitext(file_name)
+ if replace_suffix:
+ index = name.rfind("_")
+ if index!=-1:
+ return f"{name[0:index]}{suffix}{ext}"
+
+ return f"{name}{suffix}{ext}"
+
+def replace_extension(file_name,new_extension,suffix=None,replace_suffix=False):
+ if not new_extension.startswith("."):
+ new_extension="."+new_extension
+
+ name,ext = os.path.splitext(file_name)
+ new_file = f"{name}{new_extension}"
+ if suffix:
+ return add_name_suffix(name+new_extension,suffix,replace_suffix)
+ return new_file
+
+def list_digit_images(input_dir,sort=True):
+ digit_images = []
+ global image_extensions
+ files = os.listdir(input_dir)
+ for file in files:
+ if file.endswith(".jpg"):#TODO check image
+ base,ext = os.path.splitext(file)
+ if not base.isdigit():
+ continue
+ digit_images.append(file)
+
+ if sort:
+ digit_images.sort()
+
+ return digit_images
+def list_suffix_images(input_dir,suffix,is_digit=True,sort=True):
+ digit_images = []
+ global image_extensions
+ files = os.listdir(input_dir)
+ for file in files:
+ if file.endswith(".jpg"):#TODO check image
+ base,ext = os.path.splitext(file)
+ if base.endswith(suffix):
+ if is_digit:
+ if not base.replace(suffix,"").isdigit():
+ continue
+ digit_images.append(file)
+
+ if sort:
+ digit_images.sort()
+
+ return digit_images
+
+import time
+
+class ProgressTracker:
+ """
+    Tracks the progress of a task and reports elapsed and remaining time.
+ """
+
+ def __init__(self,key, total_target):
+ """
+        Constructor.
+
+        Args:
+            key: label used in progress messages
+            total_target (int): total number of items to process
+ """
+ self.key = key
+ self.total_target = total_target
+ self.complete_target = 0
+ self.start_time = time.time()
+
+ def update(self):
+ """
+        Advance the progress by one step.
+        Print the elapsed time and the estimated remaining time.
+ """
+ self.complete_target += 1
+ current_time = time.time()
+ consumed_time = current_time - self.start_time
+ remain_time = (consumed_time / self.complete_target) * (self.total_target - self.complete_target) if self.complete_target > 0 else 0
+ print(f"stepped {self.key} {self.total_target} of {self.complete_target}, consumed {(consumed_time / 60):.1f} min, remain {(remain_time / 60):.1f} min")
+
+
\ No newline at end of file
diff --git a/glibvision/cv2_utils.py b/glibvision/cv2_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa1ca7b6e50b3ba9aeceee81e3843944a4910887
--- /dev/null
+++ b/glibvision/cv2_utils.py
@@ -0,0 +1,182 @@
+import cv2
+import numpy as np
+
+
+#2024-11-30 copy paste
+def draw_bbox(image,box,color=(255,0,0),thickness=1):
+ if thickness==0:
+ return
+
+ left = int(box[0])
+ top = int(box[1])
+ right = int(box[0]+box[2])
+ bottom = int(box[1]+box[3])
+ box_points =[(left,top),(right,top),(right,bottom),(left,bottom)]
+
+ cv2.polylines(image, [np.array(box_points)], isClosed=True, color=color, thickness=thickness)
+
+
+def to_int_points(points):
+ int_points=[]
+ for point in points:
+ int_points.append([int(point[0]),int(point[1])])
+ return int_points
+
+def draw_text(img, text, point, font_scale=0.5, color=(200, 200, 200), thickness=1):
+ font = cv2.FONT_HERSHEY_SIMPLEX
+ cv2.putText(img, str(text), point, font, font_scale, color, thickness, cv2.LINE_AA)
+
+plot_text_color = (200, 200, 200)
+plot_text_font_scale = 0.5
+plot_index = 1
+plot_text = True
+
+def set_plot_text(is_plot,text_font_scale,text_color):
+ global plot_index,plot_text,plot_text_font_scale,plot_text_color
+ plot_text = is_plot
+ plot_index = 1
+ plot_text_font_scale = text_font_scale
+ plot_text_color = text_color
+
+def plot_points(image,points,isClosed=False,circle_size=3,circle_color=(255,0,0),line_size=1,line_color=(0,0,255)):
+ global plot_index,plot_text
+ int_points = to_int_points(points)
+ if circle_size>0:
+ for point in int_points:
+ cv2.circle(image,point,circle_size,circle_color,-1)
+ if plot_text:
+ draw_text(image,plot_index,point,plot_text_font_scale,plot_text_color)
+ plot_index+=1
+ if line_size>0:
+ cv2.polylines(image, [np.array(int_points)], isClosed=isClosed, color=line_color, thickness=line_size)
+
+def fill_points(image,points,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ np_points = np.array(points,dtype=np.int32)
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=True, color=line_color, thickness=thickness)
+
+def get_image_size(cv2_image):
+ return cv2_image.shape[:2]
+
+def get_channel(np_array):
+ return np_array.shape[2] if np_array.ndim == 3 else 1
+
+def get_numpy_text(np_array,key=""):
+ channel = get_channel(np_array)
+ return f"{key} shape = {np_array.shape} channel = {channel} ndim = {np_array.ndim} size = {np_array.size}"
+
+
+def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
+    """
+    Convert a 3-dimensional grayscale image (1 channel) to 2 dimensions.
+
+    Args:
+        grayscale (np.ndarray): 3-dimensional grayscale image (1 channel).
+
+    Returns:
+        np.ndarray: 2-dimensional grayscale image.
+    """
+    channel = get_channel(grayscale)
+    if channel!=1:
+        raise ValueError(f"color maybe rgb or rgba {get_numpy_text(grayscale)}")
+
+    if grayscale.ndim == 2:
+        return grayscale
+    return np.squeeze(grayscale)
+
+def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
+ """
+    Blend two RGB images using a mask image.
+
+    Args:
+        image1 (np.ndarray): first image (RGB).
+        image2 (np.ndarray): second image (RGB).
+        mask (np.ndarray): mask image (grayscale).
+
+    Returns:
+        np.ndarray: blended image (RGB).
+
+    Raises:
+        ValueError: if the input image shapes do not match.
+ """
+
+ if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
+ raise ValueError("入力画像の形状が一致しません。")
+
+    # convert the images to float
+ image1 = image1.astype(float)
+ image2 = image2.astype(float)
+
+    # convert the mask to 3 channels and scale it to the 0-1 range
+ alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0
+
+    # blend the images
+ blended = (1 - alpha) * image1 + alpha * image2
+
+ return blended.astype(np.uint8)
+
+def create_color_image(img,color=(255,255,255)):
+ mask = np.zeros_like(img)
+
+ h, w = img.shape[:2]
+ cv2.rectangle(mask, (0, 0), (w, h), color, -1)
+ return mask
+#for an RGB PIL image, use np.array(image, dtype=np.uint8)
+def pil_to_bgr_image(image):
+ np_image = np.array(image, dtype=np.uint8)
+ if np_image.shape[2] == 4:
+ bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGBA2BGRA)
+ else:
+ bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
+ return bgr_img
+
+def bgr_to_rgb(np_image):
+ if np_image.shape[2] == 4:
+        bgr_img = cv2.cvtColor(np_image, cv2.COLOR_BGRA2RGBA)
+ else:
+ bgr_img = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
+ return bgr_img
+
+def crop(image,bbox):
+ x,y,width,height = bbox
+ return image[y:y+height, x:x+width]
+#no bounds checking
+def paste(image,replace_image,x,y):
+ height,width = replace_image.shape[:2]
+ image[y:y+height, x:x+width] = replace_image
+
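+# Paste img2 into img1 at position (x, y), clipped to the overlapping region;
+# negative offsets shift the source window instead of the destination.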
+def copy_image(img1: np.ndarray, img2: np.ndarray, x: int, y: int) -> None:
+    # check the channel count and number of dimensions
+ if img1.ndim != 3 or img2.ndim != 3:
+ raise ValueError("Both img1 and img2 must be 3-dimensional arrays.")
+ elif img1.shape[2] != img2.shape[2]:
+ raise ValueError(f"img1 and img2 must have the same number of channels. img1 has {img1.shape[2]} channels, but img2 has {img2.shape[1]} channels.")
+
+ # Type check
+ if not isinstance(img1, np.ndarray) or not isinstance(img2, np.ndarray):
+ raise TypeError("img1 and img2 must be NumPy arrays.")
+
+ if x>=0:
+ offset_x=0
+ w = min(img1.shape[1]-x,img2.shape[1])
+ else:
+ w = min(img1.shape[1],img2.shape[1]+x)
+ offset_x=int(-x)
+ x = 0
+
+ if y>=0:
+ h = min(img1.shape[0]-y,img2.shape[0])
+ offset_y=0
+ else:
+        h = min(img1.shape[0],img2.shape[0]+y)
+ offset_y=int(-y)
+ y = 0
+ x=int(x)
+ y=int(y)
+ h=int(h)
+ w=int(w)
+
+
+ print(f"img1 {img1.shape} img2{img2.shape} x={x} y={y} w={w} h={h}")
+ # Paste the overlapping part
+ img1[y:y+h, x:x+w] = img2[offset_y:h+offset_y, offset_x:w+offset_x]
diff --git a/glibvision/draw_utils.py b/glibvision/draw_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb8e670318dc39d97a100aee2d76e53090c1b869
--- /dev/null
+++ b/glibvision/draw_utils.py
@@ -0,0 +1,42 @@
+# DrawUtils
+# not PIL,CV2,Numpy drawing method
+import math
+# 2024-11-29 add calculate_distance
+def points_to_box(points):
+ x1=float('inf')
+ x2=0
+ y1=float('inf')
+ y2=0
+ for point in points:
+        if point[0]<x1:
+            x1=point[0]
+        if point[0]>x2:
+            x2=point[0]
+        if point[1]<y1:
+            y1=point[1]
+        if point[1]>y2:
+            y2=point[1]
+ return [x1,y1,x2-x1,y2-y1]
+
+def box_to_point(box):
+ return [
+ [box[0],box[1]],
+ [box[0]+box[2],box[1]],
+ [box[0]+box[2],box[1]+box[3]],
+ [box[0],box[1]+box[3]]
+ ]
+
+def plus_point(base_pt,add_pt):
+ return [base_pt[0]+add_pt[0],base_pt[1]+add_pt[1]]
+
+def box_to_xy(box):
+ return [box[0],box[1],box[2]+box[0],box[3]+box[1]]
+
+def to_int_points(points):
+ int_points=[]
+ for point in points:
+ int_points.append([int(point[0]),int(point[1])])
+ return int_points
+
+def calculate_distance(xy, xy2):
+ return math.sqrt((xy2[0] - xy[0])**2 + (xy2[1] - xy[1])**2)
\ No newline at end of file
diff --git a/glibvision/glandmark_utils.py b/glibvision/glandmark_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ffc1da78eaf504a4ef6b97de6ce3162cce75de9
--- /dev/null
+++ b/glibvision/glandmark_utils.py
@@ -0,0 +1,48 @@
+
+import os
+
+#simple single version
+def bbox_to_glandmarks(file_name,bbox,points = None):
+ base,ext = os.path.splitext(file_name)
+ glandmark = {"image":{
+ "boxes":[{
+ "left":int(bbox[0]),"top":int(bbox[1]),"width":int(bbox[2]),"height":int(bbox[3])
+ }],
+ "file":file_name,
+ "id":int(base)
+ # width,height ignore here
+ }}
+ if points is not None:
+ parts=[
+ ]
+ for point in points:
+ parts.append({"x":int(point[0]),"y":int(point[1])})
+ glandmark["image"]["boxes"][0]["parts"] = parts
+ return glandmark
+
+#technically this is not the g-landmark/dlib format,
+def convert_to_landmark_group_json(points):
+ if len(points)!=68:
+ print(f"points must be 68 but {len(points)}")
+ return None
+ new_points=list(points)
+
+    result = [ # wrapped in a list so that functions which support multiple people can consume it
+
+        { # indices start at 0, but the landmark numbering starts at 1
+ "chin":new_points[0:17],
+ "left_eyebrow":new_points[17:22],
+ "right_eyebrow":new_points[22:27],
+ "nose_bridge":new_points[27:31],
+ "nose_tip":new_points[31:36],
+ "left_eye":new_points[36:42],
+ "right_eye":new_points[42:48],
+
+ # lip points customized structure
+ # MIT licensed face_recognition
+ # https://github.com/ageitgey/face_recognition
+ "top_lip":new_points[48:55]+[new_points[64]]+[new_points[63]]+[new_points[62]]+[new_points[61]]+[new_points[60]],
+ "bottom_lip":new_points[54:60]+[new_points[48]]+[new_points[60]]+[new_points[67]]+[new_points[66]]+[new_points[65]]+[new_points[64]],
+ }
+ ]
+ return result
\ No newline at end of file
diff --git a/glibvision/mediapipe_utils.py b/glibvision/mediapipe_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4b3f125229b67a8d581e71184b1037b2a42ef06
--- /dev/null
+++ b/glibvision/mediapipe_utils.py
@@ -0,0 +1,17 @@
+import math
+from .numpy_utils import rotate_point_euler
+# mediapipe utilities that work with the other glibvision modules
+
+def rotate_points(points,angles,order="xyz",is_degree=False):
+ if is_degree:
+ angles = [math.radians(float(value)) for value in angles]
+ rotated_cordinates = []
+ for point in points:
+ rotated_np_point = rotate_point_euler(point,angles,order)
+ rotated_cordinates.append(
+ [
+ rotated_np_point[0],
+ rotated_np_point[1],rotated_np_point[2]
+ ]
+ )
+ return rotated_cordinates
diff --git a/glibvision/numpy_utils.py b/glibvision/numpy_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7a66578c6be5f0f9c771f406ee83d7878833c09
--- /dev/null
+++ b/glibvision/numpy_utils.py
@@ -0,0 +1,187 @@
+import numpy as np
+
+#2024-12-03 rotate_point_euler
+#2024-12-04 load_data
+def load_data(filepath):
+ """
+    Load data from a comma-separated text file into a NumPy array.
+
+    Args:
+        filepath: path to the data file
+
+    Returns:
+        NumPy array: the loaded data, or None if an error occurred.
+ """
+ try:
+ data = np.loadtxt(filepath, delimiter=",")
+ return data
+ except (FileNotFoundError, ValueError) as e:
+ print(f"Error loading data: {e}")
+ return None
+def rotate_point_euler(point, angles,order="xyz",is_degree=False):
+
+ """
+    Rotate a 3D point using Euler angles.
+
+    Args:
+        point: 3D point to rotate (x, y, z)
+        angles: rotation angles about each axis (rx, ry, rz) in radians
+        order: rotation order such as "xyz"
+        is_degree: if True, angles are given in degrees
+
+    Returns:
+        Rotated 3D point (x', y', z')
+ """
+ if is_degree:
+ angles = [np.deg2rad(value) for value in angles]
+ rx, ry, rz = angles
+ point = np.array(point)
+
+    # rotation about the X axis
+ Rx = np.array([
+ [1, 0, 0],
+ [0, np.cos(rx), -np.sin(rx)],
+ [0, np.sin(rx), np.cos(rx)]
+ ])
+
+    # rotation about the Y axis
+ Ry = np.array([
+ [np.cos(ry), 0, np.sin(ry)],
+ [0, 1, 0],
+ [-np.sin(ry), 0, np.cos(ry)]
+ ])
+
+    # rotation about the Z axis
+ Rz = np.array([
+ [np.cos(rz), -np.sin(rz), 0],
+ [np.sin(rz), np.cos(rz), 0],
+ [0, 0, 1]
+ ])
+
+    # compose the rotation matrices according to the requested order
+ order = order.lower()
+ if order == "xyz":
+ R = Rx @ Ry @ Rz
+ elif order == "xzy":
+ R = Rx @ Rz @ Ry
+ elif order == "yxz":
+ R = Ry @ Rx @ Rz
+ elif order == "yzx":
+ R = Ry @ Rz @ Rx
+ elif order == "zxy":
+ R = Rz @ Rx @ Ry
+ else:#zyx
+ R = Rz @ Ry @ Rx
+
+
+
+    # apply the rotation to the point
+ rotated_point = R @ point
+
+ return rotated_point
+
+def apply_binary_mask_to_color(base_image,color,mask):
+ """
+    Set the masked region of an image to a single color using a binary mask.
+
+    Args:
+        base_image (np.ndarray): image to modify.
+        color: color written to pixels where the mask is 255.
+        mask (np.ndarray): binary mask image.
+
+    Returns:
+        np.ndarray: the image with the mask applied.
+
+ """
+ # TODO check all shape
+ #print_numpy(base_image)
+ #print_numpy(paste_image)
+ #print_numpy(mask)
+ if mask.ndim == 2:
+ condition = mask == 255
+ else:
+ condition = mask[:,:,0] == 255
+
+ base_image[condition] = color
+ return base_image
+
+def apply_binary_mask_to_image(base_image,paste_image,mask):
+ """
+    Copy part of one image onto another using a binary mask.
+
+    Args:
+        base_image (np.ndarray): destination image.
+        paste_image (np.ndarray): source image.
+        mask (np.ndarray): binary mask image.
+
+    Returns:
+        np.ndarray: the image with the mask applied.
+
+ """
+ # TODO check all shape
+ #print_numpy(base_image)
+ #print_numpy(paste_image)
+ #print_numpy(mask)
+ if mask.ndim == 2:
+ condition = mask == 255
+ else:
+ condition = mask[:,:,0] == 255
+
+ base_image[condition] = paste_image[condition]
+ return base_image
+
+def pil_to_numpy(image):
+ return np.array(image, dtype=np.uint8)
+
+def extruce_points(points,index,ratio=1.5):
+ """
+    Push the point at the given index outward from the centroid of the point set by the given ratio.
+ """
+ center_point = np.mean(points, axis=0)
+    if index < 0 or index >= len(points):
+ raise ValueError(f"index must be range(0,{len(points)} but value = {index})")
+ point1 =points[index]
+ print(f"center = {center_point}")
+ vec_to_center = point1 - center_point
+ return vec_to_center*ratio + center_point
+
+
+def bulge_polygon(points, bulge_factor=0.1,isClosed=True):
+ """
+    Add a point at the middle of each polygon edge and push it outward.
+    Note that this returns an ndarray.
+ """
+    # convert the input points to a NumPy array
+ points = np.array(points)
+
+    # centroid of the whole polygon
+ center_point = np.mean(points, axis=0)
+ #print(f"center = {center_point}")
+ new_points = []
+ num_points = len(points)
+ for i in range(num_points):
+ if i == num_points -1 and not isClosed:
+ break
+ p1 = points[i]
+ #print(f"p{i} = {p1}")
+        # vector from the centroid to the vertex
+ #vec_to_center = p1 - center_point
+
+        # vector along the edge to the next point
+ mid_diff = points[(i + 1) % num_points] - p1
+ mid = p1+(mid_diff/2)
+
+ #print(f"mid = {mid}")
+ out_vec = mid - center_point
+
+        # push the edge midpoint outward along the vector from the centroid
+ new_point = mid + out_vec * bulge_factor
+
+ new_points.append(p1)
+ new_points.append(new_point.astype(np.int32))
+
+ return np.array(new_points)
+
+
+# an RGB image.shape is (1024,1024,3); only the first two dimensions (1024,1024) are used for the 2-D image
+def create_2d_image(shape):
+ grayscale_image = np.zeros(shape[:2], dtype=np.uint8)
+ return grayscale_image
\ No newline at end of file
diff --git a/glibvision/pil_utils.py b/glibvision/pil_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..959ede824bfeae2c3bc3610aacad8ffda5d8bd46
--- /dev/null
+++ b/glibvision/pil_utils.py
@@ -0,0 +1,38 @@
+from PIL import Image,ImageDraw
+from .draw_utils import box_to_xy,to_int_points,box_to_point
+#ver-2024-11-18
+def create_color_image(width, height, color=(255,255,255)):
+ if color == None:
+ color = (0,0,0)
+
+ if len(color )== 3:
+ mode ="RGB"
+ elif len(color )== 4:
+ mode ="RGBA"
+
+ img = Image.new(mode, (width, height), color)
+ return img
+
+def fill_points(image,points,color=(255,255,255)):
+ return draw_points(image,points,fill=color)
+
+def draw_points(image,points,outline=None,fill=None,width=1,plot_color=None,plot_size=3):
+ draw = ImageDraw.Draw(image)
+ image_draw_points(draw,points,outline,fill,width,plot_color,plot_size)
+ return image
+
+def image_draw_points(draw,points,outline=None,fill=None,width=1,plot_color=None,plot_size=3):
+ int_points = [(int(x), int(y)) for x, y in points]
+ if outline is not None or fill is not None:
+ draw.polygon(int_points, outline=outline,fill=fill,width=width)
+ if plot_color!=None:
+ print(int_points,plot_size,plot_color)
+ for point in int_points:
+ draw.circle(point,plot_size,fill=plot_color)
+
+def draw_box(image,box,outline=None,fill=None):
+ points = to_int_points(box_to_point(box))
+ return draw_points(image,points,outline,fill)
+
+def from_numpy(numpy_array):
+ return Image.fromarray(numpy_array)
\ No newline at end of file
diff --git a/gradio_utils.py b/gradio_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..619ed9f16fc7d3593cfaab6c7ed58348741b5a8d
--- /dev/null
+++ b/gradio_utils.py
@@ -0,0 +1,68 @@
+
+
+import os
+import time
+import io
+import hashlib
+
+#2024-11-28 support bytes get_buffer_id,save_buffer
+def clear_old_files(dir="files",passed_time=60*60):
+ try:
+ files = os.listdir(dir)
+ current_time = time.time()
+ for file in files:
+ file_path = os.path.join(dir,file)
+
+ ctime = os.stat(file_path).st_ctime
+ diff = current_time - ctime
+ #print(f"ctime={ctime},current_time={current_time},passed_time={passed_time},diff={diff}")
+ if diff > passed_time:
+ os.remove(file_path)
+    except Exception:
+        print("maybe the gallery is still using the file")
+
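+# Derive a short id from the buffer contents via SHA-256 so identical data maps to the same file name.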
+def get_buffer_id(buffer,length=32):
+ if isinstance(buffer,bytes):
+ value = buffer
+ else:
+ value=buffer.getvalue()
+ hash_object = hashlib.sha256(value)
+ hex_dig = hash_object.hexdigest()
+ unique_id = hex_dig[:length]
+ return unique_id
+
+def get_image_id(image):
+ buffer = io.BytesIO()
+ image.save(buffer, format='PNG')
+ return get_buffer_id(buffer)
+
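+# Save a PIL image under dir_name, using its content hash as the file name.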
+def save_image(image,extension="jpg",dir_name="files"):
+ id = get_image_id(image)
+ os.makedirs(dir_name,exist_ok=True)
+ file_path = f"{dir_name}/{id}.{extension}"
+
+ image.save(file_path)
+ return file_path
+
+def save_buffer(buffer,extension="webp",dir_name="files"):
+ id = get_buffer_id(buffer)
+ os.makedirs(dir_name,exist_ok=True)
+ file_path = f"{dir_name}/{id}.{extension}"
+
+ with open(file_path,"wb") as f:
+ if isinstance(buffer,bytes):
+ f.write(buffer)
+ else:
+ f.write(buffer.getvalue())
+ return file_path
+
+def write_file(file_path,text):
+ with open(file_path, 'w', encoding='utf-8') as f:
+ f.write(text)
+
+def read_file(file_path):
+ """read the text of target file
+ """
+ with open(file_path, 'r', encoding='utf-8') as f:
+ content = f.read()
+ return content
\ No newline at end of file
diff --git a/mp_estimate.py b/mp_estimate.py
new file mode 100644
index 0000000000000000000000000000000000000000..124baa160ecef64f59ab4f797dcd69cae5c8da10
--- /dev/null
+++ b/mp_estimate.py
@@ -0,0 +1,250 @@
+#2024-12-04 add forehead_chin_points_pair,estimate_rotations
+#format is label,first,second,middle
+#2024-12-05 deg to rad
+#2024-12-06 get_feature_ratios_cordinate
+#2024-12-08 create_detail_labels
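+# Each entry pairs a landmark on one side of the face with its counterpart on the other side
+# plus a midline landmark, e.g. the inner eye corners (133, 362) against nose-bridge point 6.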
+horizontal_points_pair = [
+ [
+ "inner-eye",133,362,6
+ ],
+ [
+ "outer-eye",33,263,168
+ ],
+ [
+ "mouth",61,291,13
+ ],
+ [
+ "eyeblow",105,334,9
+ ],[
+ "nose",98,327,2
+ ],[
+ "contour",143,372,6
+ ],
+ [
+ "chin",32,262,200
+ ], [
+ "cheek",123,352,5
+ ], [
+ "cheek2",192,416,0
+ ], [
+ "nose1",129,358,1
+ ], [
+ "nose2",47,277,195
+ ], [
+ "cheek3",206,426,2
+ ], [
+ "cheek4",101,330,5
+ ], [
+ "cheek5",153,380,6
+ ]
+ ]
+def angle_between_points_and_x_axis(A, B):
+ """
+    Compute the angle between the segment joining points A and B and the positive x axis.
+
+    Args:
+        A: coordinates of point A as an (x, y) tuple or NumPy array
+        B: coordinates of point B as an (x, y) tuple or NumPy array
+
+    Returns:
+        Angle in radians
+ """
+ x = B[0] - A[0]
+ y = B[1] - A[1]
+ return np.arctan2(y, x)
+
+vertical_points_pair=[
+ ["forehead-chin",8,1,199]
+]
+#format is label,first,second,third
+feature_ratios_indices=[
+ ["forehead",67,69,66],
+ ["forehead",10,151,9],
+ ["forehead",297,299,296],
+ #["forehead-chin",8,1,199],
+ #["middle-chin",168,199,2],
+ ["middle",168,195,2],
+ ["right",153,101,206],
+ ["right2",133,47,129],
+ ["left",380,330,426],
+ ["left2",362,277,358],
+ ["right-contour",143,123,192],
+ ["left-contour",372,352,416],
+ ["nose",4,1,2],
+ ]
+
+feature_angles_indices =[
+ ["forehead1",9,6],
+ ["forehead2",69,299],
+ ["eyes1",133,362],
+ ["eyes2",133,33],
+ ["eyes3",362,263],
+ ["nose1",6,2],
+ ["nose1",98,327],
+ ["nose1",2,1],
+ ["nose1",1,6],
+ ["lip",61,291],
+ ["lip",0,17],
+ ["jaw",152,199],
+ ["jaw",194,418],
+ ["cheek",118,214],
+ ["cheek",347,434],
+ ["contour",389,397],
+ ["contour",127,172],
+]
+def get_feature_angles_cordinate(face_landmarks,angles=feature_angles_indices):
+ points = [get_normalized_cordinate(face_landmarks,i) for i in range(468)]
+ return get_feature_angles_cordinate_points(points,angles)
+
+def get_feature_angles_cordinate_points(points,angles=feature_angles_indices):
+ cordinates=[]
+ result_angles = []
+ for indices in angles:
+ points_cordinate = get_points_by_indices(points,indices[1:])#first one is label
+ angle_rad =angle_between_points_and_x_axis(points_cordinate[0][:2],points_cordinate[1][:2])
+ result_angles.append(angle_rad)
+ cordinates.append(points_cordinate)
+ return cordinates,result_angles
+
+def get_feature_ratios_cordinate(face_landmarks,ratios=feature_ratios_indices):
+ points = [get_normalized_cordinate(face_landmarks,i) for i in range(468)]
+ return get_feature_angles_cordinate_points(points,ratios)
+
+def ratios_cordinates(cordinates):
+
+ distance_a = calculate_distance(cordinates[0],cordinates[1])
+ distance_b = calculate_distance(cordinates[1],cordinates[2])
+ if distance_a == 0 or distance_b == 0:
+ return 0
+ else:
+ return distance_a/distance_b
+
+def get_feature_ratios_cordinate_points(points,ratios=feature_ratios_indices):
+ cordinates=[]
+ result_ratios = []
+ for indices in ratios:
+ points_cordinate = get_points_by_indices(points,indices[1:])#first one is label
+ result_ratios.append(ratios_cordinates(points_cordinate))
+ cordinates.append(points_cordinate)
+ return cordinates,result_ratios
+
+
+#vertical-format
+forehead_chin_points_pair=[
+ [
+ "forehead-chin",8,1,199
+ ]
+]
+horizontal_contour_points_pair=[
+ [
+ "contour",143,6,372
+ ]
+]
+import math
+def calculate_distance(xy, xy2):
+ return math.sqrt((xy2[0] - xy[0])**2 + (xy2[1] - xy[1])**2)
+
+def create_detail_labels(values,radian=False,pair_data=horizontal_points_pair):
+ assert len(values) == len(pair_data)
+ lines = []
+ for i,value in enumerate(values):
+ if radian:
+ value=math.degrees(value)
+ lines.append(f"{pair_data[i][0]} = {value:.2f}")
+ return "\n".join(lines)
+
+import numpy as np
+from mp_utils import get_normalized_cordinate
+def estimate_horizontal(face_landmarks,pair_data = horizontal_points_pair):
+ points = [get_normalized_cordinate(face_landmarks,i) for i in range(468)]
+ return estimate_horizontal_points(points,pair_data)
+
+def get_points_by_indices(face_landmark_points,indices):
+ points = [face_landmark_points[index] for index in indices]
+ return points
+
+def normalized_to_pixel(cordinates,width,height):
+ pixel_point = [[pt[0]*width,pt[1]*height] for pt in cordinates]
+ return pixel_point
+
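+# For each landmark pair: the angle of the segment joining the two side points against the
+# x axis approximates head roll, and the ratio of the side-to-middle distances approximates yaw.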
+def estimate_horizontal_points(face_landmark_points,pair_data = horizontal_points_pair):
+ z_angles=[]
+ y_ratios = []
+ cordinates = []
+ for compare_point in pair_data:
+ points_cordinate = get_points_by_indices(face_landmark_points,compare_point[1:])#first one is label
+ cordinates.append(points_cordinate)
+ angle_rad =angle_between_points_and_x_axis(points_cordinate[0][:2],points_cordinate[1][:2])
+ #angle_deg = np.degrees(angle_rad)
+ z_angles.append(angle_rad)
+ right_distance = calculate_distance(points_cordinate[0],points_cordinate[2])
+ left_distance = calculate_distance(points_cordinate[1],points_cordinate[2])
+ y_ratios.append(left_distance/(right_distance+left_distance))
+ return z_angles,y_ratios,cordinates,pair_data
+
+def estimate_vertical(face_landmarks,pair_data = vertical_points_pair):
+ points = [get_normalized_cordinate(face_landmarks,i) for i in range(468)]
+ return estimate_vertical_points(points,pair_data)
+
+
+def estimate_rotations_v2(face_landmarker_result):
+ points = get_normalized_landmarks(face_landmarker_result.face_landmarks,True)
+ values1_text=estimate_rotations_point(points)
+ result3,ratios = get_feature_ratios_cordinate_points(points)
+ key_cordinates,angles = get_feature_angles_cordinate_points(points)
+ angles_str=[str(angle) for angle in angles]
+ ratios_str=[str(ratio) for ratio in ratios]
+ return f"{values1_text},{','.join(angles_str)},{','.join(ratios_str)}"
+
+from mp_utils import get_normalized_landmarks
+def estimate_rotations(face_landmarker_result):
+ points = get_normalized_landmarks(face_landmarker_result.face_landmarks,True)
+ return estimate_rotations_point(points)
+def estimate_rotations_point(points):
+ z_angles,y_ratios,h_cordinates,_ =estimate_horizontal_points(points)
+ z_angle = np.mean(z_angles)
+ y_ratio = np.mean(y_ratios)
+ _,x_ratios,h_cordinates,_ =estimate_vertical_points(points)
+ x_ratio = np.mean(x_ratios)
+
+ x_angle,_,_,_ =estimate_vertical_points(points,forehead_chin_points_pair)
+ x_angle=np.mean(x_angle)
+
+ length_ratio = estimate_ratio(points)
+
+ result = f"{x_ratio:.6f},{y_ratio:.6f},{z_angle:.6f},{x_angle:.6f},{length_ratio:.6f}"
+ return result
+
+def estimate_ratio(face_landmark_points,a_line=forehead_chin_points_pair,b_line=horizontal_contour_points_pair):
+    points_cordinate_a = get_points_by_indices(face_landmark_points,a_line[0][1:])#for compatibility
+ points_cordinate_b = get_points_by_indices(face_landmark_points,b_line[0][1:])
+
+ distance_a = calculate_distance(points_cordinate_a[0],points_cordinate_a[2])
+ distance_b = calculate_distance(points_cordinate_b[0],points_cordinate_b[2])
+ if distance_a == 0 or distance_b == 0:
+ return 0
+ else:
+ return distance_a/distance_b
+
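+# Vertical counterpart: the forehead-to-chin segment gives a tilt angle, and the ratio of the
+# lower part to the full length is used as a pitch estimate.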
+def estimate_vertical_points(face_landmarks,pair_data = vertical_points_pair):
+ angles = []
+ ratios = []
+ cordinates = []
+ for compare_point in pair_data:
+ points_cordinate = get_points_by_indices(face_landmarks,compare_point[1:])#first one is label
+ cordinates.append(points_cordinate)
+ angle_rad =angle_between_points_and_x_axis(points_cordinate[0][:2],points_cordinate[2][:2])
+ #angle_deg = np.degrees(angle_rad)
+ angles.append(angle_rad)
+ up_distance = calculate_distance(points_cordinate[0],points_cordinate[1])
+ down_distance = calculate_distance(points_cordinate[1],points_cordinate[2])
+ ratios.append(down_distance/(down_distance+up_distance))
+ return angles,ratios,cordinates,pair_data
+def mean_std_label(values,radian=False):
+ mean_value = np.mean(values)
+ std_value = np.std(values)
+ if radian:
+ mean_value = math.degrees(mean_value)
+ std_value = math.degrees(std_value)
+ value_text = f"mean:{mean_value:.3f} std:{std_value:.3f}"
+ return value_text
\ No newline at end of file
diff --git a/mp_triangles.py b/mp_triangles.py
new file mode 100644
index 0000000000000000000000000000000000000000..14be3cce712fda95ef72d51834675b9131c8d761
--- /dev/null
+++ b/mp_triangles.py
@@ -0,0 +1,1016 @@
+'''
+I don't know the license; I'll write my own generator script if I have spare time.
+See https://stackoverflow.com/questions/69858216/mediapipe-facemesh-vertices-mapping
+Quoting that answer: "so I wrote a simple program to generate the vertice tuples from it. The result is in the given json string. You're welcome to copy it if it helps."
+'''
+
+#2024-12-01 add hole-triangles
+#02
+
+INNER_MOUTH =[
+ [78,191,95],
+ [191,95,80],
+ [80,88,95],
+ [80,81,88],
+ [88,81,178],
+ [81,178,82],
+ [178,82,87],
+ [82,87,13],
+ [87,14,13],
+ [13,312,317],
+ [14,317,13],
+ [312,402,311],
+ [317,402,312],
+ [311,402,318],
+ [311,318,310],
+ [310,324,318],
+ [310,324,415],
+ [308,415,324]
+]
+
+INNER_LEFT_EYES=[
+ [33,246,7],
+
+ [246,7,163],
+ [246,163,161],
+ [161,163,144],
+ [161,144,160],
+ [160,144,145],
+ [160,145,159],
+ [159,145,153],
+ [159,153,158],
+ [158,153,154],
+ [158,154,157],
+ [157,154,155],
+ [157,155,173],
+
+ [173,155,133]
+]
+INNER_RIGHT_EYES=[
+ [362,398,382],
+
+ [382,398,384],
+ [382,384,381],
+ [381,384,385],
+ [381,385,380],
+ [380,385,386],
+ [380,386,374],
+ [374,386,387],
+ [374,387,373],
+ [373,387,388],
+ [373,388,390],
+ [390,388,466],
+ [390,249,466],
+
+ [466,263,249]
+]
+
+RIGHT_CONTOURS = [
+ [152, 175, 199], # LINE_RIGHT_CONTOUR_0
+ [148, 171, 208], # LINE_RIGHT_CONTOUR_1
+ [176, 140, 32], # LINE_RIGHT_CONTOUR_2
+ [149, 170, 211], # LINE_RIGHT_CONTOUR_3
+ [150, 169, 210], # LINE_RIGHT_CONTOUR_4
+ [136, 135, 214], # LINE_RIGHT_CONTOUR_5
+ [172, 138, 192], # LINE_RIGHT_CONTOUR_6
+ [58, 215, 213], # LINE_RIGHT_CONTOUR_7
+ [132, 177, 147], # LINE_RIGHT_CONTOUR_8
+ [93, 137, 123], # LINE_RIGHT_CONTOUR_9
+ [234, 227, 116], # LINE_RIGHT_CONTOUR_10
+ [127, 34, 143], # LINE_RIGHT_CONTOUR_11
+ [162, 139, 156], # LINE_RIGHT_CONTOUR_12
+ [21, 71, 70], # LINE_RIGHT_CONTOUR_13
+ [54, 68, 63], # LINE_RIGHT_CONTOUR_14
+ [103, 104, 105], # LINE_RIGHT_CONTOUR_15
+ [67, 69, 66], # LINE_RIGHT_CONTOUR_16
+ [109, 108, 107], # LINE_RIGHT_CONTOUR_17
+ [10, 151, 9] # LINE_RIGHT_CONTOUR_18
+]
+
+LEFT_CONTOURS = [
+ [377, 396, 428], # LINE_LEFT_CONTOUR_1
+ [400, 369, 262], # LINE_LEFT_CONTOUR_2
+ [378, 395, 431], # LINE_LEFT_CONTOUR_3
+ [379, 394, 430], # LINE_LEFT_CONTOUR_4
+ [365, 364, 434], # LINE_LEFT_CONTOUR_5
+ [397, 367, 416], # LINE_LEFT_CONTOUR_6
+ [288, 435, 433], # LINE_LEFT_CONTOUR_7
+ [361, 401, 376], # LINE_LEFT_CONTOUR_8
+ [323, 366, 352], # LINE_LEFT_CONTOUR_9
+ [454, 447, 345], # LINE_LEFT_CONTOUR_10
+ [356, 264, 372], # LINE_LEFT_CONTOUR_11
+ [389, 368, 383], # LINE_LEFT_CONTOUR_12
+ [251, 301, 300], # LINE_LEFT_CONTOUR_13
+ [284, 298, 293], # LINE_LEFT_CONTOUR_14
+ [332, 333, 334], # LINE_LEFT_CONTOUR_15
+ [297, 299, 296], # LINE_LEFT_CONTOUR_16
+ [338, 337, 336] # LINE_LEFT_CONTOUR_17
+]
+def get_triangles_copy(base=True,left_eye=False,right_eye=False,mouth=False):
+ triangles = []
+ if base:
+ triangles += mesh_triangle_indices
+ if left_eye:
+ triangles += INNER_LEFT_EYES
+ if right_eye:
+ triangles += INNER_RIGHT_EYES
+ if mouth:
+ triangles += INNER_MOUTH
+ return triangles
+
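+# Illustrative usage (a sketch, not part of the original code): mesh_triangle_indices
+# below is the base 468-point face-mesh triangulation, and the INNER_* lists close the
+# eye/mouth holes when requested.
+#
+#   triangles = get_triangles_copy(base=True, left_eye=True, right_eye=True, mouth=True)
+#   # -> a fresh list combining the base mesh with the hole-filling triangles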
+
+def contour_to_triangles(is_right=True,down_up=True):
+ triangles = []
+ if is_right:
+ if down_up:
+ contours = RIGHT_CONTOURS
+ else:
+ contours = RIGHT_CONTOURS[::-1]
+ else:
+ if down_up:
+ contours = LEFT_CONTOURS
+ else:
+ contours = LEFT_CONTOURS[::-1]
+
+ sorted_mesh_triangle_indices = []
+ for triangle in mesh_triangle_indices:
+ sorted_mesh_triangle_indices.append(sorted(triangle))
+
+ # there is no way to know how the original triangles were wound, so generate every
+ # candidate ordering and keep only the ones that actually exist in mesh_triangle_indices.
+ for i in range(len(contours)-1):
+ first_line = contours[i]
+ second_line = contours[i+1]
+ #outer
+ triangles.append([first_line[0],first_line[1],second_line[0]])
+ triangles.append([second_line[0],second_line[1],first_line[1]])
+ triangles.append([first_line[0],first_line[1],second_line[1]])
+ triangles.append([second_line[0],second_line[1],first_line[0]])
+
+ #inner
+ triangles.append([first_line[1],first_line[2],second_line[1]])
+ triangles.append([second_line[1],second_line[2],first_line[2]])
+ triangles.append([first_line[1],first_line[2],second_line[2]])
+ triangles.append([second_line[1],second_line[2],first_line[1]])
+
+ exist_triangles = []
+ for triangle in triangles:
+ sorted_triangle = sorted(triangle)
+ if sorted_triangle in sorted_mesh_triangle_indices:
+ exist_triangles.append(triangle)
+ return exist_triangles
+
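+# Illustrative usage (a sketch, not part of the original code): collect the contour
+# triangles on the right side of the face, keeping only candidates that also appear
+# in mesh_triangle_indices defined below.
+#
+#   right_side = contour_to_triangles(is_right=True, down_up=True)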
+
+mesh_triangle_indices=[
+ [127, 34, 139],
+ [ 11, 0, 37],
+ [232, 231, 120],
+ [ 72, 37, 39],
+ [128, 121, 47],
+ [232, 121, 128],
+ [104, 69, 67],
+ [175, 171, 148],
+ [118, 50, 101],
+ [ 73, 39, 40],
+ [ 9, 151, 108],
+ [ 48, 115, 131],
+ [194, 204, 211],
+ [ 74, 40, 185],
+ [ 80, 42, 183],
+ [ 40, 92, 186],
+ [230, 229, 118],
+ [202, 212, 214],
+ [ 83, 18, 17],
+ [ 76, 61, 146],
+ [160, 29, 30],
+ [ 56, 157, 173],
+ [106, 204, 194],
+ [135, 214, 192],
+ [203, 165, 98],
+ [ 21, 71, 68],
+ [ 51, 45, 4],
+ [144, 24, 23],
+ [ 77, 146, 91],
+ [205, 50, 187],
+ [201, 200, 18],
+ [ 91, 106, 182],
+ [ 90, 91, 181],
+ [ 85, 84, 17],
+ [206, 203, 36],
+ [148, 171, 140],
+ [ 92, 40, 39],
+ [193, 189, 244],
+ [159, 158, 28],
+ [247, 246, 161],
+ [236, 3, 196],
+ [ 54, 68, 104],
+ [193, 168, 8],
+ [117, 228, 31],
+ [189, 193, 55],
+ [ 98, 97, 99],
+ [126, 47, 100],
+ [166, 79, 218],
+ [155, 154, 26],
+ [209, 49, 131],
+ [135, 136, 150],
+ [ 47, 126, 217],
+ [223, 52, 53],
+ [ 45, 51, 134],
+ [211, 170, 140],
+ [ 67, 69, 108],
+ [ 43, 106, 91],
+ [230, 119, 120],
+ [226, 130, 247],
+ [ 63, 53, 52],
+ [238, 20, 242],
+ [ 46, 70, 156],
+ [ 78, 62, 96],
+ [ 46, 53, 63],
+ [143, 34, 227],
+ [123, 117, 111],
+ [ 44, 125, 19],
+ [236, 134, 51],
+ [216, 206, 205],
+ [154, 153, 22],
+ [ 39, 37, 167],
+ [200, 201, 208],
+ [ 36, 142, 100],
+ [ 57, 212, 202],
+ [ 20, 60, 99],
+ [ 28, 158, 157],
+ [ 35, 226, 113],
+ [160, 159, 27],
+ [204, 202, 210],
+ [113, 225, 46],
+ [ 43, 202, 204],
+ [ 62, 76, 77],
+ [137, 123, 116],
+ [ 41, 38, 72],
+ [203, 129, 142],
+ [ 64, 98, 240],
+ [ 49, 102, 64],
+ [ 41, 73, 74],
+ [212, 216, 207],
+ [ 42, 74, 184],
+ [169, 170, 211],
+ [170, 149, 176],
+ [105, 66, 69],
+ [122, 6, 168],
+ [123, 147, 187],
+ [ 96, 77, 90],
+ [ 65, 55, 107],
+ [ 89, 90, 180],
+ [101, 100, 120],
+ [ 63, 105, 104],
+ [ 93, 137, 227],
+ [ 15, 86, 85],
+ [129, 102, 49],
+ [ 14, 87, 86],
+ [ 55, 8, 9],
+ [100, 47, 121],
+ [145, 23, 22],
+ [ 88, 89, 179],
+ [ 6, 122, 196],
+ [ 88, 95, 96],
+ [138, 172, 136],
+ [215, 58, 172],
+ [115, 48, 219],
+ [ 42, 80, 81],
+ [195, 3, 51],
+ [ 43, 146, 61],
+ [171, 175, 199],
+ [ 81, 82, 38],
+ [ 53, 46, 225],
+ [144, 163, 110],
+ [ 52, 65, 66],
+ [229, 228, 117],
+ [ 34, 127, 234],
+ [107, 108, 69],
+ [109, 108, 151],
+ [ 48, 64, 235],
+ [ 62, 78, 191],
+ [129, 209, 126],
+ [111, 35, 143],
+ [117, 123, 50],
+ [222, 65, 52],
+ [ 19, 125, 141],
+ [221, 55, 65],
+ [ 3, 195, 197],
+ [ 25, 7, 33],
+ [220, 237, 44],
+ [ 70, 71, 139],
+ [122, 193, 245],
+ [247, 130, 33],
+ [ 71, 21, 162],
+ [170, 169, 150],
+ [188, 174, 196],
+ [216, 186, 92],
+ [ 2, 97, 167],
+ [141, 125, 241],
+ [164, 167, 37],
+ [ 72, 38, 12],
+ [ 38, 82, 13],
+ [ 63, 68, 71],
+ [226, 35, 111],
+ [101, 50, 205],
+ [206, 92, 165],
+ [209, 198, 217],
+ [165, 167, 97],
+ [220, 115, 218],
+ [133, 112, 243],
+ [239, 238, 241],
+ [214, 135, 169],
+ [190, 173, 133],
+ [171, 208, 32],
+ [125, 44, 237],
+ [ 86, 87, 178],
+ [ 85, 86, 179],
+ [ 84, 85, 180],
+ [ 83, 84, 181],
+ [201, 83, 182],
+ [137, 93, 132],
+ [ 76, 62, 183],
+ [ 61, 76, 184],
+ [ 57, 61, 185],
+ [212, 57, 186],
+ [214, 207, 187],
+ [ 34, 143, 156],
+ [ 79, 239, 237],
+ [123, 137, 177],
+ [ 44, 1, 4],
+ [201, 194, 32],
+ [ 64, 102, 129],
+ [213, 215, 138],
+ [ 59, 166, 219],
+ [242, 99, 97],
+ [ 2, 94, 141],
+ [ 75, 59, 235],
+ [ 24, 110, 228],
+ [ 25, 130, 226],
+ [ 23, 24, 229],
+ [ 22, 23, 230],
+ [ 26, 22, 231],
+ [112, 26, 232],
+ [189, 190, 243],
+ [221, 56, 190],
+ [ 28, 56, 221],
+ [ 27, 28, 222],
+ [ 29, 27, 223],
+ [ 30, 29, 224],
+ [247, 30, 225],
+ [238, 79, 20],
+ [166, 59, 75],
+ [ 60, 75, 240],
+ [147, 177, 215],
+ [ 20, 79, 166],
+ [187, 147, 213],
+ [112, 233, 244],
+ [233, 128, 245],
+ [128, 114, 188],
+ [114, 217, 174],
+ [131, 115, 220],
+ [217, 198, 236],
+ [198, 131, 134],
+ [177, 132, 58],
+ [143, 35, 124],
+ [110, 163, 7],
+ [228, 110, 25],
+ [356, 389, 368],
+ [ 11, 302, 267],
+ [452, 350, 349],
+ [302, 303, 269],
+ [357, 343, 277],
+ [452, 453, 357],
+ [333, 332, 297],
+ [175, 152, 377],
+ [347, 348, 330],
+ [303, 304, 270],
+ [ 9, 336, 337],
+ [278, 279, 360],
+ [418, 262, 431],
+ [304, 408, 409],
+ [310, 415, 407],
+ [270, 409, 410],
+ [450, 348, 347],
+ [422, 430, 434],
+ [313, 314, 17],
+ [306, 307, 375],
+ [387, 388, 260],
+ [286, 414, 398],
+ [335, 406, 418],
+ [364, 367, 416],
+ [423, 358, 327],
+ [251, 284, 298],
+ [281, 5, 4],
+ [373, 374, 253],
+ [307, 320, 321],
+ [425, 427, 411],
+ [421, 313, 18],
+ [321, 405, 406],
+ [320, 404, 405],
+ [315, 16, 17],
+ [426, 425, 266],
+ [377, 400, 369],
+ [322, 391, 269],
+ [417, 465, 464],
+ [386, 257, 258],
+ [466, 260, 388],
+ [456, 399, 419],
+ [284, 332, 333],
+ [417, 285, 8],
+ [346, 340, 261],
+ [413, 441, 285],
+ [327, 460, 328],
+ [355, 371, 329],
+ [392, 439, 438],
+ [382, 341, 256],
+ [429, 420, 360],
+ [364, 394, 379],
+ [277, 343, 437],
+ [443, 444, 283],
+ [275, 440, 363],
+ [431, 262, 369],
+ [297, 338, 337],
+ [273, 375, 321],
+ [450, 451, 349],
+ [446, 342, 467],
+ [293, 334, 282],
+ [458, 461, 462],
+ [276, 353, 383],
+ [308, 324, 325],
+ [276, 300, 293],
+ [372, 345, 447],
+ [352, 345, 340],
+ [274, 1, 19],
+ [456, 248, 281],
+ [436, 427, 425],
+ [381, 256, 252],
+ [269, 391, 393],
+ [200, 199, 428],
+ [266, 330, 329],
+ [287, 273, 422],
+ [250, 462, 328],
+ [258, 286, 384],
+ [265, 353, 342],
+ [387, 259, 257],
+ [424, 431, 430],
+ [342, 353, 276],
+ [273, 335, 424],
+ [292, 325, 307],
+ [366, 447, 345],
+ [271, 303, 302],
+ [423, 266, 371],
+ [294, 455, 460],
+ [279, 278, 294],
+ [271, 272, 304],
+ [432, 434, 427],
+ [272, 407, 408],
+ [394, 430, 431],
+ [395, 369, 400],
+ [334, 333, 299],
+ [351, 417, 168],
+ [352, 280, 411],
+ [325, 319, 320],
+ [295, 296, 336],
+ [319, 403, 404],
+ [330, 348, 349],
+ [293, 298, 333],
+ [323, 454, 447],
+ [ 15, 16, 315],
+ [358, 429, 279],
+ [ 14, 15, 316],
+ [285, 336, 9],
+ [329, 349, 350],
+ [374, 380, 252],
+ [318, 402, 403],
+ [ 6, 197, 419],
+ [318, 319, 325],
+ [367, 364, 365],
+ [435, 367, 397],
+ [344, 438, 439],
+ [272, 271, 311],
+ [195, 5, 281],
+ [273, 287, 291],
+ [396, 428, 199],
+ [311, 271, 268],
+ [283, 444, 445],
+ [373, 254, 339],
+ [282, 334, 296],
+ [449, 347, 346],
+ [264, 447, 454],
+ [336, 296, 299],
+ [338, 10, 151],
+ [278, 439, 455],
+ [292, 407, 415],
+ [358, 371, 355],
+ [340, 345, 372],
+ [346, 347, 280],
+ [442, 443, 282],
+ [ 19, 94, 370],
+ [441, 442, 295],
+ [248, 419, 197],
+ [263, 255, 359],
+ [440, 275, 274],
+ [300, 383, 368],
+ [351, 412, 465],
+ [263, 467, 466],
+ [301, 368, 389],
+ [395, 378, 379],
+ [412, 351, 419],
+ [436, 426, 322],
+ [ 2, 164, 393],
+ [370, 462, 461],
+ [164, 0, 267],
+ [302, 11, 12],
+ [268, 12, 13],
+ [293, 300, 301],
+ [446, 261, 340],
+ [330, 266, 425],
+ [426, 423, 391],
+ [429, 355, 437],
+ [391, 327, 326],
+ [440, 457, 438],
+ [341, 382, 362],
+ [459, 457, 461],
+ [434, 430, 394],
+ [414, 463, 362],
+ [396, 369, 262],
+ [354, 461, 457],
+ [316, 403, 402],
+ [315, 404, 403],
+ [314, 405, 404],
+ [313, 406, 405],
+ [421, 418, 406],
+ [366, 401, 361],
+ [306, 408, 407],
+ [291, 409, 408],
+ [287, 410, 409],
+ [432, 436, 410],
+ [434, 416, 411],
+ [264, 368, 383],
+ [309, 438, 457],
+ [352, 376, 401],
+ [274, 275, 4],
+ [421, 428, 262],
+ [294, 327, 358],
+ [433, 416, 367],
+ [289, 455, 439],
+ [462, 370, 326],
+ [ 2, 326, 370],
+ [305, 460, 455],
+ [254, 449, 448],
+ [255, 261, 446],
+ [253, 450, 449],
+ [252, 451, 450],
+ [256, 452, 451],
+ [341, 453, 452],
+ [413, 464, 463],
+ [441, 413, 414],
+ [258, 442, 441],
+ [257, 443, 442],
+ [259, 444, 443],
+ [260, 445, 444],
+ [467, 342, 445],
+ [459, 458, 250],
+ [289, 392, 290],
+ [290, 328, 460],
+ [376, 433, 435],
+ [250, 290, 392],
+ [411, 416, 433],
+ [341, 463, 464],
+ [453, 464, 465],
+ [357, 465, 412],
+ [343, 412, 399],
+ [360, 363, 440],
+ [437, 399, 456],
+ [420, 456, 363],
+ [401, 435, 288],
+ [372, 383, 353],
+ [339, 255, 249],
+ [448, 261, 255],
+ [133, 243, 190],
+ [133, 155, 112],
+ [ 33, 246, 247],
+ [ 33, 130, 25],
+ [398, 384, 286],
+ [362, 398, 414],
+ [362, 463, 341],
+ [263, 359, 467],
+ [263, 249, 255],
+ [466, 467, 260],
+ [ 75, 60, 166],
+ [238, 239, 79],
+ [162, 127, 139],
+ [ 72, 11, 37],
+ [121, 232, 120],
+ [ 73, 72, 39],
+ [114, 128, 47],
+ [233, 232, 128],
+ [103, 104, 67],
+ [152, 175, 148],
+ [119, 118, 101],
+ [ 74, 73, 40],
+ [107, 9, 108],
+ [ 49, 48, 131],
+ [ 32, 194, 211],
+ [184, 74, 185],
+ [191, 80, 183],
+ [185, 40, 186],
+ [119, 230, 118],
+ [210, 202, 214],
+ [ 84, 83, 17],
+ [ 77, 76, 146],
+ [161, 160, 30],
+ [190, 56, 173],
+ [182, 106, 194],
+ [138, 135, 192],
+ [129, 203, 98],
+ [ 54, 21, 68],
+ [ 5, 51, 4],
+ [145, 144, 23],
+ [ 90, 77, 91],
+ [207, 205, 187],
+ [ 83, 201, 18],
+ [181, 91, 182],
+ [180, 90, 181],
+ [ 16, 85, 17],
+ [205, 206, 36],
+ [176, 148, 140],
+ [165, 92, 39],
+ [245, 193, 244],
+ [ 27, 159, 28],
+ [ 30, 247, 161],
+ [174, 236, 196],
+ [103, 54, 104],
+ [ 55, 193, 8],
+ [111, 117, 31],
+ [221, 189, 55],
+ [240, 98, 99],
+ [142, 126, 100],
+ [219, 166, 218],
+ [112, 155, 26],
+ [198, 209, 131],
+ [169, 135, 150],
+ [114, 47, 217],
+ [224, 223, 53],
+ [220, 45, 134],
+ [ 32, 211, 140],
+ [109, 67, 108],
+ [146, 43, 91],
+ [231, 230, 120],
+ [113, 226, 247],
+ [105, 63, 52],
+ [241, 238, 242],
+ [124, 46, 156],
+ [ 95, 78, 96],
+ [ 70, 46, 63],
+ [116, 143, 227],
+ [116, 123, 111],
+ [ 1, 44, 19],
+ [ 3, 236, 51],
+ [207, 216, 205],
+ [ 26, 154, 22],
+ [165, 39, 167],
+ [199, 200, 208],
+ [101, 36, 100],
+ [ 43, 57, 202],
+ [242, 20, 99],
+ [ 56, 28, 157],
+ [124, 35, 113],
+ [ 29, 160, 27],
+ [211, 204, 210],
+ [124, 113, 46],
+ [106, 43, 204],
+ [ 96, 62, 77],
+ [227, 137, 116],
+ [ 73, 41, 72],
+ [ 36, 203, 142],
+ [235, 64, 240],
+ [ 48, 49, 64],
+ [ 42, 41, 74],
+ [214, 212, 207],
+ [183, 42, 184],
+ [210, 169, 211],
+ [140, 170, 176],
+ [104, 105, 69],
+ [193, 122, 168],
+ [ 50, 123, 187],
+ [ 89, 96, 90],
+ [ 66, 65, 107],
+ [179, 89, 180],
+ [119, 101, 120],
+ [ 68, 63, 104],
+ [234, 93, 227],
+ [ 16, 15, 85],
+ [209, 129, 49],
+ [ 15, 14, 86],
+ [107, 55, 9],
+ [120, 100, 121],
+ [153, 145, 22],
+ [178, 88, 179],
+ [197, 6, 196],
+ [ 89, 88, 96],
+ [135, 138, 136],
+ [138, 215, 172],
+ [218, 115, 219],
+ [ 41, 42, 81],
+ [ 5, 195, 51],
+ [ 57, 43, 61],
+ [208, 171, 199],
+ [ 41, 81, 38],
+ [224, 53, 225],
+ [ 24, 144, 110],
+ [105, 52, 66],
+ [118, 229, 117],
+ [227, 34, 234],
+ [ 66, 107, 69],
+ [ 10, 109, 151],
+ [219, 48, 235],
+ [183, 62, 191],
+ [142, 129, 126],
+ [116, 111, 143],
+ [118, 117, 50],
+ [223, 222, 52],
+ [ 94, 19, 141],
+ [222, 221, 65],
+ [196, 3, 197],
+ [ 45, 220, 44],
+ [156, 70, 139],
+ [188, 122, 245],
+ [139, 71, 162],
+ [149, 170, 150],
+ [122, 188, 196],
+ [206, 216, 92],
+ [164, 2, 167],
+ [242, 141, 241],
+ [ 0, 164, 37],
+ [ 11, 72, 12],
+ [ 12, 38, 13],
+ [ 70, 63, 71],
+ [ 31, 226, 111],
+ [ 36, 101, 205],
+ [203, 206, 165],
+ [126, 209, 217],
+ [ 98, 165, 97],
+ [237, 220, 218],
+ [237, 239, 241],
+ [210, 214, 169],
+ [140, 171, 32],
+ [241, 125, 237],
+ [179, 86, 178],
+ [180, 85, 179],
+ [181, 84, 180],
+ [182, 83, 181],
+ [194, 201, 182],
+ [177, 137, 132],
+ [184, 76, 183],
+ [185, 61, 184],
+ [186, 57, 185],
+ [216, 212, 186],
+ [192, 214, 187],
+ [139, 34, 156],
+ [218, 79, 237],
+ [147, 123, 177],
+ [ 45, 44, 4],
+ [208, 201, 32],
+ [ 98, 64, 129],
+ [192, 213, 138],
+ [235, 59, 219],
+ [141, 242, 97],
+ [ 97, 2, 141],
+ [240, 75, 235],
+ [229, 24, 228],
+ [ 31, 25, 226],
+ [230, 23, 229],
+ [231, 22, 230],
+ [232, 26, 231],
+ [233, 112, 232],
+ [244, 189, 243],
+ [189, 221, 190],
+ [222, 28, 221],
+ [223, 27, 222],
+ [224, 29, 223],
+ [225, 30, 224],
+ [113, 247, 225],
+ [ 99, 60, 240],
+ [213, 147, 215],
+ [ 60, 20, 166],
+ [192, 187, 213],
+ [243, 112, 244],
+ [244, 233, 245],
+ [245, 128, 188],
+ [188, 114, 174],
+ [134, 131, 220],
+ [174, 217, 236],
+ [236, 198, 134],
+ [215, 177, 58],
+ [156, 143, 124],
+ [ 25, 110, 7],
+ [ 31, 228, 25],
+ [264, 356, 368],
+ [ 0, 11, 267],
+ [451, 452, 349],
+ [267, 302, 269],
+ [350, 357, 277],
+ [350, 452, 357],
+ [299, 333, 297],
+ [396, 175, 377],
+ [280, 347, 330],
+ [269, 303, 270],
+ [151, 9, 337],
+ [344, 278, 360],
+ [424, 418, 431],
+ [270, 304, 409],
+ [272, 310, 407],
+ [322, 270, 410],
+ [449, 450, 347],
+ [432, 422, 434],
+ [ 18, 313, 17],
+ [291, 306, 375],
+ [259, 387, 260],
+ [424, 335, 418],
+ [434, 364, 416],
+ [391, 423, 327],
+ [301, 251, 298],
+ [275, 281, 4],
+ [254, 373, 253],
+ [375, 307, 321],
+ [280, 425, 411],
+ [200, 421, 18],
+ [335, 321, 406],
+ [321, 320, 405],
+ [314, 315, 17],
+ [423, 426, 266],
+ [396, 377, 369],
+ [270, 322, 269],
+ [413, 417, 464],
+ [385, 386, 258],
+ [248, 456, 419],
+ [298, 284, 333],
+ [168, 417, 8],
+ [448, 346, 261],
+ [417, 413, 285],
+ [326, 327, 328],
+ [277, 355, 329],
+ [309, 392, 438],
+ [381, 382, 256],
+ [279, 429, 360],
+ [365, 364, 379],
+ [355, 277, 437],
+ [282, 443, 283],
+ [281, 275, 363],
+ [395, 431, 369],
+ [299, 297, 337],
+ [335, 273, 321],
+ [348, 450, 349],
+ [359, 446, 467],
+ [283, 293, 282],
+ [250, 458, 462],
+ [300, 276, 383],
+ [292, 308, 325],
+ [283, 276, 293],
+ [264, 372, 447],
+ [346, 352, 340],
+ [354, 274, 19],
+ [363, 456, 281],
+ [426, 436, 425],
+ [380, 381, 252],
+ [267, 269, 393],
+ [421, 200, 428],
+ [371, 266, 329],
+ [432, 287, 422],
+ [290, 250, 328],
+ [385, 258, 384],
+ [446, 265, 342],
+ [386, 387, 257],
+ [422, 424, 430],
+ [445, 342, 276],
+ [422, 273, 424],
+ [306, 292, 307],
+ [352, 366, 345],
+ [268, 271, 302],
+ [358, 423, 371],
+ [327, 294, 460],
+ [331, 279, 294],
+ [303, 271, 304],
+ [436, 432, 427],
+ [304, 272, 408],
+ [395, 394, 431],
+ [378, 395, 400],
+ [296, 334, 299],
+ [ 6, 351, 168],
+ [376, 352, 411],
+ [307, 325, 320],
+ [285, 295, 336],
+ [320, 319, 404],
+ [329, 330, 349],
+ [334, 293, 333],
+ [366, 323, 447],
+ [316, 15, 315],
+ [331, 358, 279],
+ [317, 14, 316],
+ [ 8, 285, 9],
+ [277, 329, 350],
+ [253, 374, 252],
+ [319, 318, 403],
+ [351, 6, 419],
+ [324, 318, 325],
+ [397, 367, 365],
+ [288, 435, 397],
+ [278, 344, 439],
+ [310, 272, 311],
+ [248, 195, 281],
+ [375, 273, 291],
+ [175, 396, 199],
+ [312, 311, 268],
+ [276, 283, 445],
+ [390, 373, 339],
+ [295, 282, 296],
+ [448, 449, 346],
+ [356, 264, 454],
+ [337, 336, 299],
+ [337, 338, 151],
+ [294, 278, 455],
+ [308, 292, 415],
+ [429, 358, 355],
+ [265, 340, 372],
+ [352, 346, 280],
+ [295, 442, 282],
+ [354, 19, 370],
+ [285, 441, 295],
+ [195, 248, 197],
+ [457, 440, 274],
+ [301, 300, 368],
+ [417, 351, 465],
+ [251, 301, 389],
+ [394, 395, 379],
+ [399, 412, 419],
+ [410, 436, 322],
+ [326, 2, 393],
+ [354, 370, 461],
+ [393, 164, 267],
+ [268, 302, 12],
+ [312, 268, 13],
+ [298, 293, 301],
+ [265, 446, 340],
+ [280, 330, 425],
+ [322, 426, 391],
+ [420, 429, 437],
+ [393, 391, 326],
+ [344, 440, 438],
+ [458, 459, 461],
+ [364, 434, 394],
+ [428, 396, 262],
+ [274, 354, 457],
+ [317, 316, 402],
+ [316, 315, 403],
+ [315, 314, 404],
+ [314, 313, 405],
+ [313, 421, 406],
+ [323, 366, 361],
+ [292, 306, 407],
+ [306, 291, 408],
+ [291, 287, 409],
+ [287, 432, 410],
+ [427, 434, 411],
+ [372, 264, 383],
+ [459, 309, 457],
+ [366, 352, 401],
+ [ 1, 274, 4],
+ [418, 421, 262],
+ [331, 294, 358],
+ [435, 433, 367],
+ [392, 289, 439],
+ [328, 462, 326],
+ [ 94, 2, 370],
+ [289, 305, 455],
+ [339, 254, 448],
+ [359, 255, 446],
+ [254, 253, 449],
+ [253, 252, 450],
+ [252, 256, 451],
+ [256, 341, 452],
+ [414, 413, 463],
+ [286, 441, 414],
+ [286, 258, 441],
+ [258, 257, 442],
+ [257, 259, 443],
+ [259, 260, 444],
+ [260, 467, 445],
+ [309, 459, 250],
+ [305, 289, 290],
+ [305, 290, 460],
+ [401, 376, 435],
+ [309, 250, 392],
+ [376, 411, 433],
+ [453, 341, 464],
+ [357, 453, 465],
+ [343, 357, 412],
+ [437, 343, 399],
+ [344, 360, 440],
+ [420, 437, 456],
+ [360, 420, 363],
+ [361, 401, 288],
+ [265, 372, 353],
+ [390, 339, 249],
+ [339, 448, 255]
+]
\ No newline at end of file
diff --git a/mp_utils.py b/mp_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0e40d2edbace78bac0e994157e7e3354e40f315
--- /dev/null
+++ b/mp_utils.py
@@ -0,0 +1,164 @@
+import math
+
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+from mediapipe.framework.formats import landmark_pb2
+from mediapipe import solutions
+import numpy as np
+
+# 2024-11-27 extract_landmark: add args
+#            add get_pixel_xyz
+# 2024-11-28 add get_normalized_xyz
+# 2024-11-30 add get_normalized_landmarks, sort_triangles_by_depth
+# 2024-12-04 add get_normalized_landmarks args
+def calculate_distance(p1, p2):
+ """
+ Euclidean distance between two points; only the x and y components are used.
+ """
+ return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
+
+
+
+def to_int_points(points):
+ ints=[]
+ for pt in points:
+ #print(pt)
+ value = [int(pt[0]),int(pt[1])]
+ #print(value)
+ ints.append(value)
+ return ints
+
+debug = False
+def divide_line_to_points(points,divided): # return divided + 1
+ total_length = 0
+ line_length_list = []
+ for i in range(len(points)-1):
+ pt_length = calculate_distance(points[i],points[i+1])
+ total_length += pt_length
+ line_length_list.append(pt_length)
+
+ splited_length = total_length/divided
+
+ def get_new_point(index,lerp):
+ pt1 = points[index]
+ pt2 = points[index+1]
+ diff = [pt2[0] - pt1[0], pt2[1]-pt1[1]]
+ new_point = [pt1[0]+diff[0]*lerp,pt1[1]+diff[1]*lerp]
+ if debug:
+ print(f"pt1 ={pt1} pt2 ={pt2} diff={diff} new_point={new_point}")
+
+ return new_point
+
+ if debug:
+ print(f"{total_length} splitted = {splited_length} line-length-list = {len(line_length_list)}")
+ splited_points=[points[0]]
+ for i in range(1,divided):
+ need_length = splited_length*i
+ if debug:
+ print(f"{i} need length = {need_length}")
+ current_length = 0
+ for j in range(len(line_length_list)):
+ line_length = line_length_list[j]
+ current_length+=line_length
+ if current_length>need_length:
+ if debug:
+ print(f"over need length index = {j} current={current_length}")
+ diff = current_length - need_length
+
+ lerp_point = 1.0 - (diff/line_length)
+ if debug:
+ print(f"over = {diff} lerp ={lerp_point}")
+ new_point = get_new_point(j,lerp_point)
+
+ splited_points.append(new_point)
+ break
+
+ splited_points.append(points[-1]) # last one
+ splited_points=to_int_points(splited_points)
+
+ if debug:
+ print(f"sp={len(splited_points)}")
+ return splited_points
+
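+# Illustrative example (not part of the original code): splitting a straight segment
+# into 5 equal parts returns divided + 1 integer points.
+#
+#   divide_line_to_points([[0, 0], [10, 0]], 5)
+#   # -> [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0], [10, 0]]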
+
+
+def expand_bbox(bbox,left=5,top=5,right=5,bottom=5):
+ left_pixel = bbox[2]*(float(left)/100)
+ top_pixel = bbox[3]*(float(top)/100)
+ right_pixel = bbox[2]*(float(right)/100)
+ bottom_pixel = bbox[3]*(float(bottom)/100)
+ new_box = list(bbox)
+ new_box[0] -=left_pixel
+ new_box[1] -=top_pixel
+ new_box[2] +=left_pixel+right_pixel
+ new_box[3] +=top_pixel+bottom_pixel
+ return new_box
+
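+# Illustrative example (not part of the original code): bbox is assumed to be
+# [x, y, width, height] and the margins are percentages of width/height.
+#
+#   expand_bbox([100, 100, 200, 100])
+#   # -> [90.0, 95.0, 220.0, 110.0]  (5% margin added on every side)
+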
+# index is a normalized-landmark index; see mp_constants for the named values
+def get_normalized_cordinate(face_landmarks_list,index):
+ x=face_landmarks_list[0][index].x
+ y=face_landmarks_list[0][index].y
+ return x,y
+
+def get_normalized_xyz(face_landmarks_list,index):
+ x=face_landmarks_list[0][index].x
+ y=face_landmarks_list[0][index].y
+ z=face_landmarks_list[0][index].z
+ return x,y,z
+
+def get_normalized_landmarks(face_landmarks_list,recentering=False,recentering_index=4,z_multiply=0.8):
+ cordinates = [get_normalized_xyz(face_landmarks_list,i) for i in range(0,468)]
+ if recentering:
+ normalized_center_point = cordinates[recentering_index]
+ offset_x = normalized_center_point[0]
+ offset_y = normalized_center_point[1]
+
+ #need aspect?
+ cordinates = [[point[0]-offset_x,point[1]-offset_y,point[2]*z_multiply] for point in cordinates]
+
+ return cordinates
+
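+# Note (added for clarity): with recentering=True the coordinates are shifted so the
+# landmark at recentering_index (4, the nose tip, by default) becomes the x/y origin,
+# and z is scaled by z_multiply. Illustrative call:
+#   pts = get_normalized_landmarks(face_landmarker_result.face_landmarks, recentering=True)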
+
+
+def sort_triangles_by_depth(landmark_points,mesh_triangle_indices):
+ assert len(landmark_points) == 468
+ mesh_triangle_indices.sort(key=lambda triangle: sum(landmark_points[index][2] for index in triangle) / len(triangle)
+ ,reverse=True)
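+# Note (added for clarity): the sort above runs in place and orders triangles by
+# descending mean z, i.e. back-to-front under MediaPipe's convention that larger z is
+# farther from the camera (painter's-algorithm order). Pass a copy such as
+# get_triangles_copy() from mp_triangles if the original order must be preserved.
+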
+# z is normalized
+def get_pixel_xyz(face_landmarks_list,landmark,width,height):
+ point = get_normalized_cordinate(face_landmarks_list,landmark)
+ z = face_landmarks_list[0][landmark].z
+ return int(point[0]*width),int(point[1]*height),z
+
+def get_pixel_cordinate(face_landmarks_list,landmark,width,height):
+ point = get_normalized_cordinate(face_landmarks_list,landmark)
+ return int(point[0]*width),int(point[1]*height)
+
+def get_pixel_cordinate_list(face_landmarks_list,indices,width,height):
+ cordinates = []
+ for index in indices:
+ cordinates.append(get_pixel_cordinate(face_landmarks_list,index,width,height))
+ return cordinates
+
+def extract_landmark(image_data,model_path="face_landmarker.task",min_face_detection_confidence=0, min_face_presence_confidence=0,output_facial_transformation_matrixes=False):
+ BaseOptions = mp.tasks.BaseOptions
+ FaceLandmarker = mp.tasks.vision.FaceLandmarker
+ FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
+ VisionRunningMode = mp.tasks.vision.RunningMode
+
+ options = FaceLandmarkerOptions(
+ base_options=BaseOptions(model_asset_path=model_path),
+ running_mode=VisionRunningMode.IMAGE,
+ min_face_detection_confidence=min_face_detection_confidence,
+ min_face_presence_confidence=min_face_presence_confidence,
+ output_facial_transformation_matrixes=output_facial_transformation_matrixes
+ )
+
+ with FaceLandmarker.create_from_options(options) as landmarker:
+ if isinstance(image_data,str):
+ mp_image = mp.Image.create_from_file(image_data)
+ else:
+ mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(image_data))
+ face_landmarker_result = landmarker.detect(mp_image)
+ return mp_image,face_landmarker_result
+
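+# Minimal smoke test (an illustrative sketch, not part of the original module).
+# "sample.jpg" is a placeholder file name; face_landmarker.task is expected to sit
+# next to this script, as in the rest of the repository.
+if __name__ == "__main__":
+    import cv2  # opencv-python is already listed in requirements.txt
+
+    bgr = cv2.imread("sample.jpg")
+    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
+    mp_image, result = extract_landmark(rgb, "face_landmarker.task", 0, 0, True)
+    if result.face_landmarks:
+        h, w = rgb.shape[:2]
+        # the nose tip is landmark index 4 in the 468-point face mesh
+        print("nose tip (px):", get_pixel_cordinate(result.face_landmarks, 4, w, h))
+    else:
+        print("no face detected")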
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f943d679964ba24e1cc071b6a35560ec4aa255a8
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+numpy
+torch
+spaces
+mediapipe
+opencv-python
+scipy  # used by app.py (scipy.spatial.transform.Rotation)
\ No newline at end of file