diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..24f980e0115c7057f2e57b235e98894f3932c564 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.task filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..e0926aef1e996760784d1e51965517ce493b825c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__
+files
\ No newline at end of file
diff --git a/.gradio/cached_examples/25/Output/41d0a13d3c5d3c05accc/00003245_00.webp b/.gradio/cached_examples/25/Output/41d0a13d3c5d3c05accc/00003245_00.webp
new file mode 100644
index 0000000000000000000000000000000000000000..7b9a451ca7a8b2df0dc8ebdae31062c3fa3514dd
Binary files /dev/null and b/.gradio/cached_examples/25/Output/41d0a13d3c5d3c05accc/00003245_00.webp differ
diff --git a/.gradio/cached_examples/25/log.csv b/.gradio/cached_examples/25/log.csv
new file mode 100644
index 0000000000000000000000000000000000000000..5b7d71788e5219c161d7763d67967f4c52ebfdfd
--- /dev/null
+++ b/.gradio/cached_examples/25/log.csv
@@ -0,0 +1,2 @@
+Output,timestamp
+"[{""image"": {""path"": "".gradio\\cached_examples\\25\\Output\\41d0a13d3c5d3c05accc\\00003245_00.webp"", ""url"": ""/gradio_api/file=C:\\Users\\owner\\AppData\\Local\\Temp\\gradio\\931aec3e3a5351edb4bba6660e3848db2c5b49fd47c1f07afe9f05213b87363b\\00003245_00.webp"", ""size"": null, ""orig_name"": ""00003245_00.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""caption"": ""animation""}]",2024-11-16 16:03:27.862651
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..c31e5d84ae6e21cac91bf6af6c7863f820447970
--- /dev/null
+++ b/app.py
@@ -0,0 +1,232 @@
+import spaces
+import gradio as gr
+import subprocess
+from PIL import Image
+import json
+import os
+import time
+
+import mp_box
+import draw_landmarks68
+import landmarks68_utils
+import io
+import numpy as np
+
+from glibvision.cv2_utils import pil_to_bgr_image,bgr_to_rgb
+from gradio_utils import save_image,save_buffer,clear_old_files,read_file
+from close_eyes import process_close_eyes_image
+from open_mouth import process_open_mouth
+'''
+Face detection based on MediaPipe face-landmark detection.
+https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
+From the model card:
+https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
+Licensed under the Apache License, Version 2.0
+Trained on Google's dataset (see the model card for more detail)
+
+'''
+
+#@spaces.GPU(duration=120)
+def process_images(image,eyelid_thick=1,eyelid_blur=9,inpaint_radius=10,inpaint_blur=30,mask_dilate=10,dilate_blur=10,
+ open_size_y=8,inside_layer_low_depth=False,hole_image_name="dark01",
+ make_animation=True,eyes_duration=200,mouth_duration=40,
+ progress=gr.Progress(track_tqdm=True)):
+ clear_old_files()
+    if image is None:
+ raise gr.Error("Need Image")
+
+ progress(0, desc="Start Making Animation")
+ boxes,mp_image,face_landmarker_result = mp_box.mediapipe_to_box(image)
+ annotated_image,bbox,landmark_points = draw_landmarks68.draw_landmarks_on_image(image,face_landmarker_result)
+ landmark_list = draw_landmarks68.convert_to_landmark_group_json(landmark_points)
+
+
+
+
+ galleries = []
+
+ progressed = 0
+ progress_step = 0.8/open_size_y
+ animations = []
+ np_image = pil_to_bgr_image(image)
+ if make_animation:
+ start_index = 0
+ else:
+ start_index = open_size_y-1
+
+ for i in range(start_index,open_size_y):
+ mouth_opened = process_open_mouth(np_image,landmark_list,0,i,True,inside_layer_low_depth,0,hole_image_name+".jpg")
+ animations.append(mouth_opened)
+ mouth_opened_path = save_image(mouth_opened)
+ galleries.append((mouth_opened_path,f"mouth-opened {i}"))
+ progressed+=progress_step
+ progress(progressed)
+
+ if make_animation:
+ np_image = pil_to_bgr_image(animations[0])# TODO option
+
+ eyes_closed_np,mask_np = process_close_eyes_image(np_image,landmark_list,eyelid_thick,eyelid_blur,inpaint_radius,inpaint_blur,mask_dilate,dilate_blur)
+ eyes_closed = Image.fromarray(bgr_to_rgb(eyes_closed_np))
+
+ eyes_closed_path = save_image(eyes_closed)
+ galleries.append((eyes_closed_path,"eyes-closed"))
+
+ eyes_closed_mask_path = save_image(Image.fromarray(mask_np))
+ galleries.append((eyes_closed_mask_path,"eyes-closed-mask"))
+
+
+    durations = [mouth_duration]*len(animations)*2+[eyes_duration]
+ if make_animation:
+ animations = animations + animations[::-1]+[eyes_closed]
+ output_buffer = io.BytesIO()
+ animations[0].save(output_buffer,
+ save_all=True,
+ append_images=animations[1:],
+                duration=durations,
+ loop=0,
+ format='WebP')
+ webp_path = save_buffer(output_buffer)
+ #galleries.append((webp_path,"animation"))
+
+ return webp_path,galleries
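+# A minimal sketch of the ping-pong WebP assembly used above (frame names here
+# are illustrative, not part of the app): Pillow writes an animated WebP when
+# the first frame is saved with save_all=True and per-frame durations in ms.
+#
+#   frames = [frame_a, frame_b, frame_c]      # PIL Images
+#   sequence = frames + frames[::-1]          # forward, then reversed
+#   durations = [40] * len(sequence)
+#   sequence[0].save("anim.webp", save_all=True, append_images=sequence[1:],
+#                    duration=durations, loop=0, format="WebP")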
+
+
+
+
+css="""
+#col-left {
+ margin: 0 auto;
+ max-width: 640px;
+}
+#col-right {
+ margin: 0 auto;
+ max-width: 640px;
+}
+.grid-container {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ gap:10px
+}
+
+.image {
+ width: 128px;
+ height: 128px;
+ object-fit: cover;
+}
+
+.text {
+ font-size: 16px;
+}
+"""
+
+#css=css,
+
+
+
+with gr.Blocks(css=css, elem_id="demo-container") as demo:
+ with gr.Column():
+ gr.HTML(read_file("demo_header.html"))
+ gr.HTML(read_file("demo_tools.html"))
+ with gr.Row():
+ with gr.Column():
+ image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB',elem_id="image_upload", type="pil", label="Upload")
+ with gr.Row(elem_id="prompt-container", equal_height=False):
+ with gr.Row():
+ btn = gr.Button("Create Closed-eye and Mouth-opened", elem_id="run_button",variant="primary")
+
+ with gr.Accordion(label="Eyes-Closed Advanced Settings", open=False):
+ with gr.Row( equal_height=True):
+ eyelid_thick = gr.Slider(
+ label="Eyelid thick",
+ minimum=0,
+ maximum=20,
+ step=1,
+ value=1)
+ eyelid_blur = gr.Slider(
+ label="Eyelid blur",
+ minimum=0,
+ maximum=30,
+ step=1,
+ value=7)
+ with gr.Row( equal_height=True):
+ inpaint_radius = gr.Slider(
+ label="Inpaint Radius",
+ minimum=1,
+ maximum=20,
+ step=1,
+ value=10,info="incresing make smooth but slow")
+ inpaint_blur = gr.Slider(
+ label="Inpaint blur",
+ minimum=0,
+ maximum=30,
+ step=1,
+ value=20)
+ with gr.Row( equal_height=True):
+ mask_dilate = gr.Slider(
+ label="Mask dilate",
+ minimum=0,
+ maximum=20,
+ step=1,
+ value=10)
+
+ dilate_blur = gr.Slider(
+ label="dilate blur",
+ minimum=0,
+ maximum=20,
+ step=1,
+ value=10)
+ with gr.Row( equal_height=True):
+ eyes_duration = gr.Slider(
+ label="Eyeclosed animation duration",
+ minimum=1,
+ maximum=500,
+ step=1,
+ value=200)
+ with gr.Accordion(label="Mouth-Opened Advanced Settings", open=False):
+ with gr.Row( equal_height=True):
+ make_animation = gr.Checkbox(label="animation",value=True,info="take long time if open-size is large")
+ open_size_y = gr.Slider(
+ label="Open Size",
+ minimum=1,
+ maximum=40,
+ step=1,
+ value=8,info="Large size is for img2img/inpaint")
+ inside_layer_low_depth=gr.Checkbox(label="Inner Layer Low",value=False,info="if value >20 check on better result")
+
+                    hole_image_name=gr.Dropdown(label="inner image name",choices=["dark01","black","mid01","mid02"],value="dark01",info="black works better for img2img")
+ with gr.Row( equal_height=True):
+ mouth_duration = gr.Slider(
+ label="mouhtopen animation duration",info="per frame",
+ minimum=1,
+ maximum=500,
+ step=1,
+ value=40)
+ with gr.Column():
+ animation_out = gr.Image(height=760,label="Animation", elem_id="output-animation")
+ image_out = gr.Gallery(label="Output", elem_id="output-img",preview=True)
+
+
+ btn.click(fn=process_images, inputs=[image,eyelid_thick,eyelid_blur,inpaint_radius,inpaint_blur,mask_dilate,dilate_blur,
+ open_size_y,inside_layer_low_depth,hole_image_name,make_animation,
+ eyes_duration,mouth_duration],outputs=[animation_out,image_out] ,api_name='infer')
+ gr.Examples(
+ examples =[
+ ["examples/00003245_00.jpg","examples/00003245_00.webp"],
+ ["examples/00002062.jpg","examples/00002062.webp"],
+ ["examples/00100265.jpg","examples/00100265.webp"],
+ ["examples/00824006.jpg","examples/00824006.webp"],
+ ["examples/00824008.jpg","examples/00824008.webp"],
+ ["examples/00825000.jpg","examples/00825000.webp"],
+ ["examples/00826007.jpg","examples/00826007.webp"],
+ ["examples/00827009.jpg","examples/00827009.webp"],
+ ["examples/00828003.jpg","examples/00828003.webp"],
+ ],
+ #examples =["examples/00003245_00.jpg","examples/00002062.jpg","examples/00100265.jpg","examples/00824006.jpg","examples/00824008.jpg",
+ # "examples/00825000.jpg","examples/00826007.jpg","examples/00827009.jpg","examples/00828003.jpg",],
+ inputs=[image,animation_out],examples_per_page=5
+ )
+ gr.HTML(read_file("demo_footer.html"))
+
+if __name__ == "__main__":
+    demo.launch()
diff --git a/close_eyes.py b/close_eyes.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5b22088d4933d9bb2d802ba74028139a136fc64
--- /dev/null
+++ b/close_eyes.py
@@ -0,0 +1,120 @@
+
+"""
+close_eyes.py
+
+
+Creates an image with the eyes closed.
+
+The eye and upper-eyelash regions are taken as points and inpainted.
+The inpainted area is blurred, pasted over the original image, and blurred again.
+
+
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+ - 2024-09-24: added name suffix
+ - 2024-11-15: changed for huggingface
+"""
+import os
+import cv2
+import numpy as np
+
+
+from glibvision.numpy_utils import bulge_polygon
+
+from glibvision.cv2_utils import fill_points,get_image_size,gray3d_to_2d,blend_rgb_images,create_color_image
+from landmarks68_utils import get_left_upper_eyelid_points,get_right_upper_eyelid_points,get_bulged_eyes,get_close_eyelid_point
+
+
+def create_eyelid_mask(image,face_landmarks_list,thick = 1,bulge=0.2):
+ black = create_color_image(image,(0,0,0))
+ left_eyelid = get_left_upper_eyelid_points(face_landmarks_list)
+ left_eyelid = bulge_polygon(left_eyelid,bulge)
+ fill_points(black,left_eyelid)
+
+ print("right")
+ right_eyelid = get_right_upper_eyelid_points(face_landmarks_list)
+ print(right_eyelid)
+ right_eyelid = bulge_polygon(right_eyelid,bulge)
+ fill_points(black,right_eyelid)
+
+ eyes_points = get_bulged_eyes(face_landmarks_list)
+ for points in eyes_points:
+ np_points = np.array(points,dtype=np.int32)
+ cv2.fillPoly(image, [np_points], (255,255,255))
+ if thick > 0:
+ cv2.polylines(black, [np_points], isClosed=False, color=(255,255,255), thickness=thick)
+
+ return cv2.cvtColor(black,cv2.COLOR_BGR2GRAY)
+
+DEBUG = False
+def process_close_eyes_image(img,landmarks_list,eyelid_thick=1,eyelid_blur=9,inpaint_radius=10,inpaint_blur=30,mask_dilate=10,dilate_blur=10):
+ img_h, img_w = get_image_size(img)
+
+ eyelid_mask = create_eyelid_mask(img,landmarks_list)
+ if DEBUG:
+ cv2.imwrite("close_eye_mask.jpg",eyelid_mask)
+
+ mask = gray3d_to_2d(eyelid_mask)
+
+
+ img_inpainted = cv2.inpaint(img, mask,inpaint_radius, cv2.INPAINT_TELEA)
+ if DEBUG:
+ cv2.imwrite("close_eye_inpaint.jpg",img_inpainted)
+
+    ## Blur the inpainted image.
+    if inpaint_blur>0:
+        if inpaint_blur%2==0: # GaussianBlur kernel sizes must be odd
+ inpaint_blur+=1
+ blurred_image = cv2.GaussianBlur(img_inpainted, (inpaint_blur, inpaint_blur), 0)
+ if DEBUG:
+ cv2.imwrite("close_eye_inpaint_burred.jpg",blurred_image)
+ else:
+ blurred_image=img_inpainted
+
+
+    # Draw the eyelashes
+ if eyelid_thick>0:
+ left,right = get_close_eyelid_point(landmarks_list)
+ for points in [left,right]:
+ print("## draw eyelid")
+ print(points)
+ cv2.polylines(blurred_image, [np.array(points)], isClosed=False, color=(0,0,0), thickness=eyelid_thick,lineType=cv2.LINE_AA)
+ if DEBUG:
+ cv2.imwrite("close_eye_inpaint_burred_eyeline.jpg",blurred_image)
+
+
+
+
+ if eyelid_thick>0 and eyelid_blur>0:
+ if eyelid_blur%2==0:
+ eyelid_blur+=1
+ # blur-eyelid-line
+ blurred_image = cv2.GaussianBlur(blurred_image, (eyelid_blur, eyelid_blur), 2)
+
+
+ print(mask_dilate,dilate_blur)
+ if mask_dilate>0:
+        # Expand the mask slightly beyond the inpaint boundary
+ kernel = np.ones((mask_dilate, mask_dilate), np.uint8)
+ extend_mask = cv2.dilate(mask, kernel, iterations=1)
+
+ if dilate_blur>0:
+ if dilate_blur%2==0:
+ dilate_blur+=1
+
+ extend_burred_mask = cv2.GaussianBlur(extend_mask, (dilate_blur, dilate_blur), 1)
+ else:
+ extend_burred_mask = extend_mask
+ else:
+ extend_burred_mask=mask
+
+
+ img_inpainted = blend_rgb_images(img,blurred_image,extend_burred_mask)
+
+ if DEBUG:
+ cv2.imwrite("create_no_mouth_image_merged.jpg",img_inpainted)
+
+ return img_inpainted,extend_burred_mask
\ No newline at end of file
diff --git a/close_lip.py b/close_lip.py
new file mode 100644
index 0000000000000000000000000000000000000000..a39a3a16e926d4d7132ab13188c87265741736b7
--- /dev/null
+++ b/close_lip.py
@@ -0,0 +1,150 @@
+
+"""
+close_lip.py
+
+
+Creates an image with the lips closed.
+
+The open area of the lips is inpainted,
+then the hole region is put back as shadow.
+
+
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+ - 2024-11-16: converted to huggingface-space (but args broken)
+"""
+import os
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+
+from glibvision.cv2_utils import blend_rgb_images
+from glibvision.numpy_utils import apply_binary_mask_to_color,create_2d_image
+
+import argparse
+
+
+
+
+
+def create_top_lip_low_mask(image,face_landmarks_list,line_thick = 1):
+ black = create_2d_image(image.shape)
+ lip_utils.fill_top_lower(black,face_landmarks_list,line_thick,lip_utils.COLOR_WHITE)
+ return black
+
+def create_lip_hole_mask(image,face_landmarks_list,line_thick = 1):
+ black = create_2d_image(image.shape)
+ lip_utils.fill_lip_hole(black,face_landmarks_list,line_thick,lip_utils.COLOR_WHITE)
+
+ return black
+
+def process_close_lip_image(img,landmarks_list):
+ img_h, img_w = lip_utils.get_image_size(img)
+
+ hole_mask = create_lip_hole_mask(img,landmarks_list,0)
+
+ lower_lip_mask = create_top_lip_low_mask(img,landmarks_list)
+
+ #these make dirty
+ #kernel = np.ones((3, 3), np.uint8)
+ #lower_lip_mask = cv2.erode(lower_lip_mask, kernel, iterations=1)
+
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_01_mask.jpg",lower_lip_mask)
+
+ mixed_mask = cv2.bitwise_or(hole_mask,lower_lip_mask)
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_01_mask_mixed.jpg",mixed_mask)
+
+ img_inpainted = cv2.inpaint(img, mixed_mask,3, cv2.INPAINT_NS)
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_02_inpaint.jpg",img_inpainted)
+
+
+ copy_impainted=img_inpainted.copy()
+ apply_binary_mask_to_color(copy_impainted,(0,8,50),hole_mask)
+ #lip_utils.fill_lip_hole(img_inpainted,landmarks_list,0,(0,8,50)) # BGR
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_03_hole.jpg",copy_impainted)
+
+
+
+
+    ## Blur the inpainted image.
+    blurred_image = cv2.GaussianBlur(copy_impainted, (9, 9), 0) # kernel sizes must sometimes be odd or an error occurs
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_04_burred.jpg",blurred_image)
+
+
+    # Shrink the hole mask slightly inside the inpaint boundary
+ kernel = np.ones((3, 3), np.uint8)
+ shrink_mask = cv2.erode(hole_mask, kernel, iterations=1)
+
+
+ shrink_burred_mask = cv2.GaussianBlur(shrink_mask, (3, 3), 0)
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_05_final_hole_mask.jpg",shrink_burred_mask)
+
+ img_inpainted = blend_rgb_images(img_inpainted,blurred_image,shrink_burred_mask)
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_05_final_hole.jpg",img_inpainted)
+    # Expand slightly beyond the inpaint boundary
+ kernel = np.ones((3, 3), np.uint8)
+ extend_mask = cv2.dilate(lower_lip_mask, kernel, iterations=1)
+
+
+ extend_burred_mask = cv2.GaussianBlur(extend_mask, (3, 3), 0)
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_05_final_lip_mask.jpg",extend_burred_mask)
+ img_inpainted = blend_rgb_images(img_inpainted,blurred_image,extend_burred_mask)
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_05_final_lip.jpg",img_inpainted)
+
+ mixed_mask = cv2.bitwise_or(shrink_burred_mask,extend_burred_mask)
+ mixed_mask[mixed_mask>0] = 255
+ mixed_mask = cv2.dilate(mixed_mask, (1,1), iterations=1)
+ # mixed_mask = cv2.GaussianBlur(mixed_mask, (3, 3), 0)
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("close_lip_05_final_mixed_mask.jpg",mixed_mask)
+
+ return img_inpainted,mixed_mask
+
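+# Typical usage (a sketch; landmarks_list uses the same 68-point group
+# structure as the other modules in this repo):
+#
+#   closed_np, mask_np = process_close_lip_image(cv2.imread("face.jpg"), landmarks_list)
+#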
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Open Mouth')
+    parser.add_argument('--input',"-i",help='source image to convert (required); the mouth must be closed',required=True)
+    parser.add_argument('--output',"-o",help='where to save the image (temporary layer files are also created)')
+    parser.add_argument('--landmark',"-l",help='landmark data')
+    parser.add_argument('--scale',"-sc",help='scale; improves accuracy',default=4,type=int)
+
+ args = parser.parse_args()
+    # path to the image file
+ img_path = args.input
+ img = cv2.imread(img_path)
+    #landmarks_list = landmark_utils.load_landmarks(img,args.scale,args.landmark)
+    landmarks_list = None # broken: the landmark loader is not available after the huggingface conversion
+ eye_closed_image,mask = process_close_lip_image(img,landmarks_list)
+
+ output_path = args.output
+ if output_path == None:
+ parent_path,file = os.path.split(img_path)
+ name,ext = os.path.splitext(file)
+ output_path = os.path.join(parent_path,f"{name}_lipclose{ext}")
+
+
+
+
+ parent_path,file = os.path.split(output_path)
+ name,ext = os.path.splitext(file)
+
+ mask_path = os.path.join(parent_path,f"{name}_mask{ext}")
+ cv2.imwrite(mask_path,mask)
+ cv2.imwrite(output_path,eye_closed_image)
+ print(f"complete image {output_path} and mask {mask_path}")
\ No newline at end of file
diff --git a/create_bottom_lip.py b/create_bottom_lip.py
new file mode 100644
index 0000000000000000000000000000000000000000..65094b3af17cd3f0c5624765323ce419543d8fc5
--- /dev/null
+++ b/create_bottom_lip.py
@@ -0,0 +1,212 @@
+"""
+create_bottom_lip.py
+
+Part of open_mouth: generates the lower-lip layer.
+Does not run standalone yet.
+
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+from lip_utils import *
+import lip_utils
+from scipy.ndimage import binary_dilation, gaussian_filter
+
+def process_lip_image(img,landmarks_list, crop_image_margin, open_size_y, open_size_x):
+ """
+    Processes the lip image.
+
+    open_size_x cuts the edges in the final image;
+    its value is copied into side_tips.
+    """
+    # load the image
+
+ side_tips = 0 # remove cropped pixel
+ edge_x =0 # remove side pixel from final image
+
+
+    # Tried lowering the center as the mouth opens, but a sagging middle looks very unnatural, so it was dropped.
+    # Eventually this should be used when deforming the center of the 5-way split.
+ mid_lip_move_ratio = open_size_y/80.0 if open_size_y>0 else 0
+ mid_lip_move_ratio = 0
+
+    if open_size_x>0: # mysterious code? should verify it sometime
+ print("only support shorten use minus open")
+ side_tips = open_size_x
+        edge_x = int(open_size_x*1.5) # some magic number
+ #edge_x = 0
+ open_size_x = 0 # TODO move (some transform broken)
+
+    # Meant to cut the sides where they stick out unnaturally, but it never worked well, so it is disabled.
+    # OFF because TOP and HOLE are not supported yet.
+ side_tips = 0 # remove cropped pixel
+ edge_x =0 # remove side pixel from final image
+
+
+ img_h, img_w = lip_utils.get_image_size(img)
+
+    # Extract and process the lip region (a margin is added; the transparency handling is questionable)
+ gaus = 4
+ box = lip_utils.get_points_box(landmarks_list, lip_utils.POINTS_BOTTOM_LIP, crop_image_margin)
+ align_points = lip_utils.get_bottom_lip_align_points(landmarks_list)
+ alpha_image, rec = get_alpha_image(img, landmarks_list, lip_utils.POINTS_BOTTOM_LIP, crop_image_margin, crop_image_margin, gaus)
+
+
+    # Create the base of the image returned at the end
+ h, w = lip_utils.get_image_size(alpha_image)
+ if lip_utils.DEBUG:
+ cv2.imwrite("debug/bottom_lip_alpha.png",alpha_image)
+ print(f"bottom-lip cropped w = {w} h = {h}")
+    bottom_lip_final_image=lip_utils.create_rgba(w,h+open_size_y+1) # somehow the transform expands the image; TODO check whether this is still needed
+ bottom_lip_final_image_h,bottom_lip_final_image_w = lip_utils.get_image_size(bottom_lip_final_image)
+ print(f"bottom_lip_final_image:w = {bottom_lip_final_image_w} h = {bottom_lip_final_image_h}")
+
+
+ #local_align_points = lip_utils.offset_points(align_points,box[0])
+ #print(align_points)
+
+
+    # Lip position, still under consideration https://github.com/akjava/lip_recognition_tools/issues/2
+ mid_left = int(w/5*2)
+ mid_left = int(w/3)
+
+ mid_right = bottom_lip_final_image_w - mid_left
+ print(f"image width = {bottom_lip_final_image_w} mid_left = {mid_left} mid_right ={mid_right}")
+
+    mid_center = int((mid_right+mid_left)/2) # once used to lower the center, but probably no longer needed
+ mid_move_y_divided = 5 # 0 means no move
+
+    # Deform the center-left lip. Splitting in two just to lower the center is wasteful. https://github.com/akjava/lip_recognition_tools/issues/3
+
+ mid_image_left = lip_utils.crop_image(alpha_image,mid_left,0,mid_center,h)
+ mid_image_left_h,mid_image_left_w = lip_utils.get_image_size(mid_image_left)
+ max_w = mid_image_left_w
+ max_h = mid_image_left_h
+ opend_mid_lip_left = lip_utils.create_moved_image(mid_image_left,
+ [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(-0,-0),(max_w,int(max_h*mid_lip_move_ratio)),#int(max_h/2)
+ (0,max_h),(max_w,max_h)]
+ )
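+    # create_moved_image (defined in lip_utils, not shown in this diff) appears to
+    # map the four source corners (TL, TR, BL, BR) onto a destination quad, i.e. a
+    # perspective-style warp. A sketch of the idea, with w/h/dy as placeholders:
+    #   create_moved_image(img, [(0,0),(w,0),(0,h),(w,h)],
+    #                           [(0,0),(w,dy),(0,h),(w,h)])  # push the top-right corner down by dy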
+    # Unexpected size growth causes an error and should be avoided, but this needs more verification
+ #opend_mid_lip_left = cv2.resize(opend_mid_lip_left, (max_w, max_h), interpolation=cv2.INTER_AREA)
+ lip_utils.print_width_height(mid_image_left,"mid-left")
+ lip_utils.print_width_height(opend_mid_lip_left,"moved-mid-left")
+
+
+    # Deform the center-right lip
+ mid_image_right = lip_utils.crop_image(alpha_image,mid_center,0,mid_right,h)
+ mid_image_right_h,mid_image_right_w = lip_utils.get_image_size(mid_image_right)
+ max_w = mid_image_right_w
+ max_h = mid_image_right_h
+
+ opend_mid_lip_right = lip_utils.create_moved_image(mid_image_right,
+ [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(-0,int(max_h*mid_lip_move_ratio)),(max_w,0),#int(max_h/2)
+ (0,max_h),(max_w,max_h)]
+ )
+
+ #opend_mid_lip_right = cv2.resize(opend_mid_lip_right, (max_w, max_h), interpolation=cv2.INTER_AREA)
+ lip_utils.print_width_height(mid_image_right,"mid-right")
+ lip_utils.print_width_height(opend_mid_lip_right,"moved-mid-right")
+
+
+ #no remove side-tip area
+ left_image = lip_utils.crop_image(alpha_image,side_tips,0,mid_left,h)
+ right_image = lip_utils.crop_image(alpha_image,mid_right,0,w-side_tips,h)
+
+    # Lower the left lip; the left side stays fixed
+ left_lip_image_h,left_lip_image_w = lip_utils.get_image_size(left_image)
+ print(f"left-image:w = {left_lip_image_w} h = {left_lip_image_h}")
+
+ max_w = left_lip_image_w
+ max_h = left_lip_image_h
+ opend_lip_left = lip_utils.create_moved_image(left_image,
+ [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(0,-0),(max_w+open_size_x,open_size_y),
+ (0,max_h-0),(max_w+open_size_x,max_h+open_size_y)]
+ )
+ left_lip_image_h,left_lip_image_w = lip_utils.get_image_size(opend_lip_left)
+ max_w = left_lip_image_w
+ max_h = left_lip_image_h
+
+ new_h,new_w = lip_utils.get_image_size(opend_lip_left)
+ print(f"left-image moved:w = {new_w} h = {new_h}")
+ if lip_utils.DEBUG:
+ cv2.imwrite("open_botto_lip_left.png",opend_lip_left)
+
+
+    # Lower the right lip; the right side stays fixed
+ right_lip_image_h,right_lip_image_w = lip_utils.get_image_size(right_image)
+ max_w = right_lip_image_w
+ max_h = right_lip_image_h
+ opend_lip_right = lip_utils.create_moved_image(right_image,
+ [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(0,open_size_y),(max_w+open_size_x,-0),
+ (0,max_h+open_size_y),(0+max_w+open_size_x,max_h-0)]
+ )
+ new_h,new_w = lip_utils.get_image_size(opend_lip_right)
+ print(f"right-image moved :w = {new_w} h = {new_h}")
+ if lip_utils.DEBUG:
+ cv2.imwrite("open_botto_lip_right.png",opend_lip_right)
+
+
+
+
+    # Draw each deformed image. Note this is a full copy including alpha (the base image's alpha is ignored)
+ #is this ok?
+ #lip_utils.copy_image(bottom_lip_final_image,mid_image,mid_left-1,open_size_y)
+ #lip_utils.copy_image(bottom_lip_final_image,mid_image,mid_left,open_size_y)
+    ## center part
+ lip_utils.copy_image(bottom_lip_final_image,opend_mid_lip_left,mid_left,open_size_y)
+ lip_utils.copy_image(bottom_lip_final_image,opend_mid_lip_right,mid_center,open_size_y)
+
+
+ print(lip_utils.get_image_size(opend_lip_left))
+ print(lip_utils.get_image_size(bottom_lip_final_image))
+
+    ## left and right edges (side_tips)
+ lip_utils.copy_image(bottom_lip_final_image,opend_lip_left,open_size_x+side_tips,0)
+ lip_utils.copy_image(bottom_lip_final_image,opend_lip_right,mid_right,0)
+
+
+    #edge_x=22 #for40 # under test
+
+    # Handle both edges https://github.com/akjava/lip_recognition_tools/issues/6
+ lip_utils.fade_in_x(bottom_lip_final_image,edge_x*2)
+ lip_utils.fade_out_x(bottom_lip_final_image,edge_x*2)
+
+    # Create and save the final image
+ if lip_utils.DEBUG:
+ cv2.imwrite("bottom_lip_opend.png", bottom_lip_final_image)
+ face_size_image = lip_utils.create_rgba(img_w, img_h)
+ lip_utils.copy_image(face_size_image, bottom_lip_final_image, box[0][0], box[0][1])
+ if lip_utils.DEBUG:
+ cv2.imwrite("bottom_lip_layer.png", face_size_image)
+ return face_size_image
+
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "straight.jpg"
+    img = cv2.imread(img_path)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+    # parameters
+    crop_image_margin = 16
+    open_size_y = 10
+    open_size_x = -10
+
+    # call the function
+    process_lip_image(img, landmarks_list, crop_image_margin, open_size_y, open_size_x)
\ No newline at end of file
diff --git a/create_chin_image.py b/create_chin_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..58177e07e4830cdb00c5ece4e3370ca8604b9d3e
--- /dev/null
+++ b/create_chin_image.py
@@ -0,0 +1,86 @@
+"""
+create_chin_image.py
+
+Part of open_mouth.py
+
+Simply stretches the chin image vertically to match how far the mouth opens.
+
+The contour should be extracted more cleanly:
+https://github.com/akjava/lip_recognition_tools/issues/7
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+def process_chin_image(img,landmarks_list, margin, open_size_y, open_size_x):
+ img_h, img_w = lip_utils.get_image_size(img)
+
+
+    open_size_x = 0 # no longer supported
+ if open_size_x > 0:
+ print("currently stop support open-sizex")
+
+ jaw_points = lip_utils.get_jaw_points(landmarks_list)
+ print("### JAW POINT")
+ print(jaw_points)
+
+ box = lip_utils.points_to_box(jaw_points)
+ print(box)
+
+ cropped = lip_utils.crop_image_by_box(img,box)
+ cropped_img_h, cropped_img_w = lip_utils.get_image_size(cropped)
+ if lip_utils.DEBUG_CHIN:
+ cv2.imwrite("chin_cropped.jpg",cropped)
+ cropped_jaw_points = lip_utils.offset_points(jaw_points,box[0])
+    #what the hell is this?
+    #points = np.array(jaw_points,dtype=np.float32)
+    # get the rotated rectangle
+ #rect = cv2.minAreaRect(points)
+ #print(rect)
+
+ mask = lip_utils.create_mask_from_points(cropped,cropped_jaw_points,4,2)
+ if lip_utils.DEBUG_CHIN:
+ cv2.imwrite("chin_mask.jpg",mask)
+
+ #lip_utils.print_numpy(mask)
+ chin_image = lip_utils.apply_mask(cropped,mask)
+ chin_image_resized = cv2.resize(chin_image, (cropped_img_w, cropped_img_h+open_size_y), interpolation=cv2.INTER_LANCZOS4)
+ #chin_mask_image_resized = cv2.resize(mask, (cropped_img_w, cropped_img_h+open_size_y), interpolation=cv2.INTER_LANCZOS4)
+
+ #lip_utils.print_numpy(chin_image)
+ if lip_utils.DEBUG_CHIN:
+ cv2.imwrite("chin_resized.png",chin_image_resized) #alpha image must be save png
+
+ full_rgba=lip_utils.create_rgba(img_w,img_h)
+ lip_utils.copy_image(full_rgba,chin_image_resized,box[0][0],box[0][1])
+ if lip_utils.DEBUG_CHIN:
+ cv2.imwrite("chin_full.png",full_rgba)
+
+ #mask_gray=lip_utils.create_gray(img_w,img_h)
+ #lip_utils.copy_image(mask_gray,chin_mask_image_resized,box[0][0],box[0][1])
+ # chin mask is useless
+
+ return full_rgba
+
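+# The chin "opening" above is just a vertical stretch (a sketch of the idea;
+# chin and open_size_y are placeholders):
+#
+#   h, w = chin.shape[:2]
+#   stretched = cv2.resize(chin, (w, h + open_size_y), interpolation=cv2.INTER_LANCZOS4)
+#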
+if __name__ == "__main__":
+    # path to the image file
+ img_path = "straight.jpg"
+ img = cv2.imread(img_path)
+ img_h, img_w = lip_utils.get_image_size(img)
+ landmarks_list = lip_utils.image_to_landmarks_list(img)
+
+    # parameters
+ margin = 4
+ open_size_y = 20
+ open_size_x = 0
+
+    # call the function
+ process_chin_image(img,landmarks_list, margin, open_size_y, open_size_x)
\ No newline at end of file
diff --git a/create_hole_image.py b/create_hole_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cd20f17a1b5791d9dafdd18835ecf69c8798ab7
--- /dev/null
+++ b/create_hole_image.py
@@ -0,0 +1,149 @@
+
+"""
+create_hole_image.py
+
+Part of open_mouth.py
+
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+ - 2024-09-15: added slide_amount
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+
+
+def vertical_slide(image, slide_amount):
+ height, width = image.shape[:2]
+
+    # If the slide amount exceeds the image height, clamp it to the height
+ slide_amount = min(slide_amount, height)
+ slide_amount = max(slide_amount, -height)
+
+    slide_image = np.zeros_like(image)  # image of the same size and dtype as the input
+
+    if slide_amount > 0:  # slide down
+ slide_image[slide_amount:, :] = image[:height - slide_amount, :]
+    elif slide_amount < 0:  # slide up
+ slide_image[:height + slide_amount, :] = image[-slide_amount:, :]
+ else:
+ slide_image = image.copy()
+
+ return slide_image
+
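+# For example (a quick sketch): with a 4x3 grayscale image, slide_amount=1
+# shifts every row down by one and zero-fills the top row:
+#
+#   img = np.arange(12, dtype=np.uint8).reshape(4, 3)
+#   vertical_slide(img, 1)[0].tolist()  # -> [0, 0, 0]
+#   vertical_slide(img, 1)[1].tolist()  # -> [0, 1, 2]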
+
+def file_name_check(path):
+ max_name_limit = 50
+ check = True
+ if path.find("..")!=-1:
+ check = False
+ if path.find("/")!=-1:
+ check = False
+ if path.find("\\")!=-1:
+ check = False
+ if path.find(":")!=-1:
+ check = False
+ if len(path)>max_name_limit:
+ print(f"name is limited {max_name_limit}")
+ check = False
+    if not check:
+        raise ValueError(f"Invalid Name {path}")
+
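+# For example: file_name_check("dark01.jpg") passes silently, while
+# file_name_check("../dark01.jpg") raises ValueError (a path-traversal guard).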
+
+
+def process_create_hole_image(img,landmarks_list,open_size_y = 0,open_size_x=0,hole_offset=0,hole_image_name="dark01.jpg"):
+ file_name_check(hole_image_name)
+ img_h, img_w = lip_utils.get_image_size(img)
+
+    # Duplicate the image and draw the align points. The alignment covers the lip range, taking tilt into account.
+ img_lined = np.copy(img)
+
+ points = lip_utils.get_top_lip_align_points(landmarks_list)
+ img_lined1 = np.copy(img)
+ print(points)
+ cv2.polylines(img_lined1, [np.array(points)], isClosed=True, color=(0,255,0), thickness=1)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_hole_top_lip_align_line.jpg",img_lined1)
+
+
+ print(f"align point = {points}")
+ diff_align_x = points[0][0]-points[2][0]
+ print(f"diff_align_x = {diff_align_x}")
+ np_points = np.array(points)
+
+ diff_left = np_points[2] - np_points[0] #left-bottom ,left-up
+ diff_right = np_points[3] - np_points[1] #right-bottom,right-up
+ print(f"diff left-y = {diff_left},diff right-y ={diff_right}")
+
+ top_lip_thicks = lip_utils.get_top_lip_thicks(landmarks_list) # this ignore rotation
+ top_lip_thicks2 = lip_utils.get_top_lip_thicks(landmarks_list,True) # this ignore rotation
+
+ lip_thick = np.mean(top_lip_thicks)
+ lip_thick2 = np.mean(top_lip_thicks2)
+
+ base_mouth_size = lip_thick2*1.5
+
+ mouth_angle=lip_utils.calculate_clockwise_angle(points[2],points[3])
+ angled_point=lip_utils.calculate_new_point((0,0),base_mouth_size,mouth_angle+90)
+ angled_mouth_size = angled_point[1] + open_size_y
+ #print(f"lip_thick2={lip_thick2}")
+
+ print(f"lip thick2 ={lip_thick2} base_mouth_size={base_mouth_size} mouth_angle={mouth_angle} angled_mouth_size={angled_mouth_size}")
+    # The whole mouth is defined from the upper-lip range, scaled by 1.x with no real justification. https://github.com/akjava/lip_recognition_tools/issues/8
+ diff_left[1] = angled_mouth_size
+ diff_right[1] = angled_mouth_size
+ diff_left[0] *=0
+ diff_right[0] *=0
+ expand_left = np_points[2] + diff_left
+ expand_right = np_points[3] + diff_right
+
+    # The X coordinates are expanded too, but they are basically unused and can be ignored.
+ expand_points = np.array([np_points[0],np_points[1],expand_left,expand_right])
+ print(f"expand_points = {[np_points[0],np_points[1],expand_left,expand_right]}")
+ cv2.polylines(img_lined, [expand_points], isClosed=True, color=(0,255,0), thickness=1)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_hole_image_top-align_line.jpg",img_lined)
+
+
+    # Here too, the hole image is fitted to the size somewhat arbitrarily,
+    # which makes positioning the image very delicate.
+    # TODO argument to specify the image https://github.com/akjava/lip_recognition_tools/issues/9
+ # https://github.com/akjava/lip_recognition_tools/issues/10
+ #hole_image = cv2.imread("hole_images/hole_01_light_dark.jpg")
+ hole_image = cv2.imread(f"hole_images/{hole_image_name}")
+ hole_image = vertical_slide(hole_image,hole_offset)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_hole_image-slided_hole_image.jpg",hole_image)
+ #exit(0)
+
+ hole_image_h,hole_image_w = lip_utils.get_image_size(hole_image)
+ max_w = hole_image_w
+ max_h = hole_image_h
+ expand_list = expand_points.tolist()
+ aligned_hole_image = lip_utils.create_moved_image(hole_image, [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+ expand_list
+
+ )
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_hole_image_top-align_image.jpg",aligned_hole_image)
+
+ img_face = np.copy(img)
+ lip_utils.copy_image(img_face,aligned_hole_image,expand_list[0][0] - diff_align_x,(expand_list[0][1]+expand_list[1][1])//2)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_hole_image_top-align_face.jpg",img_face)
+ return img_face
+
+
+if __name__ == "__main__":
+    # path to the image file
+ img_path = "00012245.jpg" #"straight.jpg"
+ img = cv2.imread(img_path)
+ landmarks_list = lip_utils.image_to_landmarks_list(img)
+ process_create_hole_image(img,landmarks_list)
\ No newline at end of file
diff --git a/create_no_mouth.py b/create_no_mouth.py
new file mode 100644
index 0000000000000000000000000000000000000000..be11249ec92fc34ee4218b47c15edaba2ad2223f
--- /dev/null
+++ b/create_no_mouth.py
@@ -0,0 +1,97 @@
+
+"""
+create_no_mouth.py
+
+Part of open_mouth.py
+
+Creates an image without a mouth.
+
+Takes the landmark points of the mouth area, erases that region, and inpaints it.
+The inpainted area is blurred, pasted over the original image, and blurred again.
+
+
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Change log:
+ - 2024-04-23: first release
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+from glibvision.cv2_utils import blend_rgb_images
+from glibvision.numpy_utils import apply_binary_mask_to_image
+
+def process_create_no_mouth_image(img,landmarks_list):
+ img_h, img_w = lip_utils.get_image_size(img)
+
+    ## Erase the mouth area with inpainting.
+ (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
+ lip_points = lip_utils.get_lip_mask_points(landmarks_list)
+
+    # Tried rounding the selection, but it did not seem to help much.
+ #lip_points = lip_utils.bulge_polygon(lip_points,0.1)
+
+    # Check the lip mask range
+ if lip_utils.DEBUG:
+ img_lined = np.copy(img)
+ cv2.polylines(img_lined, [np.array(lip_points)], isClosed=True, color=(0,255,0), thickness=1)
+ cv2.imwrite("create_no_mouth_image_polyline.jpg",img_lined)
+
+    # Lip edges contain shadows and pink tones; the lower-lip height is used to trim them. The 0.5 has no real basis.
+ dilation_size = int(bottom_height*0.5)
+    lip_mask = lip_utils.create_mask_from_points(img,lip_points,dilation_size,0) # used for inpainting, so no blur is needed
+ if lip_utils.DEBUG:
+ lip_utils.print_numpy(lip_mask,"lip mask")
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_mask.jpg",lip_mask)
+
+ img_inpainted = cv2.inpaint(img, lip_mask,3, cv2.INPAINT_TELEA)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_inpaint.jpg",img_inpainted)
+
+    ## Blur the inpainted area.
+    blurred_image = cv2.GaussianBlur(img_inpainted, (29, 29), 0) # kernel sizes must sometimes be odd or an error occurs
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_blurred.jpg",blurred_image)
+
+ apply_binary_mask_to_image(img_inpainted,blurred_image,lip_mask)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_blurred1_applied.jpg",blurred_image)
+
+
+    # slightly blur the whole image
+ blurred_image2 = cv2.GaussianBlur(img_inpainted, (9, 9), 0)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_blurred2.jpg",blurred_image2)
+
+    # Expand slightly beyond the inpaint boundary
+ kernel = np.ones((8, 8), np.uint8)
+ lip_mask = cv2.dilate(lip_mask, kernel, iterations=1)
+
+    # slightly blur the whole mask
+ blurred_mask = cv2.GaussianBlur(lip_mask, (19, 19), 0)
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_blurred_mask.jpg",blurred_mask)
+
+ # https://github.com/akjava/lip_recognition_tools/issues/12
+ #cv2_utils.apply_binary_mask_to_image(img_inpainted,blurred_image2,lip_mask)
+
+ img_inpainted = blend_rgb_images(img_inpainted,blurred_image2,lip_mask)
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("create_no_mouth_image_merged.jpg",img_inpainted)
+
+ return img_inpainted
+
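+# Typical usage (a sketch; landmarks_list as in the other modules in this repo):
+#
+#   no_mouth_np = process_create_no_mouth_image(cv2.imread("face.jpg"), landmarks_list)
+#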
+if __name__ == "__main__":
+    # path to the image file
+ img_path = "straight.jpg"
+ img = cv2.imread(img_path)
+ landmarks_list = lip_utils.image_to_landmarks_list(img)
+ process_create_no_mouth_image(img,landmarks_list)
\ No newline at end of file
diff --git a/create_top_lip.py b/create_top_lip.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aeb1cdfeceb625dd3faf123d4e92717bb9477d1
--- /dev/null
+++ b/create_top_lip.py
@@ -0,0 +1,284 @@
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+
+def process_lip_image(img,landmarks_list, margin, open_size_y, open_size_x):
+    """
+    Processes the lip image.
+    """
+    print(open_size_x)
+ img_h, img_w = lip_utils.get_image_size(img)
+
+
+    open_size_x = 0 # no longer supported
+    print("open-size x is currently not supported")
+
+ #for 40 # TODO recheck later issues/91
+    side_tips = 0 # TODO depends on size or point
+ #side_tips = 0
+ side_tips = open_size_x
+    edge_x = int(open_size_x*1.4) # some magic number
+
+    mid_lip_move_ratio = open_size_y/80.0 if open_size_y>0 else 0
+    mid_lip_shrink_ratio = open_size_x/4 if open_size_x>0 else 0
+
+    # Extract and process the upper lip
+ top_lip_rgba, cropped_box = lip_utils.get_alpha_image(img, landmarks_list, lip_utils.POINTS_TOP_LIP, margin, margin, 4)
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_rgba.png",top_lip_rgba)
+ new_h,new_w = lip_utils.get_image_size(top_lip_rgba)
+ w = new_w
+ h = new_h
+ print(f"top-lip-alpha-margined-size:w = {new_w} h = {new_h} margin = {margin}")
+ align_points = lip_utils.get_top_lip_align_points(landmarks_list)
+
+ box = cropped_box
+ top_points=lip_utils.get_landmark_points(landmarks_list,lip_utils.POINTS_TOP_LIP)
+ cropped_lip_points = [(point[0] - box[0][0], point[1] - box[0][1]) for point in top_points]
+ lip_points = [(point[0] - box[0][0], point[1] - box[0][1]) for point in align_points]
+ middle_lip = ((lip_points[0][0] + lip_points[1][0]) / 2, (lip_points[0][1] + lip_points[1][1]) / 2)
+ print(f"middle:{middle_lip}")
+
+
+ #DEV
+ print(f"box {cropped_box[0][0]},{cropped_box[0][1]}")
+ face_size_image=lip_utils.create_rgba(img_w,img_h)
+ lip_utils.copy_image(face_size_image,top_lip_rgba,cropped_box[0][0]-margin,cropped_box[0][1]-margin)
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_layer.png",face_size_image)
+
+
+
+
+
+
+    # errors if not an int
+ middle_y = max(1,int(middle_lip[1]-5)) # force move up
+
+
+    # split into 3
+ mid_x1=int(w/3) # LEFT
+ mid_x2 = w -mid_x1 # RIGHT
+ print(f"image width = {new_w} mid_left = {mid_x1} mid_right ={mid_x2}")
+
+ cx, cy, cx2, cy2 = 0, middle_y,mid_x1, h
+
+ print("###",w,",",middle_y)
+ crop_top = lip_utils.crop_image(top_lip_rgba,0,0,w,middle_y)#full top
+
+ #if use mid only left right change control of up
+ #crop_top = lip_utils.crop_image(top_lip_rgba,mid_x1,0,mid_x2,middle_y)
+ if lip_utils.DEBUG:
+ cv2.imwrite("crop_top.png",crop_top)
+ #cv2.imwrite("top_lip_mid.png",crop_mid)
+
+ below_top_lip_image = lip_utils.crop_image(top_lip_rgba,0,middle_y,w,h)
+ below_top_lip_image_h,below_top_lip_image_w = lip_utils.get_image_size(below_top_lip_image)
+ if lip_utils.DEBUG:
+ cv2.imwrite("below_top_lip_image.png",below_top_lip_image)
+ print(f"below_top_lip_image w = {below_top_lip_image_w}, h= {below_top_lip_image_h}")
+
+    # crop out the center part
+
+ mid_x1_x2_half = int((mid_x2+mid_x1)/2)
+ print(mid_x1_x2_half)
+ crop_mid_left = lip_utils.crop_image(below_top_lip_image,mid_x1,0,mid_x1_x2_half,below_top_lip_image_h)
+ lip_utils.print_width_height(crop_mid_left,"crop_mid_left")
+ crop_mid_h,crop_mid_w = lip_utils.get_image_size(crop_mid_left)
+ max_w = crop_mid_w
+ max_h = crop_mid_h
+ moveup_lip_mid = lip_utils.create_moved_image(crop_mid_left, [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(0,0),(crop_mid_w,0),
+                                        (0,int(max_h)),(max_w,int(crop_mid_h*(1.0 - mid_lip_move_ratio)))]# TODO ratio
+ )
+ lip_utils.print_width_height(moveup_lip_mid,"crop_mid_left-moved")
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("moveup_lip_mid_left.png",moveup_lip_mid)
+
+ crop_mid_right = lip_utils.crop_image(below_top_lip_image,mid_x1_x2_half,0,mid_x2,below_top_lip_image_h)
+ crop_mid_h,crop_mid_w = lip_utils.get_image_size(crop_mid_right)
+ max_w = crop_mid_w
+ max_h = crop_mid_h
+ lip_utils.print_width_height(crop_mid_right,"crop_mid_right")
+ moveup_lip_mid_right = lip_utils.create_moved_image(crop_mid_right, [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(0,0),(max_w,0),
+                    (0,int(max_h*(1.0-mid_lip_move_ratio))),(max_w,int(max_h))]
+ )
+ lip_utils.print_width_height(moveup_lip_mid_right,"crop_mid_right-moved")
+ if lip_utils.DEBUG:
+ cv2.imwrite("moveup_lip_mid_right.png",moveup_lip_mid_right)
+
+
+
+
+    # Final image: its size is the initial crop plus the mouth opening (open_size_y)
+    top_lip_final_image=lip_utils.create_rgba(w,h+open_size_y+1) # somehow the transform expands the image
+ final_image_h,final_image_w = lip_utils.get_image_size(top_lip_final_image)
+ print(f"final-image-size:w = {final_image_w} h = {final_image_h}")
+
+
+ # left block
+ left_lip_image = lip_utils.crop_image(below_top_lip_image,side_tips,0,mid_x1,below_top_lip_image_h)
+ left_lip_image_h,left_lip_image_w = lip_utils.get_image_size(left_lip_image)
+ print(f"left-image-cropped:w = {left_lip_image_w} h = {left_lip_image_h}")
+    # this left transform is very important and changes the result (+open_size_x so it does not feel strange)
+ max_w = left_lip_image_w
+ max_h = left_lip_image_h
+
+ opend_lip_left = lip_utils.create_moved_image(left_lip_image,
+ [(0,0),(max_w,0),
+ (0,max_h),(max_w,max_h)],
+
+ [(0,0+open_size_y),(max_w+open_size_x,0),
+ (0,max_h+open_size_y),(max_w+open_size_x,max_h)]
+ )
+
+
+ new_h,new_w = lip_utils.get_image_size(opend_lip_left)
+ print(f"left-image-moved:w = {new_w} h = {new_h}")
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_opend_left.png",opend_lip_left)
+
+ right_lip_image = lip_utils.crop_image(below_top_lip_image,mid_x2,0,below_top_lip_image_w-side_tips,below_top_lip_image_h)
+ right_lip_image_h,right_lip_image_w = lip_utils.get_image_size(right_lip_image)
+ print(f"right-image-cropped:w = {right_lip_image_w} h = {right_lip_image_h}")
+ max_w = right_lip_image_w
+ #cv2.imwrite("top_lip_opend_left.png",opend_lip_left)
+ # right block
+ #right-image-cropped:w = 39 h = 32
+ opend_lip_right = lip_utils.create_moved_image(right_lip_image,
+ [(0,0),(right_lip_image_w-1,0),
+ (0,right_lip_image_h-1),(right_lip_image_w-1,right_lip_image_h-1)],
+
+                    [(-0,0),(right_lip_image_w-1+open_size_x,open_size_y), # removing the corner shrink broke the image
+ (0,int(crop_mid_h-1)),(right_lip_image_w+open_size_x-1,right_lip_image_h-1+open_size_y)]
+                    #,(39+open_size_x,right_lip_image_h+open_size_y) #TODO
+ )
+
+
+ new_h,new_w = lip_utils.get_image_size(opend_lip_right)
+ right_image_w_changed = new_w-right_lip_image_w
+
+ print(f"right-image-moved:w = {new_w} h = {new_h}")
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_opend_right.png",opend_lip_right)
+
+
+ move_x = open_size_x +(open_size_x-right_image_w_changed)
+ print(f"right_image_w_changed ={right_image_w_changed} open_size_x ={open_size_x} move_x ={move_x}")
+
+ lip_utils.copy_image(top_lip_final_image,crop_top,0,0) # full version
+ #lip_utils.copy_image(top_lip_final_image,crop_top,mid_x1,0)
+ print(f"open size x = {open_size_x}")
+    lip_utils.copy_image(top_lip_final_image,opend_lip_left,side_tips,middle_y) # open_size_x must be slid and negative
+
+ #mid
+ lip_utils.copy_image(top_lip_final_image,moveup_lip_mid,mid_x1,middle_y)
+ lip_utils.copy_image(top_lip_final_image,moveup_lip_mid_right,mid_x1_x2_half,middle_y)
+
+ lip_utils.copy_image(top_lip_final_image,opend_lip_right,mid_x2,middle_y)
+
+
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_opend.png",top_lip_final_image)
+ face_size_image=lip_utils.create_rgba(img_w,img_h)
+ lip_utils.copy_image(face_size_image,top_lip_final_image,box[0][0],box[0][1])
+ if lip_utils.DEBUG:
+ cv2.imwrite("top_lip_layer.png",face_size_image)
+
+
+ # possible bug inverted
+ points = lip_utils.get_lip_hole_points(landmarks_list)
+ #points = lip_utils.get_lip_hole_top_points(landmarks_list)
+
+ statics=[1,2,3]
+ half =[0,4,5,9]
+
+    # no effect now; open-size x is set to 0 at the beginning
+ m = 1
+
+
+    ## Move the top lip up; the upper lip is basically 1.5x the lower lip's thickness, the rest is teeth
+ (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
+ left_thick,mid_thick,right_thick = lip_utils.get_top_lip_thicks(landmarks_list)
+ bottom_base = bottom_height/1.5
+
+ diff_left = max(0,int(left_thick - bottom_base))
+ diff_right = max(0,int(right_thick - bottom_base))
+ diff_mid = max(0,int((diff_right+diff_left)*0.4))
+ print(f"bottom base = {bottom_base} left thick ={left_thick} diff ={diff_left}")
+ print(f"bottom base = {bottom_base} left thick ={left_thick} mid ={diff_mid}")
+
+
+ (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
+
+ mid_lip_drop_size = lip_utils.get_bottom_mid_drop_size(open_size_y,bottom_height)
+ print(f"mid_lip_drop_size = {mid_lip_drop_size}")
+ moved_points = []
+ for idx,point in enumerate(points):
+ if idx not in statics:
+ if idx in half:
+ plus_x = 0
+ if idx == 5 :#or idx ==0
+ plus_x = open_size_x*m
+ elif idx == 9:#idx == 4 or
+ plus_x = -open_size_x*m
+ elif idx == 0:
+ plus_x = open_size_x*m
+ elif idx == 4:
+ plus_x =-open_size_x*m
+ print(f"idx ={idx} plus {plus_x}")
+
+ moved_points.append((point[0]+plus_x,point[1]+open_size_y/2))
+ else:
+ #bottom
+ moved_points.append((point[0],point[1]+int(open_size_y*2)+mid_lip_drop_size))
+ else:
+ print(f"static ? {idx}")
+ #static top
+ #moved_points.append((point[0],point[1]-crop_mid_h/4)) #for open 40
+ #moved_points.append((point[0],point[1]))
+ if idx == 3:
+ moved_points.append((point[0],point[1]-diff_left))
+ print(f"left top lip move up {diff_left}")
+ elif idx == 2:
+ moved_points.append((point[0],point[1]-diff_mid))
+ elif idx == 1:
+ moved_points.append((point[0],point[1]-diff_right))
+ print(f"right top lip move up {diff_right}")
+
+
+
+
+
+ # force moved
+ #moved_points[1][1] = moved_points[1][1] -4
+
+ tmp = lip_utils.create_mask_from_points(img,points,int(open_size_y/2),0)
+ if lip_utils.DEBUG:
+ cv2.imwrite("lip_hole_mask_base.jpg",tmp)
+
+ gaus = 2 # ADD OPTION
+ mask = lip_utils.create_mask_from_points(img,moved_points,int(open_size_y/2),gaus)
+ if lip_utils.DEBUG:
+ cv2.imwrite("lip_hole_mask.jpg",mask)
+
+
+ return face_size_image,mask
+
+if __name__ == "__main__":
+    # path to the image file
+    img_path = "straight.jpg"
+    img = cv2.imread(img_path)
+    landmarks_list = lip_utils.image_to_landmarks_list(img)
+    # parameters
+    margin = 4
+    open_size_y = 20
+    open_size_x = 0
+
+    # call the function
+    process_lip_image(img, landmarks_list, margin, open_size_y, open_size_x)
\ No newline at end of file
diff --git a/demo_footer.html b/demo_footer.html
new file mode 100644
index 0000000000000000000000000000000000000000..18fa5a3fd35b808f80817788fcf3bed61d4cb1e4
--- /dev/null
+++ b/demo_footer.html
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/demo_header.html b/demo_header.html
new file mode 100644
index 0000000000000000000000000000000000000000..3f0ca444fa8c1175c04e3a849a8c08828deef726
--- /dev/null
+++ b/demo_header.html
@@ -0,0 +1,16 @@
+
+
+ Mediapipe 68-points Eyes-Closed and Mouth-Opened
+
+
+
+
+
+
+        This Space uses the Apache-2.0-licensed Mediapipe FaceLandmarker.
+        One of the JSON formats comes from the MIT-licensed face_recognition.
+        I should clarify, because it is confusing: I am not using dlib's non-MIT-licensed 68-point model at all.
+        This is 10-year-old technology. However, most of the amazing talking-head models,
+        while often having their core code under MIT/Apache licenses, rely on datasets or NVIDIA libraries with more restrictive licenses.
+
+
+
+
diff --git a/demo_tools.html b/demo_tools.html
new file mode 100644
index 0000000000000000000000000000000000000000..30d78feabed1dca8e28d24fbcdad3d92a72a00e3
--- /dev/null
+++ b/demo_tools.html
@@ -0,0 +1,10 @@
+
diff --git a/draw_landmarks68.py b/draw_landmarks68.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5770368b02b29d98956fa0fc32f0dfadc46632
--- /dev/null
+++ b/draw_landmarks68.py
@@ -0,0 +1,516 @@
+
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+from mediapipe.framework.formats import landmark_pb2
+from mediapipe import solutions
+import numpy as np
+import time
+import cv2
+import argparse
+import os
+import math
+
+# modified in gradio
+
+from mp_constants import *
+from mp_utils import divide_line_to_points,points_to_bbox,expand_bbox
+
+import logging
+
+# for share lib,TODO make module
+#import sys
+#sys.path.append("C:\\Users\\owner\\Documents\\pythons\\glibvision")
+from glibvision.glandmark_utils import bbox_to_glandmarks,convert_to_landmark_group_json
+from glibvision.cv2_utils import draw_bbox,plot_points,set_plot_text
+
+def parse_arguments():
+ """
+    Command-line arguments
+
+ """
+ parser = argparse.ArgumentParser(
+ description="draw 68 points"
+ )
+ parser.add_argument(
+ "--input_file","-i",required=True,help="Input file"
+ )
+ parser.add_argument(
+ "--model_path","-m",default="face_landmarker.task",help="model path"
+ )
+ parser.add_argument(
+ "--save_glandmark","-g",action="store_true",help="save godot-landmark json"
+ )
+ parser.add_argument(
+ "--save_group_landmark","-landmark",action="store_true",help="save group-landmark json"
+ )
+ return parser.parse_args()
+
+
+
+
+
+def draw_landmarks_on_image(rgb_image, detection_result,draw_number=True,font_scale=0.5,text_color=(200,200,200),dot_size=3,dot_color=(255,0,0),line_size=1,line_color=(0,0,255),box_size=1,box_color=(200,200,200)):
+ #print(f"dot_size={dot_size},dot_color={dot_color},line_size={line_size},line_color={line_color}")
+    image_width,image_height = rgb_image.size
+ face_landmarks_list = detection_result.face_landmarks
+ annotated_image = np.copy(rgb_image)
+
+ def get_cordinate(index):
+ x=face_landmarks_list[0][index].x
+ y=face_landmarks_list[0][index].y
+ return x,y
+
+ def get_distance(x1,y1,x2,y2):
+ return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
+
+ def get_centers():
+ center_indices =[
+ #(POINT_LEFT_HEAD_OUTER,POINT_RIGHT_HEAD_OUTER,POINT_FOREHEAD_TOP),
+ #(POINT_LEFT_HEAD_OUTER,POINT_RIGHT_HEAD_OUTER,POINT_CHIN_BOTTOM),
+ [POINT_NOSE_CENTER_MIDDLE],
+ #[POINT_LOWER_LIP_CENTER_BOTTOM]
+ #(POINT_UPPER_LIP_CENTER_BOTTOM,POINT_LOWER_LIP_CENTER_TOP)
+ ]
+ centers = []
+ for indices in center_indices:
+ total_x = 0
+ total_y = 0
+ for index in indices:
+ x,y = get_cordinate(index)
+ total_x+=x
+ total_y+=y
+ centers.append ((total_x/len(indices),total_y/len(indices)))
+ return centers
+
+ centers = get_centers()
+ for center in centers:
+ center_x,center_y = center
+
+        pt = int(center_x*image_width),int(center_y*image_height)
+
+ #cv2.circle(annotated_image,pt,20,(0,0,255),-1)
+
+ def get_closed_center(x,y):
+ closed = None
+ closed_distance = 0
+ for center in centers:
+ distance = get_distance(center[0],center[1],x,y)
+ if closed == None:
+ closed = center
+ closed_distance = distance
+ else:
+                if distance < closed_distance:
+                    closed = center
+                    closed_distance = distance
+        return closed
+
+
\ No newline at end of file
diff --git a/glibvision/cv2_utils.py b/glibvision/cv2_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..035fe8bd20c32e6f440d3ed6e0773bc3a5bd51a3
--- /dev/null
+++ b/glibvision/cv2_utils.py
@@ -0,0 +1,138 @@
+import cv2
+import numpy as np
+
+
+
+def draw_bbox(image,box,color=(255,0,0),thickness=1):
+ if thickness==0:
+ return
+
+ left = int(box[0])
+ top = int(box[1])
+ right = int(box[0]+box[2])
+ bottom = int(box[1]+box[3])
+ box_points =[(left,top),(right,top),(right,bottom),(left,bottom)]
+
+ cv2.polylines(image, [np.array(box_points)], isClosed=True, color=color, thickness=thickness)
+
+
+def to_int_points(points):
+ int_points=[]
+ for point in points:
+ int_points.append([int(point[0]),int(point[1])])
+ return int_points
+
+def draw_text(img, text, point, font_scale=0.5, color=(200, 200, 200), thickness=1):
+ font = cv2.FONT_HERSHEY_SIMPLEX
+ cv2.putText(img, str(text), point, font, font_scale, color, thickness, cv2.LINE_AA)
+
+plot_text_color = (200, 200, 200)
+plot_text_font_scale = 0.5
+plot_index = 1
+plot_text = True
+
+def set_plot_text(is_plot,text_font_scale,text_color):
+ global plot_index,plot_text,plot_text_font_scale,plot_text_color
+ plot_text = is_plot
+ plot_index = 1
+ plot_text_font_scale = text_font_scale
+ plot_text_color = text_color
+
+def plot_points(image,points,isClosed=False,circle_size=3,circle_color=(255,0,0),line_size=1,line_color=(0,0,255)):
+ global plot_index,plot_text
+ int_points = to_int_points(points)
+ if circle_size>0:
+ for point in int_points:
+ cv2.circle(image,point,circle_size,circle_color,-1)
+ if plot_text:
+ draw_text(image,plot_index,point,plot_text_font_scale,plot_text_color)
+ plot_index+=1
+ if line_size>0:
+ cv2.polylines(image, [np.array(int_points)], isClosed=isClosed, color=line_color, thickness=line_size)
+
+def fill_points(image,points,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ np_points = np.array(points,dtype=np.int32)
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=True, color=line_color, thickness=thickness)
+
+def get_image_size(cv2_image):
+ return cv2_image.shape[:2]
+
+def get_channel(np_array):
+ return np_array.shape[2] if np_array.ndim == 3 else 1
+
+def get_numpy_text(np_array,key=""):
+ channel = get_channel(np_array)
+ return f"{key} shape = {np_array.shape} channel = {channel} ndim = {np_array.ndim} size = {np_array.size}"
+
+
+def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
+    """
+    Convert a 3-dimensional grayscale image (1 channel) to 2 dimensions.
+
+    Args:
+        grayscale (np.ndarray): 3-dimensional grayscale image (1 channel).
+
+    Returns:
+        np.ndarray: 2-dimensional grayscale image.
+    """
+    channel = get_channel(grayscale)
+    if channel!=1:
+        raise ValueError(f"color maybe rgb or rgba {get_numpy_text(grayscale)}")
+
+    if grayscale.ndim == 2:
+        return grayscale
+    return np.squeeze(grayscale)
+
+def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
+ """
+ 2 つの RGB 画像をマスク画像を使用してブレンドする。
+
+ Args:
+ image1 (np.ndarray): 最初の画像 (RGB)。
+ image2 (np.ndarray): 2 番目の画像 (RGB)。
+ mask (np.ndarray): マスク画像 (グレースケール)。
+
+ Returns:
+ np.ndarray: ブレンドされた画像 (RGB)。
+
+ Raises:
+ ValueError: 入力画像の形状が一致しない場合。
+ """
+
+ if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
+ raise ValueError("入力画像の形状が一致しません。")
+
+    # convert the images to float
+ image1 = image1.astype(float)
+ image2 = image2.astype(float)
+
+    # convert the mask to 3 channels and scale it to the 0-1 range
+ alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0
+
+    # blend
+ blended = (1 - alpha) * image1 + alpha * image2
+
+ return blended.astype(np.uint8)
+
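+# For example (a sketch): a uniform mask value of 128 blends two solid images
+# to roughly their midpoint color.
+#
+#   a = np.zeros((2, 2, 3), np.uint8)            # black
+#   b = np.full((2, 2, 3), 255, np.uint8)        # white
+#   m = np.full((2, 2), 128, np.uint8)
+#   blend_rgb_images(a, b, m)[0, 0]              # -> [128, 128, 128]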
+def create_color_image(img,color=(255,255,255)):
+ mask = np.zeros_like(img)
+
+ h, w = img.shape[:2]
+ cv2.rectangle(mask, (0, 0), (w, h), color, -1)
+ return mask
+
+def pil_to_bgr_image(image):
+ np_image = np.array(image, dtype=np.uint8)
+ if np_image.shape[2] == 4:
+ bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGBA2BGRA)
+ else:
+ bgr_img = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
+ return bgr_img
+
+def bgr_to_rgb(np_image):
+    if np_image.shape[2] == 4:
+        rgb_img = cv2.cvtColor(np_image, cv2.COLOR_BGRA2RGBA)
+    else:
+        rgb_img = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
+    return rgb_img
\ No newline at end of file
diff --git a/glibvision/glandmark_utils.py b/glibvision/glandmark_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ffc1da78eaf504a4ef6b97de6ce3162cce75de9
--- /dev/null
+++ b/glibvision/glandmark_utils.py
@@ -0,0 +1,48 @@
+
+import os
+
+#simple single version
+def bbox_to_glandmarks(file_name,bbox,points = None):
+ base,ext = os.path.splitext(file_name)
+ glandmark = {"image":{
+ "boxes":[{
+ "left":int(bbox[0]),"top":int(bbox[1]),"width":int(bbox[2]),"height":int(bbox[3])
+ }],
+ "file":file_name,
+ "id":int(base)
+ # width,height ignore here
+ }}
+ if points is not None:
+ parts=[
+ ]
+ for point in points:
+ parts.append({"x":int(point[0]),"y":int(point[1])})
+ glandmark["image"]["boxes"][0]["parts"] = parts
+ return glandmark
+
+# technically this is not the g-landmark/dlib format
+def convert_to_landmark_group_json(points):
+ if len(points)!=68:
+ print(f"points must be 68 but {len(points)}")
+ return None
+ new_points=list(points)
+
+    result = [ # a list, so that consumers can support multiple people
+
+        { # indices start at 0 but landmark numbers start at 1
+ "chin":new_points[0:17],
+ "left_eyebrow":new_points[17:22],
+ "right_eyebrow":new_points[22:27],
+ "nose_bridge":new_points[27:31],
+ "nose_tip":new_points[31:36],
+ "left_eye":new_points[36:42],
+ "right_eye":new_points[42:48],
+
+ # lip points customized structure
+ # MIT licensed face_recognition
+ # https://github.com/ageitgey/face_recognition
+ "top_lip":new_points[48:55]+[new_points[64]]+[new_points[63]]+[new_points[62]]+[new_points[61]]+[new_points[60]],
+ "bottom_lip":new_points[54:60]+[new_points[48]]+[new_points[60]]+[new_points[67]]+[new_points[66]]+[new_points[65]]+[new_points[64]],
+ }
+ ]
+ return result
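+# Sanity sketch: with 68 dummy points the groups get the face_recognition
+# sizes (chin 17, eyebrows 5 each, nose_bridge 4, nose_tip 5, eyes 6 each,
+# top_lip 12, bottom_lip 12):
+#
+#   groups = convert_to_landmark_group_json([(i, i) for i in range(68)])[0]
+#   [len(groups[k]) for k in ("chin", "top_lip", "bottom_lip")]  # [17, 12, 12]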
\ No newline at end of file
diff --git a/glibvision/numpy_utils.py b/glibvision/numpy_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae0e2c4ee3556da6539930b7b4c5582c7d43e580
--- /dev/null
+++ b/glibvision/numpy_utils.py
@@ -0,0 +1,110 @@
+import numpy as np
+
+
+def apply_binary_mask_to_color(base_image,color,mask):
+ """
+    Fill part of an image with a solid color using a binary mask.
+
+    Args:
+        base_image (np.ndarray): image to modify in place.
+        color: color assigned where the mask is 255.
+        mask (np.ndarray): binary mask image.
+
+    Returns:
+        np.ndarray: the image with the mask applied.
+
+ """
+ # TODO check all shape
+ #print_numpy(base_image)
+ #print_numpy(paste_image)
+ #print_numpy(mask)
+ if mask.ndim == 2:
+ condition = mask == 255
+ else:
+ condition = mask[:,:,0] == 255
+
+ base_image[condition] = color
+ return base_image
+
+def apply_binary_mask_to_image(base_image,paste_image,mask):
+ """
+    Copy part of one image onto another where a binary mask is set.
+
+    Args:
+        base_image (np.ndarray): destination image, modified in place.
+        paste_image (np.ndarray): source image.
+        mask (np.ndarray): binary mask image.
+
+    Returns:
+        np.ndarray: the image with the mask applied.
+
+ """
+ # TODO check all shape
+ #print_numpy(base_image)
+ #print_numpy(paste_image)
+ #print_numpy(mask)
+ if mask.ndim == 2:
+ condition = mask == 255
+ else:
+ condition = mask[:,:,0] == 255
+
+ base_image[condition] = paste_image[condition]
+ return base_image
+
+def pil_to_numpy(image):
+ return np.array(image, dtype=np.uint8)
+
+def extruce_points(points,index,ratio=1.5):
+ """
+    Push the point at index outward from the centroid of the point set by ratio.
+ """
+ center_point = np.mean(points, axis=0)
+    if index < 0 or index >= len(points):
+        raise ValueError(f"index must be in range(0,{len(points)}) but value = {index}")
+ point1 =points[index]
+    #print(f"center = {center_point}")
+ vec_to_center = point1 - center_point
+ return vec_to_center*ratio + center_point
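+# Worked example (hedged, illustrative values): for the square
+# [(0,0),(2,0),(2,2),(0,2)] the centroid is (1,1), so extruce_points(points, 0, 1.5)
+# moves (0,0) to (1,1) + 1.5 * ((0,0) - (1,1)) = (-0.5, -0.5).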
+
+
+def bulge_polygon(points, bulge_factor=0.1,isClosed=True):
+ """
+    Add a midpoint to each polygon edge and push it outward from the centroid.
+    Note: this returns an ndarray.
+ """
+    # convert the input points to a NumPy array
+ points = np.array(points)
+
+    # centroid of the whole polygon
+ center_point = np.mean(points, axis=0)
+ #print(f"center = {center_point}")
+ new_points = []
+ num_points = len(points)
+ for i in range(num_points):
+ if i == num_points -1 and not isClosed:
+ break
+ p1 = points[i]
+ #print(f"p{i} = {p1}")
+        # vector from the centroid to the vertex
+ #vec_to_center = p1 - center_point
+
+        # vector along the edge to the next vertex
+ mid_diff = points[(i + 1) % num_points] - p1
+ mid = p1+(mid_diff/2)
+
+ #print(f"mid = {mid}")
+ out_vec = mid - center_point
+
+        # push the midpoint outward along the centroid-to-midpoint vector
+ new_point = mid + out_vec * bulge_factor
+
+ new_points.append(p1)
+ new_points.append(new_point.astype(np.int32))
+
+ return np.array(new_points)
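+# Usage sketch (hedged): every edge midpoint is pushed away from the centroid,
+# so a closed square roughly becomes an octagon with bowed-out edges:
+#   square = [(0, 0), (10, 0), (10, 10), (0, 10)]
+#   bulged = bulge_polygon(square, bulge_factor=0.1)  # ndarray of 8 points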
+
+
+# image.shape rgb are (1024,1024,3) use 1024,1024 as 2-dimensional
+def create_2d_image(shape):
+ grayscale_image = np.zeros(shape[:2], dtype=np.uint8)
+ return grayscale_image
\ No newline at end of file
diff --git a/glibvision/pil_utils.py b/glibvision/pil_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f63fb51b6516739853513135bf0c7966dd7f56a
--- /dev/null
+++ b/glibvision/pil_utils.py
@@ -0,0 +1,14 @@
+from PIL import Image,ImageDraw
+
+def create_color_image(width, height, color=(255,255,255)):
+ img = Image.new('RGB', (width, height), color)
+ return img
+
+def fill_points(image,points,color=(255,255,255)):
+ draw = ImageDraw.Draw(image)
+ int_points = [(int(x), int(y)) for x, y in points]
+ draw.polygon(int_points, fill=color)
+ return image
+
+def from_numpy(numpy_array):
+ return Image.fromarray(numpy_array)
\ No newline at end of file
diff --git a/gradio_utils.py b/gradio_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..17f80349710b2f1c8250a4b6f5addb642e5156cf
--- /dev/null
+++ b/gradio_utils.py
@@ -0,0 +1,60 @@
+
+
+import os
+import time
+import io
+import hashlib
+
+def clear_old_files(dir="files",passed_time=60*60):
+ try:
+ files = os.listdir(dir)
+ current_time = time.time()
+ for file in files:
+ file_path = os.path.join(dir,file)
+
+ ctime = os.stat(file_path).st_ctime
+ diff = current_time - ctime
+ #print(f"ctime={ctime},current_time={current_time},passed_time={passed_time},diff={diff}")
+ if diff > passed_time:
+ os.remove(file_path)
+    except Exception:
+        print("could not remove; the gallery may still be using the file")
+
+def get_buffer_id(buffer):
+ hash_object = hashlib.sha256(buffer.getvalue())
+ hex_dig = hash_object.hexdigest()
+ unique_id = hex_dig[:32]
+ return unique_id
+
+def get_image_id(image):
+ buffer = io.BytesIO()
+ image.save(buffer, format='PNG')
+ return get_buffer_id(buffer)
+
+def save_image(image,extension="jpg",dir_name="files"):
+ id = get_image_id(image)
+ os.makedirs(dir_name,exist_ok=True)
+ file_path = f"{dir_name}/{id}.{extension}"
+
+ image.save(file_path)
+ return file_path
+
+def save_buffer(buffer,extension="webp",dir_name="files"):
+ id = get_buffer_id(buffer)
+ os.makedirs(dir_name,exist_ok=True)
+ file_path = f"{dir_name}/{id}.{extension}"
+
+ with open(file_path,"wb") as f:
+ f.write(buffer.getvalue())
+ return file_path
+
+def write_file(file_path,text):
+ with open(file_path, 'w', encoding='utf-8') as f:
+ f.write(text)
+
+def read_file(file_path):
+    """Read the text of the target file.
+ """
+ with open(file_path, 'r', encoding='utf-8') as f:
+ content = f.read()
+ return content
\ No newline at end of file
diff --git a/hole_images/black.jpg b/hole_images/black.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93ff7ba258e4a5392b1f8a8f0766b121a24fee2c
Binary files /dev/null and b/hole_images/black.jpg differ
diff --git a/hole_images/dark01.jpg b/hole_images/dark01.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f641fa4e5cb489d9de0fbc643c7d2f85d01b5832
Binary files /dev/null and b/hole_images/dark01.jpg differ
diff --git a/hole_images/mid01.jpg b/hole_images/mid01.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a2c7d008f96e0f6cf797b669eec0ebae977a8e8b
Binary files /dev/null and b/hole_images/mid01.jpg differ
diff --git a/hole_images/mid02.jpg b/hole_images/mid02.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..73f08ea1cdc08354249c7adc0633a200bb54db2a
Binary files /dev/null and b/hole_images/mid02.jpg differ
diff --git a/landmarks68_utils.py b/landmarks68_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..85bbe2aec800b7313d531b656db0e484ada398fb
--- /dev/null
+++ b/landmarks68_utils.py
@@ -0,0 +1,147 @@
+import numpy as np
+from PIL import Image,ImageDraw
+from glibvision.numpy_utils import extruce_points,bulge_polygon
+
+
+def minus_point(pt1,pt2):
+ return [pt1[0]-pt2[0],pt1[1]-pt2[1]]
+
+def lerp_point(pt1,pt2,pt2_ratio):
+    return [int(pt1[0]*(1.0-pt2_ratio)+pt2[0]*pt2_ratio),int(pt1[1]*(1.0-pt2_ratio)+pt2[1]*pt2_ratio)]
+
+def mean_point(points):
+ xs = 0
+ ys = 0
+ for pt in points:
+ xs +=pt[0]
+ ys +=pt[1]
+ return [int(xs/len(points)),int(ys/len(points))]
+
+def get_face_points(face_landmarks_list):
+ contour_points=get_landmark_points(face_landmarks_list,PARTS_CONTOUR)
+ left_eyebrow_points=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYEBROW)
+
+ right_eyebrow_points=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYEBROW)
+
+ nose_points=get_landmark_points(face_landmarks_list,PARTS_NOSE_BRIDGE)
+
+ diff_right = minus_point(contour_points[1],contour_points[0])
+ right_minus_corner = minus_point(contour_points[0] , diff_right)
+ right_contour = lerp_point(right_minus_corner,left_eyebrow_points[0],0.3)
+
+ diff_left = minus_point(contour_points[15],contour_points[16])
+ left_minus_corner = minus_point(contour_points[16] , diff_left)
+ left_contour = lerp_point(left_minus_corner,right_eyebrow_points[-1],0.3)
+
+ middle_face = mean_point([nose_points[0],right_eyebrow_points[0],left_eyebrow_points[-1]])
+ return [right_contour]+list(contour_points)+[left_contour,middle_face]
+
+
+def get_innner_mouth_points(face_landmarks_list):
+ top_points=get_landmark_points(face_landmarks_list,PARTS_UPPER_LIP)
+ bottom_points=get_landmark_points(face_landmarks_list,PARTS_LOWER_LIP)
+ return top_points[7:]+bottom_points[7:]#[::-1]
+
+
+PARTS_UPPER_LIP = "top_lip"
+PARTS_LOWER_LIP = "bottom_lip"
+PARTS_CONTOUR ="chin"
+PARTS_LEFT_EYEBROW ="left_eyebrow"
+PARTS_RIGHT_EYEBROW ="right_eyebrow"
+PARTS_LEFT_EYE ="left_eye"
+PARTS_RIGHT_EYE ="right_eye"
+PARTS_NOSE_TIP ="nose_tip"
+PARTS_NOSE_BRIDGE ="nose_bridge"
+
+def get_landmark_points(face_landmarks_list,key):
+    for face_landmarks in face_landmarks_list:
+        for landmark_name, landmark_points in face_landmarks.items():
+            if landmark_name ==key:
+                return tuple(landmark_points.copy())
+
+def get_left_upper_eyelid_points(face_landmarks_list,bulge_factor = 0.2):
+ eye_points=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYE)
+ extruded_points=[]
+
+ need_extrude =[0,1,2]
+ for index in range(len(eye_points)):
+ if index in need_extrude:
+ ratio = 1.3
+ else:
+ ratio = 1.1
+ ex_point=extruce_points(eye_points,index,ratio)
+ extruded_points.append(ex_point)
+ return extruded_points
+
+def get_right_upper_eyelid_points(face_landmarks_list,bulge_factor = 0.2):
+ eye_points=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYE)
+ extruded_points=[]
+
+ need_extrude =[1,2,3]
+ for index in range(len(eye_points)):
+ if index in need_extrude:
+ ratio = 1.3
+ else:
+ ratio = 1.1
+ ex_point=extruce_points(eye_points,index,ratio)
+ extruded_points.append(ex_point)
+ #return list(eye_points[0:4])+extruded_points
+ return extruded_points
+
+def get_bulged_eyes(face_landmarks_list,bulge_factor=0.2):
+ points1=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYE)
+ points2=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYE)
+
+ return bulge_polygon(points1, bulge_factor=bulge_factor),bulge_polygon(points2, bulge_factor=bulge_factor)
+
+
+def get_lerp(point1,point2,ratio1=0.5):
+ x = point1[0]*ratio1 + point2[0]*(1.0-ratio1)
+ y = point1[1]*ratio1 + point2[1]*(1.0-ratio1)
+ return [int(x),int(y)]
+
+def get_close_eyelid_point(landmarks_list,bulge = 0.01):
+ left = get_landmark_points(landmarks_list,PARTS_LEFT_EYE)
+ left_points = [get_lerp(left[3],left[4],0.6)]+list(left[4:6])+[left[0]]
+ right = get_landmark_points(landmarks_list,PARTS_RIGHT_EYE)
+ right_points = [get_lerp(right[0],right[5],0.6)]+list(right[3:6][::-1])
+
+ #print("right points")
+ #print(right_points)
+ last2 = right_points[-2:]
+ #print(last2[0])
+ #print(last2[1])
+    extra_divided = 10
+    diff = ((last2[0][0]-last2[1][0])/extra_divided,(last2[0][1]-last2[1][1])/extra_divided)
+    extra = [int(last2[1][0] - diff[0]),int(last2[1][1] - diff[1])]
+
+    height = abs(right_points[0][1]-right_points[-1][1])
+    #print(f"height = {height}")
+    move_down = int(height/5)
+    #print("diff")
+    #print(diff)
+    #print(right_points[-1])
+    #print(extra)
+ right_points.append(extra)
+ for pt in right_points:
+ pt[1]+=move_down
+
+ last2 = left_points[-2:]
+    diff = ((last2[0][0]-last2[1][0])/extra_divided,(last2[0][1]-last2[1][1])/extra_divided)
+ extra = [int(last2[1][0] - diff[0]),int(last2[1][1] - diff[1])]
+ left_points.append(extra)
+ for pt in left_points:
+ pt[1]+=move_down
+
+    #print(right_points)
+    if bulge:
+        left_points = bulge_polygon(left_points,0.1,False).tolist()
+        right_points = bulge_polygon(right_points,0.1,False).tolist()
+
+    return left_points,right_points
\ No newline at end of file
diff --git a/lip_utils.py b/lip_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e28b652c83291527174f42076e7922a0ae8048ff
--- /dev/null
+++ b/lip_utils.py
@@ -0,0 +1,781 @@
+import face_recognition
+import os
+import json
+#from glibvision.cv2_utils import get_numpy_text
+from glibvision.numpy_utils import bulge_polygon
+import numpy as np
+import cv2
+import math
+USE_CACHE = True
+
+# face structures are same
+# MIT LICENSED
+# https://github.com/ageitgey/face_recognition
+
+TOP_LIP = "top_lip"
+BOTTOM_LIP = "bottom_lip"
+PARTS_CHIN ="chin"
+PARTS_LEFT_EYEBROW ="left_eyebrow"
+PARTS_RIGHT_EYEBROW ="right_eyebrow"
+PARTS_LEFT_EYE ="left_eye"
+PARTS_RIGHT_EYE ="right_eye"
+
+POINTS_TOP_LIP = "top_lip"
+POINTS_BOTTOM_LIP = "bottom_lip"
+POINTS_CHIN = "chin"
+
+COLOR_WHITE=(255,255,255)
+COLOR_BLACK=(0,0,0)
+COLOR_ALPHA=(0,0,0,0)
+
+DEBUG = False
+DEBUG_CHIN = False
+def load_image_file(path):
+ image = face_recognition.load_image_file(path)
+ data_path=path+".json"
+ if USE_CACHE and os.path.exists(data_path):
+ with open(data_path, "r") as f:
+ face_landmarks_list = json.loads(f.read())
+ else:
+ face_landmarks_list = image_to_landmarks_list(image)
+ if USE_CACHE:
+ json_data = json.dumps(face_landmarks_list)
+ with open(data_path, "w") as f:
+ f.write(json_data)
+
+ return image,face_landmarks_list
+
+def save_landmarks(face_landmarks,out_path):
+ json_data = json.dumps(face_landmarks)
+ with open(out_path, "w") as f:
+ f.write(json_data)
+
+def load_landmarks(input_path):
+ with open(input_path, "r") as f:
+ face_landmarks_list = json.loads(f.read())
+ return face_landmarks_list
+
+
+
+def image_to_landmarks_list(image):
+ face_landmarks_list = face_recognition.face_landmarks(image)
+ return face_landmarks_list
+
+def fill_polygon(image,face_landmarks_list,key,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ points=get_landmark_points(face_landmarks_list,key)
+ np_points = np.array(points,dtype=np.int32)
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=True, color=line_color, thickness=thickness)
+
+def fill_lip(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ points1=get_landmark_points(face_landmarks_list,TOP_LIP)[0:7]
+ points2=get_landmark_points(face_landmarks_list,BOTTOM_LIP)[0:7]
+
+ np_points = np.array(points1+points2[::-1],dtype=np.int32)
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ if thickness > 0:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def fill_top(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ points1=get_landmark_points(face_landmarks_list,TOP_LIP)[0:7]
+
+ np_points = np.array(points1,dtype=np.int32)
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ if thickness > 0:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def fill_top_lower(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+    top_lip_points=get_landmark_points(face_landmarks_list,TOP_LIP) # uses indices 5 to 7 and 1 to 11
+ points1 = [lerp_points(top_lip_points[5],top_lip_points[7],0.7)]+ \
+ [mid_points(top_lip_points[7],top_lip_points[8])]+ \
+ list(top_lip_points[8:11]) +\
+ [mid_points(top_lip_points[10],top_lip_points[11])]+ \
+ [lerp_points(top_lip_points[1],top_lip_points[11],0.7)]+\
+ [mid_points(top_lip_points[2],top_lip_points[10])]+\
+ [mid_points(top_lip_points[3],top_lip_points[9])]+\
+ [mid_points(top_lip_points[4],top_lip_points[8])]
+
+ np_points = np.array(points1,dtype=np.int32)
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ if thickness > 0:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def get_lip_mask_points(face_landmarks_list):
+ points1=get_landmark_points(face_landmarks_list,TOP_LIP)[0:7]
+ points2=get_landmark_points(face_landmarks_list,BOTTOM_LIP)[0:7]
+ return points1+points2
+
+
+
+from scipy.special import comb
+
+def bernstein_poly(i, n, t):
+ """
+    Compute the i-th Bernstein basis polynomial of degree n.
+ """
+ return comb(n, i) * (t**(n-i)) * (1 - t)**i
+
+def bezier_curve(points, num_points=100):
+ """
+    Compute a Bezier curve from the given control points.
+ """
+ nPoints = len(points)
+ xPoints = np.array([p[0] for p in points])
+ yPoints = np.array([p[1] for p in points])
+
+ t = np.linspace(0.0, 1.0, num_points)
+
+ polynomial_array = np.array([bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints)])
+
+ xvals = np.dot(xPoints, polynomial_array)
+ yvals = np.dot(yPoints, polynomial_array)
+
+ return np.array(list(zip(xvals, yvals)))
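+# Usage sketch (hedged): three control points yield a quadratic Bezier arc.
+#   curve = bezier_curve([(0, 0), (5, 10), (10, 0)], num_points=50)
+#   # curve is a (50, 2) array of (x, y) samples between the end control points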
+
+def fill_eyes(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ points1=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYE)
+ points2=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYE)
+
+ for points in [points1,points2]:
+ #points = bezier_curve(points, num_points=10)
+ #print(points)
+ points = bulge_polygon(points, bulge_factor=0.2)
+ #print(points)
+ np_points = np.array(points,dtype=np.int32)
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ if thickness > 0:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+
+
+
+
+def fill_face(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ points1=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYEBROW)
+ points2=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYEBROW)
+ points3=get_landmark_points(face_landmarks_list,PARTS_CHIN)
+
+ np_points = np.array(points1+points2+points3[::-1],dtype=np.int32)
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def fill_face_inside(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+    print("not supported yet")
+    return None
+    points1=get_landmark_points(face_landmarks_list,PARTS_LEFT_EYEBROW)
+    points2=get_landmark_points(face_landmarks_list,PARTS_RIGHT_EYEBROW)
+    points3=get_landmark_points(face_landmarks_list,PARTS_CHIN)
+
+ np_points = np.array(points1+points2+points3[::-1],dtype=np.int32)
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def half_pt(point1,point2):
+ return [sum(x) / 2 for x in zip(point1, point2)]
+
+
+def line_lip(image,face_landmarks_list,key,thickness=1,line_color=(255,255,255)):
+ points=get_landmark_points(face_landmarks_list,key)
+ print(len(points))
+ #st=[(points[0]+points[11])/2]
+ st = [sum(x) / 2 for x in zip(points[0], points[11])]
+
+ #et=[(points[6]+points[7])/2]
+ et = [sum(x) / 2 for x in zip(points[6], points[7])]
+ print(et)
+ print(points)
+ np_points = np.array([st]+points[1:6]+[et],dtype=np.int32)
+ #if key == TOP_LIP:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def get_lip_hole_points(face_landmarks_list):
+ top_points=get_landmark_points(face_landmarks_list,TOP_LIP)
+ bottom_points=get_landmark_points(face_landmarks_list,BOTTOM_LIP)
+ return top_points[7:]+bottom_points[7:]#[::-1]
+ #np_points = np.array(top_points[7:]+bottom_points[7:][::-1],dtype=np.int32)
+
+def get_lip_hole_top_points(face_landmarks_list):
+ top_points=get_landmark_points(face_landmarks_list,TOP_LIP)
+ #bottom_points=get_landmark_points(face_landmarks_list,BOTTOM_LIP)
+ return top_points[7:]
+
+def get_lip_hole_bottom_points(face_landmarks_list):
+ #top_points=get_landmark_points(face_landmarks_list,TOP_LIP)
+ bottom_points=get_landmark_points(face_landmarks_list,BOTTOM_LIP)
+ #inverted for connect top
+ return bottom_points[7:][::-1]
+
+#for hide too long tooth
+def get_lip_hole_bottom_half_points(face_landmarks_list):
+ #top_points=get_landmark_points(face_landmarks_list,TOP_LIP)
+ bottom_points=get_landmark_points(face_landmarks_list,BOTTOM_LIP)
+ #inverted for connect top
+ st = [sum(x) / 2 for x in zip(bottom_points[7], bottom_points[8])]
+ et = [sum(x) / 2 for x in zip(bottom_points[10], bottom_points[11])]
+ points = [st]+bottom_points[8:11]+[et]
+ #print(points)
+ return points[::-1]
+
+def fill_points(points,image,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ np_points = np.array(points,dtype=np.int32)
+
+
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ if thickness>0:
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+def fill_lip_hole_top(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ np_points = np.array(get_lip_hole_top_points(face_landmarks_list),dtype=np.int32)
+
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+
+
+def fill_lip_hole(image,face_landmarks_list,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
+ np_points = np.array(get_lip_hole_points(face_landmarks_list),dtype=np.int32)
+ #print(np_points)
+ cv2.fillPoly(image, [np_points], fill_color)
+ cv2.polylines(image, [np_points], isClosed=False, color=line_color, thickness=thickness)
+
+
+
+def get_landmark_points(face_landmarks_list,key):
+ matching_landmark_points = []
+ for face_landmarks in face_landmarks_list:
+ for landmark_name, landmark_points in face_landmarks.items():
+ #matching_landmark_points = landmark_points.copy()
+ if landmark_name ==key:
+ for value in landmark_points:
+ matching_landmark_points.append([value[0],value[1]])
+ return tuple(matching_landmark_points)
+
+def get_image_size(cv2_image):
+ return cv2_image.shape[:2]
+
+def get_top_lip_box(face_landmarks_list,margin = 0):
+ print(f"get_top_lip_box margin = {margin}")
+ points = get_landmark_points(face_landmarks_list,TOP_LIP)
+ box= points_to_box(points)
+ if margin>0:
+ return ((box[0][0] - margin,box[0][1] - margin),(box[1][0] + margin, box[1][1] + margin))
+ else:
+ return box
+
+def get_points_box(face_landmarks_list,key,margin = 0):
+ print(f"margin = {margin}")
+ points = get_landmark_points(face_landmarks_list,key)
+ box= points_to_box(points)
+ if margin>0:
+ return ((box[0][0] - margin,box[0][1] - margin),(box[1][0] + margin, box[1][1] + margin))
+ else:
+ return box
+
+#for size up
+
+def create_moved_image(image,src_points,dst_points,force_size=None):
+    # keep the top of the lip stable; the affine transform needs 4 points
+ #print(f"src = {src_points}")
+ #print(f"dst = {dst_points}")
+ src_pts=np.array([src_points],dtype=np.float32)
+ dst_pts=np.array([dst_points],dtype=np.float32)
+ #BORDER_REPLICATE
+ return warp_with_auto_resize(image, src_pts, dst_pts,cv2.BORDER_REPLICATE,force_size)
+
+# lip-index
+"""
+ 1 2 3 4 5
+0 6
+ 11 10 9 8 7
+"""
+def get_top_lip_align_points(face_landmarks_list):
+ landmark=get_landmark_points(face_landmarks_list,TOP_LIP)
+ index_center = 3
+ index_right= 0 #mirror
+ #index_ritht_top= 2 #mirror
+ #index_left_top= 4 #mirror
+ index_left = 6
+ #if landmark_name ==key:
+ # 0 is right edge
+ x1 = landmark[index_right][0]
+ y1 = landmark[index_right][1]
+ # 6 is left edge
+ x2 = landmark[index_left][0]
+ y2 = landmark[index_left][1]
+
+ #left_top = landmark[index_left_top][1]
+ #right_top = landmark[index_ritht_top][1]
+    #top = left_top if left_top < right_top else right_top
+    return ((x1, y1), (x2, y2)) # (reconstructed return; the rest of this body was garbled in the diff)
+
+def points_to_box(points):
+    min_x = float('inf')
+    min_y = float('inf')
+    max_x = 0
+    max_y = 0
+    for point in points:
+        if point[0]>max_x:
+            max_x=int(point[0])
+        if point[1]>max_y:
+            max_y=int(point[1])
+        if point[0]<min_x:
+            min_x=int(point[0])
+        if point[1]<min_y:
+            min_y=int(point[1])
+    return ((min_x, min_y), (max_x, max_y))
+
+# NOTE: a block of this file was garbled in the diff; points_to_box above is
+# reconstructed from its call sites, and the helper below is a hypothetical
+# reconstruction (its original name and signature did not survive), modeled on
+# create_mask_from_points further down.
+def create_masked_rgba(image, points, dilation_size=4, gaussian_size=4):
+    box = points_to_box(points)
+    image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
+    np_points = np.array(points, dtype=np.int32)
+    mask = np.zeros(image.shape[:2], dtype="uint8")
+    cv2.fillPoly(mask, [np_points], 255)
+    kernel = np.ones((abs(dilation_size), abs(dilation_size)), np.uint8)
+    dilated_mask = cv2.dilate(mask, kernel, iterations=1)
+    if gaussian_size > 0:
+ smooth_mask = cv2.GaussianBlur(dilated_mask, (0,0 ), sigmaX=gaussian_size, sigmaY=gaussian_size)
+ expanded_mask = np.expand_dims(smooth_mask, axis=-1)
+ else:
+ expanded_mask = np.expand_dims(dilated_mask, axis=-1)
+
+ #lip_utils.print_numpy(image_rgba,"rgba")
+ #lip_utils.print_numpy(smooth_mask,"smooth")
+ #lip_utils.print_numpy(expanded_mask,"expanded_mask")
+
+ image_rgba[..., 3] = expanded_mask[..., 0]
+
+
+ return image_rgba,box
+
+def apply_mask(image,mask):
+ if len(mask.shape) == 3:
+ expanded_mask = mask
+ else:
+ expanded_mask = np.expand_dims(mask, axis=-1)
+
+    if len(image.shape)!=3:
+        error = f"image must have 3 dimensions {image.shape}"
+        raise ValueError(error)
+
+ if get_channel(image)!=4:
+        image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA) # add an alpha channel
+ else:
+ image_rgba = image
+ image_rgba[..., 3] = expanded_mask[..., 0]
+ return image_rgba
+
+def apply_mask_alpha(image,mask,invert=False):
+ if len(mask.shape) == 3:
+ expanded_mask = mask
+ else:
+ expanded_mask = np.expand_dims(mask, axis=-1)
+
+ image_rgba = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
+ if invert:
+ image_rgba[..., 3] = expanded_mask[..., 0]
+ else:
+ image_rgba[..., 3] = 255 - expanded_mask[..., 0]
+ return image_rgba
+
+def print_width_height(image,label):
+ new_h,new_w = get_image_size(image)
+ print(f"{label}:width = {new_w} height = {new_h}")
+
+
+def create_mask_from_points(img,points,dilation_size=4,gaussian_size=4):
+ np_points = np.array(points,dtype=np.int32)
+ mask = np.zeros(img.shape[:2], dtype="uint8")
+ cv2.fillPoly(mask, [np_points], 255)
+
+ kernel = np.ones((abs(dilation_size),abs(dilation_size) ), np.uint8)
+ if dilation_size > 0:
+ dilated_mask = cv2.dilate(mask, kernel, iterations=1)
+ else:
+ dilated_mask = cv2.erode(mask, kernel, iterations=1) # TODO support dilation_size
+ # Gaussian Blur
+ if gaussian_size > 0:
+ smooth_mask = cv2.GaussianBlur(dilated_mask, (0,0 ), sigmaX=gaussian_size, sigmaY=gaussian_size)
+ expanded_mask = np.expand_dims(smooth_mask, axis=-1)
+ else:
+ expanded_mask = np.expand_dims(dilated_mask, axis=-1)
+ return expanded_mask
+ #lip_utils.print_numpy(image_rgba,"rgba")
+ #lip_utils.print_numpy(smooth_mask,"smooth")
+ #lip_utils.print_numpy(expanded_mask,"expanded_mask")
+
+def mid_points(point1,point2):
+ return [sum(x) / 2 for x in zip(point1,point2)]
+
+def lerp_points(point1, point2, lerp):
+ return [(1.0 - lerp) * p1 + lerp * p2 for p1, p2 in zip(point1, point2)]
+
+def get_jaw_points(face_landmarks_list):
+ chin_points = get_landmark_points(face_landmarks_list,POINTS_CHIN)
+ bottom_lip_points = get_landmark_points(face_landmarks_list,POINTS_BOTTOM_LIP)
+
+ points =[]
+
+ points.extend(chin_points[4:13])
+ points.append(mid_points(chin_points[12],bottom_lip_points[0]))
+ points.append(mid_points(chin_points[8],bottom_lip_points[3]))
+ points.append(mid_points(chin_points[4],bottom_lip_points[6]))
+
+ return points
+
+def get_bottom_mid_drop_size(open_size_y,lip_height):
+    # when fully open (open_size_y == 40) the lip drops to half its height
+ mid_lip_move_ratio = open_size_y/80.0 if open_size_y>0 else 0
+ return mid_lip_move_ratio*lip_height
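+# Worked example (hedged): with open_size_y=8 the ratio is 8/80 = 0.1, so a lip
+# 30px high drops by get_bottom_mid_drop_size(8, 30) == 3.0 pixels; at the full
+# opening of 40 the ratio is 0.5, i.e. half the lip height.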
+
+
+def fade_in_x(img,size):
+ if size==0:
+ return
+ per_pixel = 1.0/size
+ for y in range(img.shape[0]):
+ for x in range(img.shape[1]):
+
+            # left-edge fade-in (this branch was garbled in the diff and is reconstructed)
+            if x < size:
+                alpha_base = per_pixel * x
+                img[y, x, 3] = img[y, x, 3] * alpha_base
+            # right-edge fade-out
+            w = img.shape[1] - size
+            if x > w:
+                diff = x - w
+                alpha_base = 1.0 - (per_pixel * diff)
+                # change the alpha value and write it back to the pixel
+                #print(f"before x ={x} = {img[y,x,3]} after = {img[y,x,3] * alpha_base}")
+                img[y, x, 3] = img[y,x,3] * alpha_base
+
+
+def alpha_blend_with_image2_alpha(image1, image2):
+ return cv2.addWeighted(image1, 1, image2, 1, 0)
+
+def numpy_alpha_blend_with_image2_alpha(image1, image2,invert=False):
+    """
+    Alpha-blend image1 and image2, using image2's alpha channel.
+    """
+    # check the image sizes and resize image1 if they differ
+ if image1.shape[:2] != image2.shape[:2]:
+ image1 = cv2.resize(image1, (image2.shape[1], image2.shape[0]))
+
+ src1 = np.array(image1)
+ src2 = np.array(image2)
+ mask1 = np.array(image2[:, :, 3])
+ mask1 = mask1 / 255
+ mask1 = np.expand_dims(mask1, axis=-1)
+ if invert:
+ dst = src1 * (1-mask1) + src2 * mask1
+ else:
+ dst = src1 * mask1 + src2 * (1 - mask1)
+    # the alpha blend result
+ #blended = cv2.cvtColor(dst, cv2.COLOR_BGRA2BGRA)
+ dst = dst.astype(np.uint8)
+ return dst
+
+def distance_2d(point1, point2):
+ return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)
+
+# points[index][x=0, y=1]; for what each index means, see the landmark image produced by plot2.py
+def get_top_lip_thicks(landmarks_list,is_distance_base=False):
+ points = get_landmark_points(landmarks_list,POINTS_TOP_LIP)
+ if is_distance_base:
+ return (distance_2d(points[10],points[2]),distance_2d(points[9],points[3]),distance_2d(points[8],points[4]))
+ return (points[10][1] -points[2][1],points[9][1] -points[3][1],points[8][1] -points[4][1])
+
+
+def scale_down_values(data, scale_factor=0.25):
+ """
+ Scales down the values in a list of dictionaries by a given scale factor.
+
+ Parameters:
+ - data: A list of dictionaries where each dictionary represents facial landmarks.
+ - scale_factor: The factor by which to scale down the values. Default is 0.25 (1/4).
+
+ Returns:
+ - A new list of dictionaries with scaled down values.
+ """
+ scaled_data = []
+ for item in data:
+ scaled_item = {}
+ for key, values in item.items():
+ scaled_values = [(int(x * scale_factor), int(y * scale_factor)) for x, y in values]
+ scaled_item[key] = scaled_values
+ scaled_data.append(scaled_item)
+ return scaled_data
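+# Usage sketch (hedged): shrink cached landmarks to match a quarter-size image.
+#   small = scale_down_values([{"chin": [(100, 200), (120, 240)]}], 0.25)
+#   # -> [{"chin": [(25, 50), (30, 60)]}]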
+
diff --git a/mp_box.py b/mp_box.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d42f19122b764961dbd846571221049329ee203
--- /dev/null
+++ b/mp_box.py
@@ -0,0 +1,133 @@
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+from mediapipe.framework.formats import landmark_pb2
+from mediapipe import solutions
+import numpy as np
+
+# convert X,Y,W,H to x1,y1,x2,y2 (left-top / right-bottom style)
+def xywh_to_xyxy(box):
+ return [box[0],box[1],box[0]+box[2],box[1]+box[3]]
+
+def convert_to_box(face_landmarks_list,indices,w=1024,h=1024):
+ x1=w
+ y1=h
+ x2=0
+ y2=0
+ for index in indices:
+ x=min(w,max(0,(face_landmarks_list[0][index].x*w)))
+ y=min(h,max(0,(face_landmarks_list[0][index].y*h)))
+        if x<x1:
+            x1=x
+        if y<y1:
+            y1=y
+        if x>x2:
+ x2=x
+ if y>y2:
+ y2=y
+
+
+ return [int(x1),int(y1),int(x2-x1),int(y2-y1)]
+
+
+def box_to_square(bbox):
+ box=list(bbox)
+ if box[2]>box[3]:
+ diff = box[2]-box[3]
+ box[3]+=diff
+ box[1]-=diff/2
+ elif box[3]>box[2]:
+ diff = box[3]-box[2]
+ box[2]+=diff
+ box[0]-=diff/2
+ return box
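+# Worked example (hedged): the shorter side grows to match the longer one and the
+# origin shifts by half the difference, keeping the box centered:
+#   box_to_square([10, 20, 100, 60])  # -> [10, 0.0, 100, 100]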
+
+
+def face_landmark_result_to_box(face_landmarker_result,width=1024,height=1024):
+ face_landmarks_list = face_landmarker_result.face_landmarks
+
+
+ full_indices = list(range(456))
+
+ MIDDLE_FOREHEAD = 151
+ BOTTOM_CHIN_EX = 152
+ BOTTOM_CHIN = 175
+ CHIN_TO_MIDDLE_FOREHEAD = [200,14,1,6,18,9]
+ MOUTH_BOTTOM = [202,200,422]
+ EYEBROW_CHEEK_LEFT_RIGHT = [46,226,50,1,280,446,276]
+
+ LEFT_HEAD_OUTER_EX = 251 #on side face almost same as full
+ LEFT_HEAD_OUTER = 301
+ LEFT_EYE_OUTER_EX = 356
+ LEFT_EYE_OUTER = 264
+ LEFT_MOUTH_OUTER_EX = 288
+ LEFT_MOUTH_OUTER = 288
+ LEFT_CHIN_OUTER = 435
+ RIGHT_HEAD_OUTER_EX = 21
+ RIGHT_HEAD_OUTER = 71
+ RIGHT_EYE_OUTER_EX = 127
+ RIGHT_EYE_OUTER = 34
+ RIGHT_MOUTH_OUTER_EX = 58
+ RIGHT_MOUTH_OUTER = 215
+ RIGHT_CHIN_OUTER = 150
+
+ # TODO naming line
+ min_indices=CHIN_TO_MIDDLE_FOREHEAD+EYEBROW_CHEEK_LEFT_RIGHT+MOUTH_BOTTOM
+
+ chin_to_brow_indices = [LEFT_CHIN_OUTER,LEFT_MOUTH_OUTER,LEFT_EYE_OUTER,LEFT_HEAD_OUTER,MIDDLE_FOREHEAD,RIGHT_HEAD_OUTER,RIGHT_EYE_OUTER,RIGHT_MOUTH_OUTER,RIGHT_CHIN_OUTER,BOTTOM_CHIN]+min_indices
+
+ box1 = convert_to_box(face_landmarks_list,min_indices,width,height)
+ box2 = convert_to_box(face_landmarks_list,chin_to_brow_indices,width,height)
+ box3 = convert_to_box(face_landmarks_list,full_indices,width,height)
+ #print(box)
+
+ return [box1,box2,box3,box_to_square(box1),box_to_square(box2),box_to_square(box3)]
+
+
+def draw_landmarks_on_image(detection_result,rgb_image):
+ face_landmarks_list = detection_result.face_landmarks
+ annotated_image = np.copy(rgb_image)
+
+ # Loop through the detected faces to visualize.
+ for idx in range(len(face_landmarks_list)):
+ face_landmarks = face_landmarks_list[idx]
+
+ # Draw the face landmarks.
+ face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+ face_landmarks_proto.landmark.extend([
+ landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
+ ])
+
+ solutions.drawing_utils.draw_landmarks(
+ image=annotated_image,
+ landmark_list=face_landmarks_proto,
+ connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,
+ landmark_drawing_spec=None,
+ connection_drawing_spec=mp.solutions.drawing_styles
+ .get_default_face_mesh_tesselation_style())
+
+ return annotated_image
+
+def mediapipe_to_box(image_data,model_path="face_landmarker.task"):
+ BaseOptions = mp.tasks.BaseOptions
+ FaceLandmarker = mp.tasks.vision.FaceLandmarker
+ FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
+ VisionRunningMode = mp.tasks.vision.RunningMode
+
+ options = FaceLandmarkerOptions(
+ base_options=BaseOptions(model_asset_path=model_path),
+ running_mode=VisionRunningMode.IMAGE
+ ,min_face_detection_confidence=0, min_face_presence_confidence=0
+ )
+
+
+ with FaceLandmarker.create_from_options(options) as landmarker:
+ if isinstance(image_data,str):
+ mp_image = mp.Image.create_from_file(image_data)
+ else:
+ mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(image_data))
+ face_landmarker_result = landmarker.detect(mp_image)
+ boxes = face_landmark_result_to_box(face_landmarker_result,mp_image.width,mp_image.height)
+ return boxes,mp_image,face_landmarker_result
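+
+# Usage sketch (hedged: "face.jpg" is illustrative; face_landmarker.task must exist):
+#   boxes, mp_image, result = mediapipe_to_box("face.jpg")
+#   x, y, w, h = boxes[0]  # first of the six boxes from face_landmark_result_to_box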
\ No newline at end of file
diff --git a/mp_constants.py b/mp_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2ae64286923ed091c4683ee012a6863ff10e488
--- /dev/null
+++ b/mp_constants.py
@@ -0,0 +1,320 @@
+
+# contour
+POINT_LEFT_HEAD_OUTER_EX = 251 #on side face almost same as full
+POINT_LEFT_HEAD_OUTER = 301
+POINT_LEFT_EYE_OUTER_EX = 356
+POINT_LEFT_EYE_OUTER = 264
+POINT_LEFT_MOUTH_OUTER_EX = 288
+POINT_LEFT_MOUTH_OUTER = 435
+POINT_LEFT_CHIN_OUTER = 379
+POINT_RIGHT_HEAD_OUTER_EX = 21
+POINT_RIGHT_HEAD_OUTER = 71
+POINT_RIGHT_EYE_OUTER_EX = 127
+POINT_RIGHT_EYE_OUTER = 34
+POINT_RIGHT_MOUTH_OUTER_EX = 58
+POINT_RIGHT_MOUTH_OUTER = 215
+POINT_RIGHT_CHIN_OUTER = 150
+POINT_CHIN_BOTTOM = 152
+
+POINT_FOREHEAD_TOP = 10
+
+POINT_UPPER_LIP_CENTER_BOTTOM=13
+POINT_LOWER_LIP_CENTER_TOP=14
+POINT_LOWER_LIP_CENTER_BOTTOM=17
+POINT_NOSE_CENTER_MIDDLE=5
+
+LINE_RIGHT_CONTOUR_OUTER_EYE_TO_CHIN =[127,234,93,132,58,172,136,150,149,176,148,152]
+LINE_RIGHT_CONTOUR_EYE_TO_CHIN = [34,227,137,177,215,138,135,169,170,140,171,175]
+LINE_RIGHT_CONTOUR_INNER_EYE_TO_CHIN =[143,116,123,147,213,192,214,210,211,32,208,199]
+
+
+LINE_RIGHT_CONTOUR_0 = [152,175,199]
+LINE_RIGHT_CONTOUR_1 = [148,171,208]
+LINE_RIGHT_CONTOUR_2 = [176,140,32]
+LINE_RIGHT_CONTOUR_3 = [149,170,211]
+LINE_RIGHT_CONTOUR_4 = [150,169,210]
+LINE_RIGHT_CONTOUR_5 = [136,135,214]
+LINE_RIGHT_CONTOUR_6 = [172,138,192]
+LINE_RIGHT_CONTOUR_7 = [58,215,213]
+LINE_RIGHT_CONTOUR_8 = [132,177,147]
+LINE_RIGHT_CONTOUR_9 = [93,137,123]
+LINE_RIGHT_CONTOUR_10 = [234,227,116]
+LINE_RIGHT_CONTOUR_11 = [127,34,143]
+
+LANDMARK_68_CONTOUR_1 = LINE_RIGHT_CONTOUR_11
+LANDMARK_68_CONTOUR_2_PART1 = LINE_RIGHT_CONTOUR_10
+LANDMARK_68_CONTOUR_2_PART2 = LINE_RIGHT_CONTOUR_9
+LANDMARK_68_CONTOUR_3 = LINE_RIGHT_CONTOUR_8
+LANDMARK_68_CONTOUR_4 = LINE_RIGHT_CONTOUR_7
+LANDMARK_68_CONTOUR_5 = LINE_RIGHT_CONTOUR_6
+LANDMARK_68_CONTOUR_6_PART1 = LINE_RIGHT_CONTOUR_5
+LANDMARK_68_CONTOUR_6_PART2 = LINE_RIGHT_CONTOUR_4
+
+LANDMARK_68_CONTOUR_7 = LINE_RIGHT_CONTOUR_3
+LANDMARK_68_CONTOUR_8_PART1 = LINE_RIGHT_CONTOUR_2
+LANDMARK_68_CONTOUR_8_PART2 = LINE_RIGHT_CONTOUR_1
+LANDMARK_68_CONTOUR_9 = LINE_RIGHT_CONTOUR_0
+
+
+LINE_LEFT_CONTOUR_1 = [377,396,428]
+LINE_LEFT_CONTOUR_2 = [400,369,262]
+LINE_LEFT_CONTOUR_3 = [378,395,431]
+LINE_LEFT_CONTOUR_4 = [379,394,430]
+LINE_LEFT_CONTOUR_5 = [365,364,434]
+LINE_LEFT_CONTOUR_6 = [397,367,416]
+LINE_LEFT_CONTOUR_7 = [288,435,433]
+LINE_LEFT_CONTOUR_8 = [361,401,376]
+LINE_LEFT_CONTOUR_9 = [323,366,352]
+LINE_LEFT_CONTOUR_10 = [454,447,345]
+LINE_LEFT_CONTOUR_11 = [356,264,372]
+LINE_LEFT_CONTOUR_12 = [389,368,383]
+
+LANDMARK_68_CONTOUR_10 = LINE_LEFT_CONTOUR_1
+LANDMARK_68_CONTOUR_11_PART1 = LINE_LEFT_CONTOUR_2
+LANDMARK_68_CONTOUR_11_PART2 = LINE_LEFT_CONTOUR_3
+LANDMARK_68_CONTOUR_12 = LINE_LEFT_CONTOUR_4
+LANDMARK_68_CONTOUR_13 = LINE_LEFT_CONTOUR_5
+LANDMARK_68_CONTOUR_14 = LINE_LEFT_CONTOUR_6
+LANDMARK_68_CONTOUR_15_PART1 = LINE_LEFT_CONTOUR_7
+LANDMARK_68_CONTOUR_15_PART2 = LINE_LEFT_CONTOUR_8
+
+LANDMARK_68_CONTOUR_16 = LINE_LEFT_CONTOUR_9
+LANDMARK_68_CONTOUR_17_PART1 = LINE_LEFT_CONTOUR_10
+LANDMARK_68_CONTOUR_17_PART2 = LINE_LEFT_CONTOUR_11
+
+LANDMARK_68_RIGHT_EYEBROW_18 = [70,46] #upper,lower
+LANDMARK_68_RIGHT_EYEBROW_19 = [63,53]
+LANDMARK_68_RIGHT_EYEBROW_20 = [105,52]
+LANDMARK_68_RIGHT_EYEBROW_21 = [66,65]
+LANDMARK_68_RIGHT_EYEBROW_22 = [107,55]
+
+LANDMARK_68_LEFT_EYEBROW_23 = [336,285] #upper,lower
+LANDMARK_68_LEFT_EYEBROW_24 = [296,295]
+LANDMARK_68_LEFT_EYEBROW_25 = [334,282]
+LANDMARK_68_LEFT_EYEBROW_26 = [293,283]
+LANDMARK_68_LEFT_EYEBROW_27 = [300,276]
+
+POINT_NOSE_0 = 8
+POINT_NOSE_1 = 168
+POINT_NOSE_2 = 6
+POINT_NOSE_3 = 197
+POINT_NOSE_4 = 195
+POINT_NOSE_5 = 5
+POINT_NOSE_6 = 4
+POINT_NOSE_7 = 19
+POINT_NOSE_8 = 94
+POINT_NOSE_9 = 2
+
+#side
+POINT_NOSE_10 = 98
+POINT_NOSE_11 = 97
+POINT_NOSE_12 = 326
+POINT_NOSE_13 = 327
+
+LANDMARK_68_VERTICAL_NOSE_28 =[8,168]
+LANDMARK_68_VERTICAL_NOSE_29 = [6]
+LANDMARK_68_VERTICAL_NOSE_30=[197,195]
+LANDMARK_68_VERTICAL_NOSE_31=[5,4]
+
+LANDMARK_68_HORIZONTAL_NOSE_32 =[POINT_NOSE_10]
+LANDMARK_68_HORIZONTAL_NOSE_33 = [POINT_NOSE_11]
+LANDMARK_68_HORIZONTAL_NOSE_34=[POINT_NOSE_9]
+LANDMARK_68_HORIZONTAL_NOSE_35=[POINT_NOSE_12]
+LANDMARK_68_HORIZONTAL_NOSE_36=[POINT_NOSE_13]
+
+
+LINE_VERTICAL_NOSE = [POINT_NOSE_0,POINT_NOSE_1,POINT_NOSE_2,POINT_NOSE_3,POINT_NOSE_4,POINT_NOSE_5,POINT_NOSE_6,POINT_NOSE_7,POINT_NOSE_8,POINT_NOSE_9]
+LINE_HORIZONTAL_NOSE =[POINT_NOSE_10,POINT_NOSE_11,POINT_NOSE_9,POINT_NOSE_12,POINT_NOSE_13]
+
+### EYES
+POINT_RIGHT_UPPER_INNER_EYE_1 = 33
+POINT_RIGHT_UPPER_INNER_EYE_2 = 246
+POINT_RIGHT_UPPER_INNER_EYE_3 = 161
+POINT_RIGHT_UPPER_INNER_EYE_4 = 160
+POINT_RIGHT_UPPER_INNER_EYE_5 = 159
+POINT_RIGHT_UPPER_INNER_EYE_6 = 158
+POINT_RIGHT_UPPER_INNER_EYE_7 = 157
+POINT_RIGHT_UPPER_INNER_EYE_8 = 173
+POINT_RIGHT_UPPER_INNER_EYE_9 = 133
+
+LINE_RIGHT_UPPER_INNER_EYE=[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_INNER_EYE_7,POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9]
+
+POINT_RIGHT_LOWER_INNER_EYE_1 = 155
+POINT_RIGHT_LOWER_INNER_EYE_2 = 154
+POINT_RIGHT_LOWER_INNER_EYE_3 = 153
+POINT_RIGHT_LOWER_INNER_EYE_4 = 145
+POINT_RIGHT_LOWER_INNER_EYE_5 = 144
+POINT_RIGHT_LOWER_INNER_EYE_6 = 163
+POINT_RIGHT_LOWER_INNER_EYE_7 = 7
+
+LINE_RIGHT_LOWER_INNER_EYE=[POINT_RIGHT_UPPER_INNER_EYE_9,POINT_RIGHT_LOWER_INNER_EYE_1,POINT_RIGHT_LOWER_INNER_EYE_2,POINT_RIGHT_LOWER_INNER_EYE_3,POINT_RIGHT_LOWER_INNER_EYE_4,POINT_RIGHT_LOWER_INNER_EYE_5,POINT_RIGHT_LOWER_INNER_EYE_6,POINT_RIGHT_LOWER_INNER_EYE_7,POINT_RIGHT_UPPER_INNER_EYE_1]
+
+
+POINT_RIGHT_UPPER_OUTER_EYE_1 = 130
+POINT_RIGHT_UPPER_OUTER_EYE_2 = 247
+POINT_RIGHT_UPPER_OUTER_EYE_3 = 30
+POINT_RIGHT_UPPER_OUTER_EYE_4 = 29
+POINT_RIGHT_UPPER_OUTER_EYE_5 = 27
+POINT_RIGHT_UPPER_OUTER_EYE_6 = 28
+POINT_RIGHT_UPPER_OUTER_EYE_7 = 56
+POINT_RIGHT_UPPER_OUTER_EYE_8 = 190
+POINT_RIGHT_UPPER_OUTER_EYE_9 = 243
+
+LINE_RIGHT_UPPER_OUTER_EYE=[POINT_RIGHT_UPPER_OUTER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_7,POINT_RIGHT_UPPER_OUTER_EYE_8,POINT_RIGHT_UPPER_OUTER_EYE_9]
+
+LINE_RIGHT_UPPER_MIXED_EYE =[#the first eye1 and eye2 are intentionally combined for move-up
+ [POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2], [POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_2], [POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_3], [POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_4], [POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_5], [POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_6]
+    ,[POINT_RIGHT_UPPER_INNER_EYE_8],[POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9] #not sure whether POINT_RIGHT_LOWER_INNER_EYE_1 is needed here
+]
+
+LINE_RIGHT_UPPER_MIXED_EYE2 =[#the first eye1 and eye2 are intentionally combined for move-up
+ [POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2],
+ [POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_2],
+ [POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_3],
+ [POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_4],
+ [POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_5],
+ [POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_6]
+ ,[POINT_RIGHT_UPPER_INNER_EYE_8],
+    [POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9] #not sure whether POINT_RIGHT_LOWER_INNER_EYE_1 is needed here
+]
+
+POINT_RIGHT_LOWER_OUTER_EYE_1 = 112
+POINT_RIGHT_LOWER_OUTER_EYE_2 = 26
+POINT_RIGHT_LOWER_OUTER_EYE_3 = 22
+POINT_RIGHT_LOWER_OUTER_EYE_4 = 23
+POINT_RIGHT_LOWER_OUTER_EYE_5 = 24
+POINT_RIGHT_LOWER_OUTER_EYE_6 = 110
+POINT_RIGHT_LOWER_OUTER_EYE_7 = 25
+
+LINE_RIGHT_LOWER_OUTER_EYE=[POINT_RIGHT_UPPER_OUTER_EYE_9,POINT_RIGHT_LOWER_OUTER_EYE_1,POINT_RIGHT_LOWER_OUTER_EYE_2,POINT_RIGHT_LOWER_OUTER_EYE_3,POINT_RIGHT_LOWER_OUTER_EYE_4,POINT_RIGHT_LOWER_OUTER_EYE_5,POINT_RIGHT_LOWER_OUTER_EYE_6,POINT_RIGHT_LOWER_OUTER_EYE_7,POINT_RIGHT_UPPER_OUTER_EYE_1]
+
+LINE_RIGHT_LOWER_MIXED_EYE =[
+ [POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9,POINT_RIGHT_LOWER_INNER_EYE_1]
+ ,[POINT_RIGHT_LOWER_INNER_EYE_2]
+ ,POINT_RIGHT_LOWER_INNER_EYE_3,POINT_RIGHT_LOWER_INNER_EYE_4,POINT_RIGHT_LOWER_INNER_EYE_5,POINT_RIGHT_LOWER_INNER_EYE_6,POINT_RIGHT_LOWER_INNER_EYE_7
+ ,[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2] #combine 1 and 2 for move up
+]
+
+
+POINT_LEFT_UPPER_INNER_EYE_1 = 362
+POINT_LEFT_UPPER_INNER_EYE_2 = 398
+POINT_LEFT_UPPER_INNER_EYE_3 = 384
+POINT_LEFT_UPPER_INNER_EYE_4 = 385
+POINT_LEFT_UPPER_INNER_EYE_5 = 386
+POINT_LEFT_UPPER_INNER_EYE_6 = 387
+POINT_LEFT_UPPER_INNER_EYE_7 = 388
+POINT_LEFT_UPPER_INNER_EYE_8 = 466
+POINT_LEFT_UPPER_INNER_EYE_9 = 263
+
+LINE_LEFT_UPPER_INNER_EYE=[POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_INNER_EYE_4,POINT_LEFT_UPPER_INNER_EYE_5,POINT_LEFT_UPPER_INNER_EYE_6,POINT_LEFT_UPPER_INNER_EYE_7,POINT_LEFT_UPPER_INNER_EYE_8,POINT_LEFT_UPPER_INNER_EYE_9]
+LINE_LEFT_UPPER_INNER_EYE2=[POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_INNER_EYE_4,POINT_LEFT_UPPER_INNER_EYE_5,POINT_LEFT_UPPER_INNER_EYE_6,POINT_LEFT_UPPER_INNER_EYE_7,POINT_LEFT_UPPER_INNER_EYE_8,POINT_LEFT_UPPER_INNER_EYE_9]
+
+
+
+POINT_LEFT_LOWER_INNER_EYE_1 = 249
+POINT_LEFT_LOWER_INNER_EYE_2 = 390
+POINT_LEFT_LOWER_INNER_EYE_3 = 373
+POINT_LEFT_LOWER_INNER_EYE_4 = 374
+POINT_LEFT_LOWER_INNER_EYE_5 = 380
+POINT_LEFT_LOWER_INNER_EYE_6 = 381
+POINT_LEFT_LOWER_INNER_EYE_7 = 382
+
+
+LINE_LEFT_LOWER_INNER_EYE=[POINT_LEFT_UPPER_INNER_EYE_9,POINT_LEFT_LOWER_INNER_EYE_2,POINT_LEFT_LOWER_INNER_EYE_3,POINT_LEFT_LOWER_INNER_EYE_4,POINT_LEFT_LOWER_INNER_EYE_5,POINT_LEFT_LOWER_INNER_EYE_6,POINT_LEFT_LOWER_INNER_EYE_7,POINT_LEFT_UPPER_INNER_EYE_1]
+
+#outer
+
+POINT_LEFT_UPPER_OUTER_EYE_1 = 463
+POINT_LEFT_UPPER_OUTER_EYE_2 = 414
+POINT_LEFT_UPPER_OUTER_EYE_3 = 286
+POINT_LEFT_UPPER_OUTER_EYE_4 = 258
+POINT_LEFT_UPPER_OUTER_EYE_5 = 257
+POINT_LEFT_UPPER_OUTER_EYE_6 = 259
+POINT_LEFT_UPPER_OUTER_EYE_7 = 260
+POINT_LEFT_UPPER_OUTER_EYE_8 = 467
+POINT_LEFT_UPPER_OUTER_EYE_9 = 359
+
+LINE_LEFT_UPPER_OUTER_EYE=[POINT_LEFT_UPPER_OUTER_EYE_1,POINT_LEFT_UPPER_OUTER_EYE_2,POINT_LEFT_UPPER_OUTER_EYE_3,POINT_LEFT_UPPER_OUTER_EYE_4,POINT_LEFT_UPPER_OUTER_EYE_5,POINT_LEFT_UPPER_OUTER_EYE_6,POINT_LEFT_UPPER_OUTER_EYE_7,POINT_LEFT_UPPER_OUTER_EYE_8,POINT_LEFT_UPPER_OUTER_EYE_9]
+
+
+POINT_LEFT_LOWER_OUTER_EYE_1 = 255
+POINT_LEFT_LOWER_OUTER_EYE_2 = 339
+POINT_LEFT_LOWER_OUTER_EYE_3 = 254
+POINT_LEFT_LOWER_OUTER_EYE_4 = 253
+POINT_LEFT_LOWER_OUTER_EYE_5 = 252
+POINT_LEFT_LOWER_OUTER_EYE_6 = 256
+POINT_LEFT_LOWER_OUTER_EYE_7 = 341
+
+LINE_LEFT_LOWER_OUTER_EYE=[POINT_LEFT_UPPER_OUTER_EYE_9,POINT_LEFT_LOWER_OUTER_EYE_1,POINT_LEFT_LOWER_OUTER_EYE_2,POINT_LEFT_LOWER_OUTER_EYE_3,POINT_LEFT_LOWER_OUTER_EYE_4,POINT_LEFT_LOWER_OUTER_EYE_5,POINT_LEFT_LOWER_OUTER_EYE_6,POINT_LEFT_LOWER_OUTER_EYE_7,POINT_LEFT_UPPER_OUTER_EYE_1]
+
+LINE_LEFT_UPPER_MIXED_EYE =[#the first eye1 and eye2 are intentionally combined for move-up
+ [POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_LOWER_INNER_EYE_7],
+ [POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_UPPER_OUTER_EYE_2], [POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_OUTER_EYE_3], [POINT_LEFT_UPPER_INNER_EYE_4,POINT_LEFT_UPPER_OUTER_EYE_4], [POINT_LEFT_UPPER_INNER_EYE_5,POINT_LEFT_UPPER_OUTER_EYE_5], [POINT_LEFT_UPPER_INNER_EYE_6,POINT_LEFT_UPPER_OUTER_EYE_6]
+ ,[POINT_LEFT_UPPER_INNER_EYE_8],[POINT_LEFT_UPPER_OUTER_EYE_8,POINT_LEFT_UPPER_INNER_EYE_9]
+]
+
+LINE_LEFT_UPPER_MIXED_EYE2 =[#the first eye1 and eye2 are intentionally combined for move-up
+ [POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_LOWER_INNER_EYE_7],
+ [POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_UPPER_OUTER_EYE_2],
+ [POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_INNER_EYE_3,POINT_LEFT_UPPER_OUTER_EYE_3],
+ [POINT_LEFT_UPPER_INNER_EYE_4,POINT_LEFT_UPPER_INNER_EYE_4,POINT_LEFT_UPPER_OUTER_EYE_4],
+ [POINT_LEFT_UPPER_INNER_EYE_5,POINT_LEFT_UPPER_INNER_EYE_5,POINT_LEFT_UPPER_OUTER_EYE_5],
+ [POINT_LEFT_UPPER_INNER_EYE_6,POINT_LEFT_UPPER_INNER_EYE_6,POINT_LEFT_UPPER_OUTER_EYE_6]
+ ,[POINT_LEFT_UPPER_INNER_EYE_8],
+ [POINT_LEFT_UPPER_OUTER_EYE_8,POINT_LEFT_UPPER_INNER_EYE_9]
+]
+
+LINE_LEFT_LOWER_MIXED_EYE =[
+ [POINT_LEFT_UPPER_OUTER_EYE_8,POINT_LEFT_UPPER_INNER_EYE_9]
+ ,[POINT_LEFT_LOWER_INNER_EYE_2]
+ ,POINT_LEFT_LOWER_INNER_EYE_3,POINT_LEFT_LOWER_INNER_EYE_4,POINT_LEFT_LOWER_INNER_EYE_5,POINT_LEFT_LOWER_INNER_EYE_6,POINT_LEFT_LOWER_INNER_EYE_7
+ , [POINT_LEFT_UPPER_INNER_EYE_1,POINT_LEFT_UPPER_INNER_EYE_2,POINT_LEFT_LOWER_INNER_EYE_7] #combine 1 and 2 for move up
+]
+
+
+#LIP
+LINE_RIGHT_UPPER_OUTER_LIP=[
+ 61,185,40,39,37,0
+ ]
+LINE_LEFT_UPPER_OUTER_LIP=[
+ 0,267,269,270,409,291
+ ]
+
+
+LINE_LOWER_OUTER_LIP=[291,#upper
+ 375,321,405,314,17,84,181,91,146
+ ,61 #upper
+ ]
+
+LINE_UPPER_INNER_LIP=[
+ 61,185,40,39,37,0,267,269,270,409,291
+ ]
+
+LINE_LOWER_INNER_LIP=[291,#upper
+ 375,321,405,314,17,84,181,91,146
+ ,61 #upper
+ ]
+
+LANDMARK_68_UPPER_OUTER_LIP_49 =[61]
+LANDMARK_68_UPPER_OUTER_LIP_50 =[40,39]
+LANDMARK_68_UPPER_OUTER_LIP_51 =[37]
+LANDMARK_68_UPPER_OUTER_LIP_52 =[0]
+LANDMARK_68_UPPER_OUTER_LIP_53 =[267]
+LANDMARK_68_UPPER_OUTER_LIP_54 =[270,269]
+LANDMARK_68_UPPER_OUTER_LIP_55 =[291]
+
+LANDMARK_68_LOWER_OUTER_LIP_56 =[375,321]
+LANDMARK_68_LOWER_OUTER_LIP_57 =[405,314]
+LANDMARK_68_LOWER_OUTER_LIP_58 =[17]
+LANDMARK_68_LOWER_OUTER_LIP_59 =[84,181]
+LANDMARK_68_LOWER_OUTER_LIP_60 =[146,91]
+
+LANDMARK_68_UPPER_INNER_LIP_61 =[78]
+LANDMARK_68_UPPER_INNER_LIP_62 =[81]
+LANDMARK_68_UPPER_INNER_LIP_63 =[13]
+LANDMARK_68_UPPER_INNER_LIP_64 =[311]
+LANDMARK_68_UPPER_INNER_LIP_65 =[308]
+
+LANDMARK_68_LOWER_INNER_LIP_66 =[402]
+LANDMARK_68_LOWER_INNER_LIP_67 =[14]
+LANDMARK_68_LOWER_INNER_LIP_68 =[178]
\ No newline at end of file
diff --git a/mp_utils.py b/mp_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..18d634ccc032dffa539bab3e77717242a83cdcbd
--- /dev/null
+++ b/mp_utils.py
@@ -0,0 +1,94 @@
+import math
+def calculate_distance(p1, p2):
+ """
+    Euclidean distance between two 2D points.
+ """
+ return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
+def to_int_points(points):
+ ints=[]
+ for pt in points:
+ #print(pt)
+ value = [int(pt[0]),int(pt[1])]
+ #print(value)
+ ints.append(value)
+ return ints
+
+debug = False
+def divide_line_to_points(points,divided): # return divided + 1
+ total_length = 0
+ line_length_list = []
+ for i in range(len(points)-1):
+ pt_length = calculate_distance(points[i],points[i+1])
+ total_length += pt_length
+ line_length_list.append(pt_length)
+
+ splited_length = total_length/divided
+
+ def get_new_point(index,lerp):
+ pt1 = points[index]
+ pt2 = points[index+1]
+ diff = [pt2[0] - pt1[0], pt2[1]-pt1[1]]
+ new_point = [pt1[0]+diff[0]*lerp,pt1[1]+diff[1]*lerp]
+ if debug:
+ print(f"pt1 ={pt1} pt2 ={pt2} diff={diff} new_point={new_point}")
+
+ return new_point
+
+ if debug:
+ print(f"{total_length} splitted = {splited_length} line-length-list = {len(line_length_list)}")
+ splited_points=[points[0]]
+ for i in range(1,divided):
+ need_length = splited_length*i
+ if debug:
+ print(f"{i} need length = {need_length}")
+ current_length = 0
+ for j in range(len(line_length_list)):
+ line_length = line_length_list[j]
+ current_length+=line_length
+ if current_length>need_length:
+ if debug:
+ print(f"over need length index = {j} current={current_length}")
+ diff = current_length - need_length
+
+ lerp_point = 1.0 - (diff/line_length)
+ if debug:
+ print(f"over = {diff} lerp ={lerp_point}")
+ new_point = get_new_point(j,lerp_point)
+
+ splited_points.append(new_point)
+ break
+
+ splited_points.append(points[-1]) # last one
+ splited_points=to_int_points(splited_points)
+
+ if debug:
+ print(f"sp={len(splited_points)}")
+ return splited_points
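+# Usage sketch (hedged): resample a polyline into equal-length steps.
+#   pts = divide_line_to_points([[0, 0], [10, 0]], 5)
+#   # -> [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0], [10, 0]]  (divided + 1 points)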
+
+def points_to_bbox(points):
+ x1=float('inf')
+ x2=0
+ y1=float('inf')
+ y2=0
+ for point in points:
+        if point[0]<x1:
+            x1=point[0]
+        if point[0]>x2:
+ x2=point[0]
+        if point[1]<y1:
+            y1=point[1]
+        if point[1]>y2:
+ y2=point[1]
+ return [x1,y1,x2-x1,y2-y1]
+
+def expand_bbox(bbox,left=5,top=5,right=5,bottom=5):
+ left_pixel = bbox[2]*(float(left)/100)
+ top_pixel = bbox[3]*(float(top)/100)
+ right_pixel = bbox[2]*(float(right)/100)
+ bottom_pixel = bbox[3]*(float(bottom)/100)
+ new_box = list(bbox)
+ new_box[0] -=left_pixel
+ new_box[1] -=top_pixel
+ new_box[2] +=left_pixel+right_pixel
+ new_box[3] +=top_pixel+bottom_pixel
+ return new_box
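+# Worked example (hedged): the arguments are percentages of the box size.
+#   expand_bbox([100, 100, 200, 100], left=5, top=5, right=5, bottom=5)
+#   # -> [90.0, 95.0, 220.0, 110.0]  (10px on each side, 5px above and below)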
\ No newline at end of file
diff --git a/open_mouth.py b/open_mouth.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1899565e61c9ebd2a390c247c5fc7309bb63db1
--- /dev/null
+++ b/open_mouth.py
@@ -0,0 +1,363 @@
+"""
+# Currently on hold; verifying whether to use other technologies.
+# This is temporarily patched to work on the Hugging Face Space.
+
+Script name
+batch_open_mouth.py
+
+Summary
+Creates an open-mouth image from a still image.
+
+Description
+
+Arguments
+See argparse.
+
+Known issues
+Horizontal opening does not work as well as expected.
+A pile of issues remains:
+https://github.com/akjava/lip_recognition_tools/issues
+
+Author: Akihito Miyazaki
+Created: 2024-04-23
+Changelog:
+ - 2024-04-23: initial release
+ - 2024-09-15: added hole_offset
+# To change the position inside the mouth, edit the image used by create_hole_image.py
+
+"""
+
+import cv2
+import numpy as np
+from PIL import Image
+import lip_utils
+import create_top_lip
+import create_bottom_lip
+import create_chin_image
+import create_no_mouth
+import create_hole_image
+import os
+import argparse
+import landmarks68_utils
+import math
+import sys
+
+from glibvision.common_utils import check_exists_files
+
+# the argparse version is not tested
+def parse_arguments():
+    parser = argparse.ArgumentParser(description='Open Mouth')
+    #parser.add_argument('--scale',"-sc",help='scale (improves accuracy)',default=4,type=int)
+    parser.add_argument('--no_close_lip',"-ccl",help='do not create a closed-lip image',action="store_false")
+    parser.add_argument('--landmark',"-l",help='landmark data')
+    parser.add_argument('--input',"-i",help='source image to convert (required); the mouth must be closed',required=True)
+    parser.add_argument('--output',"-o",help='where to save the image (temporary layer files are also created)')
+    parser.add_argument('--open_size_x',"-x",help='how far the mouth spreads horizontally (not working well)',default = 0,type=int)
+    parser.add_argument('--open_size_y',"-y",help='how far the mouth opens vertically (about 20 at most)',default=9,type=int)
+    parser.add_argument('--hole_offset',"-hole",help='vertical offset of the inside-mouth image',type=int,default=0)
+    parser.add_argument('--hole_image_name',"-hname",help='inside-mouth image; must exist in hole_images',default="dark01.jpg")
+    parser.add_argument('--side_edge_expand',"-see",help='how much to move the side edges',type=float,default=0.02)
+    parser.add_argument('--inside_layer_low_depth',"-illd",action="store_true",help="basically not good at small sizes, but works for large images and img2img")
+
+    lip_utils.DEBUG=True
+    #parser.add_argument('--hole_image_key',"-hi",help='inside-mouth image',default="hole_01")
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args=parse_arguments()
+    # path to the image file
+ img_path = args.input
+
+ output = args.output
+
+    if output is None:
+ base,ext = os.path.splitext(img_path)
+ output = f"{base}_{args.open_size_y:02d}.jpg"
+
+
+ #landmark = landmark_utils.create_landmarks_path(img_path,args.landmark)
+ landmark = None
+ if check_exists_files([landmark,img_path],[],False):
+        print("File error happened; exiting the app")
+ exit(1)
+ img = cv2.imread(img_path) # force load 3 channel
+ #landmarks_list = landmark_utils.load_landmarks_json(landmark)
+ #side_edge_expand = args.side_edge_expand
+ use_close_lip = args.no_close_lip
+ process_open_mouth(img,None,use_close_lip)#TODO test
+# load image and landmark data
+
+def process_open_mouth(cv_image,landmarks_list,open_size_x=0,open_size_y=8,use_close_lip=True,inside_layer_low_depth=False,hole_offset=0,hole_image_name="dark01.jpg",side_edge_expand=0.02):
+ img = cv_image
+ img_h, img_w = lip_utils.get_image_size(img)
+
+
+ ## MODIFY POINTS
+ top_points=lip_utils.get_landmark_points(landmarks_list,lip_utils.TOP_LIP)
+ print(top_points)
+
+
+
+
+ '''
+ right_outer = top_points[0]
+ left_outer = top_points[6]
+ lip_width = lip_utils.distance_2d(left_outer,right_outer)
+ #print(lip_width)
+ lip_diff = [left_outer[0]-right_outer[0],left_outer[1]-right_outer[1]]
+
+ side_edge_expand_point =[lip_diff[0]*side_edge_expand,lip_diff[1]*side_edge_expand]
+ #print(side_edge_expand_point)
+ print(f"side-edge expanded {side_edge_expand_point}")
+
+ top_points[0][0]-=int(side_edge_expand_point[0])
+ top_points[6][0]+=int(side_edge_expand_point[0])
+ top_points[0][1]-=int(side_edge_expand_point[1])
+ top_points[6][1]+=int(side_edge_expand_point[1])
+    #img = cv2.imread(img_path,cv2.IMREAD_UNCHANGED) #4-channel caused problems; use a green background
+ #img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
+ '''
+    # always create this
+    if use_close_lip: # confusing: the CLI flag uses action="store_false"
+ import close_lip
+ img,mask = close_lip.process_close_lip_image(img,landmarks_list)
+ close_lip_image = img
+ #return img
+
+
+
+
+ margin = 12
+
+
+
+ hole_points = lip_utils.get_lip_hole_points(landmarks_list)
+
+ #print(hole_points)
+
+
+ ## LIP MOVE UP
+ (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
+ left_thick,mid_thick,right_thick = lip_utils.get_top_lip_thicks(landmarks_list)
+ bottom_base = bottom_height/1.5
+
+ diff_left = max(0,int(left_thick - bottom_base))
+ diff_right = max(0,int(right_thick - bottom_base))
+ diff_mid = max(0,int((diff_right+diff_left)*0.4))
+ diff_avg = int((diff_right+diff_left)*0.5)
+
+
+    # teeth are usually misrecognized; move the points back by that amount (the 4 compares against the bottom lip)
+ fix_top_thick_hole_points = []
+ top_point = [1,2,3]
+ for idx,point in enumerate(hole_points):
+ if idx in top_point:
+ new_point = np.copy(point)
+ if idx == 2:
+                new_point[1] -= int(diff_avg*0.5) # TODO calculate
+ else:
+                new_point[1] -= int(diff_avg*1) # TODO calculate
+ fix_top_thick_hole_points.append(new_point)
+ else:
+ fix_top_thick_hole_points.append(point)
+
+
+ mask = lip_utils.create_mask_from_points(img,fix_top_thick_hole_points,2,2)
+ inverse_mask = cv2.bitwise_not(mask)
+ if lip_utils.DEBUG:
+ cv2.imwrite("holeed_mask.jpg",mask)
+ cv2.imwrite("holeed_inverse_mask.jpg",inverse_mask)
+
+ img_transparent = lip_utils.apply_mask_alpha(img,mask)
+ img_inpainted = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
+ if lip_utils.DEBUG:
+ cv2.imwrite("holeed_transparent.png",img_transparent)
+ cv2.imwrite("holeed_inpainted.jpg",img_inpainted)
+
+ #img_holed = np.copy(img)
+
+
+ ## APPLY MASK OTHER WAY TODO check later
+ #I'm not sure this logic
+ #mask_2d = mask[:, :, 0]
+ #img_holed[:, :, 3] = mask_2d
+ #cv2.imwrite("holed_image_mask.png",mask)
+
+
+    # create a gaussian-blurred image
+ gaussian_size = 10
+ gaused = cv2.GaussianBlur(img_inpainted, (0,0 ), sigmaX=gaussian_size, sigmaY=gaussian_size)
+ #img_holed = lip_utils.apply_mask_alpha(img_holed,mask)
+ #lip_utils.fill_points(hole_points,img,2,(255,0,0),(255,0,0))
+ #lip_utils.fill_points(hole_points,img,0,(255,0,0,0),(255,0,0,0))
+ if lip_utils.DEBUG:
+ cv2.imwrite("holed_gaused.jpg",gaused)
+ mask_1d = np.squeeze(mask)
+ img_inpainted[mask_1d==255] = gaused[mask_1d==255]
+
+
+ # image bitwise faild
+ thresh, binary_mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
+#result = cv2.bitwise_and(img_inpainted, gaused, mask=inverse_mask) # transform mask area
+#result = cv2.bitwise_and(gaused, result, mask=binary_mask) # transform mask area
+#result = cv2.bitwise_or(result, img_inpainted) # transform remains?
+#cv2.imwrite("holeed_bitwise.jpg",result)
+
+#exit(0)
+
+# Define a function that computes blend weights.
+# This example uses a very simple weighting method;
+# a real application could use more complex logic.
+# example
+    def calculate_weights(image):
+        # Compute the weights here; this is only a simple example.
+        # A real application could derive weights from image characteristics.
+        weights = np.ones_like(image) * 0.5 # e.g. assign a weight of 0.5 to every pixel
+        return weights
+
+# compute the weights for each image
+
+#weights1 = 1.0 - img_holed[:, :, 3] / 255.0
+#weights1 = 1.0 - mask / 255.0
+#weights2 = 1.0 - weights1
+#weights1 = calculate_weights(img)
+#weights2 = calculate_weights(img_holed)
+
+# Perform the weighted addition.
+#result = (img_holed * weights1 + img * weights2) / (weights1 + weights2)
+# Weighted addition over the RGB channels only, excluding alpha.
+#result_rgb = (img_holed[:, :, :3] * weights1[:, :, np.newaxis] + img[:, :, :3] * weights2[:, :, np.newaxis]) / (weights1[:, :, np.newaxis] + weights2[:, :, np.newaxis])
+
+# Compute the alpha channel.
+# TODO support alpha
+#result_alpha = (weights1 + weights2)
+# Merge the result with the alpha channel. #somehow errored, TODO check
+#result = cv2.merge([result_rgb, result_alpha.astype(np.uint8)])
+
+
+#result_rgb = cv2.cvtColor(img_holed, cv2.COLOR_BGRA2BGR)
+#result = result.astype(np.uint8)
+#cv2.imwrite("holed_image_mixed.jpg",result_rgb)
+#exit(0)
+
+
+
+ top_lip_layer,lip_mask = create_top_lip.process_lip_image(img_transparent,landmarks_list, margin, open_size_y, open_size_x) # note: Y comes before X here
+ hole_image = create_hole_image.process_create_hole_image(img,landmarks_list,open_size_y,open_size_x,hole_offset,hole_image_name)
+ hole_image_apply_mask = lip_utils.apply_mask(hole_image,lip_mask)
+ if lip_utils.DEBUG:
+ cv2.imwrite("hole_image.jpg",hole_image)
+ cv2.imwrite("hole_image_apply_mask.png",hole_image_apply_mask)
+
+ bottom_lip_layer = create_bottom_lip.process_lip_image(img,landmarks_list, margin, open_size_y*2, open_size_x)
+ chin_layer = create_chin_image.process_chin_image(img,landmarks_list, margin, open_size_y, open_size_x)
+
+ no_mouth_face = create_no_mouth.process_create_no_mouth_image(img,landmarks_list)
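+ # at this point all five layers exist: the face with the mouth removed, the
+ # masked mouth-interior (hole) image, the top lip, the bottom lip, and the chin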
+
+
+ chin_points = lip_utils.get_landmark_points(landmarks_list,lip_utils.POINTS_CHIN)
+ points =[]
+ lip_points = lip_utils.get_lip_mask_points(landmarks_list)
+ center_lips = lip_points[2:5]
+ print("center")
+ print(center_lips)
+ center_lips = center_lips[::-1]
+ print(center_lips)
+ points.extend(center_lips+chin_points[4:13])
+ print(points)
+ for i in range(4,len(points)-1):
+ points[i][1] += open_size_y
+
+ jaw_mask_line = lip_utils.create_mask(no_mouth_face,(0,0,0))
+ cv2.polylines(jaw_mask_line, [np.array(points)], isClosed=True, color=(0,255,0), thickness=1)
+ if lip_utils.DEBUG:
+ cv2.imwrite("open_mouth_jaw_mask_line.jpg",jaw_mask_line)
+
+ dilation_size=3
+ jaw_mask = lip_utils.create_mask_from_points(img,points,dilation_size,3)
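+ # jaw_mask covers the region displaced by the opened jaw; the (currently
+ # unreachable) save block below would write it out as *_mask.jpg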
+ if lip_utils.DEBUG:
+ cv2.imwrite("open_mouth_jaw_mask.jpg",jaw_mask)
+
+
+
+ from PIL import Image
+
+ def convert_cv2_to_pil(cv2_img,is_bgra=True):
+ """
+ Convert an OpenCV (cv2) image into a PIL image.
+ """
+ # convert BGR(A) channel order to RGB(A)
+ if is_bgra:
+ rgb_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGRA2RGBA)
+ else:
+ rgb_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB) # fix: rgb_img was undefined when is_bgra=False
+ # create the PIL Image object
+ pil_img = Image.fromarray(rgb_img)
+ return pil_img
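+ # usage: convert_cv2_to_pil(layer) for the BGRA layers built above, or
+ # convert_cv2_to_pil(frame, is_bgra=False) for a plain BGR image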
+
+ # build the list of layer images as PIL RGBA frames
+ pil_images =[]
+ #pil_images.append(Image.open("face_no_lip.jpg").convert("RGBA"))
+
+ # the ordering below was wrong: the hole image came out too weak
+ #layers = [no_mouth_face,hole_image,top_lip_layer,bottom_lip_layer,chin_layer]
+
+ # this order is right; having the chin second was wrong - the animation ghosted
+ if inside_layer_low_depth:
+ layers = [no_mouth_face,hole_image_apply_mask,top_lip_layer,chin_layer,bottom_lip_layer]
+ else:
+ layers = [no_mouth_face,top_lip_layer,chin_layer,bottom_lip_layer,hole_image_apply_mask]
+ for layer in layers:
+ pil_images.append(convert_cv2_to_pil(layer))
+
+
+ # composite the layers bottom-up; each layer's alpha decides what shows through
+ output_image = None
+ for pil_image in pil_images:
+ if output_image is None:
+ output_image = pil_image
+ else:
+ output_image = Image.alpha_composite(output_image, pil_image)
+
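+ # a more compact equivalent (a sketch, assuming at least one layer exists):
+ # from functools import reduce
+ # output_image = reduce(Image.alpha_composite, pil_images)
+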
+
+ output_image = output_image.convert("RGB")
+
+ #import webp
+ #webp.save_images(pil_images, 'anim.webp', fps=10, lossless=True)
+
+ return output_image
+
+ # NOTE: everything below this return is unreachable; it appears to be leftover
+ # file-saving code from the standalone (pre-Gradio) version of this script
+ name,ext = os.path.splitext(output)
+
+ if ext == "":
+ output += ".jpg"
+
+ output_image_path = output.replace(".png",".jpg")
+ output_image.save(output_image_path) # force-save as JPEG
+ cv2.imwrite(output_image_path.replace(".jpg","_mask.jpg"),jaw_mask)
+
+ if close_lip_image is not None:
+ # per-frame saving is on hold: it would force generating sequentially numbered files
+ #close_lip_path = f"{name}_close-lip.jpg"
+ cv2.imwrite("close-lip.jpg",close_lip_image)
+
+ print(f"open-mouth created {output}")
+ #cv2.imwrite(output,output_image)
+
+ # save as an animation for later checking - TODO: add an option for this
+
+
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f943d679964ba24e1cc071b6a35560ec4aa255a8
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+numpy
+torch
+spaces
+mediapipe
+opencv-python
\ No newline at end of file