# NOTE: I'm truly sorry, but I must admit this code is very confusing.


"""
# Currently on hold,Verifying whether to use other technologies.
# this is temporaly fixed for work huggingface space

スクリプト名
batch_open_mouth.py

概要
静止画から口を開ける画像を作成

説明

引数
argparseを参照

不具合
横方向は思ったより機能していない。
Issueが山盛り
https://github.com/akjava/lip_recognition_tools/issues

著者: Akihito Miyazaki
作成日: 2024-04-23
更新履歴:
  - 2024-04-23: 最初のリリース
  - 2024-09-15:hole_offsetを追加
# 口の中の位置は、create_hole_image.pyの画像を変更すること

"""

import cv2
import numpy as np
from PIL import Image
import lip_utils
import create_top_lip
import create_bottom_lip
import create_chin_image
import create_no_mouth
import create_hole_image
import os
import argparse
import landmarks68_utils
import math
import sys

from glibvision.common_utils import check_exists_files

# NOTE: the arg (command-line) version is not tested.
def parse_arguments():
   parser = argparse.ArgumentParser(description='Open Mouth')
   #parser.add_argument('--scale',"-sc",help='scale; improves accuracy',default=4,type=int)
   parser.add_argument('--no_close_lip',"-ccl",help='do not create the closed-lip image',action="store_false")
   parser.add_argument('--landmark',"-l",help='landmark data')
   parser.add_argument('--input',"-i",help='source image to convert (required); the mouth must be closed',required=True)
   parser.add_argument('--output',"-o",help='where to save the image (temporary layer files are also created separately)')
   parser.add_argument('--open_size_x',"-x",help='how far the mouth widens horizontally (does not work very well)',default=0,type=int)
   parser.add_argument('--open_size_y',"-y",help='how far the mouth opens vertically (up to about 20)',default=9,type=int)
   parser.add_argument('--hole_offset',"-hole",help='vertical offset of the inner-mouth (hole) image',type=int,default=0)
   parser.add_argument('--hole_image_name',"-hname",help='inner-mouth image; it must be located inside hole_images',default="dark01.jpg")
   parser.add_argument('--side_edge_expand',"-see",help='how much to move the side edges of the lips',type=float,default=0.02)
   parser.add_argument('--inside_layer_low_depth',"-illd",action="store_true",help="basically not good for small sizes, but works for large images and img2img")

   lip_utils.DEBUG = True # enables the debug image dumps below
   #parser.add_argument('--hole_image_key',"-hi",help='inner-mouth image',default="hole_01")
   return parser.parse_args()

def main():
   args = parse_arguments()
   # path of the input image file
   img_path = args.input

   output = args.output

   if output is None:
      base,ext = os.path.splitext(img_path)
      output = f"{base}_{args.open_size_y:02d}.jpg"

   #landmark = landmark_utils.create_landmarks_path(img_path,args.landmark)
   landmark = None
   if check_exists_files([landmark,img_path],[],False):
      print("File error happened; exiting the app")
      exit(1)

   # LOAD image and landmark data
   img = cv2.imread(img_path) # force-load as 3 channels
   #landmarks_list = landmark_utils.load_landmarks_json(landmark)
   #side_edge_expand = args.side_edge_expand
   use_close_lip = args.no_close_lip
   output_image = process_open_mouth(img,None, # TODO test: landmarks_list is still None here
                                     open_size_x=args.open_size_x,open_size_y=args.open_size_y,
                                     use_close_lip=use_close_lip,
                                     inside_layer_low_depth=args.inside_layer_low_depth,
                                     hole_offset=args.hole_offset,hole_image_name=args.hole_image_name,
                                     side_edge_expand=args.side_edge_expand)
   output_image.save(output)
   print(f"open-mouth created {output}")

def process_open_mouth(cv_image,landmarks_list,open_size_x=0,open_size_y=8,use_close_lip=True,inside_layer_low_depth=False,hole_offset=0,hole_image_name="dark01.jpg",side_edge_expand=0.02):
   """Create an open-mouth image from a closed-mouth face image and return it as a PIL RGB image.

   The face is split into layers (face without mouth, inner mouth, top lip, bottom lip, chin)
   via the create_* helper modules, and the layers are alpha-composited back together.
   """
   img = cv_image
   img_h, img_w = lip_utils.get_image_size(img)
   

   ## MODIFY POINTS
   top_points=lip_utils.get_landmark_points(landmarks_list,lip_utils.TOP_LIP)
   print(top_points)



   
   '''
   right_outer = top_points[0]
   left_outer = top_points[6]
   lip_width = lip_utils.distance_2d(left_outer,right_outer)
   #print(lip_width)
   lip_diff = [left_outer[0]-right_outer[0],left_outer[1]-right_outer[1]]

   side_edge_expand_point =[lip_diff[0]*side_edge_expand,lip_diff[1]*side_edge_expand]
   #print(side_edge_expand_point)
   print(f"side-edge expanded {side_edge_expand_point}")

   top_points[0][0]-=int(side_edge_expand_point[0])
   top_points[6][0]+=int(side_edge_expand_point[0])
   top_points[0][1]-=int(side_edge_expand_point[1])
   top_points[6][1]+=int(side_edge_expand_point[1])
   #img = cv2.imread(img_path,cv2.IMREAD_UNCHANGED) #4channel got problem use green back
   #img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
   '''
   # always created
   if use_close_lip: # note: --no_close_lip is a confusing "store_false" flag
      import close_lip
      img,mask = close_lip.process_close_lip_image(img,landmarks_list)
      close_lip_image = img
      #return img


  

   margin = 12



   hole_points = lip_utils.get_lip_hole_points(landmarks_list)

   #print(hole_points)


   ## LIP MOVE UP
   (bottom_width,bottom_height)=lip_utils.get_bottom_lip_width_height(landmarks_list)
   left_thick,mid_thick,right_thick = lip_utils.get_top_lip_thicks(landmarks_list)
   bottom_base =  bottom_height/1.5

   diff_left = max(0,int(left_thick - bottom_base))
   diff_right = max(0,int(right_thick - bottom_base))
   diff_mid = max(0,int((diff_right+diff_left)*0.4))
   diff_avg = int((diff_right+diff_left)*0.5)


   # The teeth are usually misrecognized (as lip), so move back by that amount; the 4 comes from comparison with the bottom lip.
   fix_top_thick_hole_points = []
   top_point = [1,2,3] # indices of the top-edge hole points to adjust
   for idx,point in enumerate(hole_points):
      if idx in top_point:
         new_point = np.copy(point)
         if idx == 2:
            new_point[1] -= int(diff_avg*0.5) # TODO calculate properly
         else:
            new_point[1] -= int(diff_avg*1) # TODO calculate properly
         fix_top_thick_hole_points.append(new_point)
      else:
         fix_top_thick_hole_points.append(point)


   mask = lip_utils.create_mask_from_points(img,fix_top_thick_hole_points,2,2)
   inverse_mask = cv2.bitwise_not(mask)
   if lip_utils.DEBUG:
      cv2.imwrite("holeed_mask.jpg",mask)
      cv2.imwrite("holeed_inverse_mask.jpg",inverse_mask)

   img_transparent = lip_utils.apply_mask_alpha(img,mask)
   img_inpainted = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA)
   if lip_utils.DEBUG:
      cv2.imwrite("holeed_transparent.png",img_transparent)
      cv2.imwrite("holeed_inpainted.jpg",img_inpainted)

   #img_holed = np.copy(img)


   ## APPLY MASK OTHER WAY TODO check later
   #I'm not sure about this logic
   #mask_2d = mask[:, :, 0] 
   #img_holed[:, :, 3] = mask_2d
   #cv2.imwrite("holed_image_mask.png",mask)


   # create a Gaussian-blurred image and copy it into the inpainted hole region
   gaussian_size = 10
   gaused = cv2.GaussianBlur(img_inpainted, (0,0), sigmaX=gaussian_size, sigmaY=gaussian_size)
   #img_holed = lip_utils.apply_mask_alpha(img_holed,mask)
   #lip_utils.fill_points(hole_points,img,2,(255,0,0),(255,0,0))
   #lip_utils.fill_points(hole_points,img,0,(255,0,0,0),(255,0,0,0))
   if lip_utils.DEBUG:
      cv2.imwrite("holed_gaused.jpg",gaused)
   mask_1d = np.squeeze(mask)
   img_inpainted[mask_1d==255] = gaused[mask_1d==255] # smooth the inpainted area with the blurred pixels


   # image bitwise approach failed
   thresh, binary_mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
   #result = cv2.bitwise_and(img_inpainted, gaused, mask=inverse_mask) # transform mask area
   #result = cv2.bitwise_and(gaused, result, mask=binary_mask) # transform mask area
   #result = cv2.bitwise_or(result, img_inpainted) # transform remains?
   #cv2.imwrite("holeed_bitwise.jpg",result)

   #exit(0)

   # Define a function that computes blend weights.
   # This example uses a simple weighting scheme, but a real application
   # could use more complex logic.
   # example
   def calculate_weights(image):
      # Compute the weights here. This shows only a simple example;
      # a real application could compute weights based on image characteristics.
      weights = np.ones_like(image) * 0.5 # e.g. assign a weight of 0.5 to every pixel
      return weights

   # Compute the weights for each image.

   #weights1 = 1.0 - img_holed[:, :, 3] / 255.0
   #weights1 = 1.0 - mask / 255.0
   #weights2 = 1.0 - weights1
   #weights1 = calculate_weights(img)
   #weights2 = calculate_weights(img_holed)

   # Weighted addition.
   #result = (img_holed * weights1 + img * weights2) / (weights1 + weights2)
   # Weighted addition over the RGB channels only, excluding the alpha channel.
   #result_rgb = (img_holed[:, :, :3] * weights1[:, :, np.newaxis] + img[:, :, :3] * weights2[:, :, np.newaxis]) / (weights1[:, :, np.newaxis] + weights2[:, :, np.newaxis])

   # Compute the alpha channel.
   # TODO support alpha
   #result_alpha = (weights1 + weights2)
   # Merge the result with the alpha channel. #somehow errors, TODO check
   #result = cv2.merge([result_rgb, result_alpha.astype(np.uint8)])


   #result_rgb = cv2.cvtColor(img_holed, cv2.COLOR_BGRA2BGR)
   #result = result.astype(np.uint8)
   #cv2.imwrite("holed_image_mixed.jpg",result_rgb)
   #exit(0)
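
   # Build the individual layers that are composited below: the top lip (plus its mask),
   # the inner-mouth (hole) image clipped by that mask, the bottom lip (given open_size_y*2),
   # the chin, and the face with the mouth removed.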



   top_lip_layer,lip_mask = create_top_lip.process_lip_image(img_transparent,landmarks_list, margin, open_size_y, open_size_x)# Y is first
   hole_image = create_hole_image.process_create_hole_image(img,landmarks_list,open_size_y,open_size_x,hole_offset,hole_image_name)
   hole_image_apply_mask = lip_utils.apply_mask(hole_image,lip_mask)
   if lip_utils.DEBUG:
      cv2.imwrite("hole_image.jpg",hole_image)
      cv2.imwrite("hole_image_apply_mask.png",hole_image_apply_mask)

   bottom_lip_layer = create_bottom_lip.process_lip_image(img,landmarks_list, margin, open_size_y*2, open_size_x)
   chin_layer = create_chin_image.process_chin_image(img,landmarks_list, margin, open_size_y, open_size_x)

   no_mouth_face = create_no_mouth.process_create_no_mouth_image(img,landmarks_list)


   chin_points = lip_utils.get_landmark_points(landmarks_list,lip_utils.POINTS_CHIN)
   points =[]
   lip_points = lip_utils.get_lip_mask_points(landmarks_list)
   center_lips = lip_points[2:5]
   print("center")
   print(center_lips)
   center_lips = center_lips[::-1]
   print(center_lips)
   points.extend(center_lips+chin_points[4:13])
   print(points)
   # push the jaw-line points down by the amount the mouth opens
   for i in range(4,len(points)-1):
      points[i][1] += open_size_y

   jaw_mask_line = lip_utils.create_mask(no_mouth_face,(0,0,0))
   cv2.polylines(jaw_mask_line, [np.array(points)], isClosed=True, color=(0,255,0), thickness=1)
   if lip_utils.DEBUG:
      cv2.imwrite("open_mouth_jaw_mask_line.jpg",jaw_mask_line)

   dilation_size=3
   jaw_mask = lip_utils.create_mask_from_points(img,points,dilation_size,3)
#cv2.imwrite("open_mouth_jaw_mask.jpg",jaw_mask)


   
   def convert_cv2_to_pil(cv2_img,is_bgra=True):
      """
      Convert an OpenCV (cv2) image into a PIL image.
      """
      # convert BGR(A) to RGB(A)
      if is_bgra:
         rgb_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGRA2RGBA)
      else:
         rgb_img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
      # create the PIL Image object
      pil_img = Image.fromarray(rgb_img)
      return pil_img

   # build the list of layer images
   pil_images =[]
   #pil_images.append(Image.open("face_no_lip.jpg").convert("RGBA"))

   # the order below is wrong: the hole_image comes out too weak
   #layers = [no_mouth_face,hole_image,top_lip_layer,bottom_lip_layer,chin_layer]

   # this order is right
   # the second chin ordering is wrong; it ghosted when animated
   if inside_layer_low_depth:
      layers = [no_mouth_face,hole_image_apply_mask,top_lip_layer,chin_layer,bottom_lip_layer]
   else:
      layers = [no_mouth_face,top_lip_layer,chin_layer,bottom_lip_layer,hole_image_apply_mask]
   for layer in layers:
      pil_images.append(convert_cv2_to_pil(layer))


   #images = [convert_cv2_to_pil(mask),convert_cv2_to_pil(face_size_image)]
   layers = layers[::-1] # note: the reversed list is only referenced by the commented-out code below
   output_image = None
   # alpha-composite the layers in order, bottom to top
   for i in range(len(pil_images)):
      if output_image is None:
         #cv2_image_rgb = cv2.cvtColor(layers[i], cv2.COLOR_BGRA2RGBA)
         #pil_image1 = Image.fromarray(cv2_image_rgb)
         output_image = pil_images[i]
         continue
      else:
         pil_image1 = output_image

      #cv2_image_rgb = cv2.cvtColor(layers[i], cv2.COLOR_BGRA2RGBA)
      #pil_image2 = Image.fromarray(cv2_image_rgb)

      output_image = Image.alpha_composite(pil_image1, pil_images[i])

      #output_image = lip_utils.alpha_blend_with_image2_alpha(output_image,layers[i+1])


   output_image = output_image.convert("RGB")
   
   #import webp
   #webp.save_images(pil_images, 'anim.webp', fps=10, lossless=True)

   return output_image

   # NOTE: everything below is unreachable (it follows the return above). It is the old
   # CLI save logic, kept for reference; `output` is not even defined in this function.
   name,ext = os.path.splitext(output)

   if ext == "":
      output += ".jpg"

   output_image_path = output.replace(".png",".jpg")
   output_image.save(output_image_path) # force save as jpeg
   cv2.imwrite(output_image_path.replace(".jpg","_mask.jpg"),jaw_mask)

   if close_lip_image is not None:
      pass # do not save: it would end up producing sequentially numbered files; on hold
      #close_lip_path = f"{name}_close-lip.jpg"
      cv2.imwrite("close-lip.jpg",close_lip_image)

   print(f"open-mouth created {output}")
   #cv2.imwrite(output,output_image)

   # save as an animation, for later checking. TODO: add an option
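
# Entry point: run only after process_open_mouth has been defined above.
if __name__ == "__main__":
   main()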