Makhinur committed on
Commit
8e07454
·
verified ·
1 Parent(s): cafcff1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -26
app.py CHANGED
@@ -1,14 +1,15 @@
1
  import os
2
  import onnxruntime as rt
3
  import sys
4
- import PIL
5
- from PIL import Image, ImageOps, ImageFile
6
  import numpy as np
7
  from pathlib import Path
8
  import collections
9
  from typing import Union, List
10
  import scipy.ndimage
11
  import requests
 
 
12
 
13
  MODEL_FILE = "ffhqu2vintage512_pix2pixHD_v1E11-inp2inst-simp.onnx"
14
  so = rt.SessionOptions()
@@ -20,27 +21,25 @@ print("input_name = " + str(input_name))
20
  output_name = session.get_outputs()[0].name
21
  print("output_name = " + str(output_name))
22
 
23
- import face_detection
24
-
25
  def array_to_image(array_in):
26
- array_in = np.squeeze(255*(array_in + 1)/2)
27
  array_in = np.transpose(array_in, (1, 2, 0))
28
  im = Image.fromarray(array_in.astype(np.uint8))
29
  return im
30
 
31
  def image_as_array(image_in):
32
  im_array = np.array(image_in, np.float32)
33
- im_array = (im_array/255)*2 - 1
34
  im_array = np.transpose(im_array, (2, 0, 1))
35
  im_array = np.expand_dims(im_array, 0)
36
  return im_array
37
 
38
- def find_aligned_face(image_in, size=512):
39
  aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
40
  return aligned_image, n_faces, quad
41
 
42
  def align_first_face(image_in, size=512):
43
- aligned_image, n_faces, quad = find_aligned_face(image_in,size=size)
44
  if n_faces == 0:
45
  try:
46
  image_in = ImageOps.exif_transpose(image_in)
@@ -59,17 +58,11 @@ def img_concat_h(im1, im2):
59
  dst.paste(im2, (im1.width, 0))
60
  return dst
61
 
62
- import gradio as gr
63
-
64
- def face2vintage(
65
- img: Image.Image,
66
- size: int
67
- ) -> Image.Image:
68
-
69
  aligned_img = align_first_face(img)
70
  if aligned_img is None:
71
- output=None
72
- else:
73
  output = session.run([output_name], {input_name: aligned_img})[0]
74
  output = array_to_image(output)
75
  aligned_img = array_to_image(aligned_img).resize((output.width, output.height))
@@ -80,13 +73,13 @@ def face2vintage(
80
  def inference(img):
81
  out = face2vintage(img, 512)
82
  return out
83
-
84
-
85
  title = "Vintage style Pix2PixHD"
86
- description = "Style a face to look more \"Vintage\". Upload an image with a face, or click on one of the examples below. If a face could not be detected, an image will still be created."
87
- article = "<hr><p style='text-align: center'>See the <a href='https://github.com/justinpinkney/pix2pixHD' target='_blank'>Github Repo</a></p><p style='text-align: center'>samples: <img src='https://hf.space/gradioiframe/Norod78/VintageStyle/file/Sample00001.jpg' alt='Sample00001'/><img src='https://hf.space/gradioiframe/Norod78/VintageStyle/file/Sample00002.jpg' alt='Sample00002'/><img src='https://hf.space/gradioiframe/Norod78/VintageStyle/file/Sample00003.jpg' alt='Sample00003'/><img src='https://hf.space/gradioiframe/Norod78/VintageStyle/file/Sample00004.jpg' alt='Sample00004'/><img src='https://hf.space/gradioiframe/Norod78/VintageStyle/file/Sample00005.jpg' alt='Sample00005'/></p><p>The \"Vintage Style\" Pix2PixHD model was trained by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a></p>"
 
 
88
 
89
- examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg'],['Example00005.jpg'], ['Example00006.jpg']]
90
  demo = gr.Interface(
91
  inference,
92
  inputs=[gr.Image(type="pil", label="Input")],
@@ -94,10 +87,8 @@ demo = gr.Interface(
94
  title=title,
95
  description=description,
96
  article=article,
97
- examples=examples,
98
- allow_flagging=False
99
- )
100
-
101
 
102
  demo.queue()
103
  demo.launch()
 
1
  import os
2
  import onnxruntime as rt
3
  import sys
4
+ from PIL import Image, ImageOps
 
5
  import numpy as np
6
  from pathlib import Path
7
  import collections
8
  from typing import Union, List
9
  import scipy.ndimage
10
  import requests
11
+ import gradio as gr
12
+ import face_detection # Ensure this is the adjusted face_detection.py
13
 
14
  MODEL_FILE = "ffhqu2vintage512_pix2pixHD_v1E11-inp2inst-simp.onnx"
15
  so = rt.SessionOptions()
 
21
  output_name = session.get_outputs()[0].name
22
  print("output_name = " + str(output_name))
23
 
 
 
24
  def array_to_image(array_in):
25
+ array_in = np.squeeze(255 * (array_in + 1) / 2)
26
  array_in = np.transpose(array_in, (1, 2, 0))
27
  im = Image.fromarray(array_in.astype(np.uint8))
28
  return im
29
 
30
def image_as_array(image_in):
    """Turn a PIL image (or any HWC array-like) into a normalized NCHW batch.

    Pixel values in [0, 255] map to [-1, 1]; channels move to the front
    and a leading batch axis of size 1 is added, as the ONNX model expects.
    """
    pixels = np.array(image_in, np.float32)
    normalized = (pixels / 255) * 2 - 1
    chw = np.moveaxis(normalized, -1, 0)
    return chw[np.newaxis, ...]
36
 
37
def find_aligned_face(image_in, size=512):
    """Locate and crop-align the first detected face in *image_in*.

    Delegates to the project's face_detection module; returns the
    (aligned_image, n_faces, quad) triple unchanged.
    """
    return face_detection.align(image_in, face_index=0, output_size=size)
40
 
41
  def align_first_face(image_in, size=512):
42
+ aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
43
  if n_faces == 0:
44
  try:
45
  image_in = ImageOps.exif_transpose(image_in)
 
58
  dst.paste(im2, (im1.width, 0))
59
  return dst
60
 
61
+ def face2vintage(img: Image.Image, size: int) -> Image.Image:
 
 
 
 
 
 
62
  aligned_img = align_first_face(img)
63
  if aligned_img is None:
64
+ output = None
65
+ else:
66
  output = session.run([output_name], {input_name: aligned_img})[0]
67
  output = array_to_image(output)
68
  aligned_img = array_to_image(aligned_img).resize((output.width, output.height))
 
73
def inference(img):
    """Gradio entry point: stylize *img* at the model's 512px working size."""
    return face2vintage(img, 512)
76
+
 
77
  title = "Vintage style Pix2PixHD"
78
+ description = "Style a face to look more \"Vintage\". Upload an image with a face, or click on one of the examples below."
79
+ article = "<hr><p style='text-align: center'>See the <a href='https://github.com/justinpinkney/pix2pixHD' target='_blank'>Github Repo</a></p>"
80
+
81
+ examples = [['Example00001.jpg'], ['Example00002.jpg'], ['Example00003.jpg'], ['Example00004.jpg'], ['Example00005.jpg']]
82
 
 
83
  demo = gr.Interface(
84
  inference,
85
  inputs=[gr.Image(type="pil", label="Input")],
 
87
  title=title,
88
  description=description,
89
  article=article,
90
+
91
+ )
 
 
92
 
93
  demo.queue()
94
  demo.launch()