Commit 04d5d6b
Parent(s): beed138

fix some bugs

Files changed:
- app.py +1 -1
- leffa/inference.py +4 -0
- leffa/pipeline.py +3 -3
- preprocess/humanparsing/run_parsing.py +5 -2
app.py CHANGED
@@ -60,7 +60,7 @@ class LeffaPredictor(object):
         src_image_path,
         ref_image_path,
         control_type,
-        ref_acceleration=
+        ref_acceleration=False,
         step=50,
         scale=2.5,
         seed=42,
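Note: the old signature left ref_acceleration without a usable default (the removed line is cut off above), and the fix settles it at False, so callers that never pass the flag keep the previous behavior. A minimal call-site sketch; the method name leffa_predict and all argument values here are assumptions for illustration, not part of this diff:

    predictor = LeffaPredictor()          # constructor arguments omitted
    out = predictor.leffa_predict(        # method name assumed for illustration
        src_image_path="person.jpg",      # placeholder paths
        ref_image_path="garment.jpg",
        control_type="virtual_tryon",     # placeholder control type
        # ref_acceleration=True,          # opt in explicitly when wanted
    )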
leffa/inference.py CHANGED
@@ -33,18 +33,22 @@ class LeffaInference(object):
     def __call__(self, data: Dict[str, Any], **kwargs) -> Dict[str, Any]:
         data = self.to_gpu(data)
 
+        ref_acceleration = kwargs.get("ref_acceleration", False)
         num_inference_steps = kwargs.get("num_inference_steps", 50)
         guidance_scale = kwargs.get("guidance_scale", 2.5)
         seed = kwargs.get("seed", 42)
+        repaint = kwargs.get("repaint", False)
         generator = torch.Generator(self.pipe.device).manual_seed(seed)
         images = self.pipe(
             src_image=data["src_image"],
             ref_image=data["ref_image"],
             mask=data["mask"],
             densepose=data["densepose"],
+            ref_acceleration=ref_acceleration,
             num_inference_steps=num_inference_steps,
             guidance_scale=guidance_scale,
             generator=generator,
+            repaint=repaint,
         )[0]
 
         # images = [pil_to_tensor(image) for image in images]
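Both new flags are read with kwargs.get(..., False), so they are strictly opt-in and pre-existing call sites are unaffected. A hedged usage sketch; the data dict keys and keyword names come from this diff, while the LeffaInference construction and the argument values are assumptions:

    inference = LeffaInference(...)    # construction not shown in this diff
    output = inference(
        {
            "src_image": src_image,    # inputs prepared upstream (assumed)
            "ref_image": ref_image,
            "mask": mask,
            "densepose": densepose,
        },
        ref_acceleration=True,         # new: forwarded into self.pipe(...)
        repaint=True,                  # new: enables the repaint post-process
        num_inference_steps=30,
        guidance_scale=2.0,
        seed=123,
    )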
leffa/pipeline.py CHANGED
@@ -48,7 +48,7 @@ class LeffaPipeline(object):
         ref_image,
         mask,
         densepose,
-        ref_acceleration=
+        ref_acceleration=False,
         num_inference_steps=50,
         do_classifier_free_guidance=True,
         guidance_scale=2.5,
@@ -182,7 +182,7 @@ class LeffaPipeline(object):
         mask = numpy_to_pil(mask)
         mask = [i.convert("RGB") for i in mask]
         gen_image = [
-
+            do_repaint(_src_image, _mask, _gen_image)
             for _src_image, _mask, _gen_image in zip(src_image, mask, gen_image)
         ]
 
@@ -216,7 +216,7 @@ def numpy_to_pil(images):
     return pil_images
 
 
-def
+def do_repaint(person, mask, result):
     _, h = result.size
     kernal_size = h // 100
     if kernal_size % 2 == 0:
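Only the first lines of do_repaint are visible in the last hunk: they derive a blur kernel size from the image height, and the truncated if branch presumably bumps an even size to an odd one. The sketch below is consistent with those lines but assumes the rest of the body, namely that the mask edge is feathered and the original person pixels are composited back outside the masked region:

    import numpy as np
    from PIL import Image, ImageFilter

    def do_repaint(person, mask, result):
        _, h = result.size              # PIL .size is (width, height)
        kernal_size = h // 100          # kernel scales with image height (from the diff)
        if kernal_size % 2 == 0:
            kernal_size += 1            # assumed: blur kernels are conventionally odd

        # Assumed behavior: feather the mask, keep generated pixels inside it
        # and untouched source pixels outside it.
        soft = mask.convert("L").filter(ImageFilter.GaussianBlur(kernal_size))
        m = np.asarray(soft, dtype=np.float32)[..., None] / 255.0
        out = np.asarray(result, dtype=np.float32) * m \
            + np.asarray(person, dtype=np.float32) * (1.0 - m)
        return Image.fromarray(out.astype(np.uint8))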
preprocess/humanparsing/run_parsing.py CHANGED
@@ -1,4 +1,5 @@
 from pathlib import Path
+import os
 import sys
 import onnxruntime as ort
 PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
@@ -9,14 +10,16 @@ from parsing_api import onnx_inference
 class Parsing:
     def __init__(self, atr_path, lip_path):
         session_options = ort.SessionOptions()
+        session_options.inter_op_num_threads = os.cpu_count() // 2
+        session_options.intra_op_num_threads = os.cpu_count() // 2
         session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
         session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
         self.session = ort.InferenceSession(atr_path,
                                             sess_options=session_options, providers=['CPUExecutionProvider'])
         self.lip_session = ort.InferenceSession(lip_path,
                                                 sess_options=session_options, providers=['CPUExecutionProvider'])
-
 
     def __call__(self, input_image):
-        parsed_image, face_mask = onnx_inference(
+        parsed_image, face_mask = onnx_inference(
+            self.session, self.lip_session, input_image)
         return parsed_image, face_mask
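The parsing fix pins both ONNX Runtime thread pools to half the available cores, presumably so the two CPU-only parsing sessions do not saturate the machine. A standalone sketch of the same session tuning; the model path is a placeholder, and note that os.cpu_count() can return None on unusual platforms, which this sketch does not guard against:

    import os
    import onnxruntime as ort

    opts = ort.SessionOptions()
    opts.inter_op_num_threads = os.cpu_count() // 2   # threads across independent ops
    opts.intra_op_num_threads = os.cpu_count() // 2   # threads within a single op
    opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    opts.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
    session = ort.InferenceSession("model.onnx", sess_options=opts,
                                   providers=["CPUExecutionProvider"])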