Spaces:
Runtime error
Duplicate from keras-io/Human-Part-Segmentation
Co-authored-by: Satpal Singh Rathore <[email protected]>
- .gitattributes +27 -0
- README.md +50 -0
- app.py +63 -0
- example_image_1.jpg +0 -0
- example_image_2.jpeg +0 -0
- example_image_2.jpg +0 -0
- example_image_3.jpeg +0 -0
- example_image_3.jpg +0 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
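For context, each line above tells Git to store files matching the pattern through Git LFS as lightweight pointers, covering model weights (`*.h5`, `*.pb`, `*.tflite`, `*.pt`), archives, and TensorBoard event files. An additional rule would follow the same format; the `*.npz` pattern below is hypothetical, not part of this commit:

```
# hypothetical extra rule (not in this commit): track NumPy archives via LFS
*.npz filter=lfs diff=lfs merge=lfs -text
```

Running `git lfs track "*.npz"` appends an equivalent line automatically.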
README.md
ADDED
@@ -0,0 +1,50 @@
+---
+title: Human Part Segmentation
+emoji: 👤
+colorFrom: gray
+colorTo: purple
+sdk: gradio
+app_file: app.py
+pinned: false
+tags:
+- computer-vision
+- image-segmentation
+license: cc0-1.0
+duplicated_from: keras-io/Human-Part-Segmentation
+---
+
+# Configuration
+
+`title`: _string_
+Display title for the Space
+
+`emoji`: _string_
+Space emoji (emoji-only character allowed)
+
+`colorFrom`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`colorTo`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`sdk`: _string_
+Can be either `gradio`, `streamlit`, or `static`
+
+`sdk_version` : _string_
+Only applicable for `streamlit` SDK.
+See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
+Path is relative to the root of the repository.
+
+`models`: _List[string]_
+HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
+Will be parsed automatically from your code if not specified here.
+
+`datasets`: _List[string]_
+HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
+Will be parsed automatically from your code if not specified here.
+
+`pinned`: _boolean_
+Whether the Space stays on top of your list.
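As a worked example of the `sdk_version` field, which per the notes above applies only to the `streamlit` SDK (this Space uses `gradio` and so omits it), a Streamlit Space would extend its frontmatter like so; the version number is illustrative:

```yaml
sdk: streamlit
sdk_version: 1.10.0  # illustrative; see https://hf.co/docs/hub/spaces for supported versions
```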
app.py
ADDED
@@ -0,0 +1,63 @@
+import numpy as np
+import tensorflow as tf
+import gradio as gr
+from huggingface_hub import from_pretrained_keras
+import cv2
+
+model = from_pretrained_keras("keras-io/deeplabv3p-resnet50")
+
+colormap = np.array([[0,0,0], [31,119,180], [44,160,44], [44, 127, 125], [52, 225, 143],
+                     [217, 222, 163], [254, 128, 37], [130, 162, 128], [121, 7, 166], [136, 183, 248],
+                     [85, 1, 76], [22, 23, 62], [159, 50, 15], [101, 93, 152], [252, 229, 92],
+                     [167, 173, 17], [218, 252, 252], [238, 126, 197], [116, 157, 140], [214, 220, 252]], dtype=np.uint8)
+
+img_size = 512
+
+def read_image(image):
+    image = tf.convert_to_tensor(image)
+    image.set_shape([None, None, 3])
+    image = tf.image.resize(images=image, size=[img_size, img_size])
+    image = image / 127.5 - 1
+    return image
+
+def infer(model, image_tensor):
+    predictions = model.predict(np.expand_dims((image_tensor), axis=0))
+    predictions = np.squeeze(predictions)
+    predictions = np.argmax(predictions, axis=2)
+    return predictions
+
+def decode_segmentation_masks(mask, colormap, n_classes):
+    r = np.zeros_like(mask).astype(np.uint8)
+    g = np.zeros_like(mask).astype(np.uint8)
+    b = np.zeros_like(mask).astype(np.uint8)
+    for l in range(0, n_classes):
+        idx = mask == l
+        r[idx] = colormap[l, 0]
+        g[idx] = colormap[l, 1]
+        b[idx] = colormap[l, 2]
+    rgb = np.stack([r, g, b], axis=2)
+    return rgb
+
+def get_overlay(image, colored_mask):
+    image = tf.keras.preprocessing.image.array_to_img(image)
+    image = np.array(image).astype(np.uint8)
+    overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
+    return overlay
+
+def segmentation(input_image):
+    image_tensor = read_image(input_image)
+    prediction_mask = infer(image_tensor=image_tensor, model=model)
+    prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)
+    overlay = get_overlay(image_tensor, prediction_colormap)
+    return (overlay, prediction_colormap)
+
+i = gr.inputs.Image()
+o = [gr.outputs.Image(), gr.outputs.Image()]
+
+examples = [["example_image_2.jpeg"], ["example_image_3.jpeg"]]
+title = "Human Part Segmentation"
+description = "Upload an image or select from examples to segment out different human parts."
+
+article = "<div style='text-align: center;'><a href='https://twitter.com/SatpalPatawat' target='_blank'>Space by Satpal Singh Rathore</a><br><a href='https://keras.io/examples/vision/deeplabv3_plus/' target='_blank'>Keras example by Soumik Rakshit</a></div>"
+gr.Interface(segmentation, i, o, examples=examples, allow_flagging=False, analytics_enabled=False,
+             title=title, description=description, article=article).launch(enable_queue=True)
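Note that `gr.inputs`, `gr.outputs`, `allow_flagging=False`, and `launch(enable_queue=True)` target the legacy Gradio 2.x API this Space was built against; those names were removed in later major releases, which is consistent with the "Runtime error" status above. A minimal sketch of the same interface ported to a modern Gradio, assuming ≥ 4.x (which this Space does not use or pin):

```python
# Sketch only, assuming Gradio >= 4.x; the Space itself uses the legacy API above.
import gradio as gr

demo = gr.Interface(
    fn=segmentation,                      # same callback as defined in app.py
    inputs=gr.Image(),                    # replaces gr.inputs.Image()
    outputs=[gr.Image(), gr.Image()],     # replaces [gr.outputs.Image(), gr.outputs.Image()]
    examples=[["example_image_2.jpeg"], ["example_image_3.jpeg"]],
    allow_flagging="never",               # the boolean False became the string "never"
    analytics_enabled=False,
    title=title, description=description, article=article,
)
demo.queue().launch()                     # launch(enable_queue=True) became .queue()
```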
example_image_1.jpg
ADDED
example_image_2.jpeg
ADDED
example_image_2.jpg
ADDED
example_image_3.jpeg
ADDED
example_image_3.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+tensorflow
+numpy
+opencv-python-headless
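The requirements are unpinned, so each Space rebuild resolves to whatever versions are current on PyPI. A pinned variant would make rebuilds reproducible; the version numbers below are illustrative, not what this Space actually resolved:

```
tensorflow==2.9.1
numpy==1.23.0
opencv-python-headless==4.6.0.66
```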