Upload 7 files
- .gitattributes +0 -33
- .gitignore +26 -0
- Dockerfile +25 -0
- application.py +24 -0
- model/model.h5 +3 -0
- model/predict.py +112 -0
- requirements.txt +3 -0
.gitattributes
CHANGED
@@ -1,34 +1 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,26 @@
+.DS_Store
+.env
+.flaskenv
+*.pyc
+*.pyo
+env/
+venv/
+.venv/
+env*
+dist/
+build/
+*.egg
+*.egg-info/
+.tox/
+.cache/
+.pytest_cache/
+.idea/
+docs/_build/
+.vscode
+.tflite
+
+# Coverage reports
+htmlcov/
+.coverage
+.coverage.*
+*,cover
Dockerfile
ADDED
@@ -0,0 +1,25 @@
+# FROM python:3.8-slim-buster
+# COPY . /app
+# WORKDIR /app
+# RUN apt-get update
+# RUN pip3 install -r requirements.txt
+# EXPOSE 5000
+# ENTRYPOINT [ "python" ]
+# CMD [ "application.py" ]
+FROM tensorflow/tensorflow
+
+# Copy local code to the container image.
+COPY . /app
+WORKDIR /app
+
+# Install production dependencies.
+RUN pip3 install -r requirements.txt
+
+ENV PORT 5000
+EXPOSE 5000
+
+ENTRYPOINT [ "python" ]
+CMD [ "application.py" ]
+
+# webserver, with one worker process and 8 threads.
+# CMD exec gunicorn --bind 0.0.0.0:$PORT --workers 1 --threads 8 --timeout 0 main:app
application.py
ADDED
@@ -0,0 +1,24 @@
+from flask import Flask, request, jsonify
+import os
+from waitress import serve
+from model.predict import *
+
+app = Flask(__name__)
+
+@app.route('/api/create-texture', methods=['POST', 'GET', 'DELETE'])
+def create_texture():
+    print(request.files)
+    front_image = request.files["front"].read()
+    back_image = request.files["back"].read()
+    primary_color = request.form["color"]
+
+    front_output = predict_h5(front_image)
+    back_output = predict_h5(back_image)
+
+    return jsonify({"front": front_output, "back": back_output})
+
+
+if __name__ == '__main__':
+    port = int(os.environ.get('PORT', 5000))
+    app.run(debug=True, host="0.0.0.0")
+    # serve(app, host="0.0.0.0", port=port)
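For reference, a minimal client sketch for exercising /api/create-texture (an illustration, not part of this commit). It assumes the container is running locally on port 5000, that front.jpg and back.jpg exist in the working directory, and that the requests library is installed (it is not listed in requirements.txt):

# Hypothetical client; the URL and image file names are placeholders.
import base64
import requests

resp = requests.post(
    "http://localhost:5000/api/create-texture",
    files={"front": open("front.jpg", "rb"), "back": open("back.jpg", "rb")},
    data={"color": "#ffffff"},
)
resp.raise_for_status()

payload = resp.json()
# Each value returned by the endpoint is a base64-encoded RGBA PNG from predict_h5().
with open("front_texture.png", "wb") as out:
    out.write(base64.b64decode(payload["front"]))
with open("back_texture.png", "wb") as out:
    out.write(base64.b64decode(payload["back"]))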
model/model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8907e86ca825a4222ce652ef48fde473da62d6693dcedbc563f347a3837ce166
+size 215186872
model/predict.py
ADDED
@@ -0,0 +1,112 @@
+import base64
+import numpy as np
+import cv2
+import tensorflow as tf
+from tensorflow.keras.utils import CustomObjectScope
+
+def iou(y_true, y_pred):
+    def f(y_true, y_pred):
+        intersection = (y_true * y_pred).sum()
+        union = y_true.sum() + y_pred.sum() - intersection
+        x = (intersection + 1e-15) / (union + 1e-15)
+        x = x.astype(np.float32)
+        return x
+    return tf.numpy_function(f, [y_true, y_pred], tf.float32)
+
+smooth = 1e-15
+def dice_coef(y_true, y_pred):
+    y_true = tf.keras.layers.Flatten()(y_true)
+    y_pred = tf.keras.layers.Flatten()(y_pred)
+    intersection = tf.reduce_sum(y_true * y_pred)
+    return (2. * intersection + smooth) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth)
+
+def dice_loss(y_true, y_pred):
+    return 1.0 - dice_coef(y_true, y_pred)
+
+def predict():
+    # Load Model
+    interpreter = tf.lite.Interpreter(model_path='model/segmentation_model.tflite')
+    interpreter.allocate_tensors()
+
+    # Get input and output tensors.
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+
+    # Global Parameters
+    H = 512
+    W = 384
+
+    # Set up your input data.
+    image = cv2.imread('model/test.jpg', cv2.IMREAD_COLOR)
+
+    resized_image = cv2.resize(image, (W, H))
+    x = resized_image / 255.0
+    x = x.astype(np.float32)
+    x = np.expand_dims(x, 0)
+
+    # Input Data to Model
+    interpreter.set_tensor(input_details[0]['index'], x)
+    interpreter.invoke()
+
+    # Getting Output
+    prediction = interpreter.get_tensor(output_details[0]['index'])[0]
+
+    prediction = cv2.resize(prediction, (W, H))
+    prediction = np.expand_dims(prediction, axis=-1)
+    prediction = prediction > 0.5
+
+    mask = np.uint8(prediction) * 255
+    prediction = cv2.merge([mask, mask, mask])
+
+    mask_inv = cv2.cvtColor(prediction, cv2.COLOR_BGR2GRAY)
+
+    output_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2BGRA)
+    output_image[..., 3] = mask_inv
+
+    # cv2.imshow(output_image)
+
+    cv2.imwrite("model/output.png", output_image)
+
+    return "done"
+
+def predict_h5(file):
+    # Load Model
+    with CustomObjectScope({'iou': iou, 'dice_coef': dice_coef, 'dice_loss': dice_loss}):
+        model = tf.keras.models.load_model("model/model.h5")
+
+    # Global Parameters
+    H = 512
+    W = 384
+
+    # Set up your input data.
+    # Convert File to np array
+    file_bytes = np.fromstring(file, np.uint8)
+    # convert numpy array to image
+    image = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED)
+
+    resized_image = cv2.resize(image, (W, H))
+    x = resized_image / 255.0
+    x = x.astype(np.float32)
+    x = np.expand_dims(x, 0)
+
+    # Input Data to Model
+
+    # Getting Output
+    prediction = model.predict(x)[0]
+
+    prediction = cv2.resize(prediction, (W, H))
+    prediction = np.expand_dims(prediction, axis=-1)
+    prediction = prediction > 0.5
+
+    mask = np.uint8(prediction) * 255
+    prediction = cv2.merge([mask, mask, mask])
+
+    mask_inv = cv2.cvtColor(prediction, cv2.COLOR_BGR2GRAY)
+
+    output_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2BGRA)
+    output_image[..., 3] = mask_inv
+
+    retval, buffer = cv2.imencode('.png', output_image)
+    encoded_image = base64.b64encode(buffer).decode('utf-8')
+
+    return encoded_image
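A small local smoke test for predict_h5, shown for illustration only (not part of the commit). It assumes it is run from the repository root and that model/test.jpg exists, as referenced by predict() above:

# Hypothetical smoke test; paths are assumptions based on the files in this repo.
import base64
from model.predict import predict_h5

with open("model/test.jpg", "rb") as f:
    raw_bytes = f.read()

encoded_png = predict_h5(raw_bytes)   # base64-encoded RGBA PNG string
with open("model/segmented_test.png", "wb") as out:
    out.write(base64.b64decode(encoded_png))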
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+flask
+opencv-python-headless
+waitress