Commit d3b11cc
Parent(s): cfaf2f8
Upload 11 files

Files changed:
- Procfile +1 -0
- README.md +17 -13
- app.py +39 -0
- best_model.h5 +3 -0
- img_classification.py +63 -0
- requirements.txt +114 -0
- runtime.txt +1 -0
- setup.sh +13 -0
- tests/__pycache__/test_static.cpython-39-pytest-7.2.0.pyc +0 -0
- tests/test_static.py +14 -0
- utils.py +43 -0
Procfile
ADDED
@@ -0,0 +1 @@
web: sh setup.sh && streamlit run app.py
README.md
CHANGED
@@ -1,13 +1,17 @@
# Streamlit app for trying out the poison oak classifier

Try it out: https://poison-app.herokuapp.com (note: may take ~30 sec for the webpage to load since the server is on a non-paid license)

## What is it

This is an image classifier trained on poison oak images hand-collected over one year. Poison ivy is not necessarily expected to be classified as poison oak, even though its leaves also contain urushiol, the oil that causes the allergic reaction.

At the time of repository creation, this model was deployed in a free iOS app, "Poizon Plants"; however, due to Apple developer license costs it may no longer be live.

To help the user interpret the probability that a given image contains poison oak, different prompts are output based on the softmax probability.

The model is initialized at server start-up to speed up inference. (TensorFlow Lite was tested and optimized for latency; it did not significantly improve speed, but it did significantly decrease the F1 metric.)

## Run

*streamlit run app.py*

## Tests

pytest
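As a side note on the start-up initialization mentioned above, here is a minimal sketch (not part of this commit) of one way to keep the loaded model warm across Streamlit reruns, assuming Streamlit's st.cache decorator; app.py below simply loads the model at module level instead.

# Hypothetical caching sketch, not the app's actual approach.
import streamlit as st
from img_classification import load_model

@st.cache(allow_output_mutation=True)
def get_model(path="./best_model.h5"):
    # Load the Keras model once and reuse it on subsequent reruns.
    return load_model(path)

model = get_model()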
app.py
ADDED
@@ -0,0 +1,39 @@
import streamlit as st
from PIL import Image, ImageOps
import io
from img_classification import teachable_machine_classification, load_model

from tensorflow import keras


st.set_option("deprecation.showfileUploaderEncoding", False)

st.title("Detecting presence of Poison Oak")
st.header("Poison Oak Classification Example")
st.text("Upload an image for classification as poison oak or no poison oak")


# Load trained model
model = load_model("./best_model.h5")

print("Starting Streamlit app")
uploaded_file = st.file_uploader("Select an image ...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded image", use_column_width=True)
    st.write("")
    st.write("Classifying...")
    label = teachable_machine_classification(img=image, model=model)
    if label <= 0.2:
        st.write("Very unlikely that this is poison oak.")
    elif 0.2 < label <= 0.6:
        st.write(
            "Unsure from this picture. You may need to retake a closer/clearer picture."
        )
    elif 0.6 < label <= 0.7:
        st.write("Decent chance that this is poison oak.")
    else:
        st.write("{:.1f}% chance that this might be poison oak".format(label * 100))
else:
    st.write("No file uploaded")
best_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:148242b744c1e8115f7a417fae172b938b8d9ebb9e387b2690dbd478f1e6ea5b
size 6860424
img_classification.py
ADDED
@@ -0,0 +1,63 @@
from tensorflow import keras
from PIL import Image, ImageOps
import numpy as np
import io, os
import logging
import keras_metrics
import utils

## Configs
keras.utils.get_custom_objects()['recall'] = utils.recall
keras.utils.get_custom_objects()['precision'] = utils.precision
keras.utils.get_custom_objects()['f1'] = utils.f1


def teachable_machine_classification(img=None, model=None):
    """Performs inference on the uploaded image."""

    # Create the array of the right shape to feed into the keras model
    data = np.ndarray(shape=(1, 299, 299, 3), dtype=np.float32)
    image = img
    # image sizing
    size = (299, 299)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)

    # turn the image into a numpy array
    image_array = np.asarray(image)

    # Normalize the image
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

    # Load the image into the array
    data[0] = normalized_image_array

    # run the inference
    prediction = model.predict(data)

    print("Prediction", prediction)
    # Return the probability of the positive (poison oak) class;
    # np.argmax(prediction) would instead return the position of the highest probability.
    return prediction[0][1]


def load_model(weights_file=None):
    """Loads the trained keras model."""
    dependencies = {
        "binary_f1_score": keras_metrics.binary_f1_score,
        "binary_precision": keras_metrics.binary_precision,
        "binary_recall": keras_metrics.binary_recall,
    }

    try:
        assert os.path.exists(weights_file), f"File '{weights_file}' does not exist"
        # Load the model
        model = keras.models.load_model(
            weights_file, custom_objects=dependencies, compile=False
        )

        return model
    except Exception as e:
        logging.error("ERROR: %s. Failed to load ML model", e)
        print("ERROR: ", e, " Failed to load ML model")

        return None
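For reference, a minimal standalone usage sketch of this module (not part of the commit); the image path is purely illustrative.

# Hypothetical script: classify one local image outside of Streamlit.
from PIL import Image
from img_classification import load_model, teachable_machine_classification

model = load_model("./best_model.h5")
img = Image.open("example_leaf.jpg")  # illustrative path, not in this repo
prob = teachable_machine_classification(img=img, model=model)
print(f"Poison oak probability: {prob:.3f}")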
requirements.txt
ADDED
@@ -0,0 +1,114 @@
absl-py==0.11.0
altair==4.1.0
appnope==0.1.0
argon2-cffi==20.1.0
astor==0.8.1
astroid==2.4.2
astunparse==1.6.3
async-generator==1.10
attrs==20.3.0
backcall==0.2.0
base58==2.0.1
bleach==3.2.1
blinker==1.4
boto3==1.16.12
botocore==1.19.12
cachetools==4.1.1
certifi==2020.6.20
cffi==1.14.3
chardet==3.0.4
click==7.1.2
decorator==4.4.2
defusedxml==0.6.0
entrypoints==0.3
enum-compat==0.0.3
gast==0.3.3
gitdb==4.0.5
GitPython==3.1.11
google-auth==1.23.0
google-auth-oauthlib==0.4.2
google-pasta==0.2.0
grpcio==1.33.2
h5py==2.10.0
idna==2.10
ipykernel==5.3.4
ipython-genutils==0.2.0
ipywidgets==7.5.1
isort==5.6.4
jedi==0.17.2
Jinja2==2.11.2
jmespath==0.10.0
jsonschema==3.2.0
jupyter-client==6.1.7
jupyter-core==4.6.3
jupyterlab-pygments==0.1.2
Keras==2.4.3
keras-metrics==1.1.0
Keras-Preprocessing==1.1.2
lazy-object-proxy==1.4.3
Markdown==3.3.3
MarkupSafe==1.1.1
mccabe==0.6.1
mistune==0.8.4
nbclient==0.5.1
nbconvert==6.0.7
nbformat==5.0.8
nest-asyncio==1.4.2
notebook==6.1.5
numpy==1.18.5
oauthlib==3.1.0
opt-einsum==3.3.0
packaging==20.4
pandas==1.1.4
pandocfilters==1.4.3
parso==0.7.1
pathtools==0.1.2
pexpect==4.8.0
pickleshare==0.7.5
Pillow==8.0.1
prometheus-client==0.8.0
prompt-toolkit==3.0.8
protobuf==3.13.0
ptyprocess==0.6.0
pyarrow==2.0.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pydeck==0.5.0
Pygments==2.7.2
pylint==2.6.0
pyparsing==2.4.7
pyrsistent==0.17.3
python-dateutil==2.8.1
pytz==2020.4
PyYAML==5.3.1
pyzmq==19.0.2
requests==2.24.0
requests-oauthlib==1.3.0
rsa==4.6
s3transfer==0.3.3
scipy==1.5.4
Send2Trash==1.5.0
six==1.15.0
smmap==3.0.4
streamlit==0.70.0
tensorboard==2.3.0
tensorboard-plugin-wit==1.7.0
tensorflow-cpu==2.3.1
tensorflow-estimator==2.3.0
termcolor==1.1.0
terminado==0.9.1
testpath==0.4.4
toml==0.10.2
toolz==0.11.1
tornado==6.1
traitlets
tzlocal==2.1
urllib3==1.25.11
validators==0.18.1
watchdog==0.10.3
wcwidth==0.2.5
webencodings==0.5.1
Werkzeug==1.0.1
widgetsnbextension==3.5.1
wrapt==1.12.1
runtime.txt
ADDED
@@ -0,0 +1 @@
python-3.8.6
setup.sh
ADDED
@@ -0,0 +1,13 @@
mkdir -p ~/.streamlit/

echo "\
[general]\n\
email = \"[email protected]\"\n\
" > ~/.streamlit/credentials.toml

echo "\
[server]\n\
headless = true\n\
enableCORS=false\n\
port = $PORT\n\
" > ~/.streamlit/config.toml
tests/__pycache__/test_static.cpython-39-pytest-7.2.0.pyc
ADDED
Binary file (1.23 kB)
tests/test_static.py
ADDED
@@ -0,0 +1,14 @@
###############################################
# Tests for existence of files not in remote
###############################################
import os


def test_model_weights_exist():
    """Tests for existence of classifier model."""
    assert os.path.isfile("best_model.h5")


def test_heroku_setup():
    """Tests for existence of setup file with personal email."""
    assert os.path.isfile("setup.sh")
utils.py
ADDED
@@ -0,0 +1,43 @@
"""
This module contains custom metric functions for binary classification models using TensorFlow and Keras.

Functions:
- f1(y_true, y_pred): Computes the F1 score metric for binary classification models.
- recall(y_true, y_pred): Computes the recall metric for binary classification models.
- precision(y_true, y_pred): Computes the precision metric for binary classification models.
"""

import tensorflow as tf
from tensorflow.keras import backend as K


def f1(y_true, y_pred):
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, "float"), axis=0)
    tn = K.sum(K.cast((1 - y_true) * (1 - y_pred), "float"), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, "float"), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), "float"), axis=0)

    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    f1_score = 2 * precision * recall / (precision + recall + K.epsilon())

    return f1_score


def recall(y_true, y_pred):
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, "float"), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), "float"), axis=0)

    recall = tp / (tp + fn + K.epsilon())
    return recall


def precision(y_true, y_pred):
    y_pred = K.round(y_pred)
    tp = K.sum(K.cast(y_true * y_pred, "float"), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, "float"), axis=0)

    precision = tp / (tp + fp + K.epsilon())
    return precision
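For context, a minimal sketch (not part of this commit) of how these custom metrics could be attached when compiling a Keras model so that saved weights reference them by name; the tiny architecture here is purely illustrative.

# Hypothetical compile-time usage of the metrics defined above.
from tensorflow import keras
import utils

model = keras.Sequential([
    keras.layers.GlobalAveragePooling2D(input_shape=(299, 299, 3)),
    keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(
    optimizer="adam",
    loss="binary_crossentropy",
    metrics=["accuracy", utils.precision, utils.recall, utils.f1],
)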