rmm committed
Commit b582a0e · Parent(s): 5c7e462
fix: use cv2 to load image, now compatible with pre-processing model

- Note that the streamlit file_uploader returns a bytestream, not a file name, so we have to convert it instead of just using cv2.imread; see https://github.com/streamlit/streamlit/issues/888#issuecomment-568578281
- resolves #1
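
For reference, a minimal standalone sketch of the conversion this commit applies; the helper name uploaded_file_to_cv2 is illustrative and not part of the codebase:

    import cv2
    import numpy as np

    def uploaded_file_to_cv2(uploaded_file):
        # st.file_uploader() hands back a file-like bytestream (UploadedFile), not a path,
        # so cv2.imread() cannot be used directly; decode the raw bytes instead.
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        return cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)  # BGR ndarray, same layout as cv2.imread()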
call_models/entry_and_hotdog.py
CHANGED
@@ -25,6 +25,8 @@ from transformers import AutoModelForImageClassification
 
 # setup for the ML model on huggingface (our wrapper)
 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+#classifier_revision = '0f9c15e2db4d64e7f622ade518854b488d8d35e6'
+classifier_revision = 'main' # default/latest version
 # and the dataset of observations (hf dataset in our space)
 dataset_id = "Saving-Willy/Happywhale-kaggle"
 data_files = "data/train-00000-of-00001.parquet"

@@ -221,7 +223,8 @@ if __name__ == "__main__":
     if tab_inference.button("Identify with cetacean classifier"):
         #pipe = pipeline("image-classification", model="Saving-Willy/cetacean-classifier", trust_remote_code=True)
         cetacean_classifier = AutoModelForImageClassification.from_pretrained("Saving-Willy/cetacean-classifier",
-            revision=
+            revision=classifier_revision,
+            trust_remote_code=True)
 
     if st.session_state.image is None:
         # TODO: cleaner design to disable the button until data input done?
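
Taken together, the two hunks above pin the classifier to a named revision when it is loaded; a sketch of the combined effect, with names copied from the diff and the surrounding Streamlit app omitted:

    from transformers import AutoModelForImageClassification

    #classifier_revision = '0f9c15e2db4d64e7f622ade518854b488d8d35e6'  # pin to an exact commit
    classifier_revision = 'main'  # default/latest version
    cetacean_classifier = AutoModelForImageClassification.from_pretrained(
        "Saving-Willy/cetacean-classifier",
        revision=classifier_revision,  # which revision of the model repo to fetch from the Hub
        trust_remote_code=True,        # the classifier repo ships its own model code
    )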
call_models/input_handling.py
CHANGED
@@ -6,6 +6,8 @@ import hashlib
 import logging
 
 import streamlit as st
+import cv2
+import numpy as np
 
 m_logger = logging.getLogger(__name__)
 # we can set the log level locally for funcs in this module

@@ -135,7 +137,12 @@ def setup_input(viewcontainer: st.delta_generator.DeltaGenerator=None, _allowed_
 
     if uploaded_filename is not None:
         # Display the uploaded image
-        image = Image.open(uploaded_filename)
+        #image = Image.open(uploaded_filename)
+        # load image using cv2 format, so it is compatible with the ML models
+        file_bytes = np.asarray(bytearray(uploaded_filename.read()), dtype=np.uint8)
+        image = cv2.imdecode(file_bytes, 1)
+
+
         viewcontainer.image(image, caption='Uploaded Image.', use_column_width=True)
         # store the image in the session state
         st.session_state.image = image
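
One design note on the new loading path, not addressed by this commit: cv2.imdecode returns a BGR NumPy array, whereas Image.open gave an RGB PIL image, so the Streamlit preview can show swapped colours. A minimal hedged adjustment, assuming only the display needs correcting and the model still expects the cv2/BGR layout:

    # tell Streamlit the array is BGR so the preview renders with correct colours
    viewcontainer.image(image, caption='Uploaded Image.', use_column_width=True, channels="BGR")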