Spaces: Runtime error
first commit
- app.py +49 -0
- categories.txt +1 -0
- models/VGG16.py +34 -0
- models/__pycache__/VGG16.cpython-38.pyc +0 -0
- models/__pycache__/VGG16.cpython-39.pyc +0 -0
- models/__pycache__/modelNet.cpython-38.pyc +0 -0
- models/__pycache__/modelNet.cpython-39.pyc +0 -0
- models/__pycache__/model_v1.cpython-38.pyc +0 -0
- models/__pycache__/model_v1.cpython-39.pyc +0 -0
- models/mobilenet_v2/best_model.pth +3 -0
- models/mobilenet_v2/config.json +39 -0
- models/modelNet.py +19 -0
- models/model_v1.py +47 -0
- samples/basking.jpg +0 -0
- samples/blacktip.jpg +0 -0
- samples/blue.jpg +0 -0
- samples/bull.jpg +0 -0
- samples/hammerhead.jpg +0 -0
- samples/lemon.jpg +0 -0
- samples/mako.jpg +0 -0
- samples/nurse.jpg +0 -0
- samples/sand tiger.jpg +0 -0
- samples/thresher.jpg +0 -0
- samples/tigre.jpg +0 -0
- samples/whale.jpg +0 -0
- samples/white.jpg +0 -0
- samples/whitetip.jpg +0 -0
app.py
ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+import numpy as np
+import os
+
+from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference
+
+models_name = [
+    "VGG16",
+    "ShuffleNetV2",
+    "mobilenet_v2"
+]
+
+colname = "mobilenet_v2"
+
+radio = gr.inputs.Radio(models_name, default="mobilenet_v2", type="value", label=colname)
+print(radio.label)
+
+def predict_image(image):
+    image = np.array(image) / 255
+    image = np.expand_dims(image, axis=0)
+
+    classifier = TorchVisionClassifierInference(
+        model_path = "./models/" + colname + ".pth",
+    )
+
+    pred = classifier.predict(img=image)
+    return pred
+
+# open categories.txt in read mode
+categories = open("categories.txt", "r")
+labels = categories.readline().split(";")
+
+image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
+label = gr.outputs.Label(num_top_classes=len(labels))
+
+samples = ['./samples/basking.jpg', './samples/blacktip.jpg']
+# , './samples/blacktip.jpg', './samples/blue.jpg', './samples/bull.jpg', './samples/hammerhead.jpg',
+# './samples/lemon.jpg', './samples/mako.jpg', './samples/nurse.jpg', './samples/sand tiger.jpg', './samples/thresher.jpg',
+# './samples/tigre.jpg', './samples/whale.jpg', './samples/white.jpg', './samples/whitetip.jpg']
+
+interface = gr.Interface(
+    fn=predict_image,
+    inputs=[image, radio],
+    outputs=label,
+    capture_session=True,
+    allow_flagging=False,
+    examples=samples
+)
+interface.launch()
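The Space status above shows "Runtime error", and two details in app.py line up with that: gr.Interface declares two inputs ([image, radio]) while predict_image accepts only one argument, and model_path resolves to ./models/mobilenet_v2.pth even though this commit stores the checkpoint as a directory, models/mobilenet_v2/ (best_model.pth plus config.json). Below is a minimal sketch of a predict function consistent with the committed files; it assumes TorchVisionClassifierInference accepts that directory as model_path and a PIL image for predict(img=...), which this commit does not itself confirm.

from PIL import Image
from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference

def predict_image(image, model_name):
    # Gradio passes one value per input component, so the radio choice
    # ("VGG16", "ShuffleNetV2" or "mobilenet_v2") arrives here as model_name.
    classifier = TorchVisionClassifierInference(
        # Assumption: the inference class expects the directory holding
        # best_model.pth and config.json, e.g. ./models/mobilenet_v2
        model_path="./models/" + model_name,
    )
    # Gradio's Image input provides a NumPy array by default; convert it
    # to PIL before predicting, and return the prediction unchanged so
    # the Label output can display it.
    return classifier.predict(img=Image.fromarray(image))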
categories.txt
ADDED
@@ -0,0 +1 @@
+basking;blacktip;blue;bull;hammerhead;lemon;mako;nurse;sand tiger;thresher;tiger;whale;white;whitetip
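app.py reads this single line with categories.readline().split(";"); if the file ends with a newline, that newline stays attached to the last label ("whitetip\n"), and the file handle is never closed. A small defensive loader, as an illustration only:

# Read the semicolon-separated shark labels and strip stray whitespace.
with open("categories.txt", "r") as categories:
    labels = [label.strip() for label in categories.readline().split(";")]

assert len(labels) == 14  # matches "num_classes": 14 in models/mobilenet_v2/config.json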
models/VGG16.py
ADDED
@@ -0,0 +1,34 @@
+import tensorflow as tf
+from tensorflow.keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
+from tensorflow.keras.layers import Conv2D, Dense, Flatten, GlobalMaxPooling2D
+from tensorflow.keras.layers import Dense, Input, MaxPooling2D
+from tensorflow.keras import Model
+
+
+def VGG16(nbr_class):
+    # 224 224 3
+    img_input = Input(shape=(224,224,3))
+
+    # first convolution
+    x = Conv2D(64, (3,3), activation='relu', padding='same')(img_input)
+    x = Conv2D(64, (3,3), activation='relu', padding='same')(x)
+    x = MaxPooling2D((2,2), strides = (2,2))(x)
+
+    # second convolution
+    x = Conv2D(128, (3,3), activation='relu', padding='same')(x)
+    x = Conv2D(128, (3,3), activation='relu', padding='same')(x)
+    x = MaxPooling2D((2,2), strides = (2,2))(x)
+
+    # third convolution
+    x = Conv2D(256, (3,3), activation='relu', padding='same')(x)
+    x = Conv2D(256, (3,3), activation='relu', padding='same')(x)
+    x = Conv2D(256, (3,3), activation='relu', padding='same')(x)
+    x = MaxPooling2D((2,2), strides = (2,2))(x)
+
+    x = Flatten()(x)
+    x = Dense(1024, activation='relu')(x)
+    x = Dense(1024, activation='relu')(x)
+    x = Dense(nbr_class, activation='softmax')(x)
+
+
+    return Model(img_input, x, name="vgg16")
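A short usage sketch for the builder above (illustrative, not part of the commit), assuming the 14 classes listed in categories.txt and one-hot encoded labels:

from models.VGG16 import VGG16

# Build the cut-down VGG16-style network (three conv blocks, 1024-unit head)
# for the 14 shark classes and compile it for one-hot labels.
model = VGG16(nbr_class=14)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()  # expects 224x224 RGB inputs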
models/__pycache__/VGG16.cpython-38.pyc
ADDED
Binary file (1.12 kB)
models/__pycache__/VGG16.cpython-39.pyc
ADDED
Binary file (1.11 kB)
models/__pycache__/modelNet.cpython-38.pyc
ADDED
Binary file (941 Bytes)
models/__pycache__/modelNet.cpython-39.pyc
ADDED
Binary file (937 Bytes)
models/__pycache__/model_v1.cpython-38.pyc
ADDED
Binary file (1.36 kB)
models/__pycache__/model_v1.cpython-39.pyc
ADDED
Binary file (1.36 kB)
models/mobilenet_v2/best_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:650c8c31d438caf26c768908e204a0fd8ee48af3f22d157e6eb88237dc103828
+size 9235021
models/mobilenet_v2/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+    "num_classes": 14,
+    "hidden_size": 1280,
+    "id2label": {
+        "0": "basking",
+        "1": "blacktip",
+        "2": "blue",
+        "3": "bull",
+        "4": "hammerhead",
+        "5": "lemon",
+        "6": "mako",
+        "7": "nurse",
+        "8": "sand_tiger",
+        "9": "thresher",
+        "10": "tiger",
+        "11": "whale",
+        "12": "white",
+        "13": "whitetip"
+    },
+    "label2id": {
+        "basking": "0",
+        "blacktip": "1",
+        "blue": "2",
+        "bull": "3",
+        "hammerhead": "4",
+        "lemon": "5",
+        "mako": "6",
+        "nurse": "7",
+        "sand_tiger": "8",
+        "thresher": "9",
+        "tiger": "10",
+        "whale": "11",
+        "white": "12",
+        "whitetip": "13"
+    },
+    "architectures": [
+        "mobilenet_v2"
+    ]
+}
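The id2label table is what maps a predicted class index back to a shark name (note it spells sand_tiger with an underscore, whereas categories.txt uses "sand tiger"). A small illustrative snippet for reading it:

import json

# Load the config shipped with the mobilenet_v2 checkpoint and resolve a
# class index to its label; the JSON keys are strings, hence str(...).
with open("models/mobilenet_v2/config.json") as f:
    config = json.load(f)

predicted_index = 4  # example value
print(config["id2label"][str(predicted_index)])  # -> "hammerhead"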
models/modelNet.py
ADDED
@@ -0,0 +1,19 @@
+
+import tensorflow as tf
+from tensorflow.keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
+from tensorflow.keras.layers import Conv2D, Dense, GlobalMaxPooling2D
+from tensorflow.keras.layers import Dense, MaxPooling2D, BatchNormalization
+from tensorflow.keras.models import Sequential
+from tensorflow.keras import Model
+
+
+def modelNet(nbr_class):
+    mobile_net = tf.keras.applications.MobileNetV2(input_shape=(224,224,3), include_top=False)
+    mobile_net.trainable=False
+
+    model = Sequential([
+        mobile_net,
+        tf.keras.layers.GlobalAveragePooling2D(),
+        tf.keras.layers.Dense(nbr_class, activation = 'softmax')])
+
+    return model
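modelNet is a standard transfer-learning head: the MobileNetV2 backbone is frozen (trainable=False) and only the pooling + softmax layers learn. A usage sketch, assuming one-hot labels (the training script itself is not part of this commit):

from models.modelNet import modelNet

# Only the GlobalAveragePooling2D + Dense head is trainable; the
# ImageNet-pretrained MobileNetV2 backbone stays frozen.
model = modelNet(nbr_class=14)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(train_ds, validation_data=val_ds, epochs=10)  # hypothetical datasets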
models/model_v1.py
ADDED
@@ -0,0 +1,47 @@
+
+import tensorflow as tf
+from tensorflow.keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
+from tensorflow.keras.layers import Conv2D, Dense, GlobalMaxPooling2D
+from tensorflow.keras.layers import Dense, MaxPooling2D, BatchNormalization
+from tensorflow.keras.models import Sequential
+from tensorflow.keras import Model
+
+def model_v1(nbr_class):
+    model = Sequential()
+    model.add(Conv2D(64,(3,3), activation="relu", input_shape=(224,224,3)))
+    model.add(BatchNormalization())
+    model.add(Conv2D(64,(3,3), activation="relu"))
+    model.add(BatchNormalization())
+    model.add(MaxPooling2D())
+
+    model.add(Conv2D(128,(3,3), activation="relu"))
+    model.add(BatchNormalization())
+    model.add(Conv2D(128,(3,3), activation="relu"))
+    model.add(BatchNormalization())
+    model.add(MaxPooling2D())
+
+    model.add(Conv2D(256,(3,3), activation="relu"))
+    model.add(BatchNormalization())
+    model.add(Conv2D(256,(3,3), activation="relu"))
+    model.add(BatchNormalization())
+    model.add(MaxPooling2D())
+
+    # model.add(Conv2D(512,(3,3), activation="relu"))
+    # model.add(BatchNormalization())
+    # model.add(Conv2D(512,(3,3), activation="relu"))
+    # model.add(BatchNormalization())
+    # model.add(MaxPooling2D())
+
+    # model.add(Conv2D(512,(3,3), activation="relu"))
+    # model.add(BatchNormalization())
+    # model.add(Conv2D(512,(3,3), activation="relu"))
+    # model.add(BatchNormalization())
+    # model.add(Conv2D(512,(3,3), activation="relu"))
+    # model.add(BatchNormalization())
+    # model.add(GlobalMaxPooling2D())
+
+    model.add(Dense(1024, activation="relu"))
+    model.add(BatchNormalization())
+    model.add(Dense(nbr_class, activation="softmax"))
+
+    return model
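One caveat in model_v1: with the GlobalMaxPooling2D() line commented out, nothing flattens the feature map before the Dense head, so the softmax output keeps its spatial dimensions instead of being a flat (batch, nbr_class) vector. The shape check below illustrates this; re-enabling GlobalMaxPooling2D() or adding a Flatten() before the Dense layers would restore the expected (None, 14).

from models.model_v1 import model_v1

# Without Flatten/GlobalMaxPooling2D before the Dense layers, the final
# softmax is applied per spatial position of the last feature map.
model = model_v1(nbr_class=14)
print(model.output_shape)  # e.g. (None, 24, 24, 14) instead of (None, 14)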
samples/basking.jpg
ADDED
samples/blacktip.jpg
ADDED
samples/blue.jpg
ADDED
samples/bull.jpg
ADDED
samples/hammerhead.jpg
ADDED
samples/lemon.jpg
ADDED
samples/mako.jpg
ADDED
samples/nurse.jpg
ADDED
samples/sand tiger.jpg
ADDED
samples/thresher.jpg
ADDED
samples/tigre.jpg
ADDED
samples/whale.jpg
ADDED
samples/white.jpg
ADDED
samples/whitetip.jpg
ADDED