JamesJayamuni committed (verified)
Commit 4787998 · 1 Parent(s): bb4cec6

Upload folder using huggingface_hub

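The commit message says the folder was pushed with huggingface_hub. A minimal sketch of what such an upload call can look like follows; the local folder path and the Space repo id are assumptions for illustration, not values taken from this commit.

# Sketch only: folder_path and repo_id below are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()  # picks up the access token configured via `huggingface-cli login`
api.upload_folder(
    folder_path="./flower_space",  # hypothetical local folder with app.py, the .h5 model, requirements.txt
    repo_id="JamesJayamuni/flower_image_classification_5_classes_v1.0",  # hypothetical Space id
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)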
README.md CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Flower Image Classification 5 Classes V1.0
-emoji: 🌖
-colorFrom: red
-colorTo: green
-sdk: gradio
-sdk_version: 4.38.1
+title: flower_image_classification_5_classes_v1.0
 app_file: app.py
-pinned: false
+sdk: gradio
+sdk_version: 4.25.0
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,165 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "44c33030-a3e9-4f51-a5e8-a37de32e54e1",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running on local URL: http://127.0.0.1:7869\n",
+      "\n",
+      "To create a public link, set `share=True` in `launch()`.\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       "<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": []
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "IMPORTANT: You are using gradio version 4.25.0, however version 4.29.0 is available, please upgrade.\n",
+      "--------\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Traceback (most recent call last):\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\gradio\\queueing.py\", line 522, in process_events\n",
+      "    response = await route_utils.call_process_api(\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\gradio\\route_utils.py\", line 260, in call_process_api\n",
+      "    output = await app.get_blocks().process_api(\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\gradio\\blocks.py\", line 1741, in process_api\n",
+      "    result = await self.call_function(\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\gradio\\blocks.py\", line 1296, in call_function\n",
+      "    prediction = await anyio.to_thread.run_sync(\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
+      "    return await get_async_backend().run_sync_in_worker_thread(\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2134, in run_sync_in_worker_thread\n",
+      "    return await future\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 851, in run\n",
+      "    result = context.run(func, *args)\n",
+      "  File \"C:\\Users\\ASUSS\\anaconda3\\envs\\bootcampai\\lib\\site-packages\\gradio\\utils.py\", line 751, in wrapper\n",
+      "    response = f(*args, **kwargs)\n",
+      "  File \"C:\\Users\\ASUSS\\AppData\\Local\\Temp\\ipykernel_3748\\1829143819.py\", line 37, in predict\n",
+      "    image_resized, output_class, confidence_level = preprocess_image(image)\n",
+      "  File \"C:\\Users\\ASUSS\\AppData\\Local\\Temp\\ipykernel_3748\\1829143819.py\", line 19, in preprocess_image\n",
+      "    image_resized = cv2.resize(image, (img_height, img_width))\n",
+      "NameError: name 'img_height' is not defined\n"
+     ]
+    }
+   ],
+   "source": [
+    "import gradio as gr\n",
+    "import cv2\n",
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "from PIL import Image\n",
+    "\n",
+    "# Assuming you have already defined img_height, img_width, and class_names\n",
+    "# img_height, img_width = 180, 180\n",
+    "class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']\n",
+    "\n",
+    "# Load the fine-tuned model (from local)\n",
+    "resnet_model = tf.keras.models.load_model('./flower_image_classification_ResNet50_v1.0.h5')\n",
+    "\n",
+    "def preprocess_image(image):\n",
+    "    # Convert the PIL image to an array\n",
+    "    image = np.array(image)\n",
+    "    \n",
+    "    # Read and resize the image\n",
+    "    image_resized = cv2.resize(image, (img_height, img_width))\n",
+    "    \n",
+    "    # Preprocess the image\n",
+    "    image = np.expand_dims(image_resized, axis=0)\n",
+    "    \n",
+    "    # Predict with the model\n",
+    "    pred = resnet_model.predict(image)\n",
+    "    \n",
+    "    # Get the predicted class label\n",
+    "    predicted_class = np.argmax(pred)\n",
+    "    output_class = class_names[predicted_class]\n",
+    "    \n",
+    "    # Get the confidence level (probability)\n",
+    "    confidence_level = pred[0][predicted_class]\n",
+    "    \n",
+    "    return image_resized, output_class, confidence_level\n",
+    "\n",
+    "def predict(image):\n",
+    "    image_resized, output_class, confidence_level = preprocess_image(image)\n",
+    "    return Image.fromarray(image_resized), output_class, str(confidence_level)\n",
+    "\n",
+    "# Define the Gradio interface\n",
+    "inputs = gr.Image(type=\"pil\", label=\"Upload Image\")\n",
+    "outputs = [\n",
+    "    gr.Image(type=\"pil\", label=\"Resized Image\"),\n",
+    "    gr.Textbox(label=\"Predicted Class\"),\n",
+    "    gr.Textbox(label=\"Confidence Level\")\n",
+    "]\n",
+    "\n",
+    "# Create the Gradio Interface\n",
+    "gr.Interface(\n",
+    "    fn=predict,\n",
+    "    inputs=inputs,\n",
+    "    outputs=outputs,\n",
+    "    title=\"Flower Classification with ResNet50\",\n",
+    "    description=\"Upload an image of a flower to classify it into one of the five categories.\",\n",
+    "    live=True\n",
+    ").launch()\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
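The committed app.py is actually a Jupyter notebook JSON export rather than a plain Python script, and the captured cell output records a NameError: the code resizes with img_height and img_width, but the line defining them ("img_height, img_width = 180, 180") is commented out. Below is a minimal sketch of the same logic as a standalone script, assuming the 180x180 input size suggested by that comment; it is an illustration, not the exact file that was committed.

# Sketch of the Gradio app as a plain script; the 180x180 input size is an assumption
# taken from the commented-out line in the committed notebook.
import gradio as gr
import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

img_height, img_width = 180, 180  # assumed input size; defining these avoids the NameError above
class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

# Load the fine-tuned model from the file committed alongside this script
resnet_model = tf.keras.models.load_model('./flower_image_classification_ResNet50_v1.0.h5')

def preprocess_image(image):
    # Convert the PIL image to an array and resize it to the model's input size
    image = np.array(image)
    image_resized = cv2.resize(image, (img_height, img_width))

    # Add a batch dimension and run a forward pass
    batch = np.expand_dims(image_resized, axis=0)
    pred = resnet_model.predict(batch)

    # Map the highest-scoring index to its label and keep its probability
    predicted_class = np.argmax(pred)
    output_class = class_names[predicted_class]
    confidence_level = pred[0][predicted_class]
    return image_resized, output_class, confidence_level

def predict(image):
    image_resized, output_class, confidence_level = preprocess_image(image)
    return Image.fromarray(image_resized), output_class, str(confidence_level)

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[
        gr.Image(type="pil", label="Resized Image"),
        gr.Textbox(label="Predicted Class"),
        gr.Textbox(label="Confidence Level"),
    ],
    title="Flower Classification with ResNet50",
    description="Upload an image of a flower to classify it into one of the five categories.",
)

if __name__ == "__main__":
    demo.launch()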
flower_image_classification_ResNet50_v1.0.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c56d5c7725041796c23fc419053821b6b7e61ac21c7480b6fc28bbb95f8e92f
+size 547757072
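The .h5 weights (about 548 MB) are stored through Git LFS, so only this pointer file appears in the diff. One way to fetch and load the actual weights without cloning the whole Space is sketched below; the repo_id is an assumption based on the author name and Space title, not confirmed by this commit.

# Sketch only: repo_id is a hypothetical Space id.
from huggingface_hub import hf_hub_download
import tensorflow as tf

weights_path = hf_hub_download(
    repo_id="JamesJayamuni/flower_image_classification_5_classes_v1.0",
    filename="flower_image_classification_ResNet50_v1.0.h5",
    repo_type="space",
)
model = tf.keras.models.load_model(weights_path)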
requirements.txt ADDED
@@ -0,0 +1,6 @@
+# Python 3.10.12
+gradio==4.25.0
+opencv-python==4.10.0
+numpy==1.26.4
+tensorflow==2.16.1
+pillow==10.3.0
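Before running the app locally it can help to confirm the environment matches these pins. A small check using only the standard library, with the package names taken from the file above:

# Compare installed versions against the pins in requirements.txt (standard library only).
from importlib.metadata import version, PackageNotFoundError

pins = {
    "gradio": "4.25.0",
    "opencv-python": "4.10.0",
    "numpy": "1.26.4",
    "tensorflow": "2.16.1",
    "pillow": "10.3.0",
}

for package, expected in pins.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        installed = "not installed"
    status = "OK" if installed == expected else "MISMATCH"
    print(f"{package}: pinned {expected}, installed {installed} [{status}]")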