|
{ |
|
"cells": [ |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 1, |
|
"id": "44c33030-a3e9-4f51-a5e8-a37de32e54e1", |
|
"metadata": { |
|
"scrolled": true |
|
}, |
|
"outputs": [ |
|
{ |
|
"name": "stderr", |
|
"output_type": "stream", |
|
"text": [ |
|
"WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\n" |
|
] |
|
}, |
|
{ |
|
"name": "stdout", |
|
"output_type": "stream", |
|
"text": [ |
|
"Running on local URL: http://127.0.0.1:7869\n", |
|
"\n", |
|
"To create a public link, set `share=True` in `launch()`.\n" |
|
] |
|
}, |
|
{ |
|
"data": { |
|
"text/html": [ |
|
"<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
|
], |
|
"text/plain": [ |
|
"<IPython.core.display.HTML object>" |
|
] |
|
}, |
|
"metadata": {}, |
|
"output_type": "display_data" |
|
}, |
|
{ |
|
"data": { |
|
"text/plain": [] |
|
}, |
|
"execution_count": 1, |
|
"metadata": {}, |
|
"output_type": "execute_result" |
|
}, |
|
{ |
|
"name": "stdout", |
|
"output_type": "stream", |
|
"text": [ |
|
"IMPORTANT: You are using gradio version 4.25.0, however version 4.29.0 is available, please upgrade.\n", |
|
"--------\n" |
|
] |
|
}
|
], |
|
"source": [ |
|
"import gradio as gr\n", |
|
"import cv2\n", |
|
"import numpy as np\n", |
|
"import tensorflow as tf\n", |
|
"from PIL import Image\n", |
|
"\n", |
|
"# Assuming you have already defined img_height, img_width, and class_names\n", |
|
"# img_height, img_width = 180, 180\n", |
|
"class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']\n", |
|
"\n", |
|
"# Load the fine-tuned model (from local)\n", |
|
"resnet_model = tf.keras.models.load_model('./flower_image_classification_ResNet50_v1.0.h5')\n", |
|
"\n", |
|
"def preprocess_image(image):\n", |
|
" # Convert the PIL image to an array\n", |
|
" image = np.array(image)\n", |
|
" \n", |
|
" # Read and resize the image\n", |
|
" image_resized = cv2.resize(image, (img_height, img_width))\n", |
|
" \n", |
|
" # Preprocess the image\n", |
|
" image = np.expand_dims(image_resized, axis=0)\n", |
|
" \n", |
|
" # Predict with the model\n", |
|
" pred = resnet_model.predict(image)\n", |
|
" \n", |
|
" # Get the predicted class label\n", |
|
" predicted_class = np.argmax(pred)\n", |
|
" output_class = class_names[predicted_class]\n", |
|
" \n", |
|
" # Get the confidence level (probability)\n", |
|
" confidence_level = pred[0][predicted_class]\n", |
|
" \n", |
|
" return image_resized, output_class, confidence_level\n", |
|
"\n", |
|
"def predict(image):\n", |
|
" image_resized, output_class, confidence_level = preprocess_image(image)\n", |
|
" return Image.fromarray(image_resized), output_class, str(confidence_level)\n", |
|
"\n", |
|
"# Define the Gradio interface\n", |
|
"inputs = gr.Image(type=\"pil\", label=\"Upload Image\")\n", |
|
"outputs = [\n", |
|
" gr.Image(type=\"pil\", label=\"Resized Image\"),\n", |
|
" gr.Textbox(label=\"Predicted Class\"),\n", |
|
" gr.Textbox(label=\"Confidence Level\")\n", |
|
"]\n", |
|
"\n", |
|
"# Create the Gradio Interface\n", |
|
"gr.Interface(\n", |
|
" fn=predict,\n", |
|
" inputs=inputs,\n", |
|
" outputs=outputs,\n", |
|
" title=\"Flower Classification with ResNet50\",\n", |
|
" description=\"Upload an image of a flower to classify it into one of the five categories.\",\n", |
|
" live=True\n", |
|
").launch()\n" |
|
] |
|
} |
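,

{

"cell_type": "markdown",

"id": "a7f3d2b1-smoke-test-note",

"metadata": {},

"source": [

"Optional sanity check: a minimal sketch that runs the pipeline defined above on a single local image before launching the Gradio UI. It assumes the previous cell has been executed (so `preprocess_image` and `resnet_model` are in scope) and uses a hypothetical file path `sample_flower.jpg`; substitute any test image you have on disk."

]

},

{

"cell_type": "code",

"execution_count": null,

"id": "a7f3d2b1-smoke-test-cell",

"metadata": {},

"outputs": [],

"source": [

"# Minimal smoke test for the pipeline above (assumes the previous cell has been run).\n",

"# The image path below is a hypothetical placeholder, not a file shipped with this notebook.\n",

"from PIL import Image\n",

"\n",

"test_image = Image.open(\"sample_flower.jpg\")  # placeholder path\n",

"resized, label, confidence = preprocess_image(test_image)\n",

"print(f\"Predicted class: {label} (confidence: {float(confidence):.3f})\")"

]

}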
|
], |
|
"metadata": { |
|
"kernelspec": { |
|
"display_name": "Python 3 (ipykernel)", |
|
"language": "python", |
|
"name": "python3" |
|
}, |
|
"language_info": { |
|
"codemirror_mode": { |
|
"name": "ipython", |
|
"version": 3 |
|
}, |
|
"file_extension": ".py", |
|
"mimetype": "text/x-python", |
|
"name": "python", |
|
"nbconvert_exporter": "python", |
|
"pygments_lexer": "ipython3", |
|
"version": "3.10.12" |
|
} |
|
}, |
|
"nbformat": 4, |
|
"nbformat_minor": 5 |
|
} |
|
|