krishnamishra8848 committed
Commit 5936e3e · verified · 1 Parent(s): 0e96f81

Create app.py

Files changed (1)
app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
import streamlit as st
import numpy as np
import cv2  # imported but not used below
from tensorflow.keras.models import load_model
from PIL import Image
import requests

# Download the trained model from Hugging Face and cache it across reruns
@st.cache(allow_output_mutation=True)  # note: newer Streamlit versions replace this with st.cache_resource
def load_model_from_hf():
    url = "https://huggingface.co/krishnamishra8848/Devanagari_Character_Recognition/resolve/main/saved_model.keras"
    response = requests.get(url)
    with open("saved_model.keras", "wb") as f:
        f.write(response.content)
    model = load_model("saved_model.keras")
    return model

model = load_model_from_hf()

# Label mapping: 36 consonants followed by the digits ०–९
label_mapping = [
    "क", "ख", "ग", "घ", "ङ", "च", "छ", "ज", "झ", "ञ",
    "ट", "ठ", "ड", "ढ", "ण", "त", "थ", "द", "ध", "न",
    "प", "फ", "ब", "भ", "म", "य", "र", "ल", "व", "श",
    "ष", "स", "ह", "क्ष", "त्र", "ज्ञ", "०", "१", "२", "३",
    "४", "५", "६", "७", "८", "९"
]

# Streamlit app interface
st.title("Devanagari Character Recognition")
st.write("Upload an image of a Devanagari character or digit, and the model will predict it.")

# File uploader
uploaded_file = st.file_uploader("Choose a file", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    # Load and preprocess the image
    img = Image.open(uploaded_file)
    img = img.convert("L")  # Convert to grayscale
    img_resized = img.resize((32, 32))  # Resize to 32x32
    img_array = np.array(img_resized).astype("float32") / 255.0  # Normalize pixel values to [0, 1]
    img_input = img_array.reshape(1, 32, 32, 1)  # Reshape to (1, 32, 32, 1) for the model

    # Display uploaded image
    st.image(img, caption="Uploaded Image", use_column_width=True)

    # Make prediction
    prediction = model.predict(img_input)
    predicted_class_index = np.argmax(prediction)
    predicted_character = label_mapping[predicted_class_index]

    # Display result
    st.success(f"Predicted Character: {predicted_character}")
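
To try the app locally, the file can be run with "streamlit run app.py" after installing the imported packages (streamlit, tensorflow, pillow, requests, numpy, and opencv-python for the cv2 import). Below is a minimal standalone sketch of the same download-and-predict path outside Streamlit; it assumes the model URL from app.py and a hypothetical local image file named test_char.png, and prints only the class index rather than the mapped character.

# Minimal standalone sketch (not part of the commit): same preprocessing and prediction as app.py.
# Assumes the model URL above and a hypothetical test image "test_char.png" in the working directory.
import numpy as np
import requests
from PIL import Image
from tensorflow.keras.models import load_model

MODEL_URL = "https://huggingface.co/krishnamishra8848/Devanagari_Character_Recognition/resolve/main/saved_model.keras"

# Download the model file once, then load it with Keras
with open("saved_model.keras", "wb") as f:
    f.write(requests.get(MODEL_URL).content)
model = load_model("saved_model.keras")

# Preprocess exactly as the app does: grayscale, 32x32, scaled to [0, 1], shaped (1, 32, 32, 1)
img = Image.open("test_char.png").convert("L").resize((32, 32))
x = np.array(img).astype("float32").reshape(1, 32, 32, 1) / 255.0

# Predict and report the most likely class index (map it through label_mapping in app.py)
prediction = model.predict(x)
print("Predicted class index:", int(np.argmax(prediction)))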