Update src/streamlit_app.py
src/streamlit_app.py  +34 -11
@@ -1,6 +1,8 @@
 import streamlit as st
 from PIL import Image
 import requests
+import easyocr
+from io import BytesIO

 st.set_page_config(page_title="WikiExplorer AR", layout="centered")
 st.title("📷 WikiExplorer AR (Streamlit Edition)")
@@ -19,13 +21,38 @@ lang = st.selectbox(

 lang_code = lang[1]

-# ---
-st.
+# --- Load OCR Model ---
+@st.cache_resource
+def load_ocr_model():
+    return easyocr.Reader(['en'])  # You can add 'hi', 'te', 'ta' for multilingual OCR
+
+ocr_reader = load_ocr_model()
+
+# --- Place name input (optional if image is provided) ---
+st.markdown("**📍 Enter a place or person name to learn more (or capture it):**")
 place_name = st.text_input("🏛️ For example: Charminar, Taj Mahal, Shah Jahan")

 # --- Camera input ---
 img_file_buffer = st.camera_input("📸 Take a picture (optional)")

+# --- OCR from camera image ---
+if img_file_buffer is not None:
+    st.markdown("### 📷 Captured Image")
+    st.image(img_file_buffer, caption="Uploaded via camera", use_column_width=True)
+
+    image_bytes = BytesIO(img_file_buffer.getvalue())
+    result = ocr_reader.readtext(image_bytes)
+
+    if result:
+        detected_texts = [item[1] for item in result if item[1].strip()]
+        if detected_texts:
+            place_name = detected_texts[0]  # Top detected phrase
+            st.success(f"🧠 OCR detected: **{place_name}**")
+        else:
+            st.warning("OCR ran but could not extract any meaningful text.")
+    else:
+        st.warning("Could not detect text in the image.")
+
 # --- Translation helpers ---
 def translate_text(text, target_lang):
     try:
@@ -117,17 +144,13 @@ if place_name.strip():
     else:
         st.warning("No images found on Wikimedia Commons.")

-# --- Show captured image ---
-if img_file_buffer is not None:
-    st.markdown("### 📷 Captured Image")
-    st.image(img_file_buffer, caption="Uploaded via camera", use_column_width=True)
-
 # --- Footer ---
 st.markdown("""
 ---
 - 🔍 Supports text search and camera input.
--
--
+- 🧠 OCR auto-detects place name from image.
+- 🌐 Wikipedia multilingual summary with fallback + sentence-level translation.
+- 🖼️ Commons image gallery integration.
 - ✅ Ready for Hugging Face deployment.
-- 🛠️
-""")
+- 🛠️ Streamlit only – no backend needed.
+""")
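Note on the new easyocr dependency: easyocr is not bundled with Streamlit, so the Space will only build if it is declared alongside streamlit, Pillow and requests in the Space's requirements file, and it pulls in PyTorch, which makes the first build and the first Reader() call noticeably slow (the detection and recognition weights are downloaded on first use). That is what the @st.cache_resource wrapper in the diff addresses: the Reader is created once per process instead of on every rerun. A small variation of the commit's helper (a sketch, not part of the commit; the spinner text and the gpu=False flag are illustrative) makes that first-run cost visible to the user:

# Sketch: same cached loader as in the diff, but with a visible spinner while
# easyocr downloads/initialises its models; free Spaces are typically CPU-only.
@st.cache_resource(show_spinner="Loading OCR model (first run downloads weights)...")
def load_ocr_model():
    return easyocr.Reader(['en'], gpu=False)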
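Note on ocr_reader.readtext(image_bytes): as far as I know, easyocr's readtext() accepts a file path or URL string, raw image bytes, or a numpy array, but not a file-like object, so wrapping the camera buffer in BytesIO may raise a ValueError about an invalid input type. If that happens, passing the raw bytes from the camera widget directly is the simplest change; a sketch, using the same variable names as the diff:

# Sketch: hand easyocr the raw encoded JPEG/PNG bytes instead of a BytesIO wrapper.
raw_bytes = img_file_buffer.getvalue()   # st.camera_input returns an UploadedFile-like object
result = ocr_reader.readtext(raw_bytes)  # easyocr decodes image bytes itself

Unrelated but in the same block: newer Streamlit releases mark use_column_width on st.image as deprecated in favour of use_container_width; the old keyword still renders but may show a deprecation warning.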
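Note on the "# You can add 'hi', 'te', 'ta' for multilingual OCR" comment: to my understanding, easyocr only allows languages that share a recognition model within a single Reader (each Indic script can be paired with English, but Devanagari, Telugu and Tamil cannot all be combined in one Reader), so covering several scripts would mean one cached Reader per script rather than one Reader with all four codes. A sketch of how that could look, reusing the commit's caching pattern (the language pairings shown are illustrative):

# Sketch: one cached easyocr Reader per script family; easyocr cannot (to my
# knowledge) mix Devanagari, Telugu and Tamil recognition models in one Reader.
@st.cache_resource
def load_ocr_reader(langs=('en',)):
    return easyocr.Reader(list(langs), gpu=False)

reader_latin = load_ocr_reader()              # English / Latin script
reader_hi = load_ocr_reader(('hi', 'en'))     # Hindi + English
reader_te = load_ocr_reader(('te', 'en'))     # Telugu + English
reader_ta = load_ocr_reader(('ta', 'en'))     # Tamil + English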