import streamlit as st
import sparknlp
import os
import pandas as pd

from sparknlp.base import *
from sparknlp.annotator import *
from pyspark.ml import Pipeline
from sparknlp.pretrained import PretrainedPipeline
from streamlit_tags import st_tags
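
# Page configuration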
st.set_page_config(
    layout="wide",
    initial_sidebar_state="auto"
)
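
# Custom CSS for the title and section styling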
st.markdown("""
    <style>
        .main-title {
            font-size: 36px;
            color: #4A90E2;
            font-weight: bold;
            text-align: center;
        }
        .section {
            background-color: #f9f9f9;
            padding: 10px;
            border-radius: 10px;
            margin-top: 10px;
        }
        .section p, .section ul {
            color: #666666;
        }
    </style>
""", unsafe_allow_html=True)
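
# Start Spark NLP once and cache the session across Streamlit reruns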
@st.cache_resource
def init_spark():
    return sparknlp.start()
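
# Build the zero-shot pipeline: ImageAssembler prepares the image, then the
# pretrained CLIP model scores it against the candidate labels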
@st.cache_resource
def create_pipeline(model, labels):
    image_assembler = ImageAssembler() \
        .setInputCol("image") \
        .setOutputCol("image_assembler")

    imageClassifier = CLIPForZeroShotClassification \
        .pretrained() \
        .setInputCols(["image_assembler"]) \
        .setOutputCol("label") \
        .setCandidateLabels(labels)

    pipeline = Pipeline(stages=[
        image_assembler,
        imageClassifier,
    ])
    return pipeline
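
# Annotate a single image with a LightPipeline and return the predicted label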
def fit_data(pipeline, data):
    # Every stage is a pretrained transformer, so fit() only assembles the
    # PipelineModel; no training data is consumed here.
    model = pipeline.fit(data)
    light_pipeline = LightPipeline(model)
    # fullAnnotateImage accepts an image path and returns one result per image
    annotations_result = light_pipeline.fullAnnotateImage(data)
    return annotations_result[0]['label'][0].result
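
# Save an uploaded image next to the bundled examples so the pipeline can read it from disk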
def save_uploadedfile(uploadedfile):
    filepath = os.path.join(IMAGE_FILE_PATH, uploadedfile.name)
    with open(filepath, "wb") as f:
        if hasattr(uploadedfile, 'getbuffer'):
            f.write(uploadedfile.getbuffer())
        else:
            f.write(uploadedfile.read())
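
# Sidebar: model selection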
model = st.sidebar.selectbox(
    "Choose the pretrained model",
    ["CLIPForZeroShotClassification"],
    help="For more info about the models visit: https://sparknlp.org/models"
)
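
# Page title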
st.markdown('<div class="main-title">CLIPForZeroShotClassification</div>', unsafe_allow_html=True)
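
# Reference notebook link in the sidebar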
link = """
<a href="https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/image/CLIPForZeroShotClassification.ipynb">
    <img src="https://colab.research.google.com/assets/colab-badge.svg" style="zoom: 1.3" alt="Open In Colab"/>
</a>
"""
st.sidebar.markdown('Reference notebook:')
st.sidebar.markdown(link, unsafe_allow_html=True)
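
# Folder with the bundled example images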
IMAGE_FILE_PATH = "/content/sparknlp CLIPForZeroShotClassification/input"
image_files = sorted(
    file for file in os.listdir(IMAGE_FILE_PATH)
    if file.split('.')[-1].lower() in ('png', 'jpg', 'jpeg')
)
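
# Let the user pick an example image or upload their own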
img_options = st.selectbox("Select an image", image_files)
uploadedfile = st.file_uploader("Try it for yourself!")

if uploadedfile:
    file_details = {"FileName": uploadedfile.name, "FileType": uploadedfile.type}
    save_uploadedfile(uploadedfile)
    selected_image = f"{IMAGE_FILE_PATH}/{uploadedfile.name}"
elif img_options:
    selected_image = f"{IMAGE_FILE_PATH}/{img_options}"
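
# Default candidate labels; the user can edit these below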
candidateLabels = [
    "a photo of a bird",
    "a photo of a cat",
    "a photo of a dog",
    "a photo of a hen",
    "a photo of a hippo",
    "a photo of a room",
    "a photo of a tractor",
    "a photo of an ostrich",
    "a photo of an ox"]
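
# Editable label tags for zero-shot classification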
labels = st_tags(
    label='Select labels',
    text='Press enter to add more',
    value=candidateLabels,
    maxtags=-1)

st.subheader('Classified Image')
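
# Show the selected image at an adjustable size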
image_size = st.slider('Image Size', 400, 1000, value=400, step=100)
st.image(selected_image, width=image_size)

st.subheader('Classification')
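
# Start Spark, build the pipeline with the chosen labels, and classify the image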
init_spark()
pipeline = create_pipeline(model, labels)
output = fit_data(pipeline, selected_image)

st.markdown(f'This image has been classified as: **{output}**')