# Author: Daniel Cerda Escobar — commit "Refresh front" (6e0a508)
# (The lines above/below this header were Hugging Face Spaces file-viewer
#  chrome — raw / history / blame links, file size 5.24 kB — not source code.)
import sahi.utils.cv
import sahi.utils.file
import streamlit as st
from PIL import Image
from sahi import AutoDetectionModel
from streamlit_image_comparison import image_comparison
from ultralyticsplus.hf_utils import download_from_hub

from utils import sahi_yolov8m_inference
# Example P&ID diagrams hosted on the project CDN: local filename -> source URL.
# NOTE: the first three CDN filenames use hyphens while the local names use
# underscores, so the mapping cannot be derived mechanically from the keys.
IMAGE_TO_URL = {
    'factory_pid.png' : 'https://d1afc1j4569hs1.cloudfront.net/factory-pid.png',
    'plant_pid.png' : 'https://d1afc1j4569hs1.cloudfront.net/plant-pid.png',
    'processing_pid.png' : 'https://d1afc1j4569hs1.cloudfront.net/processing-pid.png',
    'prediction_visual.png' : 'https://d1afc1j4569hs1.cloudfront.net/prediction_visual.png'
}
# Page chrome: wide layout with the sidebar expanded on first load.
# set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="P&ID Object Detection",
    layout="wide",
    initial_sidebar_state="expanded"
)

# App header.
st.title('P&ID Object Detection')
st.subheader(' Identify valves and pumps with deep learning model ', divider='rainbow')
st.caption('Developed by Deep Drawings Co.')
@st.cache_resource(show_spinner=False)
def get_model(postprocess_match_threshold):
    """Download the YOLOv8 P&ID weights and wrap them in a SAHI detection model.

    Cached as a Streamlit resource, so the weights are fetched and loaded at
    most once per distinct ``postprocess_match_threshold`` value.

    Args:
        postprocess_match_threshold: confidence threshold applied to detections.

    Returns:
        An ``AutoDetectionModel`` configured for CPU inference.
    """
    weights_path = download_from_hub('DanielCerda/pid_yolov8')
    return AutoDetectionModel.from_pretrained(
        model_type='yolov8',
        model_path=weights_path,
        confidence_threshold=postprocess_match_threshold,
        device="cpu",
    )
@st.cache_data(show_spinner=False)
def download_comparison_images():
    """Fetch the default before/after comparison images into the working directory."""
    assets = (
        ('https://d1afc1j4569hs1.cloudfront.net/plant-pid.png', 'plant_pid.png'),
        ('https://d1afc1j4569hs1.cloudfront.net/prediction_visual.png', 'prediction_visual.png'),
    )
    for url, destination in assets:
        sahi.utils.file.download_from_url(url, destination)
download_comparison_images()

# Seed the comparison widget with the bundled example pair until the user
# runs a prediction of their own. Both images are upscaled to the same
# canvas so the before/after slider lines up.
for _key, _path in (("output_1", 'plant_pid.png'), ("output_2", 'prediction_visual.png')):
    if _key not in st.session_state:
        st.session_state[_key] = Image.open(_path).resize((4960, 3508))
# First row: collapsible usage instructions in the left column only
# (col2/col3 intentionally left empty for spacing).
col1, col2, col3 = st.columns(3, gap='medium')

with col1:
    with st.expander('How to use it'):
        st.markdown(
            '''
1) Upload or select any example diagram 👆🏻
2) Set model parameters 📈
3) Press to perform inference 🚀
4) Visualize model predictions 🔎
'''
        )

st.write('##')
# Second row, left column: choose the input image (upload or example).
col1, col2, col3 = st.columns(3, gap='large')

with col1:
    st.markdown('##### Set Input Image')

    # Input via file upload.
    image_file = st.file_uploader(
        'Upload your P&ID', type = ['jpg','jpeg','png']
    )

    # Input via bundled examples; each filename is displayed as a short
    # letter id (A/B/C) in the radio widget.
    _example_labels = {
        'factory_pid.png' : 'A',
        'plant_pid.png' : 'B',
        'processing_pid.png' : 'C',
    }
    radio = st.radio(
        'Select from the following examples',
        options = list(_example_labels),
        format_func = _example_labels.__getitem__,
    )
with col2:
    # Preview the image that will be fed to the model: the uploaded file if
    # present, otherwise the currently selected example fetched from its URL.
    if image_file is not None:
        image = Image.open(image_file)
    else:
        # FIX: `sahi.utils.cv` must be imported explicitly (see imports at the
        # top of the file); the original relied on `import sahi.utils.file`
        # happening to expose the `cv` submodule, which risks AttributeError.
        image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[radio])
    st.markdown('##### Preview')
    with st.container(border = True):
        st.image(image, use_column_width = True)
with col3:
    # SAHI slicing and postprocess parameters.
    st.markdown('##### Set model parameters')

    # Number of tiles the page is cut into (perfect squares -> sqrt(n) grid).
    # FIX: options are ints, not strings — the original string options made
    # the downstream `slice_number ** 0.5` raise TypeError at prediction time.
    slice_number = st.select_slider(
        'Slices per Image',
        options = [1, 4, 16, 64],
    )

    # Fractional overlap between adjacent tiles (both axes).
    overlap_ratio = st.slider(
        label = 'Slicing Overlap Ratio',
        min_value=0.0,
        max_value=0.5,
        value=0.1,
        step=0.1
    )

    # Minimum confidence for a detection to be kept.
    postprocess_match_threshold = st.slider(
        label = 'Confidence Threshold',
        min_value = 0.0,
        max_value = 1.0,
        value = 0.8,
        step = 0.1
    )

st.write('##')
# Centered prediction button.
col1, col2, col3 = st.columns([3, 1, 3])

with col2:
    submit = st.button("🚀 Perform Prediction")

if submit:
    # Load (or reuse the cached) detection model.
    with st.spinner(text="Downloading model weights ... "):
        detection_model = get_model(postprocess_match_threshold)
        # FIX: coerce the widget value to int before arithmetic — the select
        # slider originally yielded strings, and `str ** 0.5` raises
        # TypeError. Tiles form a sqrt(n) x sqrt(n) grid over a 4960-px page.
        n_slices = int(slice_number)
        slice_size = int(4960 / (n_slices ** 0.5))
        image_size = 4960
    # Run sliced inference and stash the before/after pair for the comparison
    # widget rendered below.
    with st.spinner(text="Performing prediction ... "):
        output = sahi_yolov8m_inference(
            image,
            detection_model,
            image_size=image_size,
            slice_height=slice_size,
            slice_width=slice_size,
            overlap_height_ratio=overlap_ratio,
            overlap_width_ratio=overlap_ratio,
        )
        st.session_state["output_1"] = image
        st.session_state["output_2"] = output
st.write('##')

# Final row: interactive slider comparing the raw diagram with the latest
# prediction (or the bundled defaults before any inference has run).
col1, col2, col3 = st.columns([3, 1, 1], gap='small')

with col1:
    # FIX: plain string (the original used an f-string with no placeholders);
    # also dropped the unused `static_component` local.
    st.markdown("#### Object Detection Result")
    with st.container(border = True):
        image_comparison(
            img1=st.session_state["output_1"],
            img2=st.session_state["output_2"],
            label1='Raw Diagram',
            label2='Inference Prediction',
            width=768,
            starting_position=50,
            show_labels=True,
            make_responsive=True,
            in_memory=True,
        )