import time
import gradio as gr
import numpy as np
from pathlib import Path
from anomalib.deploy import OpenVINOInferencer
from openvino.runtime import Core
# Initialize the OpenVINO Runtime Core
core = Core()
# Get the available devices
devices = core.available_devices
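
# Cached state: the compiled inferencer and the last selected category/device,
# used to avoid recompiling when the selection has not changed.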
inferencer = None
prev_category_selection = None
prev_device_selection = None
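
# Gradio examples: [image path, output type, category, device]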
example_list = [
    ["bottle/examples/000.png", "anomaly_map", "bottle", "CPU"],
    ["pill/examples/014.png", "heat_map", "pill", "CPU"],
    ["zipper/examples/001.png", "pred_mask", "zipper", "CPU"],
    ["grid/examples/005.png", "segmentations", "grid", "CPU"],
    ["cubes/examples/005.jpg", "heat_map", "cubes", "CPU"],
]


def OV_compilemodel(category_choice, device):
    """Load the exported OpenVINO model for the selected category and compile it for the selected device."""
    global inferencer
    # Locate the exported OpenVINO IR model and its metadata
    openvino_model_path = Path.cwd() / category_choice / "run" / "weights" / "openvino" / "model.bin"
    metadata_path = Path.cwd() / category_choice / "run" / "weights" / "openvino" / "metadata.json"
    inferencer = OpenVINOInferencer(
        path=openvino_model_path,  # Path to the OpenVINO IR model.
        metadata=metadata_path,  # Path to the metadata file.
        device=device,  # Device selected in the UI (CPU, GPU, ...).
        # Hint FP16 precision on non-CPU devices such as GPU.
        config={"INFERENCE_PRECISION_HINT": "f16"} if device != "CPU" else {},
    )
    return inferencer


def OV_inference(input_img, operation, category_choice, device):
    """Run inference on the input image and return the requested visualization, latency and confidence."""
    start_time = time.time()
    predictions = inferencer.predict(image=input_img)
    stop_time = time.time()
    inference_time = stop_time - start_time
    confidence = predictions.pred_score
    # Select the requested visualization from the prediction results
    if operation == "original":
        output_img1 = predictions.image
    elif operation == "anomaly_map":
        output_img1 = predictions.anomaly_map
    elif operation == "heat_map":
        output_img1 = predictions.heat_map
    elif operation == "pred_mask":
        output_img1 = predictions.pred_mask
    elif operation == "segmentations":
        output_img1 = predictions.segmentations
    else:
        output_img1 = predictions.image
    # Return the visualization, the inference time in milliseconds, and the confidence as a percentage
    return output_img1, round(inference_time * 1000), round(confidence * 100, 2)


# Compile the model (if needed) and run inference
def OV_compile_run_model(category_choice, device_choice, image, output_choice):
    # If a different category or device is selected, compile/re-compile the model
    global prev_category_selection
    global prev_device_selection
    if device_choice != prev_device_selection or category_choice != prev_category_selection:
        OV_compilemodel(category_choice, device_choice)
        prev_category_selection = category_choice
        prev_device_selection = device_choice
    # Run the model
    print("Running model")
    output_img, output_time, output_confidence = OV_inference(image, output_choice, category_choice, device_choice)
    return output_img, output_time, output_confidence
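

# Build the Gradio demo UI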
with gr.Blocks() as demo:
    gr.Markdown(
        """