import gradio as gr
import numpy as np
import json
import joblib
import tensorflow as tf
import pandas as pd
from joblib import load
from tensorflow.keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
import os
import sklearn

# Display library versions
print(f"Gradio version: {gr.__version__}")
print(f"NumPy version: {np.__version__}")
print(f"Scikit-learn version: {sklearn.__version__}")
print(f"Joblib version: {joblib.__version__}")
print(f"TensorFlow version: {tf.__version__}")
print(f"Pandas version: {pd.__version__}")

# Directory paths for the saved models
script_dir = os.path.dirname(os.path.abspath(__file__))
scaler_path = os.path.join(script_dir, 'toolkit', 'scaler_X.json')
rf_model_path = os.path.join(script_dir, 'toolkit', 'rf_model.joblib')
mlp_model_path = os.path.join(script_dir, 'toolkit', 'mlp_model.keras')
meta_model_path = os.path.join(script_dir, 'toolkit', 'meta_model.joblib')
image_path = os.path.join(script_dir, 'toolkit', 'car.png')

# Load the scaler and models
try:
    # Load the scaler and rebuild it from its saved parameters
    with open(scaler_path, 'r') as f:
        scaler_params = json.load(f)
    scaler_X = MinMaxScaler()
    scaler_X.scale_ = np.array(scaler_params["scale_"])
    scaler_X.min_ = np.array(scaler_params["min_"])
    scaler_X.data_min_ = np.array(scaler_params["data_min_"])
    scaler_X.data_max_ = np.array(scaler_params["data_max_"])
    scaler_X.data_range_ = np.array(scaler_params["data_range_"])
    scaler_X.n_features_in_ = scaler_params["n_features_in_"]
    scaler_X.feature_names_in_ = np.array(scaler_params["feature_names_in_"])

    # Load the models
    loaded_rf_model = load(rf_model_path)
    print("Random Forest model loaded successfully.")
    loaded_mlp_model = load_model(mlp_model_path)
    print("MLP model loaded successfully.")
    loaded_meta_model = load(meta_model_path)
    print("Meta model loaded successfully.")
except Exception as e:
    print(f"Error loading models or scaler: {e}")


def predict_new_values(new_input_data):
    try:
        # Ensure the new input data is in the correct format
        print(f"Raw Input Data: {new_input_data}")
        new_input_data = np.array(new_input_data).reshape(1, -1)

        # Scale the new input data
        new_input_scaled = scaler_X.transform(new_input_data)
        print(f"Scaled Input Data: {new_input_scaled}")

        # Make predictions with both base models
        mlp_predictions_new = loaded_mlp_model.predict(new_input_scaled)
        rf_predictions_new = loaded_rf_model.predict(new_input_scaled)

        # Combine the predictions
        combined_features_new = np.concatenate([mlp_predictions_new, rf_predictions_new], axis=1)
        print(f"Combined Features: {combined_features_new}")

        # Use the loaded meta model to make predictions on the new data
        loaded_meta_predictions_new = loaded_meta_model.predict(combined_features_new)
        print(f"Meta Model Predictions: {loaded_meta_predictions_new}")

        return loaded_meta_predictions_new[0]
    except Exception as e:
        print(f"Error in prediction: {e}")
        return ["Error", "Error", "Error", "Error", "Error", "Error"]


def gradio_interface(velocity, temperature, precipitation, humidity):
    try:
        input_data = [velocity, temperature, precipitation, humidity]
        print(f"Input Data: {input_data}")
        predictions = predict_new_values(input_data)
        print(f"Predictions: {predictions}")
        # Format each of the six predicted contamination levels as a percentage
        return [
            f"{predictions[i] * 100:.2f}%" if predictions[i] != "Error" else "Error"
            for i in range(6)
        ]
    except Exception as e:
        print(f"Error in Gradio interface: {e}")
        return ["Error", "Error", "Error", "Error", "Error", "Error"]

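# The loading block at the top of this script assumes 'toolkit/scaler_X.json'
# stores the fitted MinMaxScaler attributes as plain JSON lists and scalars.
# The export step is not part of this script; the helper below is only a
# sketch of how such a file could be produced from a fitted scaler (the
# function name and its use here are assumptions, not the original pipeline).
def export_scaler_params(scaler, path):
    """Illustrative only: dump the attributes that the loader above reads back."""
    params = {
        "scale_": scaler.scale_.tolist(),
        "min_": scaler.min_.tolist(),
        "data_min_": scaler.data_min_.tolist(),
        "data_max_": scaler.data_max_.tolist(),
        "data_range_": scaler.data_range_.tolist(),
        "n_features_in_": int(scaler.n_features_in_),
        "feature_names_in_": list(scaler.feature_names_in_),
    }
    with open(path, 'w') as f:
        json.dump(params, f)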

inputs = [
    gr.Slider(minimum=0, maximum=100, value=50, step=0.05, label="Velocity (mph)"),
    gr.Slider(minimum=-2, maximum=30, value=0, step=0.5, label="Temperature (°C)"),
    gr.Slider(minimum=0, maximum=1, value=0, step=0.01, label="Precipitation (inch)"),
    gr.Slider(minimum=0, maximum=100, value=50, label="Humidity (%)")
]

outputs = [
    gr.Textbox(label="Front Left"),
    gr.Textbox(label="Front Right"),
    gr.Textbox(label="Left"),
    gr.Textbox(label="Right"),
    gr.Textbox(label="Roof"),
    gr.Textbox(label="Rear")
]

with gr.Blocks() as demo:
    gr.Markdown("# Environmental Factor-Based Contamination Level Prediction")
    gr.Markdown(
        "This application predicts the contamination levels on different parts of a car's "
        "LiDAR system based on environmental factors such as velocity, temperature, "
        "precipitation, and humidity."
    )

    # Layout with two columns
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Input Parameters")
            for inp in inputs:
                inp.render()

            # Centered image display
            with gr.Row():
                with gr.Column(scale=1, min_width=0, elem_id="center-column"):
                    gr.Image(image_path)  # Ensure the image is centered

            gr.Button(value="Submit", variant="primary").click(
                fn=gradio_interface, inputs=inputs, outputs=outputs
            )
            # Reset the six output textboxes when Clear is clicked
            gr.Button(value="Clear").click(fn=lambda: [""] * 6, outputs=outputs)

        with gr.Column():
            gr.Markdown("### Output Predictions ± 7.1%")
            for out in outputs:
                out.render()

demo.launch()