import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error


def get_plots(min_alpha, max_alpha):
    clf = Ridge()

    # Synthetic regression problem with known ground-truth coefficients w
    X, y, w = make_regression(
        n_samples=10, n_features=10, coef=True, random_state=1, bias=3.5
    )

    coefs = []
    errors = []
    alphas = np.logspace(min_alpha, max_alpha, 200)

    # Train the model with different regularisation strengths
    for a in alphas:
        clf.set_params(alpha=a)
        clf.fit(X, y)
        coefs.append(clf.coef_)
        errors.append(mean_squared_error(clf.coef_, w))

    # Display results
    fig, ax = plt.subplots(1, 2, figsize=(20, 6))

    ax[0].plot(alphas, coefs)
    ax[0].set_xscale("log")
    ax[0].set_xlabel("alpha", fontsize=16)
    ax[0].set_ylabel("weights", fontsize=16)
    ax[0].set_title(
        "Ridge coefficients as a function of the regularization", fontsize=20
    )

    ax[1].plot(alphas, errors)
    ax[1].set_xscale("log")
    ax[1].set_xlabel("alpha", fontsize=16)
    ax[1].set_ylabel("error", fontsize=16)
    ax[1].set_title(
        "Coefficient error as a function of the regularization", fontsize=20
    )

    fig.tight_layout()

    plotted_alphas_text = (
        f"**Plotted alphas between 10^({min_alpha}) and 10^({max_alpha})**"
    )

    return fig, plotted_alphas_text


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown(
                "Choose the range of alpha values to plot."
                + " The values you input for alpha are the exponents of 10,"
                + " so a value of -6 means 10^(-6)."
            )
            plotted_alphas = gr.Markdown()
        with gr.Column(scale=3):
            with gr.Row():
                min_alpha = gr.Number(
                    step=1,
                    value=-6,
                    label="Minimum Alpha Exponent",
                )
                max_alpha = gr.Number(
                    step=1,
                    value=6,
                    label="Maximum Alpha Exponent",
                )

    plots = gr.Plot()

    # Re-render the plots whenever either alpha bound changes
    min_alpha.change(
        get_plots,
        [min_alpha, max_alpha],
        [plots, plotted_alphas],
        queue=False,
    )
    max_alpha.change(
        get_plots,
        [min_alpha, max_alpha],
        [plots, plotted_alphas],
        queue=False,
    )

    # Draw the initial plots when the demo first loads
    demo.load(
        get_plots,
        [min_alpha, max_alpha],
        [plots, plotted_alphas],
        queue=False,
    )

if __name__ == "__main__":
    demo.launch()