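"""Gradio demo: regularization path of L1-penalized logistic regression on the Iris dataset.

Adapted from the scikit-learn example
"Regularization path of L1- Logistic Regression".
"""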
import numpy as np
import matplotlib

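# Use a non-interactive backend; figures are rendered by Gradio, not a GUI window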
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import gradio as gr
from sklearn import datasets
from sklearn import linear_model
from sklearn.svm import l1_min_c


def train_it(solver, intercept_scaling, tol, max_iter, warm_start):
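    """Fit L1-penalized logistic regression over a grid of C values and plot the coefficient paths."""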
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

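    # Keep only classes 0 and 1 so the problem is binary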
    X = X[y != 2]
    y = y[y != 2]

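    # Normalize X to speed up convergence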
    X /= X.max()

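    # l1_min_c gives the smallest C for which the L1-penalized model is not empty;
    # sweep 16 values of C spanning 7 orders of magnitude above that minimum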
    cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)

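    # With warm_start enabled, each fit starts from the previous solution,
    # which speeds up computing the whole path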
    clf = linear_model.LogisticRegression(
        penalty="l1",
        solver=solver,
        tol=tol,
        max_iter=int(max_iter),
        warm_start=warm_start,
        intercept_scaling=intercept_scaling,
    )

    coefs_ = []
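    # Refit for each C in the grid and record the resulting coefficients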
    for c in cs:
        clf.set_params(C=c)
        clf.fit(X, y)
        coefs_.append(clf.coef_.ravel().copy())

    coefs_ = np.array(coefs_)

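    # Plot each coefficient's trajectory against log10(C)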
    fig, ax = plt.subplots()
    ax.plot(np.log10(cs), coefs_, marker="o")
    ax.set_xlabel("log(C)")
    ax.set_ylabel("Coefficients")
    ax.set_title("Logistic Regression Path")
    ax.axis("tight")

    # Return the Figure so gr.Plot can render it (plt.show() is a no-op with the Agg backend)
    return fig


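# Gradio UI: hyperparameter controls, a Train button, and a plot of the coefficient paths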
with gr.Blocks() as demo:
    gr.Markdown("# Regularization path of L1- Logistic Regression")
    gr.Markdown(
        """
    This interactive demo is based on the [Regularization path of L1- Logistic Regression] (https://scikit-learn.org/stable/auto_examples/linear_model/plot_logistic_path.html).This demonstrates how to perform l1-penalized logistic regression on a binary classification problem derived from the Iris dataset. The regularization path plots the progression of the coefficients from exactly 0 to non-zero values as the regularization becomes progressively looser.
                """
    )

    with gr.Row():
        with gr.Column():
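            # liblinear and saga are the LogisticRegression solvers that support the l1 penalty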
            solver = gr.Dropdown(
                ["liblinear", "saga"], label="Solver", value="liblinear"
            )
            warm_start = gr.Checkbox(value=True, label="Warm Start")
        with gr.Column():
            intercept_scaling = gr.Slider(
                value=10000.0,
                minimum=0,
                maximum=100000,
                step=0.1,
                label="Intercept Scaling",
            )
            tol = gr.Slider(
                value=1e-6, minimum=0, maximum=1, step=1e-6, label="Tolerance"
            )
            max_iter = gr.Slider(
                value=1e6,
                minimum=0,
                maximum=1000000,
                step=1,
                label="Maximum Iterations",
            )
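    # Clicking Train refits the models across the C grid and updates the plot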
    train_button = gr.Button("Train")
    plot = gr.Plot()

    train_button.click(
        train_it,
        inputs=[solver, intercept_scaling, tol, max_iter, warm_start],
        outputs=plot,
    )


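# Start the Gradio server for the demo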
demo.launch()