vumichien committed
Commit d40d4af
Parent: 9e1c90b

Create app.py

Files changed (1): app.py (+159, -0)
app.py ADDED

import gradio as gr
import numpy as np
import matplotlib.pyplot as plt

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss

from scipy.special import expit

theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
)
model_card = """
## Description

The **Out-of-bag (OOB)** method is a useful technique for estimating the optimal number of boosting iterations.
It is similar to cross-validation, but it requires no repeated model fitting and can be computed on the fly.
**OOB** estimates are only available for stochastic gradient boosting (i.e., subsample < 1.0): they are computed from the improvement in loss on the examples that are not included in the bootstrap sample (the out-of-bag examples).
The **OOB** estimator is a conservative estimate of the true test loss, but it remains a reasonable approximation for a small number of trees.
This demo plots the cumulative sum of the negative OOB improvements as a function of the boosting iteration, alongside the test and cross-validated losses.

## Dataset

Simulated binary classification data, adapted from G. Ridgeway's gbm example.
"""

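# The quantity at the heart of this demo is clf.oob_improvement_: for a
# stochastic GradientBoostingClassifier, entry i is the improvement in loss on
# the out-of-bag samples relative to the previous iteration, so its negative
# cumulative sum (computed in do_train below) traces an OOB estimate of the
# test loss curve.
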
def do_train(n_samples, n_splits, random_seed):
    # Slider values can arrive as floats; cast to the ints these APIs expect
    n_samples, n_splits, random_seed = int(n_samples), int(n_splits), int(random_seed)

    # Generate data (adapted from G. Ridgeway's gbm example)
    random_state = np.random.RandomState(random_seed)
    x1 = random_state.uniform(size=n_samples)
    x2 = random_state.uniform(size=n_samples)
    x3 = random_state.randint(0, 4, size=n_samples)

    p = expit(np.sin(3 * x1) - 4 * x2 + x3)
    y = random_state.binomial(1, p, size=n_samples)
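    # P(y = 1 | x) = expit(sin(3 * x1) - 4 * x2 + x3): nonlinear in x1, with
    # the ordinal feature x3 (values 0-3) shifting the log-odds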

    X = np.c_[x1, x2, x3]

    X = X.astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=random_seed
    )

    # Fit classifier with out-of-bag estimates
    params = {
        "n_estimators": 1200,
        "max_depth": 3,
        "subsample": 0.5,
        "learning_rate": 0.01,
        "min_samples_leaf": 1,
        "random_state": random_seed,
    }
    clf = GradientBoostingClassifier(**params)
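    # With subsample=0.5, each iteration trains its tree on a random half of
    # the training set; the held-out half is "out of bag", and it is what
    # clf.oob_improvement_ is computed from (unavailable when subsample == 1.0)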

    clf.fit(X_train, y_train)
    train_acc = clf.score(X_train, y_train)
    test_acc = clf.score(X_test, y_test)
    text = f"Train set accuracy: {train_acc * 100:.2f}%. Test set accuracy: {test_acc * 100:.2f}%"
    n_estimators = params["n_estimators"]
    x = np.arange(n_estimators) + 1

    def heldout_score(clf, X_test, y_test):
        """Compute deviance scores on ``X_test`` and ``y_test``."""
        score = np.zeros((n_estimators,), dtype=np.float64)
        for i, y_proba in enumerate(clf.staged_predict_proba(X_test)):
            # binomial deviance is twice the log loss of the positive-class probability
            score[i] = 2 * log_loss(y_test, y_proba[:, 1])
        return score
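
    # staged_predict_proba yields predictions after every boosting stage, so a
    # single pass produces the full held-out deviance curve without refitting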

    def cv_estimate(n_splits):
        """Average the held-out deviance curve over ``n_splits`` CV folds."""
        cv = KFold(n_splits=n_splits)
        cv_clf = GradientBoostingClassifier(**params)
        val_scores = np.zeros((n_estimators,), dtype=np.float64)
        for train, test in cv.split(X_train, y_train):
            cv_clf.fit(X_train[train], y_train[train])
            val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
        val_scores /= n_splits
        return val_scores
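
    # Cross-validation refits the model n_splits times; avoiding exactly this
    # cost is the appeal of the OOB estimate described above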

    # Estimate the deviance curve by cross-validation
    cv_score = cv_estimate(n_splits)

    # Compute the deviance curve on the held-out test data
    test_score = heldout_score(clf, X_test, y_test)

    # Negative cumulative sum of OOB improvements
    cumsum = -np.cumsum(clf.oob_improvement_)

    # Min loss according to OOB
    oob_best_iter = x[np.argmin(cumsum)]

    # Min loss according to test (normalize such that first loss is 0)
    test_score -= test_score[0]
    test_best_iter = x[np.argmin(test_score)]

    # Min loss according to CV (normalize such that first loss is 0)
    cv_score -= cv_score[0]
    cv_best_iter = x[np.argmin(cv_score)]
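
    # As the description notes, the OOB estimate is conservative, so its
    # minimum tends to fall at an earlier iteration than the test and CV minima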

    # Color brew for the three curves
    oob_color = [c / 256.0 for c in (190, 174, 212)]
    test_color = [c / 256.0 for c in (127, 201, 127)]
    cv_color = [c / 256.0 for c in (253, 192, 134)]

    # Line styles for the three curves
    oob_line = "dashed"
    test_line = "solid"
    cv_line = "dashdot"

    # Plot the three curves and a vertical line at each best iteration
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.plot(x, cumsum, label="OOB loss", color=oob_color, linestyle=oob_line)
    ax.plot(x, test_score, label="Test loss", color=test_color, linestyle=test_line)
    ax.plot(x, cv_score, label="CV loss", color=cv_color, linestyle=cv_line)
    ax.axvline(x=oob_best_iter, color=oob_color, linestyle=oob_line)
    ax.axvline(x=test_best_iter, color=test_color, linestyle=test_line)
    ax.axvline(x=cv_best_iter, color=cv_color, linestyle=cv_line)

    # Add labeled ticks ("OOB", "CV", "Test") at the three best iterations
    xticks = plt.xticks()
    xticks_pos = np.array(
        xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]
    )
    xticks_label = np.array([int(t) for t in xticks[0]] + ["OOB", "CV", "Test"])
    ind = np.argsort(xticks_pos)
    xticks_pos = xticks_pos[ind]
    xticks_label = xticks_label[ind]
    ax.set_xticks(xticks_pos, xticks_label, rotation=90)

    ax.legend(loc="upper center")
    ax.set_ylabel("normalized loss")
    ax.set_xlabel("number of iterations")
    return fig, text


with gr.Blocks(theme=theme) as demo:
    gr.Markdown('''
        <div>
            <h1 style='text-align: center'>Gradient Boosting Out-of-Bag estimates</h1>
        </div>
    ''')
    gr.Markdown(model_card)
    gr.Markdown("Author: <a href=\"https://huggingface.co/vumichien\">Vu Minh Chien</a>. Based on the example from <a href=\"https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_oob.html#sphx-glr-auto-examples-ensemble-plot-gradient-boosting-oob-py\">scikit-learn</a>")
    n_samples = gr.Slider(minimum=500, maximum=5000, step=500, value=500, label="Number of samples")
    n_splits = gr.Slider(minimum=2, maximum=10, step=1, value=3, label="Number of cross-validation folds")
    random_seed = gr.Slider(minimum=0, maximum=2000, step=1, value=0, label="Random seed")

    with gr.Row():
        with gr.Column():
            plot = gr.Plot()
        with gr.Column():
            result = gr.Textbox(label="Results")

    # Retrain and redraw whenever any slider changes
    n_samples.change(fn=do_train, inputs=[n_samples, n_splits, random_seed], outputs=[plot, result])
    n_splits.change(fn=do_train, inputs=[n_samples, n_splits, random_seed], outputs=[plot, result])
    random_seed.change(fn=do_train, inputs=[n_samples, n_splits, random_seed], outputs=[plot, result])

demo.launch()