"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================

The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.

Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first two principal components.

Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In contrast to PCA,
LDA is a supervised method that uses the known class labels.

"""

import matplotlib.pyplot as plt
import gradio as gr
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np

# load data
iris = datasets.load_iris()

all_X = iris.data
all_y = iris.target
target_names = iris.target_names


def plot_lda_pca(n_samples=50, n_features=4):
    '''
    Plot 2-D PCA and LDA projections of a random subset of the Iris dataset.

    Parameters
    ----------
    n_samples : int, optional
        Number of samples to draw from the dataset. The default is 50.
    n_features : int, optional
        Number of features to use from the dataset. The default is 4.

    Returns
    -------
    matplotlib.figure.Figure
        Figure with the PCA projection on top and the LDA projection below.
    '''
    # sub-sample without replacement so no row is drawn twice
    idx = np.random.choice(len(all_X), size=n_samples, replace=False)
    X = all_X[idx, :n_features]
    y = all_y[idx]
    
    # fit PCA
    pca = PCA(n_components=2)
    X_r = pca.fit(X).transform(X)

    # fit LDA
    lda = LinearDiscriminantAnalysis(n_components=2)
    X_r2 = lda.fit(X, y).transform(X)
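    # LinearDiscriminantAnalysis supports at most min(n_classes - 1, n_features)
    # components; with the 3 Iris classes that bound is 2, which is what makes this
    # 2-D projection possible. (If a very small random subset happened to contain
    # fewer than 3 classes, this fit would raise an error.)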

    # Percentage of variance explained for each component
    print(
        "explained variance ratio (first two components): %s"
        % str(pca.explained_variance_ratio_)
    )
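    # explained_variance_ratio_ gives the fraction of the total variance captured by
    # each principal component; on the full 4-feature Iris data the first two
    # components together account for roughly 98% of the variance.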

    fig, axes = plt.subplots(2, 1, sharex=False, sharey=False, figsize=(8, 6))
    colors = ["navy", "turquoise", "darkorange"]
    lw = 2

    # PCA projection (top panel)
    for color, i, target_name in zip(colors, [0, 1, 2], target_names):
        axes[0].scatter(
            X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=lw, label=target_name
        )
    axes[0].legend(loc="lower right")
    axes[0].set_title("PCA of Iris dataset")

    # LDA projection (bottom panel)
    for color, i, target_name in zip(colors, [0, 1, 2], target_names):
        axes[1].scatter(
            X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color, label=target_name
        )
    axes[1].legend(loc="lower right")
    axes[1].set_title("LDA of Iris dataset")
    plt.tight_layout()


    return fig


title = "2-D projection of Iris dataset using LDA and PCA"
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(" This example shows how one can use Prinicipal Components Analysis (PCA) and Linear Discriminant Analysis (LDA) to cluster the Iris dataset based on provided features. <br>"
    " PCA applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. <br>"
    " LDA is a supervised method that tries to identify attributes that account for the most variance between classes using the known class labels.  <br>"
    " The number of samples (n_samples) will determine the number of data points to produce.  <br>"
    " The number of components is fixed to 2 for this 2-D visualisation and LDA requires the number of components to be the number of classes -1, which in this case is (3-1) = 2. <br>"
    " The number of features (n_features) determine the number of features from the IRIS dataset to use for the model fitting.  <br>"
    " For further details please see the sklearn docs:"    
    )

    gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>")

    gr.Markdown(" **Dataset** : The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes or features: sepal length, sepal width, petal length and petal width. . <br>")

    # get max possible samples and features
    max_samples = len(iris.data)
    max_features = iris.data.shape[1]
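    # The n_features slider starts at 2 because a 2-component PCA (and the 2-D LDA
    # projection shown here) needs at least two input features.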
    with gr.Row():
        n_samples = gr.Slider(value=100, minimum=10, maximum=max_samples, step=10, label="n_samples")

        n_features = gr.Slider(value=2, minimum=2, maximum=max_features, step=1, label="n_features")
        
    btn = gr.Button(value="Run")
    btn.click(plot_lda_pca, inputs=[n_samples, n_features], outputs=gr.Plot(label="PCA vs LDA projection"))
    

demo.launch()
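# Running this file as a script launches the Gradio app on the default local port
# (7860 unless it is already taken); demo.launch(server_port=...) can be used to
# pick a different port.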