File size: 2,184 Bytes
25e7dcb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, GRU, Input, LSTM
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def create_lstm_model(input_shape, lstm_units_1, lstm_units_2, dense_units, eta):
    """Build and compile a stacked two-layer LSTM binary classifier.

    Args:
        input_shape: Tuple ``(timesteps, features)`` describing one input
            sequence (batch dimension excluded).
        lstm_units_1: Units in the first LSTM layer (returns sequences so the
            second LSTM can consume them).
        lstm_units_2: Units in the second LSTM layer (returns its final state).
        dense_units: Units in the hidden ReLU Dense layer.
        eta: Learning rate for the Adam optimizer.

    Returns:
        A compiled ``tf.keras`` Model with a single sigmoid output, binary
        cross-entropy loss, and an AUC metric.
    """
    inputs = Input(shape=input_shape)
    x = LSTM(units=lstm_units_1, return_sequences=True)(inputs)
    x = LSTM(units=lstm_units_2)(x)
    x = Dense(units=dense_units, activation='relu')(x)
    outputs = Dense(units=1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)

    # Pass the learning rate by keyword: Adam's first positional argument is
    # learning_rate, but the explicit name is clearer and version-safe.
    model.compile(optimizer=Adam(learning_rate=eta),
                  loss='binary_crossentropy',
                  metrics=[tf.keras.metrics.AUC()])
    return model

def objective(trial, df):
    """Optuna objective: train an LSTM classifier with trial-sampled
    hyperparameters and return the validation AUC (to be maximized).

    Args:
        trial: ``optuna.Trial`` used to sample hyperparameters and to drive
            epoch-level pruning.
        df: DataFrame handed to ``get_sequences`` — presumably contains a
            'clabel' target column; confirm against that helper's contract.

    Returns:
        float: AUC of the fully-trained (final-epoch) model on the
        validation split.
    """
    # Hyperparameter search space: layer widths restricted to powers of two.
    powers_of_two = [2 ** n for n in range(2, 9)]  # 4 .. 256
    lstm_units_1 = trial.suggest_categorical('lstm_units_1', powers_of_two)
    lstm_units_2 = trial.suggest_categorical('lstm_units_2', powers_of_two)
    dense_units = trial.suggest_categorical('dense_units', powers_of_two)
    batch_size = 128   # fixed, not tuned
    epochs = 128       # fixed, not tuned
    eta = trial.suggest_float('eta', 1e-4, 1e-1, log=True)
    slen = trial.suggest_int('sequence_length', 10, 60)

    # Build sequences and train/val/test splits; both helpers are defined
    # elsewhere in the project.
    fs, t = get_sequences(df, 'clabel', slen)
    trs, vs, tss, trt, vt, tst = get_train_val_test(fs, t)

    # Create model sized by the trial's sampled hyperparameters.
    model = create_lstm_model(input_shape=(trs.shape[1], trs.shape[2]),
                              lstm_units_1=lstm_units_1,
                              lstm_units_2=lstm_units_2,
                              dense_units=dense_units,
                              eta=eta)

    # NOTE(review): pruning monitors 'val_loss' while the objective returns
    # AUC — confirm this is intentional; pruning on the AUC metric may align
    # better with what the study optimizes.
    pruning_callback = TFKerasPruningCallback(trial, 'val_loss')

    # Train model. The History object was previously bound but never used,
    # so the return value is deliberately discarded.
    model.fit(trs, trt,
              validation_data=(vs, vt),
              epochs=epochs,
              batch_size=batch_size,
              callbacks=[pruning_callback],
              verbose=0)

    # Evaluate the final-epoch model on the validation split;
    # evaluate() returns [loss, auc] per the compile() metrics list.
    val_loss, auc = model.evaluate(vs, vt, verbose=0)
    return auc