import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, GRU, Dropout
from tensorflow.keras.optimizers import Adam
from optuna.integration import TFKerasPruningCallback


def create_lstm_model(input_shape, lstm_units_1, lstm_units_2, dense_units, eta):
    # Stacked LSTM binary classifier: two recurrent layers, one dense hidden
    # layer, and a sigmoid output, compiled with AUC as the tracked metric.
    inputs = Input(shape=input_shape)
    x = LSTM(units=lstm_units_1, return_sequences=True)(inputs)
    x = LSTM(units=lstm_units_2)(x)
    x = Dense(units=dense_units, activation='relu')(x)
    outputs = Dense(units=1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=eta),
                  loss='binary_crossentropy',
                  metrics=[tf.keras.metrics.AUC()])
    return model


def objective(trial, df):
    # Layer widths are sampled from powers of two between 4 and 256.
    powers_of_two = [2 ** n for n in range(2, 9)]
    lstm_units_1 = trial.suggest_categorical('lstm_units_1', powers_of_two)
    lstm_units_2 = trial.suggest_categorical('lstm_units_2', powers_of_two)
    dense_units = trial.suggest_categorical('dense_units', powers_of_two)
    batch_size = 128
    epochs = 128
    eta = trial.suggest_float('eta', 1e-4, 1e-1, log=True)
    slen = trial.suggest_int('sequence_length', 10, 60)

    # Build sequences of length slen and split them into train/validation/test
    # sets (helper functions assumed to be defined elsewhere in the document).
    fs, t = get_sequences(df, 'clabel', slen)
    trs, vs, tss, trt, vt, tst = get_train_val_test(fs, t)

    model = create_lstm_model(input_shape=(trs.shape[1], trs.shape[2]),
                              lstm_units_1=lstm_units_1,
                              lstm_units_2=lstm_units_2,
                              dense_units=dense_units,
                              eta=eta)

    # Prune unpromising trials early based on the validation loss.
    pruning_callback = TFKerasPruningCallback(trial, 'val_loss')

    history = model.fit(trs, trt,
                        validation_data=(vs, vt),
                        epochs=epochs,
                        batch_size=batch_size,
                        callbacks=[pruning_callback],
                        verbose=0)

    # Report the validation AUC as the objective value to maximize.
    val_loss, auc = model.evaluate(vs, vt, verbose=0)
    return auc
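

# A minimal driver sketch for running the search with the objective above
# (not part of the original listing): it assumes `df` is the prepared feature
# DataFrame and that Optuna is installed. The study direction matches the
# returned validation AUC; the pruner and the number of trials are
# illustrative choices, not values prescribed by the code above.
import optuna

study = optuna.create_study(direction='maximize',
                            pruner=optuna.pruners.MedianPruner())
study.optimize(lambda trial: objective(trial, df), n_trials=50)

print('Best validation AUC:', study.best_value)
print('Best hyperparameters:', study.best_params)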