|
import numpy as np
import scipy
import scipy.ndimage  # explicit: bare `import scipy` may not expose scipy.ndimage
import torch
|
|
|
|
|
# Recurrent hidden-state dimensions used by `evaluate` when recurrent_network=True.
num_layers = 2  # number of stacked recurrent layers

batch_size = 32  # batch size each test batch is assumed to have — TODO confirm loader matches

hidden_dim = 256  # size of each hidden-state vector
|
|
|
def random_rotation(inputs):
    """Rotate *inputs* by a uniformly random angle in [-180, 180) degrees.

    The output keeps the input's shape (``reshape=False``); corners rotated
    out of frame are filled by scipy's default (constant 0).
    """
    degrees = np.random.uniform(-180, 180)
    return scipy.ndimage.rotate(inputs, degrees, reshape=False)
|
|
|
def random_scaling(inputs):
    """Zoom *inputs* by a uniformly random factor in [0.8, 1.2).

    NOTE(review): ``scipy.ndimage.zoom`` resizes the array, so the output
    shape differs from the input's whenever the factor != 1 — confirm
    downstream consumers tolerate variable shapes.
    """
    factor = np.random.uniform(0.8, 1.2)
    return scipy.ndimage.zoom(inputs, factor)
|
|
|
def random_translation(inputs):
    """Shift *inputs* by a random sub-pixel offset in [-0.2, 0.2).

    The scalar offset is applied along every axis; shape is preserved and
    vacated cells are filled by scipy's default (constant 0).
    """
    offset = np.random.uniform(-0.2, 0.2)
    return scipy.ndimage.shift(inputs, offset)
|
|
|
def random_shearing(inputs):
    """Shear *inputs* along axis 0 by a random factor in [-0.2, 0.2).

    Fix: ``scipy.ndimage.shear`` does not exist (the original raised
    AttributeError at runtime). A shear is an affine transform, so build the
    shear matrix explicitly and apply it with ``affine_transform``. Shape is
    preserved; cells sheared out of frame are filled with scipy's default 0.

    Requires ``inputs`` to have at least 2 dimensions.
    """
    shear = np.random.uniform(-0.2, 0.2)
    # Identity matrix with one off-diagonal shear coefficient.
    matrix = np.eye(np.ndim(inputs))
    matrix[0, 1] = shear
    return scipy.ndimage.affine_transform(inputs, matrix)
|
|
|
def random_flipping(inputs):
    """Flip *inputs* along axis 1 (horizontal flip for HxW images).

    Fix: ``scipy.ndimage.flip`` does not exist (the original raised
    AttributeError at runtime); ``np.flip`` is the correct call.

    NOTE(review): despite the name, this flips unconditionally — no coin
    toss is drawn. Kept as-is to avoid changing pipeline behavior beyond
    the crash fix; confirm whether a 50% random flip was intended.
    """
    return np.flip(inputs, axis=1)
|
|
|
def data_augmentation(inputs):
    """Run *inputs* through the full augmentation pipeline.

    Applies, in order: rotation, scaling, translation, shearing, flipping.
    Each stage feeds its output to the next; the final array is returned.
    """
    pipeline = (
        random_rotation,
        random_scaling,
        random_translation,
        random_shearing,
        random_flipping,
    )
    for transform in pipeline:
        inputs = transform(inputs)
    return inputs
|
|
|
def evaluate(model, test_data, hyperparameters, recurrent_network=False, pre_trained_model=False, fine_tuning=False):
    """Compute classification accuracy (%) of *model* over *test_data*.

    Parameters
    ----------
    model : the network; must expose ``forward_from_pretrained`` /
        ``fine_tune`` when the corresponding flags are set.
    test_data : iterable of ``(inputs, labels)`` batches.
    hyperparameters : passed through to ``model.fine_tune`` only.
    recurrent_network / pre_trained_model / fine_tuning : select which
        forward path to use. Priority (highest first): fine_tuning,
        pre_trained_model, recurrent_network — this matches the original
        code's last-assignment-wins behavior, but now runs exactly ONE
        forward pass per batch instead of up to three.

    Returns
    -------
    float accuracy in [0, 100]; 0.0 if *test_data* is empty.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Initial (h0, c0) state for recurrent models; sized by the
    # module-level num_layers / batch_size / hidden_dim constants.
    hidden = (torch.zeros(num_layers, batch_size, hidden_dim).to(device),
              torch.zeros(num_layers, batch_size, hidden_dim).to(device))

    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for inputs, labels in test_data:
            # NOTE(review): augmenting at *evaluation* time makes accuracy
            # non-deterministic; confirm this is intended (e.g. TTA).
            inputs = data_augmentation(inputs)
            # The scipy-based augmentations return numpy arrays, so convert
            # back to a tensor before .to(device) (the original crashed here).
            inputs = torch.as_tensor(inputs).to(device)
            labels = labels.to(device)

            if fine_tuning:
                outputs = model.fine_tune(inputs, hyperparameters)
            elif pre_trained_model:
                outputs = model.forward_from_pretrained(inputs)
            elif recurrent_network:
                outputs = model(inputs, hidden)
            else:
                outputs = model(inputs)

            # Predicted class = argmax over the class dimension.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    if total == 0:
        # Guard: original raised ZeroDivisionError on an empty loader.
        return 0.0
    accuracy = 100 * correct / total
    return accuracy
|
|
|
def adjust_learning_rate(optimizer, epoch, base_lr=0.001, decay=0.1, step=30):
    """Step-decay the learning rate of every param group in *optimizer*.

    Sets lr = base_lr * decay ** (epoch // step). The defaults reproduce
    the original hard-coded schedule (initial LR 0.001, divided by 10
    every 30 epochs); the new keyword arguments are backward-compatible
    and let callers reuse the schedule with other settings.

    Parameters
    ----------
    optimizer : torch optimizer whose ``param_groups`` will be mutated.
    epoch : current epoch (0-based).
    base_lr : learning rate at epoch 0.
    decay : multiplicative factor applied every *step* epochs.
    step : number of epochs between decays.
    """
    lr = base_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr