|
import time |
|
import torch |
|
from options.train_options import TrainOptions |
|
from data import create_dataset |
|
from models import create_model |
|
from util.visualizer import Visualizer |
|
from tqdm.auto import tqdm |
|
|
|
if __name__ == "__main__":
    # Parse command-line training options (model, dataset, display and
    # checkpointing flags).
    opt = TrainOptions().parse()
    # Build the training dataset/loader from the options.
    dataset = create_dataset(opt)
    dataset_size = len(dataset)  # number of images in the dataset

    model = create_model(opt)
    print("The number of training images = %d" % dataset_size)

    # Visualizer handles display of images/losses and HTML logging; it is
    # attached to opt so other components can reach it.
    visualizer = Visualizer(opt)
    opt.visualizer = visualizer
    total_iters = 0  # total number of training images processed so far

    # Exponential moving average of the per-image optimization time (seconds),
    # seeded at 0.1 and blended with factor 0.005 each iteration below.
    optimize_time = 0.1

    # NOTE(review): the original kept an unused `times = []` here; removed as
    # dead code.

    # Train for opt.n_epochs at the initial learning rate, then
    # opt.n_epochs_decay further epochs while the LR decays.
    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
        epoch_start_time = time.time()  # timer for the whole epoch
        iter_data_time = time.time()  # timer for data loading per iteration
        epoch_iter = 0  # images processed in the current epoch
        visualizer.reset()

        dataset.set_epoch(epoch)  # let the dataset reshuffle/re-pair per epoch
        for i, data in tqdm(enumerate(dataset), total=len(dataset)):
            iter_start_time = time.time()
            if total_iters % opt.print_freq == 0:
                # Data-loading time, sampled only on print steps. This is true
                # on the very first iteration (0 % k == 0), so t_data is always
                # bound before the print below uses it.
                t_data = iter_start_time - iter_data_time

            batch_size = data["A"].size(0)
            total_iters += batch_size
            epoch_iter += batch_size
            if len(opt.gpu_ids) > 0:
                torch.cuda.synchronize()  # accurate GPU wall-clock timing
            optimize_start_time = time.time()
            if epoch == opt.epoch_count and i == 0:
                # Lazy, data-dependent initialization on the very first batch:
                # the model may size networks from the data, then needs setup
                # (load/print networks, create schedulers) and parallelization.
                model.data_dependent_initialize(data)
                model.setup(opt)
                model.parallelize()
            model.set_input(data)  # unpack batch and apply preprocessing
            model.optimize_parameters()  # forward, backward, optimizer steps
            if len(opt.gpu_ids) > 0:
                torch.cuda.synchronize()
            # Update the EMA of per-image optimization time (new sample
            # weighted 0.005, history weighted 0.995).
            optimize_time = (
                time.time() - optimize_start_time
            ) / batch_size * 0.005 + 0.995 * optimize_time

            if total_iters % opt.display_freq == 0:
                # Refresh images on the display server / HTML page.
                save_result = total_iters % opt.update_html_freq == 0
                model.compute_visuals()
                visualizer.display_current_results(
                    model.get_current_visuals(), epoch, save_result
                )

            if total_iters % opt.print_freq == 0:
                # Log current losses to console/file, and optionally plot them.
                losses = model.get_current_losses()
                visualizer.print_current_losses(
                    epoch, epoch_iter, losses, optimize_time, t_data
                )
                if opt.display_id is None or opt.display_id > 0:
                    visualizer.plot_current_losses(
                        epoch, float(epoch_iter) / dataset_size, losses
                    )

            if total_iters % opt.save_latest_freq == 0:
                # Periodic "latest" checkpoint so a crash loses little work.
                print(
                    "saving the latest model (epoch %d, total_iters %d)"
                    % (epoch, total_iters)
                )
                # Printing the experiment name helps distinguish runs when
                # several jobs share one log stream.
                print(opt.name)
                save_suffix = "iter_%d" % total_iters if opt.save_by_iter else "latest"
                model.save_networks(save_suffix)

            iter_data_time = time.time()

        if epoch % opt.save_epoch_freq == 0:
            # End-of-epoch checkpoints: a rolling "latest" plus an
            # epoch-numbered snapshot.
            print(
                "saving the model at the end of epoch %d, iters %d"
                % (epoch, total_iters)
            )
            model.save_networks("latest")
            model.save_networks(epoch)

        print(
            "End of epoch %d / %d \t Time Taken: %d sec"
            % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time)
        )
        model.update_learning_rate()  # step the LR schedulers once per epoch
|