import torch
import torch.nn as nn

|
class Config:
    # Data loading
    workers = 2          # number of DataLoader worker processes
    batch_size = 128     # batch size used during training
    image_size = 64      # spatial size of the training images (64x64)

    # Model dimensions
    nc = 3               # number of channels in the training images (RGB)
    nz = 100             # size of the latent vector z (generator input)
    ngf = 64             # base number of feature maps in the generator
    ndf = 64             # base number of feature maps in the discriminator

    # Optimization
    num_epochs = 15      # number of training epochs
    lr = 0.0002          # learning rate for the optimizers
    beta1 = 0.5          # beta1 hyperparameter for Adam
    ngpu = 1             # number of GPUs available (0 runs on CPU)

|
class Generator(nn.Module):
    def __init__(self, ngpu):
        super().__init__()
        self.ngpu = ngpu
        self.network = nn.Sequential(
            # Input: latent vector z of shape (nz) x 1 x 1
            nn.ConvTranspose2d(Config.nz, Config.ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(Config.ngf * 8),
            nn.ReLU(True),
            # State size: (ngf*8) x 4 x 4
            nn.ConvTranspose2d(Config.ngf * 8, Config.ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(Config.ngf * 4),
            nn.ReLU(True),
            # State size: (ngf*4) x 8 x 8
            nn.ConvTranspose2d(Config.ngf * 4, Config.ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(Config.ngf * 2),
            nn.ReLU(True),
            # State size: (ngf*2) x 16 x 16
            nn.ConvTranspose2d(Config.ngf * 2, Config.ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(Config.ngf),
            nn.ReLU(True),
            # State size: (ngf) x 32 x 32
            nn.ConvTranspose2d(Config.ngf, Config.nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # Output: (nc) x 64 x 64, values in [-1, 1]
        )

    def forward(self, xb):
        return self.network(xb)
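

# --- Usage sketch (not part of the original file) ---
# A minimal example, under stated assumptions, of how this Generator might be
# exercised: sample one batch of latent vectors and check the output shape.
# The device selection below is an assumption for illustration; training code
# (loss, optimizer, Discriminator) is intentionally omitted.
if __name__ == "__main__":
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and Config.ngpu > 0 else "cpu"
    )
    netG = Generator(Config.ngpu).to(device)

    # One batch of latent vectors z ~ N(0, 1), shaped (batch_size, nz, 1, 1).
    noise = torch.randn(Config.batch_size, Config.nz, 1, 1, device=device)
    fake = netG(noise)

    # Expected: torch.Size([128, 3, 64, 64]), i.e. (batch_size, nc, image_size, image_size)
    print(fake.shape)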