Add DCGAN app
- Generator.py +45 -0
- app.py +37 -0
- generator.pth +3 -0
- requirement.txt +4 -0
Generator.py
ADDED
@@ -0,0 +1,45 @@
+import torch
+import torch.nn as nn
+
+class Config:
+    workers = 2        # Number of workers for dataloader
+    batch_size = 128   # Batch size during training
+    image_size = 64
+    nc = 3             # Number of channels in the training images
+    nz = 100           # Size of z latent vector (i.e. size of generator input)
+    ngf = 64           # Size of feature maps in generator
+    ndf = 64           # Size of feature maps in discriminator
+    num_epochs = 15    # Number of training epochs
+    lr = 0.0002        # Learning rate for optimizers
+    beta1 = 0.5        # Beta1 hyperparam for Adam optimizers
+    ngpu = 1           # Number of GPUs available. Use 0 for CPU mode.
+
+class Generator(nn.Module):
+    def __init__(self, ngpu):
+        super(Generator, self).__init__()
+        self.ngpu = ngpu
+        self.network = nn.Sequential(
+            # input is Z, going into a convolution
+            nn.ConvTranspose2d(Config.nz, Config.ngf * 8, 4, 1, 0, bias=False),
+            nn.BatchNorm2d(Config.ngf * 8),
+            nn.ReLU(True),
+            # state size. (ngf*8) x 4 x 4
+            nn.ConvTranspose2d(Config.ngf * 8, Config.ngf * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(Config.ngf * 4),
+            nn.ReLU(True),
+            # state size. (ngf*4) x 8 x 8
+            nn.ConvTranspose2d(Config.ngf * 4, Config.ngf * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(Config.ngf * 2),
+            nn.ReLU(True),
+            # state size. (ngf*2) x 16 x 16
+            nn.ConvTranspose2d(Config.ngf * 2, Config.ngf, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(Config.ngf),
+            nn.ReLU(True),
+            # state size. (ngf) x 32 x 32
+            nn.ConvTranspose2d(Config.ngf, Config.nc, 4, 2, 1, bias=False),
+            nn.Tanh()
+            # state size. (nc) x 64 x 64
+        )
+
+    def forward(self, xb):
+        return self.network(xb)
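For reference, the network maps a latent batch of shape (N, nz, 1, 1) to images of shape (N, 3, 64, 64). A minimal sketch of loading the committed weights and sampling (file and class names as above; the 16-image batch size is arbitrary):

    import torch
    from Generator import Generator

    netG = Generator(ngpu=1)
    netG.load_state_dict(torch.load("./generator.pth", map_location="cpu"))
    netG.eval()

    with torch.no_grad():
        z = torch.randn(16, 100, 1, 1)  # 16 latent vectors, nz = 100
        imgs = netG(z)                  # (16, 3, 64, 64), in [-1, 1] from the final Tanh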
app.py
ADDED
@@ -0,0 +1,37 @@
+import gradio as gr
+import torch
+from Generator import Generator
+from torchvision.utils import save_image
+
+
+generator = Generator(1)
+generator.load_state_dict(torch.load("./generator.pth", map_location=torch.device('cpu')))
+generator.eval()
+
+
+def generate(seed, num_img):
+    torch.manual_seed(int(seed))  # sliders deliver floats; cast before seeding
+    z = torch.randn(int(num_img), 100, 1, 1)
+    fake_img = generator(z)
+    fake_img = fake_img.detach()
+    fake_img = fake_img.squeeze()
+    save_image(fake_img, "fake_img.png", normalize=True)
+    return 'fake_img.png'
+
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("DCGAN model that generates fake images")
+
+
+    image_input = [
+        gr.Slider(0, 1000, label='Seed', step=1),
+        gr.Slider(4, 64, label='Number of images', step=1),
+    ]
+    image_output = gr.Image()
+    image_button = gr.Button("Generate")
+
+
+    image_button.click(generate, inputs=image_input, outputs=image_output)
+
+demo.launch(share=True)
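Note: torchvision's save_image tiles a 4-D batch into a single grid PNG, so the "Number of images" slider controls how many samples appear in the one image shown by the gr.Image output, and fixing the seed makes that grid reproducible.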
generator.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:255cba98fd84807b783a17ffdd094f416b524ea0ff9c69904679106914e494d0
+size 14321882
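Note: this entry is a Git LFS pointer, not the ~14 MB checkpoint itself; after a plain clone, the real weights must be fetched via Git LFS (e.g. git lfs pull) for the torch.load call in app.py to succeed.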
requirement.txt
ADDED
@@ -0,0 +1,4 @@
+torch
+torchvision
+numpy
+gradio
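To try the app locally (assuming a working Python environment and the LFS weights fetched as noted above): pip install -r requirement.txt, then python app.py. The share=True flag in app.py additionally requests a temporary public Gradio link alongside the local one.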