Upload 8 files
- app.py +83 -0
- archs/model.py +97 -0
- examples/inputs/1.jpg +0 -0
- examples/inputs/2.jpg +0 -0
- examples/inputs/3.jpg +0 -0
- examples/inputs/4.jpg +0 -0
- examples/inputs/5.jpg +0 -0
- models/chk_6000.pt +3 -0
app.py
ADDED
@@ -0,0 +1,83 @@
import gradio as gr
from PIL import Image
import torch
import torchvision.transforms as transforms
import numpy as np

from archs.model import UNet


device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# define some auxiliary functions
pil_to_tensor = transforms.ToTensor()

# load the checkpoint for the run we want to demo
model = UNet()
checkpoints = torch.load('./models/chk_6000.pt', map_location=device)
model.load_state_dict(checkpoints['model_state_dict'])
model = model.to(device)
model.eval()


def load_img(filename):
    img = Image.open(filename).convert("RGB")
    img_tensor = pil_to_tensor(img)
    return img_tensor


def process_img(image):
    img = np.array(image)
    img = img / 255.
    img = img.astype(np.float32)
    y = torch.tensor(img).permute(2, 0, 1).unsqueeze(0).to(device)

    with torch.no_grad():
        x_hat = model(y)

    restored_img = x_hat.squeeze().permute(1, 2, 0).clamp_(0, 1).cpu().detach().numpy()
    restored_img = np.clip(restored_img, 0., 1.)

    restored_img = (restored_img * 255.0).round().astype(np.uint8)  # float32 to uint8
    return Image.fromarray(restored_img)


title = "Efficient Low-Light Enhancement ✏️🖼️ 🤗"
description = ''' ## [Inpainting for Autonomous Driving](https://github.com/cidautai)
[Javier Abad Hernández](https://github.com/javierabad01)
Fundación Cidaut

> **Disclaimer:** please remember this is not a product, so you will notice some limitations.
**This demo expects an image with some degradations.**
Due to GPU memory limitations, the app might crash if you feed it a high-resolution image (2K, 4K).
<br>
'''

# the bundled examples are .jpg files (see examples/inputs/), so reference them as such
examples = [['examples/inputs/1.jpg'],
            ['examples/inputs/2.jpg'],
            ['examples/inputs/3.jpg'],
            ['examples/inputs/4.jpg'],
            ['examples/inputs/5.jpg']]

css = """
.image-frame img, .image-container img {
    width: auto;
    height: auto;
    max-width: none;
}
"""

demo = gr.Interface(
    fn=process_img,
    inputs=[
        gr.Image(type='pil', label='input')
    ],
    outputs=[gr.Image(type='pil', label='output')],
    title=title,
    description=description,
    examples=examples,
    css=css
)

if __name__ == '__main__':
    demo.launch()
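Since the description warns that 2K/4K inputs can exhaust GPU memory, a pre-resize guard could be wired in front of `process_img`. The sketch below is a hypothetical addition, not part of the uploaded app; `max_side=1024` is an assumed budget. It also rounds each side down to a multiple of 16, which the UNet below implicitly requires: the encoder's four 2× poolings must round-trip exactly for the decoder's skip concatenations to line up.

```python
from PIL import Image


def shrink_if_needed(img: Image.Image, max_side: int = 1024) -> Image.Image:
    # Hypothetical guard, not in app.py: cap the longest side so inference
    # fits in GPU memory, and snap both sides to multiples of 16 so the
    # encoder's four 2x poolings round-trip exactly in the decoder.
    w, h = img.size
    scale = min(1.0, max_side / max(w, h))
    new_w = max(16, int(w * scale) // 16 * 16)
    new_h = max(16, int(h * scale) // 16 * 16)
    if (new_w, new_h) == (w, h):
        return img
    return img.resize((new_w, new_h), Image.LANCZOS)
```

Calling `shrink_if_needed(image)` at the top of `process_img` would leave the rest of the pipeline unchanged.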
archs/model.py
ADDED
@@ -0,0 +1,97 @@
import torch
import torch.nn as nn


class AttentionBlock(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
        super(AttentionBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels,
                               kernel_size=kernel_size, padding=padding)
        self.conv2 = nn.Conv2d(out_channels, out_channels,
                               kernel_size=kernel_size, padding=padding)
        self.attn = nn.MultiheadAttention(
            out_channels, num_heads=8, batch_first=True)
        self.norm = nn.LayerNorm(out_channels)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.conv1(x)
        x = self.activation(x)
        x = self.conv2(x)
        b, c, h, w = x.size()
        x = x.view(b, c, h * w).permute(2, 0, 1)  # reshape and permute for attention
        attn_output, _ = self.attn(x, x, x)
        x = attn_output.permute(1, 2, 0).view(
            b, c, h, w)  # revert the permute and reshape
        x = x.view(b, c, -1)  # flatten the last two dimensions
        # reshape for LayerNorm (normalizes over the channel dimension)
        x = self.norm(x.reshape(b, -1, c))
        x = x.view(b, c, h, w)  # reshape back to the original layout
        return x


class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(),
        )

        self.lstm = nn.LSTM(512, 512, batch_first=True)
        self.attn_block = AttentionBlock(512, 512)

        # each ConvTranspose2d takes its input concatenated with a skip
        # connection, hence the doubled input channel counts
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(1024, 256, kernel_size=2, stride=2),
            nn.ReLU(),
            nn.ConvTranspose2d(512, 128, kernel_size=2, stride=2),
            nn.ReLU(),
            nn.ConvTranspose2d(256, 64, kernel_size=2, stride=2),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 32, kernel_size=2, stride=2),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 3, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # note: H and W must be divisible by 16 (four 2x poolings) for the
        # skip concatenations below to line up
        skip_connections = []

        for layer in self.encoder:
            x = layer(x)
            skip_connections.append(x)
            if isinstance(layer, nn.MaxPool2d):
                # drop the pooled map itself; keep the pre-pool activations
                skip_connections.pop()

        # reinterpret the bottleneck as a (B, H*W, 512) sequence for the LSTM
        batch_size, channels, height, width = x.size()
        x = x.view(batch_size, -1, channels)
        x, _ = self.lstm(x)
        x = x.unsqueeze(1)
        x = x.permute(0, 2, 3, 1)
        x = x.reshape(batch_size, channels, height, width)  # back to (B, C, H, W)

        x = self.attn_block(x)

        skip_connections = skip_connections[::-1]  # deepest first

        for i, layer in enumerate(self.decoder):
            if isinstance(layer, nn.ConvTranspose2d):
                x = layer(torch.cat((x, skip_connections[i]), dim=1))
            else:
                x = layer(x)

        return x
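A quick smoke test of the forward pass above (a sketch, assuming the file lives at `archs/model.py` as in this commit). The input sides are kept divisible by 16; other sizes break the skip-concatenation shapes:

```python
import torch

from archs.model import UNet

# random weights are fine for a shape check; the demo loads chk_6000.pt instead
model = UNet().eval()
with torch.no_grad():
    x = torch.rand(1, 3, 64, 64)  # dummy RGB image, 64 = 4 * 16
    y = model(x)
print(y.shape)  # torch.Size([1, 3, 64, 64]); the Sigmoid keeps values in (0, 1)
```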
examples/inputs/1.jpg
ADDED
examples/inputs/2.jpg
ADDED
examples/inputs/3.jpg
ADDED
examples/inputs/4.jpg
ADDED
examples/inputs/5.jpg
ADDED
models/chk_6000.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:169f189b0dc9275b81d4877d38f14f4a0155f72781a43b62d731df5a41481a1e
size 130055955
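The checkpoint is stored as a Git LFS pointer, so a plain `git clone` without LFS leaves this three-line stub in place of the 130 MB file. After fetching the real weights (e.g. with `git lfs pull`), the download can be checked against the pointer's own oid and size; a minimal sketch:

```python
import hashlib
import os

path = 'models/chk_6000.pt'
expected_oid = '169f189b0dc9275b81d4877d38f14f4a0155f72781a43b62d731df5a41481a1e'
expected_size = 130055955

# a size mismatch usually means the file is still the LFS pointer stub
assert os.path.getsize(path) == expected_size, 'size mismatch: run git lfs pull?'

sha = hashlib.sha256()
with open(path, 'rb') as f:
    for chunk in iter(lambda: f.read(1 << 20), b''):
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, 'sha256 mismatch'
print('checkpoint matches the LFS pointer')
```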