Create ref_cap.py
tasks/ref_cap.py +68 -0
ADDED
@@ -0,0 +1,68 @@
# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected])
# --------------------------------------------------------

import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
from torchvision import transforms
from utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog

# 512px resize feeds the grounding model; 224px feeds the captioning model.
t = []
t.append(transforms.Resize(224, interpolation=Image.BICUBIC))
transform_ret = transforms.Compose(t)
t = []
t.append(transforms.Resize(512, interpolation=Image.BICUBIC))
transform_grd = transforms.Compose(t)

metadata = MetadataCatalog.get('coco_2017_train_panoptic')

def referring_captioning(model, image, texts, inpainting_text, *args, **kwargs):
    model_last, model_cap = model
    with torch.no_grad():
        # Stage 1: ground the referring phrase to a binary mask.
        image_ori = image
        image = transform_grd(image)
        width = image.size[0]
        height = image.size[1]
        image = np.asarray(image)
        image_ori_ = image
        images = torch.from_numpy(image.copy()).permute(2, 0, 1).cuda()
        # The grounding head expects the phrase to end with a period.
        texts_input = [[texts.strip() if texts.endswith('.') else (texts + '.')]]

        batch_inputs = [{'image': images, 'groundings': {'texts': texts_input}, 'height': height, 'width': width}]
        outputs = model_last.model.evaluate_grounding(batch_inputs, None)

        grd_mask = (outputs[-1]['grounding_mask'] > 0).float()
        # Inverted mask at the captioner's 224x224 resolution: True marks
        # pixels outside the grounded region.
        grd_mask_ = (1 - F.interpolate(grd_mask[None,], (224, 224), mode='nearest')[0]).bool()

        # Draw the grounded mask over the resized image for display.
        color = [252/255, 91/255, 129/255]
        visual = Visualizer(image_ori_, metadata=metadata)
        demo = visual.draw_binary_mask(grd_mask.cpu().numpy()[0], color=color, text=texts)
        res = demo.get_image()

        # If fewer than 5 pixels were grounded, skip captioning.
        if (1 - grd_mask_.float()).sum() < 5:
            torch.cuda.empty_cache()
            return Image.fromarray(res), 'n/a', None

        # Stage 2: caption at 224px, with the mask cleared (all zeros) and
        # decoding seeded by the referring phrase.
        grd_mask_ = grd_mask_ * 0
        image = transform_ret(image_ori)
        image_ori = np.asarray(image_ori)
        image = np.asarray(image)
        images = torch.from_numpy(image.copy()).permute(2, 0, 1).cuda()
        batch_inputs = [{'image': images, 'image_id': 0, 'captioning_mask': grd_mask_}]

        # Encode the phrase (minus any trailing period); [:-1] drops the
        # final token so generation continues from the phrase prefix.
        token_text = texts.replace('.', '') if texts.endswith('.') else texts
        token = model_cap.model.sem_seg_head.predictor.lang_encoder.tokenizer.encode(token_text)
        token = torch.tensor(token)[None, :-1]

        outputs = model_cap.model.evaluate_captioning(batch_inputs, extra={'token': token})
        # outputs = model_cap.model.evaluate_captioning(batch_inputs, extra={})
        text = outputs[-1]['captioning_text']

        torch.cuda.empty_cache()
        return Image.fromarray(res), text, None
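For context, here is a minimal sketch of how the new task function might be called from the demo app. The preloaded grounding and captioning models, the example image path, and the variable names below are illustrative assumptions, not part of this commit:

# Hypothetical usage; model_grd and model_cap are assumed to be X-Decoder
# models already loaded elsewhere in the app (e.g. at startup).
from PIL import Image
from tasks.ref_cap import referring_captioning

image = Image.open('examples/street.jpg').convert('RGB')  # illustrative path
overlay, caption, _ = referring_captioning(
    (model_grd, model_cap), image, 'the man in the red jacket', None)
overlay.save('grounding_overlay.png')  # input image with the grounded mask drawn
print(caption)                         # caption seeded with the referring phrase

The unused inpainting_text argument and the constant None in the return value appear to keep the signature uniform with the app's other task entry points.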