import torch
import open_clip
from torchvision import transforms
from torchvision.transforms import ToPILImage

class help_function:
    """Helper wrapping the TorchScript models used for text-driven image editing."""

    def __init__(self):
        # TorchScript (JIT) models, all loaded on CPU.
        self.clip_text_model = torch.jit.load('jit_models/clip_text_jit.pt', map_location=torch.device('cpu'))
        self.decoder = torch.jit.load('jit_models/decoder_16w.pt', map_location=torch.device('cpu'))
        self.mapper_clip = torch.jit.load('jit_models/mapper_clip_jit.pt', map_location=torch.device('cpu'))
        # Mean CLIP text embedding and mean latent code, used for centering.
        self.mean_clip = torch.load('jit_models/mean_clip.pt')
        self.mean_person = torch.load('jit_models/mean_person.pt')
        self.encoder = torch.jit.load('jit_models/combined_encoder.pt', map_location=torch.device('cpu'))
        self.tokenizer = open_clip.get_tokenizer('ViT-B-32')
        # Preprocessing: force a square 224x224 input (so the encoder's expected
        # shape is always valid) and normalize pixel values to [-1, 1].
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

    def get_text_embedding(self, text):
        # Tokenize the prompt and encode it with the CLIP text encoder.
        return self.clip_text_model(self.tokenizer(text))

    def get_image_inversion(self, image):
        # Encode the image into a (1, 16, 512) latent code and re-center it
        # around the mean latent code.
        image = self.transform(image)
        w_inversion = self.encoder(image.unsqueeze(0)).reshape(1, 16, 512)
        return w_inversion + self.mean_person

    def get_text_delta(self, text_features):
        # Map the centered CLIP text embedding to a latent-space edit direction.
        w_delta = self.mapper_clip(text_features - self.mean_clip)
        return w_delta

    def image_from_text(self, text, image, power=1.0):
        # Invert the image, compute the text-driven edit direction, scale it by
        # `power`, and decode the edited latent back to pixels.
        w_inversion = self.get_image_inversion(image)
        text_embedding = self.get_text_embedding(text)
        w_delta = self.get_text_delta(text_embedding)

        w_edit = w_inversion + w_delta * power
        image_edit = self.decoder(w_edit)
        # Un-normalize from [-1, 1] back to [0, 1] before converting to PIL.
        return ToPILImage()((image_edit[0] * 0.5 + 0.5).clamp(0, 1))
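

# Usage sketch (not part of the original module): an illustrative example of
# driving the pipeline end to end. The image paths and prompt below are
# hypothetical, and PIL.Image is assumed for loading the source photo.
if __name__ == "__main__":
    from PIL import Image

    helper = help_function()
    source = Image.open('examples/face.jpg').convert('RGB')  # hypothetical input path
    edited = helper.image_from_text('a smiling person', source, power=1.0)
    edited.save('examples/face_edited.jpg')  # hypothetical output path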