from typing import Any, Dict
from PIL import Image
import torch
import base64
from io import BytesIO
from transformers import AutoProcessor, BlipForQuestionAnswering

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class EndpointHandler():
    def __init__(self, path=""):
        # Load the BLIP VQA processor and model once at startup.
        self.processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-capfilt-large")
        self.model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-capfilt-large").to(device)

    def __call__(self, data: Dict[str, Any]) -> str:
        inputs = data.pop("inputs", data)

        # The incoming image is a base64-encoded string; decode it into a PIL image.
        image = Image.open(BytesIO(base64.b64decode(inputs['image'])))
        inputs = self.processor(image, inputs['question'], return_tensors="pt").to(device)

        with torch.no_grad():
            outputs = self.model.generate(**inputs)
        
        # generate() returns token IDs, not hidden states; decode them into the answer string.
        return self.processor.decode(outputs[0], skip_special_tokens=True)
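
# ---------------------------------------------------------------------------
# Example usage (not part of the deployed handler): a minimal local smoke
# test, assuming an image exists at the hypothetical path "test.jpg". It
# mirrors the payload shape the handler expects:
#   {"inputs": {"image": <base64-encoded image>, "question": <str>}}
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with open("test.jpg", "rb") as f:
        encoded_image = base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler()
    payload = {"inputs": {"image": encoded_image, "question": "What is shown in the picture?"}}
    print(handler(payload))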