from typing import Any, Dict

import base64
import io

import torch
from diffusers import AutoPipelineForText2Image
from PIL import Image


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the text-to-image pipeline from the model repository at `path`
        # in half precision and move it onto the GPU.
        self.pipeline = AutoPipelineForText2Image.from_pretrained(
            path, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
        ).to("cuda")

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        data args:
            inputs (:obj:`str` or `list` of `str`): The textual prompt(s) to generate images from
            negative_prompt (:obj:`str`): A prompt describing what the generation should avoid
            num_inference_steps (:obj:`int`): The number of denoising steps to perform (default: 50)
        Return:
            A :obj:`dict` with an "outputs" list that will be serialized and returned
        """
        inputs = data.pop("inputs", data)
        negative_prompt = data.pop("negative_prompt", None)
        num_inference_steps = data.pop("num_inference_steps", 50)

        with torch.no_grad():
            images = self.pipeline(
                prompt=inputs,
                negative_prompt=negative_prompt,
                num_inference_steps=num_inference_steps,
            ).images

        # The pipeline accepts a single prompt or a list of prompts; normalize to a
        # list so each returned image can be paired with the prompt that produced it.
        prompts = inputs if isinstance(inputs, list) else [inputs]

        response = {
            "outputs": [
                {"prompt": prompts[i], "image": self.encode_img(images[i])}
                for i in range(len(images))
            ]
        }

        return response

    def encode_img(self, img: Image.Image) -> str:
        # Serialize the PIL image to an in-memory JPEG and return it as a base64 string.
        img_byte_array = io.BytesIO()
        img.save(img_byte_array, format="JPEG")
        img_str = base64.b64encode(img_byte_array.getvalue()).decode("utf-8")
        return img_str
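

# --- Illustrative local usage (a sketch, not part of the handler contract) ---
# The block below assumes a locally downloaded text-to-image checkpoint at "./model"
# and an available CUDA GPU; the path, prompt, and output filename are placeholder values.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")
    result = handler(
        {
            "inputs": "an astronaut riding a horse on the moon",
            "negative_prompt": "blurry, low quality",
            "num_inference_steps": 25,
        }
    )

    # Each output pairs a prompt with a base64-encoded JPEG; decode it back into
    # a PIL image the same way a client of the deployed endpoint would.
    first = result["outputs"][0]
    decoded = Image.open(io.BytesIO(base64.b64decode(first["image"])))
    decoded.save("output.jpg")
    print(f"Generated {len(result['outputs'])} image(s) for prompt: {first['prompt']}")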