from typing import Any, Dict

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, TorchAoConfig
from PIL import Image

IS_COMPILE = True

if IS_COMPILE:
    import torch._dynamo

    # Fall back to eager execution instead of failing hard if torch.compile errors out.
    torch._dynamo.config.suppress_errors = True

#from huggingface_inference_toolkit.logging import logger

def compile_pipeline(pipe) -> Any:
    # Fuse the attention QKV projections and switch to channels-last memory
    # format before compiling the transformer with the inductor backend.
    pipe.transformer.fuse_qkv_projections()
    pipe.transformer.to(memory_format=torch.channels_last)
    pipe.transformer = torch.compile(
        pipe.transformer, mode="reduce-overhead", fullgraph=False, dynamic=False, backend="inductor"
    )
    return pipe

class EndpointHandler:
    def __init__(self, path=""):
        repo_id = "camenduru/FLUX.1-dev-diffusers"
        #repo_id = "NoMoreCopyright/FLUX.1-dev-test"
        dtype = torch.bfloat16
        # int8 weight-only quantization (torchao) to shrink the transformer's memory footprint.
        quantization_config = TorchAoConfig("int8wo")
        vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
        #transformer = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", torch_dtype=dtype, quantization_config=quantization_config).to("cuda")
        self.pipeline = FluxPipeline.from_pretrained(repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config)
        if IS_COMPILE:
            self.pipeline = compile_pipeline(self.pipeline)
        self.pipeline.to("cuda")

    #@torch.inference_mode()
    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        #logger.info(f"Received incoming request with {data=}")
        # The prompt may arrive under either the `inputs` or the `prompt` key and,
        # as the error message below promises, must be a non-empty string.
        if "inputs" in data and isinstance(data["inputs"], str) and data["inputs"]:
            prompt = data.pop("inputs")
        elif "prompt" in data and isinstance(data["prompt"], str) and data["prompt"]:
            prompt = data.pop("prompt")
        else:
            raise ValueError(
                "Provided input body must contain either the key `inputs` or `prompt` with the"
                " prompt to use for the image generation, and it needs to be a non-empty string."
            )
        parameters = data.pop("parameters", {})
        num_inference_steps = parameters.get("num_inference_steps", 28)
        width = parameters.get("width", 1024)
        height = parameters.get("height", 1024)
        guidance_scale = parameters.get("guidance_scale", 3.5)
        # The seed cannot be passed to the pipeline directly; it must be wrapped in a generator.
        seed = parameters.get("seed", 0)
        generator = torch.manual_seed(seed)
        return self.pipeline(  # type: ignore
            prompt,
            height=height,
            width=width,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            generator=generator,
        ).images[0]
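

# ---------------------------------------------------------------------------
# Local smoke test: a minimal sketch of how the handler above can be invoked,
# assuming a CUDA GPU with enough memory for FLUX.1-dev. The payload mirrors
# the JSON body that __call__ expects; the prompt, parameter values, and
# output file name are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    handler = EndpointHandler()
    image = handler(
        {
            "inputs": "a watercolor painting of a red fox in a snowy forest",
            "parameters": {
                "num_inference_steps": 28,
                "width": 1024,
                "height": 1024,
                "guidance_scale": 3.5,
                "seed": 42,
            },
        }
    )
    image.save("flux_sample.png")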