File size: 2,394 Bytes
fb3f880
 
 
 
 
 
 
 
 
 
 
461ace0
0cf7b1f
 
fb3f880
 
 
 
 
 
 
 
 
0cf7b1f
 
fb3f880
 
 
 
 
 
84f2663
 
 
 
 
fb3f880
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
from io import BytesIO

import torch
from diffusers import DiffusionPipeline
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

# load both base & refiner
# Module-level, eager load: both SDXL pipelines are downloaded/loaded at import
# time so the first HTTP request doesn't pay the (multi-GB) startup cost.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)

# Requires a CUDA-capable GPU; fp16 weights keep VRAM usage roughly halved.
base.to("cuda")
# base.enable_model_cpu_offload()
# Attention slicing trades some speed for a lower peak-VRAM footprint.
base.enable_attention_slicing()
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    # Share the base pipeline's second text encoder and VAE instead of loading
    # duplicate copies — saves VRAM and keeps latent spaces consistent.
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
refiner.to("cuda")
# refiner.enable_model_cpu_offload()
refiner.enable_attention_slicing()

# Create a new FastAPI app instance
app = FastAPI()


@app.get("/")
async def root():
    return {"message": "UNIK ML API"}


@app.get("/generate")
def generate(text: str):
    """
    generate image
    """
    # Define how many steps and what % of steps to be run on each experts (80/20) here
    n_steps = 40
    high_noise_frac = 0.8
    negative = "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly. bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, disgusting, poorly drawn hands, missing limb, floating limbs, disconnected limbs, malformed hands, blurry, mutated hands and fingers, watermark, watermarked, oversaturated, censored, distorted hands, amputation, missing hands, obese, doubled face, double hands, two women, anime style, cartoon, toon."
    prompt = "Designs should play with different textures and layering but stick to a monochrome palette. Think leather jackets over mesh tops, or satin draped over matte cotton. in a studio. zoomed-in. single model."

    # run both experts
    image = base(
        prompt=prompt,
        negative_prompt=negative,
        num_inference_steps=n_steps,
        denoising_end=high_noise_frac,
        output_type="latent",
    ).images
    final_image = refiner(
        prompt=prompt,
        negative_prompt=negative,
        num_inference_steps=n_steps,
        denoising_start=high_noise_frac,
        image=image,
    ).images[0]

    return StreamingResponse(BytesIO(final_image), media_type="image/png")
    # Return the generated text in a JSON response
    # return {"output": output[0]["generated_text"]}