# NOTE: web-extraction residue (Space status banner, git hashes, line-number
# gutter) removed from the top of this file — it was not part of the program.
import torch
from diffusers import DiffusionPipeline
import gradio as gr
import os
import spaces
# Comma-separated model ids come from the MODELS env var.
# Use a default of "" so a missing variable yields an empty list instead of
# raising AttributeError on None; strip entries and drop empties so stray
# whitespace/commas in the env value don't produce invalid model ids.
model_list = [m.strip() for m in os.environ.get("MODELS", "").split(",") if m.strip()]
lora_list = os.environ.get("LORAS")  # Not in use
@spaces.GPU
def generate(prompt, model):
    """Generate one image from *prompt* using the selected diffusion model.

    Parameters
    ----------
    prompt : str
        Positive text prompt for the pipeline.
    model : str
        Hub id of the diffusion model to load with ``from_pretrained``.

    Returns
    -------
    PIL.Image.Image
        The single generated image.
    """
    # NOTE(review): the pipeline is re-loaded from disk on every call. That is
    # simple and memory-safe for a ZeroGPU Space, but a per-model cache could
    # cut latency — confirm GPU memory budget before adding one.
    pipeline = DiffusionPipeline.from_pretrained(
        model,
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    pipeline.to('cuda')

    # Fixed quality-oriented negative prompt (kept byte-identical).
    negative_prompt = "nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"

    result = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        width=832,               # fixed portrait resolution
        height=1216,
        guidance_scale=7,
        num_inference_steps=28,
    )
    return result.images[0]
# --- Gradio UI: prompt + model picker on the left, generated image on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(label="Prompt")
            model_picker = gr.Dropdown(model_list, label="Select a model")
            run_button = gr.Button("Run")
        with gr.Column():
            output_image = gr.Image()
    # Wire the button to the GPU-backed generation function.
    run_button.click(fn=generate, inputs=[prompt_box, model_picker], outputs=output_image)
demo.launch()