import os

import gradio as gr
import torch
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the token from the HF_TOKEN environment variable
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)

# Load the SD 3.5 Large base pipeline and fuse the photorealistic LoRA into its weights
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights(
    "prithivMLmods/SD3.5-Large-Photorealistic-LoRA",
    weight_name="Photorealistic-SD3.5-Large-LoRA.safetensors",
)
pipe.fuse_lora(lora_scale=1.0)
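# Device placement (assumption: a CUDA GPU with enough VRAM for SD 3.5 Large is
# available; on smaller GPUs, pipe.enable_model_cpu_offload() can be used in
# place of the .to("cuda") call below).
pipe.to("cuda")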


def generate_image(prompt, seed):
    """Generate one image for the given prompt with a reproducible seed."""
    generator = torch.manual_seed(seed)
    result = pipe(
        prompt=prompt,
        num_inference_steps=24,
        guidance_scale=4.0,
        width=960,
        height=1280,
        generator=generator,
    )
    return result.images[0]


def gradio_interface():
    # Build the interface and launch it directly (no `with` block is needed)
    demo = gr.Interface(
        fn=generate_image,
        inputs=[
            gr.Textbox(
                label="Prompt",
                value="Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational photography",
            ),
            gr.Slider(minimum=0, maximum=100000, step=1, label="Seed", value=42),
        ],
        outputs=gr.Image(type="pil", label="Generated Image"),
    )
    demo.launch()


gradio_interface()