|
import gradio as gr |
|
import torch |
|
from diffusers import StableDiffusion3Pipeline |
|
import os |
|
from huggingface_hub import login |
|
# Authenticate with the Hugging Face Hub so the gated SD3.5 weights can be
# downloaded. Token comes from the HF_TOKEN environment variable.
hf_token = os.environ.get("HF_TOKEN")

# Only attempt login when a token is actually configured; calling
# login(token=None) would fail or fall back to an interactive prompt.
if hf_token:
    login(token=hf_token)
|
|
|
|
|
|
|
# Load the Stable Diffusion 3.5 Large base pipeline; bfloat16 weights reduce
# memory use versus float32. (Gated repo — relies on a prior HF login.)
# NOTE(review): the pipeline is never moved to a device here — presumably it
# runs on CPU unless the hosting environment relocates it; confirm.
pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-large", torch_dtype=torch.bfloat16)

# Attach the photorealistic LoRA adapter on top of the base weights.
pipe.load_lora_weights("prithivMLmods/SD3.5-Large-Photorealistic-LoRA", weight_name="Photorealistic-SD3.5-Large-LoRA.safetensors")

# Merge the adapter into the base weights at full strength so inference
# does not pay per-step adapter overhead. Must come after load_lora_weights.
pipe.fuse_lora(lora_scale=1.0)
|
|
|
|
|
|
|
def generate_image(prompt, seed):
    """Generate one image from *prompt* with the module-level SD3.5 pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        seed: RNG seed for reproducible output. Coerced to ``int`` because
            Gradio sliders may deliver the value as a float.

    Returns:
        The first PIL image produced by the pipeline.
    """
    # Use a dedicated Generator rather than torch.manual_seed(), which would
    # mutate the process-wide global RNG state as a side effect.
    generator = torch.Generator().manual_seed(int(seed))

    image = pipe(
        prompt=prompt,
        num_inference_steps=24,
        guidance_scale=4.0,
        width=960,
        height=1280,
        generator=generator,
    ).images[0]

    return image
|
|
|
|
|
# Build the Gradio interface: prompt + seed in, generated image out.
with gr.Blocks() as demo:
    gr.Markdown("## Stable Diffusion Image Generation with Seed Control")

    # Input widgets: free-text prompt and an integer seed slider.
    prompt_box = gr.Textbox(
        label="Prompt",
        value="Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational photography",
    )
    seed_slider = gr.Slider(minimum=0, maximum=100000, step=1, label="Seed", value=42)

    # Output widget: the pipeline returns a PIL image.
    result_image = gr.Image(type="pil", label="Generated Image")

    # Wire the button to the generation function.
    run_button = gr.Button("Generate Image")
    run_button.click(generate_image, inputs=[prompt_box, seed_slider], outputs=result_image)

demo.launch()