import os

import gradio as gr
import torch
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import login

# Read the Hugging Face token from the environment and log in
hf_token = os.environ.get("HF_TOKEN")
login(token=hf_token)

# Load the base model and apply the photorealistic LoRA
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.bfloat16,
)
pipe.load_lora_weights(
    "prithivMLmods/SD3.5-Large-Photorealistic-LoRA",
    weight_name="Photorealistic-SD3.5-Large-LoRA.safetensors",
)
pipe.fuse_lora(lora_scale=1.0)

# If a GPU is available, move the pipeline to it
# pipe.to("cuda")


# Image generation function with a seed parameter for reproducible outputs
def generate_image(prompt, seed):
    # Seed a generator so the same prompt + seed reproduces the same image
    generator = torch.Generator().manual_seed(int(seed))
    # Run the pipeline with fixed sampling settings
    result = pipe(
        prompt=prompt,
        num_inference_steps=24,
        guidance_scale=4.0,
        width=960,
        height=1280,
        generator=generator,
    )
    # Return the first generated image as a PIL image
    return result.images[0]


# Build the Gradio UI with gr.Interface and launch it
def gradio_interface():
    demo = gr.Interface(
        fn=generate_image,
        inputs=[
            gr.Textbox(
                label="Prompt",
                value="Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational photography",
            ),
            gr.Slider(minimum=0, maximum=100000, step=1, label="Seed", value=42),
        ],
        outputs=gr.Image(type="pil", label="Generated Image"),
    )
    demo.launch()


# Start the Gradio app
gradio_interface()

# Alternative UI built with gr.Blocks (kept for reference):
# with gr.Blocks() as demo:
#     gr.Markdown("## Stable Diffusion Image Generation with Seed Control")
#     # Prompt textbox
#     prompt_input = gr.Textbox(label="Prompt", value="Man in the style of dark beige and brown, uhd image, youthful protagonists, nonrepresentational photography")
#     # Seed slider
#     seed_input = gr.Slider(minimum=0, maximum=100000, step=1, label="Seed", value=42)
#     # Output image
#     output_image = gr.Image(type="pil", label="Generated Image")
#     # Button that triggers generation
#     generate_btn = gr.Button("Generate Image")
#     generate_btn.click(fn=generate_image, inputs=[prompt_input, seed_input], outputs=output_image)
#
# # Launch the Gradio app
# demo.launch()