"""Gradio front-end for the NicheImage text-to-image proxy service.

Collects a prompt and generation parameters from the user, forwards them to
the remote generation endpoint, and displays the returned image.
"""

import base64
import io

import gradio as gr
import requests
from PIL import Image

# Remote generation endpoint; returns a base64-encoded image as a JSON string.
GENERATE_URL = "http://proxy_client_nicheimage.nichetensor.com:10003/generate"


def base64_to_pil_image(base64_image: str) -> Image.Image:
    """Decode a base64-encoded image string into a PIL Image."""
    image_stream = io.BytesIO(base64.b64decode(base64_image))
    return Image.open(image_stream)


def generate_image(prompt, key, model_name, specify_uid, seed, width, height):
    """Request a generated image from the proxy service.

    Args:
        prompt: Text prompt describing the desired image.
        key: API key authorizing the request.
        model_name: Model to use (one of the dropdown choices).
        specify_uid: Miner UID to target; -1 appears to mean "any" — confirm
            against the service's API.
        seed: RNG seed for the generation pipeline.
        width: Output width in pixels.
        height: Output height in pixels.

    Returns:
        The generated image as a PIL Image.

    Raises:
        requests.HTTPError: If the service responds with an error status.
        requests.RequestException: On connection failure or 60s timeout.
    """
    data = {
        "key": key,
        "model_name": model_name,
        "prompt": prompt,
        # gr.Number / gr.Slider deliver floats; these fields are integral,
        # so coerce before serializing the JSON payload.
        "miner_uid": int(specify_uid),
        "seed": int(seed),
        "pipeline_params": {
            "width": int(width),
            "height": int(height),
        },
    }
    response = requests.post(GENERATE_URL, json=data, timeout=60)
    # Fail loudly on HTTP errors rather than trying to decode an error body.
    response.raise_for_status()
    base64_image = response.json()
    return base64_to_pil_image(base64_image)


iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", value=""),
        gr.Textbox(label="API Key", value=""),
        gr.Dropdown(
            label="Model",
            choices=["RealisticVision", "SDXLTurbo", "AnimeV3"],
            value="SDXLTurbo",
        ),
        gr.Number(label="Specify Miner UID", value=-1),
        gr.Number(label="Seed", value=0),
        gr.Slider(label="Width", minimum=0, maximum=2048, value=512, step=16),
        gr.Slider(label="Height", minimum=0, maximum=2048, value=512, step=16),
    ],
    outputs="image",
    title="Image Generation from Text Prompt",
    description="Enter a prompt to generate an image.",
)

if __name__ == "__main__":
    iface.queue().launch(share=False)