# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_seg2image.py
# The original license file is LICENSE.ControlNet in this repo.
import gradio as gr
from PIL import Image
#first elem of gallery is ^^ - {'name': '/tmp/tmpw60bbw6k.png', 'data': 'file=/tmp/tmpw60bbw6k.png', 'is_file': True}
#first elem of gallery is ^^ - {'name': '/tmp/tmpba0d5dt5.png', 'data': 'file=/tmp/tmpba0d5dt5.png', 'is_file': True}
import numpy as np
import base64
def encode(input_image):
    """Base64-encode the raw pixel buffer of a NumPy image array.

    Args:
        input_image: ``np.ndarray`` holding the image pixels.

    Returns:
        str: ASCII Base64 encoding of the array's raw bytes.
    """
    print(f"type of input_image ^^ - {type(input_image)}")
    # Idiomatic instance-method call (was np.ndarray.tobytes(input_image)).
    img_bytes = input_image.tobytes()
    # Encode the bytes using Base64 and decode to a plain str for embedding in HTML.
    encoded_string = base64.b64encode(img_bytes).decode('utf-8')
    return encoded_string
def create_imgcomp(input_image, filename):
    """Build an HTML snippet for an image comparison of the input and result images.

    Args:
        input_image: ``np.ndarray`` input image (Base64-encoded for embedding).
        filename: path of the generated result image (as returned by dummyfun).

    Returns:
        str: HTML markup for the gr.HTML component.
    """
    encoded_string = encode(input_image)
    # NOTE(review): htmltag is a placeholder — presumably it held an <img>/comparison
    # template using encoded_string and filename (see the commented file= URL below);
    # restore the real markup to make the comparison visible.
    htmltag = ' '
    #https://ysharma-controlnetsegmentation.hf.space/file=/tmp/tmpqcz9yeta.png
    print(f"htmltag is ^^ - {htmltag}")
    # BUG FIX: the original triple-quoted string was never terminated (the next
    # line held a lone '"'), which was a SyntaxError. Close it properly and
    # include the html tag so the returned markup is well-formed.
    desc = """
    """ + htmltag + """
    """
    return desc
def dummyfun(result_gallery):
    """Debug helper: log gallery metadata and return the second item's file path.

    Args:
        result_gallery: list of gallery item dicts (each with a 'name' file path).

    Returns:
        str: the 'name' (temp-file path) of the second gallery entry.
    """
    second = result_gallery[1]
    print(f"type of gallery is ^^ - {type(result_gallery)}")
    print(f"length of gallery is ^^ - {len(result_gallery)}")
    print(f"first elem of gallery is ^^ - {result_gallery[0]}")
    print(f"first elem of gallery is ^^ - {second}")
    return second['name']
def create_demo(process, max_images=12):
    """Build the Gradio Blocks UI for segmentation-map-controlled Stable Diffusion.

    Args:
        process: callable invoked by the Run button; receives the values of
            `ips` (image, prompts, sliders, seed, eta) and returns the image
            list shown in `result_gallery`.
        max_images: upper bound of the 'Images' (num_samples) slider.

    Returns:
        gr.Blocks: the assembled (not yet launched) demo.

    NOTE(review): this uses legacy Gradio 3.x API (`source=`, `.style(...)`,
    `gr.Button(label=...)`) — confirm the pinned gradio version before upgrading.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Control Stable Diffusion with Segmentation Maps')
        with gr.Row():
            # Left column: inputs and generation hyper-parameters.
            with gr.Column():
                input_image = gr.Image(source='upload', type='numpy')
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
                with gr.Accordion('Advanced options', open=False):
                    num_samples = gr.Slider(label='Images',
                                            minimum=1,
                                            maximum=max_images,
                                            value=1,
                                            step=1)
                    # step=256 keeps resolutions at 256/512/768.
                    image_resolution = gr.Slider(label='Image Resolution',
                                                 minimum=256,
                                                 maximum=768,
                                                 value=512,
                                                 step=256)
                    detect_resolution = gr.Slider(
                        label='Segmentation Resolution',
                        minimum=128,
                        maximum=1024,
                        value=512,
                        step=1)
                    ddim_steps = gr.Slider(label='Steps',
                                           minimum=1,
                                           maximum=100,
                                           value=20,
                                           step=1)
                    scale = gr.Slider(label='Guidance Scale',
                                      minimum=0.1,
                                      maximum=30.0,
                                      value=9.0,
                                      step=0.1)
                    # randomize=True draws a fresh seed on every page load;
                    # -1 presumably means "random seed" downstream — confirm in process().
                    seed = gr.Slider(label='Seed',
                                     minimum=-1,
                                     maximum=2147483647,
                                     step=1,
                                     randomize=True,
                                     queue=False)
                    eta = gr.Number(label='eta (DDIM)', value=0.0)
                    a_prompt = gr.Textbox(
                        label='Added Prompt',
                        value='best quality, extremely detailed')
                    n_prompt = gr.Textbox(
                        label='Negative Prompt',
                        value=
                        'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                    )
            # Right column: outputs plus debug helpers for filenames/comparison.
            with gr.Column():
                result_gallery = gr.Gallery(label='Output', #visible= False,
                                            show_label=False,
                                            elem_id='gallery').style(
                                                grid=2, height='auto')
                b1 = gr.Button('Get filenames')
                filename = gr.Textbox(label="image file names")
                b2 = gr.Button('Show Image-Comparison')
                imagecomp = gr.HTML()
        # Inputs forwarded to process() — order must match its signature.
        ips = [
            input_image, prompt, a_prompt, n_prompt, num_samples,
            image_resolution, detect_resolution, ddim_steps, scale, seed, eta
        ]
        # api_name='seg' exposes this handler on the /api/seg REST endpoint.
        run_button.click(fn=process,
                         inputs=ips,
                         outputs=[result_gallery],
                         api_name='seg')
        # b1: pull the second gallery item's temp-file path into the textbox.
        b1.click(dummyfun, [result_gallery], [filename])
        # b2: render the input-vs-result comparison HTML.
        b2.click(create_imgcomp, [input_image, filename], [imagecomp])
    return demo