import os

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"
os.environ["TOKENIZERS_PARALLELISM"] = "true"

import numpy as np
import gradio as gr
import spaces
import torch
import torch.nn.functional as F
from PIL import Image
from omegaconf import OmegaConf
from transformers import AutoTokenizer

from prompting_utils import UniversalPrompting, create_attention_mask_predict_next, create_attention_mask_for_mmu
from training_utils import image_transform
# `get_mask_chedule` (sic) matches the spelling of the upstream definition.
from models import Showo, MAGVITv2, get_mask_chedule

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

config = OmegaConf.load("configs/showo_demo_512x512.yaml")

tokenizer = AutoTokenizer.from_pretrained(config.model.showo.llm_model_path, padding_side="left")
uni_prompting = UniversalPrompting(
    tokenizer,
    max_text_len=config.dataset.preprocessing.max_seq_length,
    special_tokens=("<|soi|>", "<|eoi|>", "<|sov|>", "<|eov|>", "<|t2i|>", "<|mmu|>", "<|t2v|>", "<|v2v|>", "<|lvg|>"),
    ignore_id=-100,
    cond_dropout_prob=config.training.cond_dropout_prob,
)

# The MAGVIT-v2 image tokenizer is frozen; it is only used to encode images to
# VQ tokens and decode generated tokens back to pixels.
vq_model = MAGVITv2()
vq_model = vq_model.from_pretrained(config.model.vq_model.vq_model_name).to(device)
vq_model.requires_grad_(False)
vq_model.eval()

model = Showo.from_pretrained(config.model.showo.pretrained_model_path).to(device)
model.eval()

mask_token_id = model.config.mask_token_id


@spaces.GPU
def text_to_image_generation(input_text, guidance_scale=1.75, generation_timesteps=18):
    prompts = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    # Start from a fully masked image: every VQ-token position holds the mask token.
    image_tokens = torch.ones((len(prompts), config.model.showo.num_vq_tokens),
                              dtype=torch.long, device=device) * mask_token_id
    input_ids, _ = uni_prompting((prompts, image_tokens), 't2i_gen')

    if config.training.guidance_scale > 0:
        # Classifier-free guidance: pair each prompt with an empty prompt.
        uncond_input_ids, _ = uni_prompting(([''] * len(prompts), image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(
            torch.cat([input_ids, uncond_input_ids], dim=0),
            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(
            input_ids,
            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )

    # Clamp to the valid codebook range, decode to pixels, and convert to uint8.
    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)
    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]
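
# Hypothetical smoke test for text_to_image_generation (not part of the demo UI;
# assumes a CUDA device and that the checkpoints named in the config resolve):
#
#   sample = text_to_image_generation("a red bicycle leaning on a brick wall")
#   Image.fromarray(sample).save("t2i_sample.png")  # sample is an HxWx3 uint8 array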

@spaces.GPU
def text_guided_inpainting(input_text, inpainting_image, inpainting_mask_input,
                           guidance_scale=1.75, generation_timesteps=16):
    # Pre-process the inpainting mask drawn in the gr.ImageMask editor: any stroke
    # with non-zero alpha marks a region to repaint.
    alpha_channel = inpainting_mask_input["layers"][0][:, :, 3]
    mask = np.where(alpha_channel == 0, 0, 255).astype(np.uint8)
    if np.sum(mask) == 0:
        # Nothing was drawn; fall back to the editor background as the mask.
        inpainting_mask = Image.fromarray(inpainting_mask_input['background']).convert('L')
    else:
        inpainting_mask = Image.fromarray(mask).convert('L')

    prompt = [input_text]
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    inpainting_image = image_transform(inpainting_image, resolution=config.dataset.params.resolution).to(device)
    inpainting_mask = image_transform(inpainting_mask, resolution=config.dataset.params.resolution, normalize=False)

    inpainting_image = inpainting_image.unsqueeze(0).repeat(config.training.batch_size, 1, 1, 1)
    inpainting_mask = inpainting_mask.unsqueeze(0).to(device)
    # Downsample the pixel mask to the 16x-reduced VQ-token grid and binarize it.
    inpainting_mask = F.interpolate(inpainting_mask, size=config.dataset.params.resolution // 16, mode='bicubic')
    inpainting_mask = inpainting_mask.repeat(config.training.batch_size, 1, 1, 1)
    inpainting_mask[inpainting_mask < 0.5] = 0
    inpainting_mask[inpainting_mask >= 0.5] = 1
    inpainting_mask = inpainting_mask.reshape(config.training.batch_size, -1)
    inpainting_mask = inpainting_mask.to(torch.bool)

    # Encode the image to VQ tokens (offset past the text vocabulary) and replace
    # the masked positions with the mask token so they get regenerated.
    inpainting_image_tokens = vq_model.get_code(inpainting_image) + len(uni_prompting.text_tokenizer)
    inpainting_image_tokens[inpainting_mask] = mask_token_id

    input_ids, _ = uni_prompting((prompt, inpainting_image_tokens), 't2i_gen')

    if config.training.guidance_scale > 0:
        uncond_input_ids, _ = uni_prompting(([''] * len(prompt), inpainting_image_tokens), 't2i_gen')
        attention_mask = create_attention_mask_predict_next(
            torch.cat([input_ids, uncond_input_ids], dim=0),
            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
            rm_pad_in_image=True)
    else:
        attention_mask = create_attention_mask_predict_next(
            input_ids,
            pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
            soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
            eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
            rm_pad_in_image=True)
        uncond_input_ids = None

    if config.get("mask_schedule", None) is not None:
        schedule = config.mask_schedule.schedule
        args = config.mask_schedule.get("params", {})
        mask_schedule = get_mask_chedule(schedule, **args)
    else:
        mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

    with torch.no_grad():
        gen_token_ids = model.t2i_generate(
            input_ids=input_ids,
            uncond_input_ids=uncond_input_ids,
            attention_mask=attention_mask,
            guidance_scale=config.training.guidance_scale,
            temperature=config.training.get("generation_temperature", 1.0),
            timesteps=config.training.generation_timesteps,
            noise_schedule=mask_schedule,
            noise_type=config.training.get("noise_type", "mask"),
            seq_len=config.model.showo.num_vq_tokens,
            uni_prompting=uni_prompting,
            config=config,
        )

    gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
    images = vq_model.decode_code(gen_token_ids)
    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]
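
# Shape of the mask payload consumed above -- a sketch of what gr.ImageMask passes
# to the callback (keys per current Gradio; exact shapes depend on the upload):
#
#   inpainting_mask_input = {
#       "background": np.ndarray,   # HxWxC source image
#       "layers": [np.ndarray],     # HxWx4 RGBA strokes; alpha > 0 marks repaint
#       "composite": np.ndarray,    # background with the layers applied
#   }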

@spaces.GPU
def text_guided_extrapolation(input_img, input_text, left_ext, right_ext,
                              guidance_scale=1.75, generation_timesteps=16):
    config.offset = 0
    config.training.batch_size = config.batch_size = 1
    config.training.guidance_scale = config.guidance_scale = guidance_scale
    config.training.generation_timesteps = config.generation_timesteps = generation_timesteps

    # Extrapolate half an image at a time, first rightward, then leftward.
    extra_direction = ['right'] * int(right_ext) + ['left'] * int(left_ext)
    prompt = [input_text] * len(extra_direction)
    W = config.dataset.params.resolution // 16
    for id, (prt, direction) in enumerate(zip(prompt, extra_direction)):
        prt = [prt] * config.training.batch_size
        if id == 0:
            # extrapolation_image = Image.open(config.image_path).convert("RGB")
            extrapolation_image = input_img
            extrapolation_image = image_transform(extrapolation_image,
                                                  resolution=config.dataset.params.resolution).to(device)

            B, _, _ = extrapolation_image.shape
            extrapolation_image = extrapolation_image.unsqueeze(0)
            extrapolation_image_tokens = vq_model.get_code(extrapolation_image) + len(uni_prompting.text_tokenizer)
            extrapolation_image_tokens = extrapolation_image_tokens.reshape(1,
                                                                            config.dataset.params.resolution // 16,
                                                                            config.dataset.params.resolution // 16)
            extrapolation_image_tokens = extrapolation_image_tokens.repeat(config.training.batch_size, 1, 1)
        else:
            # Later iterations continue from the previously generated token grid.
            extrapolation_image_tokens = gen_token_ids + len(uni_prompting.text_tokenizer)

        # Slices of the grid that stay fixed; the opposite half is masked and
        # regenerated conditioned on them.
        image_left_part = extrapolation_image_tokens[:, :, :-(W // 2 - config.offset)] - len(uni_prompting.text_tokenizer)
        image_right_part = extrapolation_image_tokens[:, :, W // 2 - config.offset:] - len(uni_prompting.text_tokenizer)
        image_up_part = extrapolation_image_tokens[:, :-(W // 2 - config.offset), :] - len(uni_prompting.text_tokenizer)
        image_down_part = extrapolation_image_tokens[:, W // 2 - config.offset:, :] - len(uni_prompting.text_tokenizer)

        if direction in ['left', 'right']:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16 // 2 + config.offset),
                                             dtype=torch.int64, device=device) + mask_token_id
        else:
            extrapolation_mask = torch.zeros((config.training.batch_size,
                                              config.dataset.params.resolution // 16 // 2 + config.offset,
                                              config.dataset.params.resolution // 16),
                                             dtype=torch.int64, device=device) + mask_token_id

        if direction == 'left':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :, :W // 2 - config.offset]], dim=-1)
        elif direction == 'right':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, :, -(W // 2 - config.offset):], extrapolation_mask], dim=-1)
        elif direction == 'up':
            extrapolation_image_tokens = torch.cat(
                [extrapolation_mask, extrapolation_image_tokens[:, :W // 2 - config.offset, :]], dim=-2)
        else:
            extrapolation_image_tokens = torch.cat(
                [extrapolation_image_tokens[:, -(W // 2 - config.offset):, :], extrapolation_mask], dim=-2)

        extrapolation_image_tokens = extrapolation_image_tokens.reshape(config.training.batch_size, -1)

        input_ids, _ = uni_prompting((prt, extrapolation_image_tokens), 't2i_gen')

        if config.training.guidance_scale > 0:
            uncond_input_ids, _ = uni_prompting(([''] * len(prt), extrapolation_image_tokens), 't2i_gen')
            attention_mask = create_attention_mask_predict_next(
                torch.cat([input_ids, uncond_input_ids], dim=0),
                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                rm_pad_in_image=True)
        else:
            attention_mask = create_attention_mask_predict_next(
                input_ids,
                pad_id=int(uni_prompting.sptids_dict['<|pad|>']),
                soi_id=int(uni_prompting.sptids_dict['<|soi|>']),
                eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']),
                rm_pad_in_image=True)
            uncond_input_ids = None

        if config.get("mask_schedule", None) is not None:
            schedule = config.mask_schedule.schedule
            args = config.mask_schedule.get("params", {})
            mask_schedule = get_mask_chedule(schedule, **args)
        else:
            mask_schedule = get_mask_chedule(config.training.get("mask_schedule", "cosine"))

        with torch.no_grad():
            gen_token_ids = model.t2i_generate(
                input_ids=input_ids,
                uncond_input_ids=uncond_input_ids,
                attention_mask=attention_mask,
                guidance_scale=config.training.guidance_scale,
                temperature=config.training.get("generation_temperature", 1.0),
                timesteps=config.training.generation_timesteps,
                noise_schedule=mask_schedule,
                noise_type=config.training.get("noise_type", "mask"),
                seq_len=config.model.showo.num_vq_tokens,
                uni_prompting=uni_prompting,
                config=config,
            )

        gen_token_ids = torch.clamp(gen_token_ids, max=config.model.showo.codebook_size - 1, min=0)
        gen_token_ids = gen_token_ids.reshape(config.training.batch_size,
                                              config.dataset.params.resolution // 16,
                                              config.dataset.params.resolution // 16)
        # Stitch the newly generated half back onto the part that was kept fixed.
        if direction == 'left':
            gen_token_ids = torch.cat([gen_token_ids, image_right_part], dim=-1)
        elif direction == 'right':
            gen_token_ids = torch.cat([image_left_part, gen_token_ids], dim=-1)
        elif direction == 'up':
            gen_token_ids = torch.cat([gen_token_ids, image_down_part], dim=-2)
        else:
            gen_token_ids = torch.cat([image_up_part, gen_token_ids], dim=-2)

    _, h, w = gen_token_ids.shape
    gen_token_ids = gen_token_ids.reshape(config.training.batch_size, -1)
    with torch.no_grad():
        images = vq_model.decode_code(gen_token_ids, shape=(h, w))

    images = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    images *= 255.0
    images = images.permute(0, 2, 3, 1).cpu().detach().numpy().astype(np.uint8)
    return images[0]


@spaces.GPU
def multimodal_understanding(input_img, input_text, chat_history):
    top_k = 1  # retain only the top_k most likely tokens; clamp others to 0 probability

    image_ori = input_img
    image = image_transform(image_ori, resolution=config.dataset.params.resolution).to(device)
    image = image.unsqueeze(0)
    image_tokens = vq_model.get_code(image) + len(uni_prompting.text_tokenizer)

    question = input_text
    input_ids = uni_prompting.text_tokenizer(['USER: \n' + question + ' ASSISTANT:'])['input_ids']
    input_ids = torch.tensor(input_ids).to(device)

    # Assemble the MMU sequence: <|mmu|> <|soi|> [image tokens] <|eoi|> <|sot|> [text].
    input_ids = torch.cat([
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|mmu|>']).to(device),
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|soi|>']).to(device),
        image_tokens,
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|eoi|>']).to(device),
        (torch.ones(input_ids.shape[0], 1) * uni_prompting.sptids_dict['<|sot|>']).to(device),
        input_ids
    ], dim=1).long()

    attention_mask = create_attention_mask_for_mmu(input_ids.to(device),
                                                   eoi_id=int(uni_prompting.sptids_dict['<|eoi|>']))

    cont_toks_list = model.mmu_generate(input_ids,
                                        attention_mask=attention_mask,
                                        max_new_tokens=100,
                                        top_k=top_k,
                                        eot_token=uni_prompting.sptids_dict['<|eot|>'])
    cont_toks_list = torch.stack(cont_toks_list).squeeze()[None]

    output_text = uni_prompting.text_tokenizer.batch_decode(cont_toks_list, skip_special_tokens=True)
    output_text = output_text[0].strip()
    chat_history.append((input_text, output_text))
    return "", chat_history
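
# Wiring sketch for the chat callback (mirrors the msg.submit(...) hookup below):
# the textbox value and history list go in; the returned empty string clears the
# textbox and the appended (question, answer) pair refreshes the Chatbot.
#
#   _, history = multimodal_understanding(Image.open("cat.jpg").convert("RGB"),
#                                         "What is in this image?", [])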

with gr.Blocks() as demo:
    gr.HTML("""
    <div style="text-align: center;">
        <h1>Show-o</h1>
        <p>This is the official Gradio demo for the Show-o model, a unified model that can do multimodal understanding and generation.</p>
        <p>
            Paper: <a href="https://arxiv.org/abs/2408.12528">Show-o: One Single Transformer to Unify Multimodal Understanding and Generation</a><br>
            Project Website: <a href="https://showlab.github.io/Show-o/">Show-o Website</a><br>
            Code and Models: <a href="https://github.com/showlab/Show-o">GitHub</a>
        </p>
    </div>
""") banner_1 = gr.Markdown(value="# Text-to-image Generation") with gr.Row(): with gr.Column(): text_prompt_t2i = gr.Textbox( label="Text prompt", lines=2, placeholder="Input the text prompt here for image generation." ) guidance_scale_t2i = gr.Slider( label="guidance scale", minimum=0, maximum=5, step=0.05, value=1.75 ) generation_timesteps_t2i = gr.Slider( label="timesteps", minimum=1, maximum=30, step=1, value=18 ) generated_img_t2i = gr.Image( label="Output image" ) examples_t2i = gr.Examples( label="Text to image generation examples", examples=[ "A 3D render of a futuristic car made of glass, driving through a city of mirrors.", # "A photo-realistic image of a garden with pink and blue flowers. There are pink poppies in the foreground, with their petals gently curved. The background features purple cosmos flowers. The flowers have water droplets on their petals, which glisten in the natural light. The green leaves are lush and healthy. The background is blurred, with a few trees and buildings visible. The overall image has a high resolution and is hyper-realistic, as if taken by a skilled photographer.", "an egg and a bird made of wheat bread.", "An armchair in the shape of an avocado", # "The image features a stylized stained glass illustration of a hummingbird with vibrant colors, set against a backdrop of swirling patterns and a large sun. The composition includes floral elements and intricate details, creating a vivid and dynamic scene that emphasizes the beauty of the bird. The colors range from greens to reds, enhancing the lively and artistic aesthetic of the piece.", # "A 3D render of a surreal explosion scene on the shore of a beautiful white sand beach with crystal clear water. The explosion has a spatter of oil paint with pastel colors and a thick consistency. The explosion is in a quiet and serene environment. A beautiful Japanese woman with a dress compacted to the sea is seen. There are butterfly petals and flowers with an ethereal glow and bioluminescence. There are pink and blue roses, and the overall image has a surreal and dreamlike quality.", # "A 3D render of a cute, round rice ball character with big, sparkling eyes that convey curiosity and joy. Its body is a soft, fluffy white with a slight sheen, resembling freshly cooked rice. Mochi has small, rosy cheeks that give it a warm, friendly expression. A tiny smile brightens its face, and it often sports a colorful ribbon tied around its \"waist,\" adding a playful touch. Mochi's arms and feet are cartoonishly short, allowing it to bounce adorably around its surroundings.", # "A hyper-realistic close-up photograph of a woman's face, focusing on the left side. The image is highly detailed and realistic, showing voluminous glossy lips slightly parted, a well-defined nose, and open eyes with long eyelashes that cast shadows on the skin. The eye color is crystal clear almond green. The skin texture is crisp, with incredible detail of natural, lush skin and pores and freckles, with subtle highlights and shadows that give a realistic, close-up appearance.", "A vibrant cartoon of a chameleon blending into a tie-dye pattern.", "A colorful cartoon of a tiger camouflaged in an abstract art painting, its stripes merging with the wild brushstrokes.", # "A 3D render of a cute, round rice ball character named Mochi, with big, sparkling eyes that convey curiosity and joy. Its body is a soft, fluffy white with a slight sheen, resembling freshly cooked rice. Mochi has small, rosy cheeks that give it a warm, friendly expression. 
A tiny smile brightens its face, and it often sports a colorful ribbon tied around its \"waist,\" adding a playful touch. Mochi's arms and feet are cartoonishly short, allowing it to bounce adorably around its surroundings. This time, Mochi is placed against a background that is a vibrant explosion of colors, with bright hues of fuchsia, turquoise, lemon yellow, and emerald green creating a canvas of vibrant contrasts and playful energy. The clashing colors make Mochi's soft white body and rosy cheeks stand out even more, inviting viewers into a world of cheerful exuberance and visual delight.", "The word 'mardefly' on a coffee mug.", "A group of seven people standing on a snow-covered slope, allwearing skis and posing for a picture." ], inputs=text_prompt_t2i, ) submit_btn_t2i = gr.Button("Generate: Text-to-image") submit_btn_t2i.click(text_to_image_generation, [text_prompt_t2i, guidance_scale_t2i, generation_timesteps_t2i], [generated_img_t2i]) banner_2 = gr.Markdown(value="# Text-guided inpainting") with gr.Row(): inpainting_input_img = gr.Image( label="Input image", type="pil", # height=256, # width=256, ) # inpainting_input_mask = gr.Image( # label="Inpainting mask", # image_mode="L", # type="pil", # height=256, # width=256, # ) inpainting_input_mask = gr.ImageMask( sources=["upload"], layers=False, transforms=[], format="png", label="Inpainting mask", show_label=True ) with gr.Column(): text_prompt_inpainting = gr.Textbox( label="Text prompt", lines=2, placeholder="Input the text prompt here for image inpainting." ) guidance_scale_inpainting = gr.Slider( label="guidance scale", minimum=0, maximum=5, step=0.05, value=1.75 ) generation_timesteps_inpainting = gr.Slider( label="timesteps", minimum=1, maximum=30, step=1, value=16 ) generated_img_inpainting = gr.Image( label="Output image" ) examples_inpainting = gr.Examples( label="Text-guided inpainting examples", examples=[ [ "a blue sports car with sleek curves and tinted windows, parked on a bustling city street.", Image.open("./inpainting_validation/bus.jpg").convert("RGB"), Image.open("./inpainting_validation/bus_mask.webp").convert("L"), ], [ "a clear, shallow river with some vibrant flowers in it.", Image.open("./inpainting_validation/train.jpg").convert("RGB"), Image.open("./inpainting_validation/train_mask.webp").convert("L"), ], ], inputs=[text_prompt_inpainting, inpainting_input_img, inpainting_input_mask], ) submit_btn_inpainting = gr.Button("Generate: Text-guided Inpainting") submit_btn_inpainting.click(text_guided_inpainting, [text_prompt_inpainting, inpainting_input_img, inpainting_input_mask, guidance_scale_inpainting, generation_timesteps_inpainting], [generated_img_inpainting]) banner_3 = gr.Markdown(value="# Text-guided extrapolation") with gr.Row(): extra_input_img = gr.Image( label="Input image", type="pil", image_mode="RGB", ) with gr.Column(): text_prompt_extrapolation = gr.Textbox( label="Text prompt", lines=1, placeholder="Input the text prompt here for image extrapolation." 
            )
            guidance_scale_extrapolation = gr.Slider(
                label="guidance scale",
                minimum=0,
                maximum=5,
                step=0.05,
                value=1.75
            )
            generation_timesteps_extrapolation = gr.Slider(
                label="timesteps",
                minimum=1,
                maximum=30,
                step=1,
                value=16
            )
            left_extrapolation = gr.Slider(
                label="left extrapolation",
                minimum=0,
                maximum=5,
                step=1,
                value=1
            )
            right_extrapolation = gr.Slider(
                label="right extrapolation",
                minimum=0,
                maximum=5,
                step=1,
                value=1
            )
        generated_img_extrapolation = gr.Image(
            label="Output image"
        )
    examples_extra = gr.Examples(
        label="Text-guided extrapolation examples",
        examples=[
            [
                Image.open("./inpainting_validation/wukong2.jpg").convert("RGB"),
                "the continuous mountain ranges and jungles, with meandering rivers occasionally appearing.",
                2,
                2,
            ],
            [
                Image.open("./inpainting_validation/alpine_lake.jpg").convert("RGB"),
                "a serene natural landscape featuring a clear, blue lake surrounded by lush green trees.",
                2,
                2,
            ],
        ],
        inputs=[extra_input_img, text_prompt_extrapolation, left_extrapolation, right_extrapolation],
    )
    submit_btn_extrapolation = gr.Button("Generate: Text-guided Extrapolation")
    submit_btn_extrapolation.click(text_guided_extrapolation,
                                   [extra_input_img, text_prompt_extrapolation,
                                    left_extrapolation, right_extrapolation,
                                    guidance_scale_extrapolation, generation_timesteps_extrapolation],
                                   [generated_img_extrapolation])

    banner_4 = gr.Markdown(value="# Multimodal understanding")
    with gr.Row():
        with gr.Row():
            chat_input_img = gr.Image(
                label="Input image",
                type="pil",
                image_mode="RGB",
            )
            with gr.Column():
                chatbot = gr.Chatbot()
                msg = gr.Textbox(label="Press Enter to send a message for chat")
                clear = gr.ClearButton([msg, chatbot])

        msg.submit(multimodal_understanding, [chat_input_img, msg, chatbot], [msg, chatbot])

demo.launch()
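
# The @spaces.GPU decorators target Hugging Face ZeroGPU Spaces; outside that
# environment the `spaces` package treats them as no-ops, and demo.launch()
# serves the UI locally (default: http://localhost:7860).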