import spaces
import torch
from pipelines.inverted_ve_pipeline import STYLE_DESCRIPTION_DICT, create_image_grid
import gradio as gr
import os, json
import numpy as np
from PIL import Image
from pipelines.pipeline_stable_diffusion_xl import StableDiffusionXLPipeline
from random import randint
from utils import init_latent
from transformers import Blip2Processor, Blip2ForConditionalGeneration
from diffusers import DDIMScheduler
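# Gradio demo for Visual Style Prompting: reflects the style of a reference image in images
# generated by a pretrained SDXL pipeline, without finetuning or optimization.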
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if device == 'cpu':
    torch_dtype = torch.float32
else:
    torch_dtype = torch.float16
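
# Move the pipeline to the target device and enable memory-saving features when supported.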
def memory_efficient(model):
    try:
        model.to(device)
    except Exception as e:
        print("Error moving model to device:", e)

    try:
        model.enable_model_cpu_offload()
    except AttributeError:
        print("enable_model_cpu_offload is not supported.")

    try:
        model.enable_vae_slicing()
    except AttributeError:
        print("enable_vae_slicing is not supported.")

    # if device == 'cuda':
    #     try:
    #         model.enable_xformers_memory_efficient_attention()
    #     except AttributeError:
    #         print("enable_xformers_memory_efficient_attention is not supported.")
model = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch_dtype)
print("SDXL")
memory_efficient(model)
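
# BLIP-2 captions a user-provided style image; the caption then serves as the reference prompt.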
blip_processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
blip_model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch_dtype).to(device)
# controlnet_scale, canny thres 1, 2 (2 > 1, 2:1, 3:1)

def parse_config(config):
    with open(config, 'r') as f:
        config = json.load(f)
    return config
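
# Build the Gradio examples from the bundled reference images under assets/ref; each filename
# encodes its style name (second underscore-separated token), which maps to a config JSON.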
def load_example_style():
    folder_path = 'assets/ref'
    examples = []
    for filename in os.listdir(folder_path):
        if filename.endswith(".png"):
            image_path = os.path.join(folder_path, filename)
            image_name = os.path.basename(image_path)
            style_name = image_name.split('_')[1]

            config_path = './config/{}.json'.format(style_name)
            config = parse_config(config_path)
            inf_object_name = config["inference_info"]["inf_object_list"][0]

            image_info = [image_path, style_name, inf_object_name, 1, 50]
            examples.append(image_info)

    return examples

def blip_inf_prompt(image):
    # Cast inputs to the same dtype as the BLIP-2 model (float16 on GPU, float32 on CPU).
    inputs = blip_processor(images=image, return_tensors="pt").to(device, torch_dtype)

    generated_ids = blip_model.generate(**inputs)
    generated_text = blip_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    return generated_text
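
# Main entry point. A non-empty style name selects a predefined style config; an empty style
# name means the user uploaded their own reference image, which is captioned with BLIP-2.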
@spaces.GPU
def style_fn(image_path, style_name, content_text, output_number=1, diffusion_step=50):
    # An empty style name means the user supplied their own reference image.
    user_image_flag = not style_name.strip()

    if not user_image_flag:
        # Predefined style: load its config and build reference/inference prompts from it.
        real_img = None
        config_path = './config/{}.json'.format(style_name)
        config = parse_config(config_path)

        inf_object = content_text
        inf_seeds = [randint(0, 10**10) for _ in range(int(output_number))]

        activate_layer_indices_list = config['inference_info']['activate_layer_indices_list']
        activate_step_indices_list = config['inference_info']['activate_step_indices_list']
        ref_seed = config['reference_info']['ref_seeds'][0]

        attn_map_save_steps = config['inference_info']['attn_map_save_steps']
        guidance_scale = config['guidance_scale']
        use_inf_negative_prompt = config['inference_info']['use_negative_prompt']

        ref_object = config["reference_info"]["ref_object_list"][0]
        ref_with_style_description = config['reference_info']['with_style_description']
        inf_with_style_description = config['inference_info']['with_style_description']

        use_shared_attention = config['inference_info']['use_shared_attention']
        adain_queries = config['inference_info']['adain_queries']
        adain_keys = config['inference_info']['adain_keys']
        adain_values = config['inference_info']['adain_values']
        use_advanced_sampling = config['inference_info']['use_advanced_sampling']
        use_prompt_as_null = False

        style_name = config["style_name_list"][0]
        style_description_pos = STYLE_DESCRIPTION_DICT[style_name][0]
        style_description_neg = STYLE_DESCRIPTION_DICT[style_name][1]

        if ref_with_style_description:
            ref_prompt = style_description_pos.replace("{object}", ref_object)
        else:
            ref_prompt = ref_object

        if inf_with_style_description:
            inf_prompt = style_description_pos.replace("{object}", inf_object)
        else:
            inf_prompt = inf_object

    else:
        # User-provided reference image: caption it with BLIP-2 and fall back to the default config.
        model.scheduler = DDIMScheduler.from_config(model.scheduler.config)

        origin_real_img = Image.open(image_path).resize((1024, 1024), resample=Image.BICUBIC)
        real_img = np.array(origin_real_img).astype(np.float32) / 255.0

        style_name = 'default'
        config_path = './config/{}.json'.format(style_name)
        config = parse_config(config_path)

        inf_object = content_text
        inf_seeds = [randint(0, 10**10) for _ in range(int(output_number))]

        activate_layer_indices_list = config['inference_info']['activate_layer_indices_list']
        activate_step_indices_list = config['inference_info']['activate_step_indices_list']
        ref_seed = 0

        attn_map_save_steps = config['inference_info']['attn_map_save_steps']
        guidance_scale = config['guidance_scale']
        use_inf_negative_prompt = False

        use_shared_attention = config['inference_info']['use_shared_attention']
        adain_queries = config['inference_info']['adain_queries']
        adain_keys = config['inference_info']['adain_keys']
        adain_values = config['inference_info']['adain_values']
        use_advanced_sampling = False
        use_prompt_as_null = True

        ref_prompt = blip_inf_prompt(origin_real_img)
        inf_prompt = inf_object
        style_description_neg = None
    # Inference
    with torch.inference_mode():
        grid = None
        for activate_layer_indices in activate_layer_indices_list:
            for activate_step_indices in activate_step_indices_list:
                str_activate_layer, str_activate_step = model.activate_layer(
                    activate_layer_indices=activate_layer_indices,
                    attn_map_save_steps=attn_map_save_steps,
                    activate_step_indices=activate_step_indices,
                    use_shared_attention=use_shared_attention,
                    adain_queries=adain_queries,
                    adain_keys=adain_keys,
                    adain_values=adain_values,
                )

                # The first latent is the style reference; one latent per requested output follows.
                ref_latent = init_latent(model, device_name=device, dtype=torch_dtype, seed=ref_seed)
                latents = [ref_latent]
                num_images_per_prompt = len(inf_seeds) + 1

                for inf_seed in inf_seeds:
                    # latents.append(model.get_init_latent(inf_seed, precomputed_path=None))
                    inf_latent = init_latent(model, device_name=device, dtype=torch_dtype, seed=inf_seed)
                    latents.append(inf_latent)

                latents = torch.cat(latents, dim=0)
                latents = latents.to(device)  # Tensor.to is not in-place; keep the returned tensor.

                images = model(
                    prompt=ref_prompt,
                    negative_prompt=style_description_neg,
                    guidance_scale=guidance_scale,
                    num_inference_steps=diffusion_step,
                    latents=latents,
                    num_images_per_prompt=num_images_per_prompt,
                    target_prompt=inf_prompt,
                    use_inf_negative_prompt=use_inf_negative_prompt,
                    use_advanced_sampling=use_advanced_sampling,
                    use_prompt_as_null=use_prompt_as_null,
                    image=real_img,
                )[0][1:]  # Drop the reference image; keep only the generated outputs.

                n_row = 1
                n_col = len(inf_seeds)

                # make grid
                grid = create_image_grid(images, n_row, n_col, padding=10)

    return grid
description_md = """
### We introduce `Visual Style Prompting`, which reflects the style of a reference image to the images generated by a pretrained text-to-image diffusion model without finetuning or optimization (e.g., Figure N).
### [[Paper](https://arxiv.org/abs/2402.12974)] | [[Project page](https://curryjung.github.io/VisualStylePrompt)] | [[Code](https://github.com/naver-ai/Visual-Style-Prompting)]
### [[w/ Controlnet ver](https://huggingface.co./spaces/naver-ai/VisualStylePrompting_Controlnet)]
---
### To try out our vanilla demo:
1. Choose a `style reference` from the collection of images below.
2. Enter the `text prompt`.
3. Choose the `number of outputs`.
### To better reflect the style of your own image, use the highest resolution available.
### For faster results, we recommend lowering the diffusion steps to 30.
### Enjoy!
"""
iface_style = gr.Interface(
    fn=style_fn,
    inputs=[
        gr.components.Image(label="Style Image", type="filepath"),
        gr.components.Textbox(label='Style name', visible=False),
        gr.components.Textbox(label="Text prompt", placeholder="Enter Text prompt"),
        gr.components.Textbox(label="Number of outputs", placeholder="Enter Number of outputs"),
        gr.components.Slider(minimum=10, maximum=50, step=10, value=50, label="Diffusion steps"),
    ],
    outputs=gr.components.Image(label="Generated Image"),
    title="Visual Style Prompting (default)",
    description=description_md,
    examples=load_example_style(),
)
iface_style.launch(debug=True)
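
# Local sanity check (hypothetical style name; the available names are defined by the JSON files
# under ./config/ and the keys of STYLE_DESCRIPTION_DICT):
# grid = style_fn(None, "anime", "A painting of a cat", output_number=1, diffusion_step=30)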