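# Virtual Try-On demo (Gradio): OmniGen composes an image of the uploaded person
# wearing the uploaded clothing; Llama 3.2 Vision (predict_clothing) can describe
# the garments for the prompt.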
import gradio as gr
import os
import random
import spaces
import torch
from transformers import MllamaForConditionalGeneration, AutoProcessor
from OmniGen import OmniGenPipeline
from PIL import Image

from huggingface_hub import login
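# Llama 3.2 is a gated model; authenticate with the HF token stored in the
# "Llama32V" environment variable (a Space secret).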
Llama32V_HFtoken = os.getenv("Llama32V")
login(Llama32V_HFtoken)

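# Load the OmniGen pipeline (image generation) and Llama 3.2 Vision (clothing description)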
pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)


@spaces.GPU()
def predict_clothing(images):
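    """Describe the clothing in each input image in 1-3 words (including its
    colour) using Llama 3.2 Vision; returns one short description per image."""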
    messages = [{"role": "user", "content":
                 [
                     {"type": "image"},
                     {"type": "text", "text": """Define only the clothing from this image in 1-3 words, always include the colour of the clothing too.
                                                 Your response should be only the definition.
                                                 Examples:
                                                 Black Tshirt
                                                 Blue jeans
                                                 Olive Cargo pants
                                                 If there are topwear and bottomwear to be seen in the image, define both. Example: White shirt and Brown trousers
                                                 """}
                 ]}
               ]
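    # Render the chat messages into the model's prompt format, with the
    # assistant generation prompt appended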
    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)

    output_texts = []
    for img_path in images:
        image = Image.open(img_path)
        inputs = processor(image, input_text, add_special_tokens=False, return_tensors="pt").to(model.device)
        with torch.no_grad():
            output = model.generate(**inputs, max_new_tokens=32, temperature=0.1)

        # Keep only the final line of the decoded output and strip the trailing
        # "<|eot_id|>" end-of-turn marker and any trailing period
        output_response = str(processor.decode(output[0])).split('\n')[-1]
        output_texts.append(output_response.removesuffix('<|eot_id|>').rstrip('.'))
        
    print(output_texts)
    return output_texts
    

@spaces.GPU(duration=180)
def generate_image(category, img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model,
                   use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=2.5):
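    """Compose the try-on image with OmniGen. The prompt references the person
    and clothing images positionally via <img><|image_N|></img> placeholders."""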

    # Collect the uploaded images, dropping any empty upload slots
    input_images = [img for img in [img1, img2, img3] if img is not None]

    # NOTE: the garment names below are currently hardcoded; the predict_clothing
    # call that would infer them from the images is disabled.
    #wears = predict_clothing(input_images[1:])
    if len(input_images) == 2:
        #dress = wears[0]
        text = f"""A {category} wearing a dress, with completely white background. The {category} is in <img><|image_1|></img>. The dress is in <img><|image_2|></img>."""
    elif len(input_images) == 3:
        #topwear, bottomwear = wears[0], wears[1]
        text = f"""A {category} wearing a Black tshirt and Olive pants, with completely white background. The {category} is in <img><|image_1|></img>. The Black tshirt is in <img><|image_2|></img>. The Olive pants is in <img><|image_3|></img>."""
    else:
        # Need the person image plus one or two clothing images to build the prompt
        raise gr.Error("Please upload a person image and at least one clothing image.")

        
    if randomize_seed:
        seed = random.randint(0, 10000000)

    print(text)
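    # OmniGen returns a list of generated PIL images; take the first (and only) one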
    output = pipe(prompt=text, input_images=input_images, height=height, width=width, guidance_scale=guidance_scale,
                  img_guidance_scale=img_guidance_scale, num_inference_steps=inference_steps, separate_cfg_infer=separate_cfg_infer, 
                  use_kv_cache=True, offload_kv_cache=True, offload_model=offload_model, 
                  use_input_image_size_as_output=use_input_image_size_as_output, seed=seed, max_input_image_size=max_input_image_size,)
    img = output[0]
    return img

    
def get_example():
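    # A single pre-filled case; values follow the order of the gr.Examples `inputs` list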
    case = [
            [   "./imgs/test_cases/icl1.jpg",
                "./imgs/test_cases/icl2.jpg",
                "./imgs/test_cases/icl3.jpg",
                224,
                224,
                1.6,
                1,
                768,
                False,
                False,
                2.5
            ],
    ]
    return case

def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed, max_input_image_size, randomize_seed, use_input_image_size_as_output, guidance_scale):
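    """Adapter for gr.Examples: supply the settings that are not example inputs."""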
    # Fixed generation settings for the examples
    inference_steps = 50
    separate_cfg_infer = True
    offload_model = False

    # generate_image builds its own prompt and expects a category as its first
    # argument; the examples don't provide one, so default to "woman" here (arbitrary choice).
    return generate_image("woman", img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer,
                          offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale)


description = """
### Usage:
- First, upload your own photo as the first image, labelled 'Person'.
- Then upload your 'Top-wear' and 'Bottom-wear' images.
- If it's a single dress, or you don't have separate top-wear and bottom-wear images, upload that single image under 'Top-wear'.

### Tips:
- If you run out of memory or generation takes too long, set `offload_model=True` or refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources) to choose an appropriate setting.
- If inference is too slow with multiple input images, try reducing `max_input_image_size`. For more details, see [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources).

**Please note that HF Spaces often encounter errors due to GPU quota or other limitations, so please try lowering the image size and inference steps if generation fails.**
"""

Credits = """**Credits**

Made using [Llama 3.2 Vision](https://huggingface.co./meta-llama/Llama-3.2-11B-Vision-Instruct) and [OmniGen](https://huggingface.co./Shitao/OmniGen-v1): Unified Image Generation
"""


# Gradio 
with gr.Blocks() as demo:
    gr.Markdown("# Virtual Try-On ✨")
    gr.Markdown(description)
    with gr.Row():
        with gr.Row(equal_height=True):
            # input images
            image_input_1 = gr.Image(label="Person", type="filepath")
            image_input_2 = gr.Image(label="Top-wear", type="filepath")
            image_input_3 = gr.Image(label="Bottom-wear", type="filepath")

    with gr.Row():
        with gr.Column():
            category = gr.Radio(["man", "woman", "boy", "girl"], label="Category", info="Choose the category that matches the person")
            
            # sliders            
            height_input = gr.Slider(label="Height", minimum=128, maximum=1024, value=512, step=16)
            width_input = gr.Slider(label="Width", minimum=128, maximum=1024, value=512, step=16)
    
            num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=128, value=32, step=1)
    
            seed_input = gr.Slider(label="Seed", minimum=0, maximum=2147483647, value=42, step=1)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

        with gr.Column():
            max_input_image_size = gr.Slider(label="max_input_image_size", minimum=128, maximum=2048, value=1024, step=16)

            guidance_scale_input = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=5.0, value=2.5, step=0.1)
            
            img_guidance_scale_input = gr.Slider(label="img_guidance_scale", minimum=1.0, maximum=2.0, value=1.6, step=0.1)
            
            separate_cfg_infer = gr.Checkbox(
                label="separate_cfg_infer", info="Run a separate inference pass for each guidance; this reduces memory usage.", value=True,)

            offload_model = gr.Checkbox(
                label="offload_model", info="Offload the model to CPU. This significantly reduces memory usage but slows down generation. Enabling both separate_cfg_infer and offload_model reduces memory further but is slowest.", value=False,)

            use_input_image_size_as_output = gr.Checkbox(
                label="use_input_image_size_as_output", info="Automatically make the output image the same size as the input image. For editing and ControlNet tasks, this keeps output and input sizes matched, which usually gives better results.", value=False,)

    with gr.Row():
        # generate
        generate_button = gr.Button("Try On!")
            
    with gr.Row():
        # output image
        output_image = gr.Image(label="Output Image")

    # click
    generate_button.click(
        generate_image,
        inputs=[category, image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, num_inference_steps,
                seed_input, separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale_input],
        outputs=output_image,)

    gr.Examples(
        examples=get_example(),
        fn=run_for_examples,
        inputs=[image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, seed_input,
                max_input_image_size, randomize_seed, use_input_image_size_as_output, guidance_scale_input],
        outputs=output_image,)

    gr.Markdown(Credits)

# launch
demo.launch()