NikhilJoson commited on
Commit
6993038
·
verified ·
1 Parent(s): c421dec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -11
app.py CHANGED
@@ -25,10 +25,11 @@ def predict_clothing(images):
25
  {"type": "text", "text": """Define only the clothing from this image in 1-3 words, always include the colour of the clothing too.
26
  Your response should be only the definition.
27
  Examples:
28
- - Black Tshirt
29
- - Blue jeans
30
- - Olive Cargo pants
31
- - WHite shirt and Brown trousers"""}
 
32
  ]}
33
  ]
34
  input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
@@ -63,10 +64,10 @@ def generate_image(category, img1, img2, img3, height, width, img_guidance_scale
63
  wears = predict_clothing(input_images[1:])
64
  if len(wears)==1:
65
  dress = wears[0]
66
- text = f"""A {category} wearing {dress}, with a completely white background. The {category} is <img><|image_1|></img>. The {dress} is in <img><|image_2|></img>."""
67
  elif len(wears)==2:
68
  topwear, bottomwear = wears[0], wears[1]
69
- text = f"""A {category} wearing {topwear} and {bottomwear}, with a completely white background. The {category} is <img><|image_1|></img>. The {topwear} is in <img><|image_2|></img>. The {bottomwear} is in <img><|image_3|></img>."""
70
  else:
71
  input_images = None
72
 
@@ -126,7 +127,7 @@ description = """
126
 
127
  Credits = """**Credits**
128
 
129
- Made using [OmniGen](https://huggingface.co/Shitao/OmniGen-v1): Unified Image Generation [paper](https://arxiv.org/abs/2409.11340) [code](https://github.com/VectorSpaceLab/OmniGen)
130
  """
131
 
132
 
@@ -183,16 +184,14 @@ with gr.Blocks() as demo:
183
  generate_image,
184
  inputs=[category, image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, num_inference_steps,
185
  seed_input, separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale_input],
186
- outputs=output_image,
187
- )
188
 
189
  gr.Examples(
190
  examples=get_example(),
191
  fn=run_for_examples,
192
  inputs=[image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, seed_input,
193
  max_input_image_size, randomize_seed, use_input_image_size_as_output, guidance_scale_input],
194
- outputs=output_image,
195
- )
196
 
197
  gr.Markdown(Credits)
198
 
 
25
  {"type": "text", "text": """Define only the clothing from this image in 1-3 words, always include the colour of the clothing too.
26
  Your response should be only the definition.
27
  Examples:
28
+ Black T-shirt
29
+ Blue jeans
30
+ Olive Cargo pants
31
+ If there are topwear and bottomwear to be seen in the image, define both. Example: White shirt and Brown trousers
32
+ """}
33
  ]}
34
  ]
35
  input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
 
64
  wears = predict_clothing(input_images[1:])
65
  if len(wears)==1:
66
  dress = wears[0]
67
+ text = f"""A {category} wearing {dress}, with a completely white background. The {category} is in <img><|image_1|></img>. The {dress} is in <img><|image_2|></img>."""
68
  elif len(wears)==2:
69
  topwear, bottomwear = wears[0], wears[1]
70
+ text = f"""A {category} wearing {topwear} and {bottomwear}, with a completely white background. The {category} is in <img><|image_1|></img>. The {topwear} is in <img><|image_2|></img>. The {bottomwear} is in <img><|image_3|></img>."""
71
  else:
72
  input_images = None
73
 
 
127
 
128
  Credits = """**Credits**
129
 
130
+ Made using [Llama 3.2 Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) and [OmniGen](https://huggingface.co/Shitao/OmniGen-v1): Unified Image Generation
131
  """
132
 
133
 
 
184
  generate_image,
185
  inputs=[category, image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, num_inference_steps,
186
  seed_input, separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale_input],
187
+ outputs=output_image,)
 
188
 
189
  gr.Examples(
190
  examples=get_example(),
191
  fn=run_for_examples,
192
  inputs=[image_input_1, image_input_2, image_input_3, height_input, width_input, img_guidance_scale_input, seed_input,
193
  max_input_image_size, randomize_seed, use_input_image_size_as_output, guidance_scale_input],
194
+ outputs=output_image,)
 
195
 
196
  gr.Markdown(Credits)
197