NikhilJoson committed on
Commit
a854c9a
·
verified ·
1 Parent(s): 446a044

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -16,6 +16,7 @@ model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
16
  model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
17
  processor = AutoProcessor.from_pretrained(model_id)
18
 
 
19
  @spaces.GPU()
20
  def predict_clothing(images):
21
  messages = [{"role": "user", "content":
@@ -34,8 +35,9 @@ def predict_clothing(images):
34
  with torch.no_grad():
35
  output = model.generate(**inputs, max_new_tokens=32)
36
 
37
- output_reponse = str(processor.decode(output[0])).split('\n')
38
- output_texts.append(output_reponse[-1])
 
39
 
40
  print(output_texts)
41
  return output_texts
@@ -44,7 +46,8 @@ def predict_clothing(images):
44
  @spaces.GPU(duration=180)
45
  def generate_image(img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model,
46
  use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=3):
47
-
 
48
  input_images = [img1, img2, img3]
49
  # Delete None
50
  input_images = [img for img in input_images if img is not None]
@@ -61,11 +64,12 @@ def generate_image(img1, img2, img3, height, width, img_guidance_scale, inferenc
61
  The {topwear} is in <img><|image_2|></img>. The {bottomwear} is in <img><|image_3|></img>."""
62
  else:
63
  input_images = None
 
64
 
65
-
66
  if randomize_seed:
67
  seed = random.randint(0, 10000000)
68
 
 
69
  output = pipe(prompt=text, input_images=input_images, height=height, width=width, guidance_scale=guidance_scale,
70
  img_guidance_scale=img_guidance_scale, num_inference_steps=inference_steps, separate_cfg_infer=separate_cfg_infer,
71
  use_kv_cache=True, offload_kv_cache=True, offload_model=offload_model,
@@ -73,6 +77,7 @@ def generate_image(img1, img2, img3, height, width, img_guidance_scale, inferenc
73
  img = output[0]
74
  return img
75
 
 
76
  def get_example():
77
  case = [
78
  [ "./imgs/test_cases/icl1.jpg",
@@ -101,6 +106,7 @@ def run_for_examples(img1, img2, img3, height, width, img_guidance_scale, seed,
101
  return generate_image(text, img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed,
102
  separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale)
103
 
 
104
  description = """
105
  This is a Virtual Try-On Platform.
106
  Usage:
 
16
  model = MllamaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
17
  processor = AutoProcessor.from_pretrained(model_id)
18
 
19
+
20
  @spaces.GPU()
21
  def predict_clothing(images):
22
  messages = [{"role": "user", "content":
 
35
  with torch.no_grad():
36
  output = model.generate(**inputs, max_new_tokens=32)
37
 
38
+ # .<|eot_id|>
39
+ output_reponse = str(processor.decode(output[0])).split('\n')[-1]
40
+ output_texts.append(output_reponse[:-11])
41
 
42
  print(output_texts)
43
  return output_texts
 
46
  @spaces.GPU(duration=180)
47
  def generate_image(img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model,
48
  use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale=3):
49
+
50
+ print()
51
  input_images = [img1, img2, img3]
52
  # Delete None
53
  input_images = [img for img in input_images if img is not None]
 
64
  The {topwear} is in <img><|image_2|></img>. The {bottomwear} is in <img><|image_3|></img>."""
65
  else:
66
  input_images = None
67
+
68
 
 
69
  if randomize_seed:
70
  seed = random.randint(0, 10000000)
71
 
72
+ print(text)
73
  output = pipe(prompt=text, input_images=input_images, height=height, width=width, guidance_scale=guidance_scale,
74
  img_guidance_scale=img_guidance_scale, num_inference_steps=inference_steps, separate_cfg_infer=separate_cfg_infer,
75
  use_kv_cache=True, offload_kv_cache=True, offload_model=offload_model,
 
77
  img = output[0]
78
  return img
79
 
80
+
81
  def get_example():
82
  case = [
83
  [ "./imgs/test_cases/icl1.jpg",
 
106
  return generate_image(text, img1, img2, img3, height, width, img_guidance_scale, inference_steps, seed,
107
  separate_cfg_infer, offload_model, use_input_image_size_as_output, max_input_image_size, randomize_seed, guidance_scale)
108
 
109
+
110
  description = """
111
  This is a Virtual Try-On Platform.
112
  Usage: