xxx1 committed
Commit 26889b2
1 Parent(s): 8cf99f3

Update app.py

Files changed (1):
  1. app.py  +15 -7

app.py CHANGED
@@ -42,12 +42,16 @@ def gpt3(question,vqa_answer,caption):
 
 
 def inference_chat(input_image,input_text):
+    cap=caption(input_image)
     inputs = processor(images=input_image, text=input_text,return_tensors="pt")
     inputs["max_length"] = 10
     inputs["num_beams"] = 5
     inputs['num_return_sequences'] =4
     out = model_vqa.generate(**inputs)
-    return "\n".join(processor.batch_decode(out, skip_special_tokens=True))
+    out=processor.batch_decode(out, skip_special_tokens=True)
+    vqa="\n".join(out)
+    gpt3_out=gpt3(input_text,vqa,cap)
+    return out[0], gpt3_out
 
 with gr.Blocks(
     css="""
@@ -70,18 +74,20 @@ with gr.Blocks(
     with gr.Row():
         clear_button = gr.Button(value="Clear", interactive=True)
         submit_button = gr.Button(
-            value="Submit_VQA", interactive=True, variant="primary"
+            value="VLE", interactive=True, variant="primary"
         )
+        '''
         cap_submit_button = gr.Button(
             value="Submit_CAP", interactive=True, variant="primary"
         )
         gpt3_submit_button = gr.Button(
             value="Submit_GPT3", interactive=True, variant="primary"
         )
+        '''
     with gr.Column():
-        caption_output = gr.Textbox(lines=0, label="VQA Output(模型答案输出)")
-        caption_output_v1 = gr.Textbox(lines=0, label="Caption Output(模型caption输出)")
-        gpt3_output_v1 = gr.Textbox(lines=0, label="GPT3 Output(GPT3输出)")
+        caption_output = gr.Textbox(lines=0, label="VQA ")
+        #caption_output_v1 = gr.Textbox(lines=0, label="Caption Output(模型caption输出)")
+        gpt3_output_v1 = gr.Textbox(lines=0, label="VQA+LLM")
 
     image_input.change(
         lambda: ("", "", []),
@@ -100,7 +106,7 @@ with gr.Blocks(
     clear_button.click(
         lambda: ("", [], []),
         [],
-        [chat_input, state],
+        [chat_input, state,caption_output,gpt3_output_v1],
         queue=False,
     )
     submit_button.click(
@@ -109,8 +115,9 @@ with gr.Blocks(
             image_input,
            chat_input,
         ],
-        [caption_output],
+        [caption_output,gpt3_output_v1],
     )
+    '''
     cap_submit_button.click(
         caption,
         [
@@ -128,6 +135,7 @@ with gr.Blocks(
         ],
         [gpt3_output_v1],
     )
+    '''
 
     # examples = gr.Examples(
     #     examples=examples,
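
Taken together, the first hunk replaces the old VQA-only handler with a pipeline that captions the image, runs beam-search VQA, and then asks GPT-3 to combine the two. A minimal sketch of that flow, assuming the processor, model_vqa, caption and gpt3 objects defined earlier in app.py (a commented restatement of the diff, not a verbatim copy of the file):

def inference_chat(input_image, input_text):
    # Caption the image first so the LLM gets some visual context.
    cap = caption(input_image)

    # Beam-search VQA: keep the 4 highest-scoring candidate answers.
    inputs = processor(images=input_image, text=input_text, return_tensors="pt")
    inputs["max_length"] = 10
    inputs["num_beams"] = 5
    inputs["num_return_sequences"] = 4
    out = model_vqa.generate(**inputs)
    candidates = processor.batch_decode(out, skip_special_tokens=True)

    # Let GPT-3 reconcile the question, the VQA candidates and the caption.
    gpt3_out = gpt3(input_text, "\n".join(candidates), cap)

    # One value per output textbox: top VQA answer, then the VQA+LLM answer.
    return candidates[0], gpt3_out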
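
On the UI side, the remaining hunks collapse the three buttons into a single "VLE" submit and route both return values into the two textboxes that remain. A hedged sketch of that wiring, reusing the component names from app.py:

# The single submit button now feeds both output boxes.
submit_button.click(
    inference_chat,
    [image_input, chat_input],
    [caption_output, gpt3_output_v1],  # top VQA answer, VQA+LLM answer
)

# Clear also resets the two new outputs; Gradio expects the function to
# return one value per listed output component.
clear_button.click(
    lambda: ("", [], "", ""),
    [],
    [chat_input, state, caption_output, gpt3_output_v1],
    queue=False,
)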