Jordan committed on
Commit
8d1b720
1 Parent(s): 105c1f4

Second set of changes - includes the generated text from images and videos in the output

Files changed (3)
  1. app.py +5 -3
  2. combine_modules.py +4 -4
  3. interpret_model_pt.py +2 -2
app.py CHANGED
@@ -13,18 +13,20 @@ with gr.Blocks() as demo:
         vid_input = gr.Video()
         vid_output = gr.Textbox()
         vid_output_interpret = gr.TextArea()
+        vid_audio_stmt = gr.Textbox()
         vid_button = gr.Button("Check Bias in your Video")
 
     with gr.Tab("Image"):
         img_input = gr.Image()
         img_output = gr.Textbox()
+        img_gen_stmt = gr.Textbox()
         img_output_interpret = gr.TextArea()
         img_button = gr.Button("Check Bias in your Image")
 
     text_button.click(combine_modules.app_nlp_start, inputs=text_input, outputs=[text_output, text_output_interpret])
-    vid_button.click(combine_modules.app_video_start, inputs=vid_input, outputs=[vid_output, vid_output_interpret])
-    img_button.click(combine_modules.app_image_start, inputs=img_input, outputs=[img_output, img_output_interpret])
+    vid_button.click(combine_modules.app_video_start, inputs=vid_input, outputs=[vid_output, vid_audio_stmt, vid_output_interpret])
+    img_button.click(combine_modules.app_image_start, inputs=img_input, outputs=[img_output, img_gen_stmt, img_output_interpret])
 
 
 if __name__=="__main__":
-    demo.launch()
+    demo.launch(share=True)
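Note on the wiring above: Gradio's Button.click assigns a handler's return values to the components in its outputs list positionally, which is why adding vid_audio_stmt and img_gen_stmt requires the handlers to return a third value. A minimal, self-contained sketch of that pattern (check_bias below is a hypothetical stand-in for the combine_modules handlers, not code from this repo):

import gradio as gr

# Hypothetical handler: returns (bias label, echoed text, attribution summary),
# in the same order as the outputs list it is wired to.
def check_bias(text):
    label = "biased" if "always" in text.lower() else "neutral"   # placeholder logic
    attributions = {w: 0.1 for w in text.split()}                 # placeholder scores
    return label, text, str(attributions)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Statement")
    out_label = gr.Textbox(label="Bias result")
    out_generated = gr.Textbox(label="Generated text")
    out_interpret = gr.TextArea(label="Word attributions")
    btn = gr.Button("Check Bias")
    # Return values map to outputs left to right:
    # label -> out_label, text -> out_generated, attributions -> out_interpret.
    btn.click(check_bias, inputs=inp, outputs=[out_label, out_generated, out_interpret])

if __name__ == "__main__":
    demo.launch()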
combine_modules.py CHANGED
@@ -8,10 +8,10 @@ def app_nlp_start(statement):
 
 def app_video_start(video_path):
     return_text = rtxt(video_path)
-    output_txt = bias_checker(return_text)
-    return output_txt
+    output_txt, interpreted_txt = bias_checker(return_text)
+    return output_txt, interpreted_txt, return_text
 
 def app_image_start(image_path):
     text_generated = img_pipe(image_path)
-    output_txt = bias_checker(text_generated)
-    return output_txt
+    output_txt, interpreted_txt = bias_checker(text_generated)
+    return output_txt, interpreted_txt, text_generated
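The reworked handlers assume bias_checker now returns a pair (the bias verdict plus its interpretation) and pass the transcribed or generated text through as a third return value; since Gradio fills the outputs list positionally, that return order has to line up with the component order used in app.py. A rough sketch of the shape, with trivial stand-ins for rtxt, img_pipe, and bias_checker (the real implementations live in this repo's other modules):

# Hypothetical stand-ins for the repo's speech-to-text, image-captioning,
# and bias-classification helpers.
def rtxt(video_path):
    return f"transcript of {video_path}"            # placeholder transcript

def img_pipe(image_path):
    return f"caption of {image_path}"               # placeholder caption

def bias_checker(text):
    label = "biased" if "always" in text.lower() else "neutral"
    attributions = {w: 0.0 for w in text.split()}   # placeholder word attributions
    return label, str(attributions)

def app_video_start(video_path):
    return_text = rtxt(video_path)
    output_txt, interpreted_txt = bias_checker(return_text)
    # Three values: bias verdict, interpretation, and the raw transcript.
    return output_txt, interpreted_txt, return_text

print(app_video_start("clip.mp4"))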
interpret_model_pt.py CHANGED
@@ -1,8 +1,8 @@
 from transformers_interpret import SequenceClassificationExplainer, MultiLabelClassificationExplainer
 
 def explainer(input_statement, model, tokenizer):
-    # cls_explainer = SequenceClassificationExplainer(model, tokenizer)
-    cls_explainer = MultiLabelClassificationExplainer(model, tokenizer)
+    cls_explainer = SequenceClassificationExplainer(model, tokenizer)
+    # cls_explainer = MultiLabelClassificationExplainer(model, tokenizer)
     word_attributions = cls_explainer(input_statement)
     return dict(word_attributions)
 
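For reference, SequenceClassificationExplainer from transformers_interpret wraps a Hugging Face sequence-classification model and, when called on a string, yields per-word attribution scores as (word, score) pairs, which is what makes dict(word_attributions) work above. A small usage sketch; the sentiment checkpoint is only an example, and the actual bias model used by this Space is loaded elsewhere:

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers_interpret import SequenceClassificationExplainer

# Example checkpoint for illustration only.
model_name = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def explainer(input_statement, model, tokenizer):
    cls_explainer = SequenceClassificationExplainer(model, tokenizer)
    word_attributions = cls_explainer(input_statement)  # list of (word, score) pairs
    return dict(word_attributions)

print(explainer("They always cause trouble", model, tokenizer))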