xxx1 committed on
Commit
8cf99f3
1 Parent(s): a2468a2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -13,8 +13,8 @@ model_vqa = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-capfil
13
 
14
  from transformers import BlipProcessor, BlipForConditionalGeneration
15
 
16
- cap_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
17
- cap_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
18
 
19
 
20
 
@@ -26,7 +26,7 @@ def caption(input_image):
26
  return "\n".join(cap_processor.batch_decode(out, skip_special_tokens=True))
27
  import openai
28
  import os
29
- openai.api_key= os.getenv('openai_appkey') # [REDACTED: a live "sk-..." OpenAI API key was committed here in a comment — key must be revoked]
30
  def gpt3(question,vqa_answer,caption):
31
  prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
32
  response = openai.Completion.create(
 
13
 
14
  from transformers import BlipProcessor, BlipForConditionalGeneration
15
 
16
+ cap_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
17
+ cap_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
18
 
19
 
20
 
 
26
  return "\n".join(cap_processor.batch_decode(out, skip_special_tokens=True))
27
  import openai
28
  import os
29
+ openai.api_key= os.getenv('openai_appkey')
30
  def gpt3(question,vqa_answer,caption):
31
  prompt=caption+"\n"+question+"\n"+vqa_answer+"\n Tell me the right answer."
32
  response = openai.Completion.create(