# NOTE(review): removed non-Python scrape residue that preceded the code
# (a "File size" banner, git commit hashes, and a copied line-number gutter);
# it was not part of the program and made the file a syntax error.
# importing necessary libraries (grouped at the top of the block; the original
# interleaved a transformers import with the model-loading statements)
import tensorflow as tf  # TF backend required by TFAutoModelForQuestionAnswering
import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering, pipeline

# Extractive-QA checkpoint: BERT-large (whole-word masking) fine-tuned on SQuAD.
# Named once so tokenizer and model cannot drift apart.
QA_MODEL_NAME = "bert-large-uncased-whole-word-masking-finetuned-squad"

tokenizer = AutoTokenizer.from_pretrained(QA_MODEL_NAME)
model = TFAutoModelForQuestionAnswering.from_pretrained(QA_MODEL_NAME, return_dict=False)

# Question-answering pipeline shared by the Gradio handler(s) below.
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Import-time smoke test: run one (context, question) pair through the pipeline
# and print the answer so a broken model download fails loudly at startup.
context = "My name is Hema Raikhola, i am a data scientist and machine learning engineer."
question = "what is my profession?"

result = nlp(question=question, context=context)

print(f"QUESTION: {question}")
print(f"ANSWER: {result['answer']}")

# Gradio handler for the text-QA tab.
def func(context, question):
  """Answer *question* from *context* via the shared `nlp` QA pipeline.

  Returns the extracted answer span as a plain string.
  """
  prediction = nlp(question=question, context=context)
  return prediction["answer"]

# Example (context, question) pairs surfaced as clickable examples in the
# text-QA Gradio interface below.
example_1 = "(1) Hema and Aman are team members.They are working on a machine learning project"
qst_1 =  "who are the team members?"

example_2 = "(2) Natural Language Processing (NLP) allows machines to break down and interpret human language. It's at the core of tools we use every day – from translation software, chatbots, spam filters, and search engines, to grammar correction software, voice assistants, and social media monitoring tools."
qst_2 =  "What is NLP used for?"


from transformers import ViltProcessor, ViltForQuestionAnswering


from functools import lru_cache


@lru_cache(maxsize=1)
def _load_vqa():
    """Load the ViLT VQA processor and model once; cached for all later calls."""
    processor = ViltProcessor.from_pretrained(
        "dandelin/vilt-b32-finetuned-vqa")
    model = ViltForQuestionAnswering.from_pretrained(
        "dandelin/vilt-b32-finetuned-vqa")
    return processor, model


def getResult(query, image):
    """Answer a free-form question about an image (visual QA).

    Args:
        query: natural-language question about the image (str).
        image: PIL image, as supplied by the ``gr.Image(type="pil")`` input.

    Returns:
        The highest-scoring answer label (str) from the ViLT VQA head.
    """
    # prepare image + question
    text = query

    # Hoisted out of the call path: the original re-instantiated (and
    # potentially re-downloaded) the processor and model on EVERY request.
    processor, model = _load_vqa()

    # prepare inputs
    encoding = processor(image, text, return_tensors="pt")

    # forward pass
    outputs = model(**encoding)
    logits = outputs.logits
    idx = logits.argmax(-1).item()
    print("Predicted answer:", model.config.id2label[idx])
    return model.config.id2label[idx]

# Visual-QA interface: text question + image in, answer label out.
iface = gr.Interface(
    fn=getResult,
    inputs=["text", gr.Image(type="pil")],
    outputs="text",
)

# Text-QA interface: context + question in, extracted answer out.
app = gr.Interface(
    fn=func,
    inputs=["textbox", "text"],
    outputs=gr.Textbox(lines=10),
    title="Question Answering bot",
    description="Input context and question, then get answers!",
    examples=[[example_1, qst_1],
              [example_2, qst_2]],
    # NOTE(review): the original passed theme="darkhuggingface" and Timeout=120.
    # Neither is a valid gr.Interface argument — "Timeout" is not a parameter
    # (TypeError on current Gradio) and "darkhuggingface" is not a valid theme
    # identifier — so both are dropped here; confirm no custom theme is wanted.
    allow_flagging="manual",
    flagging_options=["incorrect", "ambiguous", "offensive", "other"],
).queue()

# Combine both apps in tabs and launch behind simple basic-auth credentials.
gr.TabbedInterface([iface, app], ["Visual QA", "Text QA"]).launch(
    auth=("user", "work"),
    auth_message="Check your Login details sent to your email",
)