Spaces:
Runtime error
Momin Aziz
committed on
Commit · 8d3902c
1 Parent(s): 610acf4
checking qa generator
Browse files
- app.py +47 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,47 @@
+import streamlit as st
+from transformers import AutoTokenizer, T5ForConditionalGeneration
+
+model_name = "allenai/t5-small-squad2-question-generation"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# allow_output_mutation lets the legacy st.cache return the unhashable torch model
+@st.cache(allow_output_mutation=True)
+def load_model(model_name):
+    model = T5ForConditionalGeneration.from_pretrained(model_name)
+    return model
+
+model = load_model(model_name)
+
+
+def run_model(input_string, **generator_args):
+    input_ids = tokenizer.encode(input_string, return_tensors="pt")
+    res = model.generate(input_ids, **generator_args)
+    output = tokenizer.batch_decode(res, skip_special_tokens=True)
+    # print(output)
+    return output
+
+
+default_value = "Nicejob has increased our revenue 80% since signing up"
+
+# prompts
+st.title("Question Generation")
+st.write("Placeholder for some other texts, like instructions...")
+
+sent = st.text_area("Text", default_value, height=150)
+max_length = st.sidebar.slider("Max Length", min_value=10, max_value=50)
+temperature = st.sidebar.slider("Temperature", value=1.0, min_value=0.0, max_value=1.0, step=0.05)
+num_return_sequences = st.sidebar.slider("Num Return Sequences", min_value=1, max_value=4, value=1)
+num_beams = st.sidebar.slider("Num Beams", min_value=4, max_value=10, value=4)
+top_k = st.sidebar.slider("Top-k", min_value=0, max_value=100, value=90)
+top_p = st.sidebar.slider("Top-p", min_value=0.0, max_value=1.0, step=0.05, value=0.9)
+
+encoded_prompt = tokenizer.encode(sent, add_special_tokens=False, return_tensors="pt")
+if encoded_prompt.size()[-1] == 0:
+    st.write("Please enter some text first.")
+else:
+    # pass the raw string: run_model re-encodes it itself; note that temperature,
+    # top_k and top_p only take effect if do_sample=True is passed to generate
+    output_sequences = run_model(sent, max_length=max_length, num_return_sequences=num_return_sequences,
+                                 num_beams=num_beams,
+                                 temperature=temperature, top_k=top_k, top_p=top_p)
+    st.write('\n'.join(output_sequences))
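A note on the sampling sliders: Hugging Face's model.generate ignores temperature, top_k, and top_p unless do_sample=True is also passed, so under pure beam search those three sliders have no effect. Below is a minimal standalone sketch of the same pipeline with sampling enabled; the helper name generate_questions and the flag values are illustrative, not part of this commit.

from transformers import AutoTokenizer, T5ForConditionalGeneration

model_name = "allenai/t5-small-squad2-question-generation"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

# illustrative helper: same steps as run_model in app.py, with sampling on
def generate_questions(text, **generator_args):
    input_ids = tokenizer.encode(text, return_tensors="pt")
    res = model.generate(input_ids, **generator_args)
    return tokenizer.batch_decode(res, skip_special_tokens=True)

if __name__ == "__main__":
    questions = generate_questions(
        "Nicejob has increased our revenue 80% since signing up",
        max_length=50,
        num_beams=4,
        num_return_sequences=4,
        do_sample=True,    # without this, temperature/top_k/top_p are ignored
        temperature=1.0,
        top_k=90,
        top_p=0.9,
    )
    print("\n".join(questions))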
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+transformers
+streamlit
+torch
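Since app.py depends on the legacy @st.cache decorator, which later Streamlit releases deprecate in favor of st.cache_resource, unpinned requirements can break the Space on a rebuild. A pinned variant might look like the sketch below; the version numbers are illustrative assumptions, not taken from the commit.

transformers==4.26.0
streamlit==1.12.0   # a release that still ships the legacy st.cache API
torch==1.13.1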