Update app.py
app.py CHANGED
@@ -20,8 +20,19 @@ def response(user_question, table_data):
 
     queries = [user_question]
 
-    encoding = tokenizer(table=table_data, query=queries, padding=True, return_tensors="pt",truncation=True)
-    outputs = model.generate(**encoding)
+    encoding = tokenizer(table=table_data, query=queries, padding=True, return_tensors="pt", truncation=True)
+
+    # Experiment with generation parameters
+    outputs = model.generate(
+        **encoding,
+        num_beams=5,         # Beam search to generate more diverse responses
+        top_k=50,            # Top-k sampling for diversity
+        top_p=0.95,          # Nucleus sampling
+        temperature=0.7,     # Temperature scaling (if supported by the model)
+        max_length=50,       # Limit the length of the generated response
+        early_stopping=True  # Stop generation when an end token is generated
+    )
+
     ans = tokenizer.batch_decode(outputs, skip_special_tokens=True)
 
     query_result = {

@@ -33,6 +44,7 @@ def response(user_question, table_data):
 
     return query_result
 
+
 # Streamlit interface
 st.markdown("""
 <div style='display: flex; align-items: center;'>