awinml committed on
Commit
bf8b612
1 Parent(s): cc50220

Upload 3 files

Browse files
Files changed (2) hide show
  1. app.py +2 -2
  2. utils.py +8 -11
app.py CHANGED
@@ -70,7 +70,7 @@ with st.sidebar:
70
  st.subheader("Select Options:")
71
 
72
  with st.sidebar:
73
- num_results = int(st.number_input("Number of Results to query", 1, 15, value=5))
74
 
75
 
76
  # Choose encoder model
@@ -108,7 +108,7 @@ elif encoder_model == "SGPT":
108
 
109
 
110
  with st.sidebar:
111
- window = int(st.number_input("Sentence Window Size", 0, 10, value=3))
112
 
113
  with st.sidebar:
114
  threshold = float(
 
70
  st.subheader("Select Options:")
71
 
72
  with st.sidebar:
73
+ num_results = int(st.number_input("Number of Results to query", 1, 15, value=6))
74
 
75
 
76
  # Choose encoder model
 
108
 
109
 
110
  with st.sidebar:
111
+ window = int(st.number_input("Sentence Window Size", 0, 10, value=1))
112
 
113
  with st.sidebar:
114
  threshold = float(
utils.py CHANGED
@@ -115,19 +115,16 @@ def text_lookup(data, sentence_ids):
115
 
116
 
117
  def generate_prompt(query_text, context_list):
118
- context = " \n\n".join(context_list)
119
- prompt = f"""Answer the question as truthfully as possible using the provided text. Try to include as many key details as possible and format the answer in points.
120
-
121
- Context:
122
- {context}
123
-
124
- Q: {query_text}
125
- A:"""
126
  return prompt
127
 
128
 
129
  def generate_prompt_2(query_text, context_list):
130
- context = " \n\n".join(context_list)
131
  prompt = f"""
132
  Context information is below:
133
  ---------------------
@@ -144,9 +141,9 @@ def gpt_model(prompt):
144
  model="text-davinci-003",
145
  prompt=prompt,
146
  temperature=0.1,
147
- max_tokens=512,
148
  top_p=1.0,
149
- frequency_penalty=0.0,
150
  presence_penalty=1,
151
  )
152
  return response.choices[0].text
 
115
 
116
 
117
  def generate_prompt(query_text, context_list):
118
+ context = " ".join(context_list)
119
+ prompt = f"""Answer the question as accurately as possible using the provided context. Try to include as many key details as possible.
120
+ Context: {context}
121
+ Question: {query_text}
122
+ Answer:"""
 
 
 
123
  return prompt
124
 
125
 
126
  def generate_prompt_2(query_text, context_list):
127
+ context = " ".join(context_list)
128
  prompt = f"""
129
  Context information is below:
130
  ---------------------
 
141
  model="text-davinci-003",
142
  prompt=prompt,
143
  temperature=0.1,
144
+ max_tokens=1024,
145
  top_p=1.0,
146
+ frequency_penalty=0.5,
147
  presence_penalty=1,
148
  )
149
  return response.choices[0].text