KarthickAdopleAI committed (verified)
Commit 4aef76a · 1 parent: 02d955d

Update app.py

Files changed (1):
  1. app.py (+49 -49)
app.py CHANGED
@@ -100,35 +100,31 @@ class FactChecking:
             logging.error(f"Error occurred in extract_unique_sentences: {e}")
             return set()
 
-    def find_different_sentences(self, text1: str, text2: str) -> List[Tuple[str, str]]:
-        """
-        Finds sentences that are different between two texts.
-
-        Args:
-            text1 (str): The first text.
-            text2 (str): The second text.
-
-        Returns:
-            List[Tuple[str, str]]: A list of tuples containing sentences and their labels.
-        """
-        try:
-            sentences_text1 = self.extract_unique_sentences(text1)
-            sentences_text2 = self.extract_unique_sentences(text2)
-            # Initialize labels list
-            labels = []
-            # Iterate over sentences in text1
-            for sentence in sentences_text1:
-                if sentence in sentences_text2:
-                    # If sentence is common to both texts, assign 'factual' label
-                    labels.append((sentence, 'factual'))
-                else:
-                    # If sentence is unique to text1, assign 'hallucinated' label
-                    labels.append((sentence, 'hallucinated'))
-            logging.info("Sentence comparison completed successfully.")
-            return labels
-        except Exception as e:
-            logging.error(f"Error occurred in find_different_sentences: {e}")
-            return []
+    def find_different_sentences(self, answer):
+        splitted_answer = answer.split("\n\n")
+        predictions_ = []
+        for i in range(len(splitted_answer)):
+            if "True." in splitted_answer[i]:
+                prediction = "factual"
+                context = splitted_answer[i].split("\n")
+                # print(context)
+                for j in range(len(context)):
+                    t_sentence = context[j].replace(f"Fact {i+1}: ", "")
+                    predictions_.append((t_sentence, prediction))
+                    break
+            elif "False." in splitted_answer[i]:
+                prediction = "hallucinated"
+                context = splitted_answer[i].split("\n")
+                for j in range(len(context)):
+                    sentence = context[j].replace(f"Fact {i+1}: ", "")
+                    break
+                predictions_.append((sentence, prediction))
+        return predictions_
 
     def extract_words(self, text: str) -> List[str]:
         """
@@ -196,22 +192,36 @@ class FactChecking:
 
             # Generate initial response using contract generator
             mixtral_response = self.mixtral_response(question)
-            # Create checker chain for summarization checking
-            checker_chain = LLMSummarizationCheckerChain.from_llm(self.llm, verbose=True, max_checks=2)
-            # Run fact checking on the generated result
-            fact_checking_result = checker_chain.run(mixtral_response)
-            # Find different sentences between original result and fact checking result
-            prediction_list = self.find_different_sentences(mixtral_response, fact_checking_result)
-            #word prediction list
-            word_prediction_list = self.label_words(mixtral_response, fact_checking_result)
+
+            template = """Given some text, extract a list of facts from the text.
+
+            Format your output as a bulleted list.
+
+            Text:
+            {question}
+
+            Facts:"""
+            prompt_template = PromptTemplate(input_variables=["question"], template=template)
+            question_chain = LLMChain(llm=self.llm, prompt=prompt_template)
+
+            template = """You are an expert fact checker. You have been hired by a major news organization to fact check a very important story.
+
+            Here is a bullet point list of facts:
+            {statement}
+
+            For each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output "Undetermined".
+            If the fact is false, explain why."""
+            prompt_template = PromptTemplate(input_variables=["statement"], template=template)
+            assumptions_chain = LLMChain(llm=self.llm, prompt=prompt_template)
+
+            overall_chain = SimpleSequentialChain(chains=[question_chain, assumptions_chain], verbose=True)
+            answer = overall_chain.run(mixtral_response)
+            # Find different sentences between original result and fact checking result
+            prediction_list = self.find_different_sentences(answer)
 
             logging.info("Sentences comparison completed successfully.")
             # Return the original result and list of hallucinated sentences
-            return mixtral_response, fact_checking_result, prediction_list, word_prediction_list
+            return mixtral_response, prediction_list
 
         except Exception as e:
             logging.error(f"Error occurred in find_hallucinatted_sentence: {e}")
@@ -229,24 +239,14 @@ class FactChecking:
             with gr.Row():
                 button = gr.Button(value="Submit")
             with gr.Row():
-                with gr.Column(scale=0.50):
-                    mixtral_response = gr.Textbox(label="llm answer")
-                with gr.Column(scale=0.50):
-                    fact_checking_result = gr.Textbox(label="Corrected Result")
+                mixtral_response = gr.Textbox(label="llm answer")
             with gr.Row():
-                with gr.Column(scale=0.50):
-                    highlighted_prediction = gr.HighlightedText(
+                highlighted_prediction = gr.HighlightedText(
                     label="Sentence Hallucination detection",
                     combine_adjacent=True,
                     color_map={"hallucinated": "red", "factual": "green"},
                     show_legend=True)
-                with gr.Column(scale=0.50):
-                    word_highlighted_prediction = gr.HighlightedText(
-                        label="Word Hallucination detection",
-                        combine_adjacent=True,
-                        color_map={"hallucinated": "red", "factual": "green"},
-                        show_legend=True)
-            button.click(self.find_hallucinatted_sentence, question, [mixtral_response, fact_checking_result, highlighted_prediction, word_highlighted_prediction])
+            button.click(self.find_hallucinatted_sentence, question, [mixtral_response, highlighted_prediction])
             demo.launch(debug=True)
 
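The rewritten find_different_sentences no longer diffs two texts; it parses the fact checker's raw answer. It assumes the answer arrives as blank-line-separated blocks, each opening with a "Fact N: ..." line and containing a "True." or "False." verdict, and in both branches only the first line of a block is kept. A small worked example of that contract (the sample text is invented for illustration):

    answer = (
        "Fact 1: The Eiffel Tower is in Paris.\nTrue.\n\n"
        "Fact 2: The Eiffel Tower opened in 1999.\nFalse. It opened in 1889."
    )
    # find_different_sentences(answer) would return:
    #   [('The Eiffel Tower is in Paris.', 'factual'),
    #    ('The Eiffel Tower opened in 1999.', 'hallucinated')]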
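In place of LLMSummarizationCheckerChain, the update composes two hand-written prompts with SimpleSequentialChain: the first chain distills the model's answer into a bulleted fact list, the second labels each fact as true, false, or undetermined. The same pattern as a standalone sketch, assuming llm is any LangChain-compatible model and answer_to_check holds the text to verify (prompts abbreviated):

    from langchain.chains import LLMChain, SimpleSequentialChain
    from langchain.prompts import PromptTemplate

    # Chain 1: turn the raw answer into a bulleted list of atomic facts.
    extract_chain = LLMChain(llm=llm, prompt=PromptTemplate(
        input_variables=["question"],
        template="Extract a list of facts from the text.\n\nText:\n{question}\n\nFacts:"))

    # Chain 2: label each extracted fact, explaining any that are false.
    check_chain = LLMChain(llm=llm, prompt=PromptTemplate(
        input_variables=["statement"],
        template="For each fact below, state True. or False. and explain any falsehood.\n\n{statement}"))

    # Chain 1's single output feeds chain 2's single input.
    pipeline = SimpleSequentialChain(chains=[extract_chain, check_chain], verbose=True)
    verdicts = pipeline.run(answer_to_check)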
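On the UI side, the click handler now returns two values, so the Gradio outputs shrink to one textbox plus one HighlightedText component; HighlightedText consumes exactly the (sentence, label) tuples that find_different_sentences emits. A minimal self-contained sketch of the same wiring around a stubbed pipeline (the sample data is invented):

    import gradio as gr

    def stub_pipeline(question):
        # Stand-in for FactChecking.find_hallucinatted_sentence.
        answer = "The Eiffel Tower is in Paris. It opened in 1999."
        labels = [("The Eiffel Tower is in Paris.", "factual"),
                  ("It opened in 1999.", "hallucinated")]
        return answer, labels

    with gr.Blocks() as demo:
        question = gr.Textbox(label="Question")
        button = gr.Button(value="Submit")
        with gr.Row():
            mixtral_response = gr.Textbox(label="llm answer")
        with gr.Row():
            highlighted_prediction = gr.HighlightedText(
                label="Sentence Hallucination detection",
                combine_adjacent=True,
                color_map={"hallucinated": "red", "factual": "green"},
                show_legend=True)
        button.click(stub_pipeline, question, [mixtral_response, highlighted_prediction])

    demo.launch(debug=True)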