AyeshaAslam committed (verified)
Commit 2d34ecf · 1 Parent(s): 5c572ea

Create app.py

Files changed (1)
  1. app.py +51 -0
app.py ADDED
@@ -0,0 +1,51 @@
+ # Import Libraries
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+ from sentence_transformers import SentenceTransformer, util
+ from datasets import load_dataset
+ import faiss
+ import numpy as np
+ import streamlit as st
+
+ # Load the BillSum dataset
+ dataset = load_dataset("billsum", split="ca_test")
+
+ # Initialize models
+ sbert_model = SentenceTransformer("all-mpnet-base-v2")
+ t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")
+ t5_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
+
+ # Prepare data and build FAISS index
+ texts = dataset["text"][:100]  # Limiting to 100 samples for speed
+ case_embeddings = sbert_model.encode(texts, convert_to_tensor=True, show_progress_bar=True)
+ index = faiss.IndexFlatL2(case_embeddings.shape[1])
+ index.add(np.array(case_embeddings.cpu()))
+
+ # Define retrieval and summarization functions
+ def retrieve_cases(query, top_k=3):
+     query_embedding = sbert_model.encode(query, convert_to_tensor=True)
+     _, indices = index.search(np.array([query_embedding.cpu()]), top_k)
+     return [(texts[i], i) for i in indices[0]]
+
+ def summarize_text(text):
+     inputs = t5_tokenizer("summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
+     outputs = t5_model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
+     return t5_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Streamlit UI
+ def main():
+     st.title("Legal Case Summarizer")
+     query = st.text_input("Enter your case search query here:")
+     top_k = st.slider("Number of similar cases to retrieve:", 1, 5, 3)
+
+     if st.button("Search"):
+         results = retrieve_cases(query, top_k=top_k)
+         for i, (case_text, case_idx) in enumerate(results):  # case_idx avoids shadowing the FAISS index
+             st.subheader(f"Case {i+1}")
+             st.write("**Original Text:**", case_text)
+             summary = summarize_text(case_text)
+             st.write("**Summary:**", summary)
+
+ if __name__ == "__main__":
+     main()
+
+ # Run Streamlit app within Colab
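
The closing comment mentions running the Streamlit app from Colab, but this commit includes no launcher code. Below is a minimal sketch of one common approach, starting the app in a background process and exposing it through an ngrok tunnel via pyngrok; the port number, the auth-token step, and the file name app.py are assumptions on my part, not part of this commit.

    # Sketch: launch app.py in the background and expose it via an ngrok tunnel.
    # Assumes `streamlit` and `pyngrok` are installed and an ngrok auth token is available.
    import subprocess
    from pyngrok import ngrok

    # ngrok.set_auth_token("YOUR_NGROK_TOKEN")  # required once per environment (token is a placeholder)

    # Start Streamlit on its default port (8501) without blocking the notebook cell.
    proc = subprocess.Popen(["streamlit", "run", "app.py", "--server.port", "8501"])

    # Open a public tunnel to the local Streamlit server and print the shareable URL.
    tunnel = ngrok.connect(8501)
    print("App available at:", tunnel.public_url)

Any tunneling tool works here; ngrok is used only because it is a common choice for exposing a local Streamlit port from a hosted notebook.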