Update app.py
app.py
CHANGED
@@ -34,7 +34,7 @@ def find(query):
     queries = [
         get_detailed_instruct(task, query)
     ]
-    print("start
+    print("start")
     print(time.time())

     hadiths = pd.read_csv('all_hadiths_clean.csv', delimiter=",")
@@ -42,18 +42,21 @@ def find(query):
     document_embeddings = torch.load('encoded_hadiths_multilingual-e5-large-instruct (1).sav',map_location ='cpu')
     #file = open('encoded_hadiths_multilingual-e5-large-instruct (1).sav','rb')
     #document_embeddings = pickle.load(file)
-    print("load hadiths
+    print("load hadiths")
     print(time.time())

     query_embeddings = model.encode(queries, convert_to_tensor=True, normalize_embeddings=True)
+    print("embed query")
+    print(time.time())
+
     scores = (query_embeddings @ document_embeddings.T) * 100
-    print("consine similarity
+    print("consine similarity")
     print(time.time())

     # insert the similarity value to dataframe & sort it
     hadiths['similarity'] = scores.tolist()[0]
     sorted_hadiths = hadiths.sort_values(by='similarity', ascending=False)
-    print("sort hadiths
+    print("sort hadiths")
     print(time.time())

     results = sorted_hadiths.head(3).drop(columns=['id', 'hadith_id', 'chain_indx'])
@@ -67,7 +70,7 @@ def find(query):
     results['text'] = '<a href="'+url+'">'+results['text_en']+ '</a>' + ' (' + results['source'].astype(str) + ')'
     results = results.drop(columns=['source', 'chapter_no', 'hadith_no', 'chapter', 'similarity', 'text_ar', 'text_en'])

-    print("prepare results
+    print("prepare results")
     print(time.time())

     #return sorted_quran