arabellastrange committed on
Commit
65fa153
·
1 Parent(s): 3b97df8

backup offline search

Browse files
Files changed (2) hide show
  1. app.py +42 -12
  2. read_write_index.py +1 -2
app.py CHANGED
@@ -7,11 +7,30 @@ from llama_index.core import Document, VectorStoreIndex
7
 
8
  from generate_response import generate_chat_response_with_history, set_llm, is_search_query, condense_question, \
9
  generate_chat_response_with_history_rag_return_response
 
10
  from web_search import search
11
 
12
  API_KEY_PATH = "../keys/gpt_api_key.txt"
13
  logger = logging.getLogger("agent_logger")
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  def google_search_chat(message, history):
17
  condensed_question = condense_question(message, history)
@@ -29,21 +48,32 @@ def google_search_chat(message, history):
29
  index = VectorStoreIndex.from_documents(documents)
30
  print('Search results vectorized...')
31
  response = generate_chat_response_with_history_rag_return_response(index, message, history)
 
 
 
 
32
 
33
- response_text = []
34
- string_output = ""
 
 
 
 
 
 
 
 
35
 
36
- for text in response.response_gen:
37
- response_text.append(text)
38
- string_output = ''.join(response_text)
39
- yield string_output
40
- yield string_output + f'\n\n --- \n **Sources used:** \n {sources}'
41
 
42
- print(f'Assistant Response: {string_output}')
43
- else:
44
- print(
45
- f'Assistant Response: Sorry, no search results found.')
46
- yield "Sorry, no search results found."
 
 
47
  else:
48
  yield from generate_chat_response_with_history(message, history)
49
 
 
7
 
8
  from generate_response import generate_chat_response_with_history, set_llm, is_search_query, condense_question, \
9
  generate_chat_response_with_history_rag_return_response
10
+ from read_write_index import read_write_index
11
  from web_search import search
12
 
13
  API_KEY_PATH = "../keys/gpt_api_key.txt"
14
  logger = logging.getLogger("agent_logger")
15
 
16
+ mush_sources = ("1. https://en.wikipedia.org/wiki/Mushroom_poisoning \n"
17
+ "2. https://thehomesteadtraveler.com/foraging-for-mushrooms-in-italy/ \n"
18
+ "3. https://funghimagazine.it/mushroom-hunting-in-italy/")
19
+ email_sources = (
20
+ "1. https://support.microsoft.com/en-us/office/advanced-outlook-com-security-for-microsoft-365-subscribers-882d2243-eab9-4545-a58a-b36fee4a46e2"
21
+ "\n 2. https://support.microsoft.com/en-us/office/security-and-privacy-in-outlook-web-app-727a553e-5502-4899-b1ea-c84a9ddde2af"
22
+ "\n 3. https://support.microsoft.com/en-us/office/delay-or-schedule-sending-email-messages-in-outlook-026af69f-c287-490a-a72f-6c65793744ba"
23
+ "\n 4. https://www.paubox.com/blog/scheduling-emails-and-hipaa-compliance")
24
+
25
+ cake_sources = ("1. https://www.indianhealthyrecipes.com/eggless-carrot-cake/"
26
+ "\n 2. https://www.pccmarkets.com/taste/2013-03/egg_substitutes/"
27
+ "\n 3. https://www.healthdirect.gov.au/nut-allergies")
28
+
29
+ art_sources = ("1. https://en.wikipedia.org/wiki/Post-Impressionism"
30
+ "\n 2. https://www.metmuseum.org/toah/hd/poim/hd_poim.htm"
31
+ "\n 3. https://www.britannica.com/art/Post-Impressionism"
32
+ "\n 4. https://www.theartstory.org/movement/post-impressionism/")
33
+
34
 
35
  def google_search_chat(message, history):
36
  condensed_question = condense_question(message, history)
 
48
  index = VectorStoreIndex.from_documents(documents)
49
  print('Search results vectorized...')
50
  response = generate_chat_response_with_history_rag_return_response(index, message, history)
51
+ else:
52
+ print(f'Assistant Response: Sorry, no search results found, trying offline backup...')
53
+ index = read_write_index(path='storage_search/')
54
+ response = generate_chat_response_with_history_rag_return_response(index, message, history)
55
 
56
+ if "mushroom" in message.lower() or "poison" in message.lower() or "italy" in message.lower():
57
+ sources = mush_sources
58
+ elif "email" in message.lower() or "data" in message.lower() or "gdpr" in message.lower():
59
+ sources = email_sources
60
+ elif "cake" in message.lower() or "egg" in message.lower() or "nut" in message.lower():
61
+ sources = cake_sources
62
+ elif "art" in message.lower() or "post-impressionism" in message.lower() or "postimpressionism" in message.lower():
63
+ sources = art_sources
64
+ else:
65
+ sources = "No sources available for this response."
66
 
67
+ response_text = []
68
+ string_output = ""
 
 
 
69
 
70
+ for text in response.response_gen:
71
+ response_text.append(text)
72
+ string_output = ''.join(response_text)
73
+ yield string_output
74
+ yield string_output + f'\n\n --- \n **Sources used:** \n {sources}'
75
+
76
+ print(f'Assistant Response: {string_output}')
77
  else:
78
  yield from generate_chat_response_with_history(message, history)
79
 
read_write_index.py CHANGED
@@ -4,8 +4,7 @@ import os
4
  from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage
5
 
6
  logger = logging.getLogger(__name__)
7
- DOCUMENT_PATH = '../data'
8
-
9
 
10
 
11
  # remember to delete stored vectors when new documents are added to the data so the storage is recreated
 
4
  from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage
5
 
6
  logger = logging.getLogger(__name__)
7
+ DOCUMENT_PATH = 'search_data'
 
8
 
9
 
10
  # remember to delete stored vectors when new documents are added to the data so the storage is recreated