Gopikanth123 committed
Commit 3c88d1c · verified · 1 Parent(s): 16d15ab

Update main.py

Files changed (1):
  1. main.py +18 -44
main.py CHANGED
@@ -1,10 +1,12 @@
 import os
+import shutil
 from flask import Flask, render_template, request, jsonify
 from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from huggingface_hub import InferenceClient
 
+# Initialize environment and settings
 repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
 llm_client = InferenceClient(
     model=repo_id,
@@ -12,7 +14,6 @@ llm_client = InferenceClient(
 )
 
 os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
-# Configure Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
@@ -31,54 +32,47 @@ PDF_DIRECTORY = 'data'
 # Ensure directories exist
 os.makedirs(PDF_DIRECTORY, exist_ok=True)
 os.makedirs(PERSIST_DIR, exist_ok=True)
+
 chat_history = []
 current_chat_history = []
 
-# def data_ingestion_from_directory():
-#     documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
-#     storage_context = StorageContext.from_defaults()
-#     index = VectorStoreIndex.from_documents(documents)
-#     index.storage_context.persist(persist_dir=PERSIST_DIR)
 def data_ingestion_from_directory():
     # Clear previous data by removing the persist directory
     if os.path.exists(PERSIST_DIR):
-        shutil.rmtree(PERSIST_DIR)  # Remove the persist directory and all its contents
+        shutil.rmtree(PERSIST_DIR)
 
-    # Recreate the persist directory after removal
     os.makedirs(PERSIST_DIR, exist_ok=True)
-
-    # Load new documents from the directory
     new_documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
 
-    # Create a new index with the new documents
-    index = VectorStoreIndex.from_documents(new_documents)
+    if not new_documents:
+        print("No documents were found or loaded.")
+        return
 
-    # Persist the new index
+    index = VectorStoreIndex.from_documents(new_documents)
     index.storage_context.persist(persist_dir=PERSIST_DIR)
+    print("Persist data cleared and updated with new data.")
 
 def handle_query(query):
     chat_text_qa_msgs = [
-        (
-            "user",
-            """
-            You are the Taj Hotel chatbot and your name is Taj Hotel Helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the given Taj hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
-            {context_str}
-            Question:
-            {query_str}
-            """
-        )
+        ("user", """
+        You are the Taj Hotel chatbot and your name is Taj Hotel Helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the given Taj hotel's data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
+        {context_str}
+        Question:
+        {query_str}
+        """)
     ]
     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
 
     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
     index = load_index_from_storage(storage_context)
+
     context_str = ""
     for past_query, response in reversed(current_chat_history):
         if past_query.strip():
             context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
     query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
-    print(query)
+    print(f"User query: {query}")
     answer = query_engine.query(query)
 
     if hasattr(answer, 'response'):
@@ -87,43 +81,23 @@ def handle_query(query):
         response = answer['response']
     else:
         response = "Sorry, I couldn't find an answer."
+
     current_chat_history.append((query, response))
     return response
 
 app = Flask(__name__)
 
-# Initialize Gradio Client once for efficiency
-try:
-    client = Client("Gopikanth123/llama2")  # Replace with your Gradio model URL
-except Exception as e:
-    print(f"Error initializing Gradio client: {str(e)}")
-    client = None
-
-# # Function to fetch the response from Gradio model
-# def generate_response(query):
-#     if client is None:
-#         return "Model is unavailable at the moment. Please try again later."
-#     try:
-#         result = client.predict(query=query, api_name="/predict")
-#         return result
-#     except Exception as e:
-#         return f"Error fetching the response: {str(e)}"
-# Generate Response
 def generate_response(query):
     try:
-        # Call the handle_query function to get the response
         bot_response = handle_query(query)
        return bot_response
     except Exception as e:
         return f"Error fetching the response: {str(e)}"
 
-
-# Route for the homepage
 @app.route('/')
 def index():
     return render_template('index.html')
 
-# Route to handle chatbot messages
 @app.route('/chat', methods=['POST'])
 def chat():
     try:
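
After this commit, data_ingestion_from_directory rebuilds the index only when documents are actually loaded, and handle_query folds prior turns into context_str before querying. A minimal sketch of how the two functions are exercised end to end follows; the __main__ guard, the sample question, and the port are illustrative assumptions, not part of the committed file.

    # Minimal local smoke test for this commit's pipeline (illustrative;
    # the __main__ guard, sample question, and port are assumptions,
    # not part of the committed main.py).
    if __name__ == '__main__':
        data_ingestion_from_directory()   # rebuild the index from ./data
        print(generate_response("What time is check-in at the Taj Hotel?"))
        app.run(host='0.0.0.0', port=7860)   # serve / and /chat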
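The diff context ends inside the /chat handler, so the request schema is not shown here. For orientation only, a hypothetical client call might look like the sketch below; the "message" payload key, the response shape, and the local address are assumptions, since the route body lies outside this diff.

    # Hypothetical client for the /chat endpoint. The 'message' key and the
    # address are assumptions: the handler body is truncated in this diff,
    # so the real schema may differ.
    import requests

    resp = requests.post(
        "http://localhost:5000/chat",
        json={"message": "Do you have rooms available tonight?"},
    )
    print(resp.status_code, resp.text)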