from flask import Flask, render_template, request, jsonify
import os
import shutil
from llama_index.core import (
    StorageContext,
    load_index_from_storage,
    VectorStoreIndex,
    SimpleDirectoryReader,
    ChatPromptTemplate,
    Settings,
)
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer, AutoModel
# Ensure HF_TOKEN is set
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set.")

repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"

# Raw Inference API client for the model endpoint
llm_client = InferenceClient(
    model=repo_id,
    token=HF_TOKEN,
)
# Configure LlamaIndex settings
Settings.llm = HuggingFaceInferenceAPI(
    model_name=repo_id,
    tokenizer_name=repo_id,
    context_window=3000,
    token=HF_TOKEN,
    max_new_tokens=512,
    generate_kwargs={"temperature": 0.1},
)
# Configure embedding model (XLM-RoBERTa for multilingual support)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="xlm-roberta-base"  # Multilingual support
)

# Load tokenizer and model for multilingual text handling
# (instantiated here but not called directly by the query pipeline below)
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModel.from_pretrained("xlm-roberta-base")
PERSIST_DIR = "db"
PDF_DIRECTORY = "data"

# Ensure directories exist
os.makedirs(PDF_DIRECTORY, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)

chat_history = []
current_chat_history = []
# Data ingestion function
def data_ingestion_from_directory():
    """Rebuild the vector index from the PDFs in PDF_DIRECTORY and persist it."""
    if os.path.exists(PERSIST_DIR):
        shutil.rmtree(PERSIST_DIR)  # Remove the persist directory and its contents
    os.makedirs(PERSIST_DIR, exist_ok=True)
    new_documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
    index = VectorStoreIndex.from_documents(new_documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
# Function to handle the query and provide a response
def handle_query(query, selected_language):
    """Answer a user query against the indexed PDFs in the selected language."""
    # Build conversation history from the current chat history
    # (named history_str so it does not clash with the {context_str}
    # placeholder that the query engine fills with retrieved text)
    history_str = ""
    for past_query, response in reversed(current_chat_history):
        if past_query.strip():
            history_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
    # Define the response template based on the selected language
    if selected_language == 'telugu':
        language_prompt = "మీరు తాజ్ హోటల్ చాట్బాట్, తాజ్ హోటల్ సహాయకుడు."
    elif selected_language == 'hindi':
        language_prompt = "आप ताज होटल चैटबोट हैं, ताज होटल सहायक।"
    else:
        language_prompt = "You are the Taj Hotel chatbot, Taj Hotel Helper."
    # Build the QA prompt. The conversation history is interpolated here;
    # the {context_str} and {query_str} placeholders (brace-escaped below)
    # are left for the query engine to fill with retrieved text and the question.
    chat_text_qa_msgs = [
        (
            "user",
            f"""
            {language_prompt}
            **Your Role:**
            - Respond accurately and concisely in the user's preferred language (English, Telugu, or Hindi).
            - Provide information about the hotel’s services, amenities, and policies.
            **Instructions:**
            - **Conversation History:**
            {history_str}
            - **Context:**
            {{context_str}}
            - **User's Question:**
            {{query_str}}
            **Response Guidelines:**
            1. **Language Adaptation:** Respond in the language of the question (English, Telugu, or Hindi).
            2. **Tone:** Maintain politeness, professionalism, and the luxury branding of the Taj Hotel.
            3. **Clarity:** Limit responses to 10-15 words for direct and clear communication.
            4. **Knowledge Boundaries:** If unsure of an answer, respond with:
            _"I’m not sure. Please contact our staff for accurate information."_
            5. **Actionable Help:** Offer suggestions or alternative steps to guide the user where applicable.
            **Response:** [Your concise response here]
            """,
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)

    # Load the persisted index for querying
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
print(f"Querying: {query}") | |
answer = query_engine.query(query) | |
# Extracting the response | |
if hasattr(answer, 'response'): | |
response = answer.response | |
elif isinstance(answer, dict) and 'response' in answer: | |
response = answer['response'] | |
else: | |
response = "I'm sorry, I couldn't find an answer to that." | |
# Append to chat history | |
current_chat_history.append((query, response)) | |
return response | |
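# Example call (a sketch; assumes the index has already been built by
# data_ingestion_from_directory() below and that HF_TOKEN is set):
#   handle_query("What time is check-out?", "english")
#   -> a short English answer grounded in the PDFs under data/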
app = Flask(__name__)

# Data ingestion
data_ingestion_from_directory()
# Generate response
def generate_response(query, language):
    try:
        # Call the handle_query function to get the response
        bot_response = handle_query(query, language)
        return bot_response
    except Exception as e:
        return f"Error fetching the response: {str(e)}"
# Route for the homepage
@app.route('/')
def index():
    return render_template('index.html')
# Route to handle chatbot messages
@app.route('/chat', methods=['POST'])  # endpoint path assumed; match it to the front-end
def chat():
    try:
        user_message = request.json.get("message")
        selected_language = request.json.get("language")  # Get selected language from the request
        if not user_message:
            return jsonify({"response": "Please say something!"})
        if selected_language not in ['english', 'telugu', 'hindi']:
            return jsonify({"response": "Invalid language selected."})
        bot_response = generate_response(user_message, selected_language)
        return jsonify({"response": bot_response})
    except Exception as e:
        return jsonify({"response": f"An error occurred: {str(e)}"})
if __name__ == '__main__':
    app.run(debug=True)
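# Example request (a sketch; assumes the app is running on Flask's default
# port 5000 and that templates/index.html exists for the homepage route):
#
#   curl -X POST http://127.0.0.1:5000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Do you have airport pickup?", "language": "english"}'
#
# Reply shape: {"response": "<bot answer>"}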