import re

import streamlit as st
import pandas as pd
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import StreamingStdOutCallbackHandler
from langchain_core.prompts import PromptTemplate
# Page setup must be the first Streamlit command in the script
st.set_page_config(layout="centered", initial_sidebar_state="expanded")

# Load the CSV file for Kendra Locator
df = pd.read_csv('location.csv', encoding='Windows-1252')

# Initialize session state for selected service and chatbot history
if 'selected_service' not in st.session_state:
    st.session_state.selected_service = "Kendr Locator"
if 'user_input' not in st.session_state:
    st.session_state['user_input'] = ''

st.sidebar.title("KENDR LOCATOR")
st.sidebar.write("Find One Near You!")
display_option = st.sidebar.selectbox("Select:", ["Address", "Email"])
pin_code_input = st.sidebar.text_input("Enter Pin Code:")
if st.sidebar.button("Locate"):
if pin_code_input:
result = df[df['Pin'].astype(str) == pin_code_input]
if not result.empty:
st.sidebar.write(f"**Name**: {result['Name'].values[0]}")
if display_option == "Address":
st.sidebar.write(f"**Address**: {result['Address'].values[0]}")
elif display_option == "Email":
st.sidebar.write(f"**Email**: {result['Email'].values[0]}")
else:
st.sidebar.write("No results found.")
else:
st.sidebar.write("Please enter a pin code.")
llm = LlamaCpp(
    model_path="model.gguf",
    temperature=0.7,
    max_tokens=512,
    top_p=1,
    callbacks=[StreamingStdOutCallbackHandler()],
    verbose=False,
    stop=["###"]
)
template = """You are a knowledgeable, conversational assistant. Below is a Question that describes a query. Provide a comprehensive Response that thoroughly addresses the query, including reasoning and examples where relevant.
### Question:
{}
### Response:
{}"""
prompt = PromptTemplate.from_template(template)
PROFANE_WORDS = [
"damn", "shit", "fuck", "bitch", "asshole", "dick", "piss", "crap", "cunt",
"twat", "slut", "whore", "faggot", "nigger", "kike", "chink", "gook", "spic",
"dyke", "suck", "cock", "pussy", "motherfucker", "bastard", "prick", "wanker",
"bollocks", "arse", "bloody", "bugger", "tosser", "git", "slag", "pillock",
"knob", "knobhead", "wazzock", "clit", "scrotum", "fanny", "ass", "freak",
"bimbo", "dumbass", "jackass", "wimp", "idiot", "moron", "loser", "fool",
"retard", "cocksucker", "shag", "shagger", "piss off", "go to hell",
"dammit", "son of a bitch", "jerk", "puke", "chut", "chutiyah",
"bhosdike", "bhenchod", "madarchod", "gandu", "gand", "bhancho",
"saala", "kameena", "bhenji", "bhadwa", "kothi", "aankhmar", "launda",
"bhikari", "sala", "bhosdika", "kothi", "sundar", "langda",
"kaamchor", "gaddha", "bakra", "chudiya", "gando", "bhencod", "lanat",
"bhoot", "chakkar", "chutak", "haramkhor", "bandar", "banda", "bakwas",
"nikamma", "pagal", "nalayak", "pagal", "khota", "madharchod"
]
def contains_profanity(text):
    """Check if the text contains any profane words (whole-word match, to avoid flagging substrings of harmless words)."""
    text = text.lower()
    return any(re.search(r'\b' + re.escape(word) + r'\b', text) for word in PROFANE_WORDS)
def truncate_at_full_stop(text, max_length=512):
    """Trim text to max_length characters, cutting back to the last full stop when possible."""
    if len(text) <= max_length:
        return text
    truncated = text[:max_length]
    last_period = truncated.rfind('.')
    if last_period != -1:
        return truncated[:last_period + 1]
    return truncated
# Load the list of known medicine names
df1 = pd.read_csv('med_name.csv', encoding='utf-8')
df1.columns = df1.columns.str.strip()  # CSV headers may carry stray whitespace (e.g. 'Meds ')

# Convert the 'Meds' column to a lowercase list
KNOWN_MEDICINES = df1['Meds'].str.lower().str.strip().tolist()
def contains_medicine_terms(output):
    """Check if the output contains dosage or formulation terms that indicate a medicine name."""
    output = output.lower()
    return any(term in output for term in [" ip ", " mg ", " ml ", " gm ", " mcg ",
                                           "mg ", "ml ", "gm ", "mcg "])

def is_valid_medicine_in_input(user_input):
    """Check if the user input mentions any known medicine name."""
    return any(med in user_input.lower() for med in KNOWN_MEDICINES)
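
# Response gating: if the model's answer mentions dosage terms but the user's question
# did not name a medicine from the known list, the answer is withheld and the user is
# pointed to the Kendr Locator instead; otherwise the truncated answer is displayed.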
if st.session_state.selected_service == "Kendr Locator":
st.title("MedBot")
user_input = st.text_input("Your Queries:", key='temp_user_input')
if st.button("Ask Away"):
if user_input:
if contains_profanity(user_input):
st.markdown("<span style='color: red;'>Mind The Language Dear!</span>", unsafe_allow_html=True)
else:
formatted_prompt = template.format(user_input, "")
response = llm.invoke(formatted_prompt)
if contains_medicine_terms(response):
# Generated response has medical terms
if is_valid_medicine_in_input(user_input):
truncated_response = truncate_at_full_stop(response)
st.markdown(f"**MedBot:** {truncated_response}", unsafe_allow_html=False)
else:
st.markdown("<span style='color: green;'>"
"Please consult to a Pharmacist at you nearest Janaushadi Kendr""<br>"
"Use Kendr Locator to find one near you!"
"</span>", unsafe_allow_html=True)
else:
# No medicine-related terms, safe to display response
truncated_response = truncate_at_full_stop(response)
st.markdown(f"**MedBot:** {truncated_response}", unsafe_allow_html=False)
st.warning("Developer's notice : Responses are generated by AI and maybe inaccurate or inappropriate."
"Any received medical or financial consult is not a substitute for professional advice.") |