student-abdullah committed on
Commit
54766f8
•
1 Parent(s): f3bea0d

Upload 6 files

Files changed (7)
  1. .gitattributes +1 -0
  2. README.md +12 -0
  3. a.py +123 -0
  4. gitattributes +1 -0
  5. location.csv +0 -0
  6. model.gguf +3 -0
  7. requirements.txt +0 -0
.gitattributes ADDED
@@ -0,0 +1 @@
+ model.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: MedAI
+ emoji: 💊
+ colorFrom: red
+ colorTo: gray
+ sdk: streamlit
+ sdk_version: 1.38.0
+ app_file: a.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
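With `sdk: streamlit` (version 1.38.0) and `app_file: a.py`, the Space serves a.py through Streamlit; a rough local equivalent (an assumption, not part of this commit) is installing requirements.txt and running `streamlit run a.py`.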
a.py ADDED
@@ -0,0 +1,123 @@
+ import streamlit as st
+ import pandas as pd
+ from langchain_community.llms import LlamaCpp
+ from langchain_core.callbacks import StreamingStdOutCallbackHandler
+ from langchain_core.prompts import PromptTemplate
+
+ # Load the CSV file for Kendra Locator
+ df = pd.read_csv('location.csv', encoding='Windows-1252')
+
+ # Initialize session state for selected service and chatbot history
+ if 'selected_service' not in st.session_state:
+     st.session_state.selected_service = "Kendra Locator"
+ if 'user_input' not in st.session_state:
+     st.session_state['user_input'] = ''
+
+
+ st.set_page_config(layout="centered", initial_sidebar_state="expanded")
+
+ st.sidebar.title("KENDR LOCATOR")
+ st.sidebar.write("Find One Near You!")
+
+ display_option = st.sidebar.selectbox("Select:", ["Address", "Email"])
+ pin_code_input = st.sidebar.text_input("Enter Pin Code:")
+
+ if st.sidebar.button("Locate"):
+     if pin_code_input:
+         result = df[df['Pin'].astype(str) == pin_code_input]
+         if not result.empty:
+             st.sidebar.write(f"**Name**: {result['Name'].values[0]}")
+
+             if display_option == "Address":
+                 st.sidebar.write(f"**Address**: {result['Address'].values[0]}")
+             elif display_option == "Email":
+                 st.sidebar.write(f"**Email**: {result['Email'].values[0]}")
+         else:
+             st.sidebar.write("No results found.")
+     else:
+         st.sidebar.write("Please enter a pin code.")
+
+
+ llm = LlamaCpp(
+     model_path="model.gguf",
+     temperature=0.3,
+     max_tokens=512,
+     top_p=1,
+     callbacks=[StreamingStdOutCallbackHandler()],
+     verbose=False,
+     stop=["###"]
+ )
+
+ template = """Below is a Question that describes a task. Write a Response that appropriately completes that request. Always maintain a professional tone and avoid providing an offensive or inappropriate Response.
+
+ ### Question:
+ {input}
+
+ ### Response:
+ {response}"""
+
+ prompt = PromptTemplate.from_template(template)
+
+ PROFANE_WORDS = [
+     "damn", "shit", "fuck", "bitch", "asshole", "dick", "piss", "crap", "cunt",
+     "twat", "slut", "whore", "faggot", "nigger", "kike", "chink", "gook", "spic",
+     "dyke", "suck", "cock", "pussy", "motherfucker", "bastard", "prick", "wanker",
+     "bollocks", "arse", "bloody", "bugger", "tosser", "git", "slag", "pillock",
+     "knob", "knobhead", "wazzock", "clit", "scrotum", "fanny", "ass", "freak",
+     "bimbo", "dumbass", "jackass", "wimp", "idiot", "moron", "loser", "fool",
+     "retard", "cocksucker", "shag", "shagger", "piss off", "go to hell",
+     "hell", "dammit", "son of a bitch", "jerk", "puke", "chut", "chutiyah",
+     "bhosdike", "bhenchod", "madarchod", "gandu", "gand", "bhancho",
+     "saala", "kameena", "bhenji", "bhadwa", "kothi", "aankhmar", "launda",
+     "bhikari", "sala", "billi", "bhosdika", "kothi", "sundar", "langda",
+     "kaamchor", "gaddha", "bakra", "chudiya", "gando", "bhencod", "lanat",
+     "bhoot", "chakkar", "chutak", "haramkhor", "bandar", "banda", "bakwas",
+     "nikamma", "pagal", "nalayak", "pagal", "khota", "madharchod"
+ ]
+
+
+ def contains_profanity(text):
+     """Check if the text contains any profane words (substring match)."""
+     return any(word in text.lower() for word in PROFANE_WORDS)
+
+
+ def truncate_at_full_stop(text, max_length=512):
+     """Trim text to max_length, cutting back to the last full stop if one exists."""
+     if len(text) <= max_length:
+         return text
+
+     truncated = text[:max_length]
+     print(f"Truncated text: {truncated}")
+
+     last_period = truncated.rfind('.')
+     print(f"Last period index: {last_period}")
+
+     if last_period != -1:
+         return truncated[:last_period + 1]
+
+     return truncated
+
+
+ if st.session_state.selected_service == "Kendra Locator":
+     st.title("MedAI")
+
+     user_input = st.text_input("Your Queries:", key='temp_user_input')
+
+     if st.button("Ask Away"):
+         if user_input:
+             if contains_profanity(user_input):
+                 st.markdown("<span style='color: red;'>Mind The Language Dear!</span>", unsafe_allow_html=True)
+             else:
+                 formatted_prompt = prompt.format(
+                     input=user_input,
+                     response=""
+                 )
+
+                 response = llm.invoke(formatted_prompt)
+
+                 truncated_response = truncate_at_full_stop(response)
+                 st.markdown(f"**You:** {user_input}", unsafe_allow_html=False)
+                 st.markdown(f"**MedAI:** {truncated_response}.", unsafe_allow_html=False)
+
+
+ st.warning("Developer's notice: Responses are generated by AI and may be inaccurate or inappropriate. Any medical or financial advice received is not a substitute for professional advice.")
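For orientation, here is a minimal sketch (not part of this commit) of the same LlamaCpp-plus-PromptTemplate flow from a plain Python shell, assuming model.gguf has been pulled via Git LFS and the packages from requirements.txt are installed; the example question is hypothetical:

```python
# Minimal, non-Streamlit sketch of the inference path used in a.py.
from langchain_community.llms import LlamaCpp
from langchain_core.prompts import PromptTemplate

llm = LlamaCpp(
    model_path="model.gguf",  # ~4.95 GB GGUF weights tracked by Git LFS
    temperature=0.3,
    max_tokens=512,
    stop=["###"],             # same stop sequence as the Space
)

template = """Below is a Question that describes a task. Write a Response that appropriately completes that request.

### Question:
{input}

### Response:
{response}"""

prompt = PromptTemplate.from_template(template)

# Hypothetical query; in the Space this comes from st.text_input.
print(llm.invoke(prompt.format(input="What is paracetamol used for?", response="")))
```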
gitattributes ADDED
@@ -0,0 +1 @@
+ model.gguf filter=lfs diff=lfs merge=lfs -text
location.csv ADDED
The diff for this file is too large to render. See raw diff
 
model.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e64b9b104ba6a0246f807dbe024dfbc4f7d66e8bea06a31591249ade04ff9ad3
+ size 4951084928
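As a hedged sketch (not part of the commit), the pointer above can be checked against a locally materialised model.gguf using only the standard library; the file path is an assumption about where Git LFS places the blob:

```python
# Verify a local model.gguf against the sha256 oid recorded in the LFS pointer.
import hashlib

EXPECTED_SHA256 = "e64b9b104ba6a0246f807dbe024dfbc4f7d66e8bea06a31591249ade04ff9ad3"

digest = hashlib.sha256()
with open("model.gguf", "rb") as f:  # assumed path; adjust to your checkout
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

print("OK" if digest.hexdigest() == EXPECTED_SHA256 else "hash mismatch")
```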
requirements.txt ADDED
Binary file (2.86 kB).