Spaces:
Sleeping
Sleeping
Younesse Kaddar
committed on
Commit
·
d165796
1
Parent(s):
e867a2d
latest update
Browse files
app.py
CHANGED
@@ -4,7 +4,7 @@ import streamlit as st
|
|
4 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
5 |
from langchain.embeddings import OpenAIEmbeddings
|
6 |
from langchain.vectorstores import Chroma
|
7 |
-
from langchain.chains import
|
8 |
from langchain.memory import ConversationBufferMemory
|
9 |
from langchain.vectorstores import FAISS
|
10 |
|
@@ -47,7 +47,7 @@ claude = ChatAnthropic()
|
|
47 |
|
48 |
# Initialize session state
|
49 |
if 'model' not in st.session_state:
|
50 |
-
st.session_state['model'] = 'GPT-3.5
|
51 |
|
52 |
# Get model based on user selection
|
53 |
selected_model = st.session_state['model']
|
@@ -91,15 +91,40 @@ def load_file(files):
|
|
91 |
if 'text' not in st.session_state:
|
92 |
st.session_state['text'] = ''
|
93 |
|
94 |
-
|
95 |
-
|
96 |
-
human_template="
|
97 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
|
99 |
-
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
|
100 |
-
chain = LLMChain(llm=llm, prompt=chat_prompt)
|
101 |
|
102 |
def get_feedback(text, university, major):
|
|
|
103 |
# Use a loading screen
|
104 |
with st.spinner('π Generating feedback...'):
|
105 |
feedback = chain.predict(subject=major, university=university, statement=text, verbose=True)
|
|
|
4 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
5 |
from langchain.embeddings import OpenAIEmbeddings
|
6 |
from langchain.vectorstores import Chroma
|
7 |
+
from langchain.chains import LLMChain # , ConversationalRetrievalChain
|
8 |
from langchain.memory import ConversationBufferMemory
|
9 |
from langchain.vectorstores import FAISS
|
10 |
|
|
|
47 |
|
48 |
# Initialize session state
|
49 |
if 'model' not in st.session_state:
|
50 |
+
st.session_state['model'] = 'GPT-3.5'
|
51 |
|
52 |
# Get model based on user selection
|
53 |
selected_model = st.session_state['model']
|
|
|
91 |
if 'text' not in st.session_state:
|
92 |
st.session_state['text'] = ''
|
93 |
|
94 |
+
def get_prompt(subject, university):
|
95 |
+
system_template="You are an expert university admissions officer tasked with helping a student."
|
96 |
+
human_template="""This the student's personal statement:
|
97 |
+
{statement}
|
98 |
+
Mark the personal statement using this rubric, give a mark for each section and an overall mark: Appropriate Opening (1 point): The opening should be clear and focused, avoiding gimmicky or pretentious language.
|
99 |
+
Passion for Subject (5 points): The author should clearly express their interest in the subject matter, supported by evidence of wider reading and analytical reflection.
|
100 |
+
Analytical Reflection (5 points): The author should reflect on their wider reading and how it has developed their interest in the subject, supporting their ideas with evidence.
|
101 |
+
Extra-Curricular Activities (1 point): Relevant extracurricular activities should be mentioned briefly.
|
102 |
+
Strong Conclusion (1 point): The conclusion should be simple and directly related to the subject matter.
|
103 |
+
Spelling and Grammar (2 points): The author should ensure that their spelling and grammar are correct.
|
104 |
+
Tone (3 points): The tone should strike a balance between formality and casualness.
|
105 |
+
Clarity and Flow (2 points): The statement should be easy to read and have a clear structure.. For each attribute explain specifically how the student could improve/learn e.g., courses to take, projects to undertake.
|
106 |
+
Provide a score for each section and an overall score, explain how they can improve each section."""
|
107 |
+
|
108 |
+
if subject != "Infer from statement" and university != "Infer from statement":
|
109 |
+
system_template += f" The student wants to study {subject} at university and dreams of getting admitted into their dream university: {university}."
|
110 |
+
human_template += " Don't forget their desired major is {subject} and their desired university is {university}."
|
111 |
+
elif subject == "Infer from statement" and university != "Infer from statement":
|
112 |
+
system_template += f" The student dreams of getting admitted into their dream university: {university}."
|
113 |
+
human_template += " Don't forget their desired university is {university}."
|
114 |
+
elif subject != "Infer from statement" and university == "Infer from statement":
|
115 |
+
system_template += f" The student wants to study {subject} at university and dreams of getting admitted into their dream university."
|
116 |
+
human_template += " Don't forget their desired major is {subject}."
|
117 |
+
|
118 |
+
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
|
119 |
+
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
|
120 |
+
|
121 |
+
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
|
122 |
+
|
123 |
+
return chat_prompt
|
124 |
|
|
|
|
|
125 |
|
126 |
def get_feedback(text, university, major):
|
127 |
+
chain = LLMChain(llm=llm, prompt=get_prompt(major, university))
|
128 |
# Use a loading screen
|
129 |
with st.spinner('π Generating feedback...'):
|
130 |
feedback = chain.predict(subject=major, university=university, statement=text, verbose=True)
|