import re
import pickle  # used by the optional model-persistence sketch below

import streamlit as st
import pandas as pd
from pdfminer.high_level import extract_text
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier

def cleanResume(resumeText):
    """Strip URLs, mentions, hashtags, punctuation, non-ASCII characters and extra whitespace."""
    resumeText = re.sub(r'http\S+\s*', ' ', resumeText)    # remove URLs
    resumeText = re.sub(r'RT|cc', ' ', resumeText)          # remove RT and cc
    resumeText = re.sub(r'#\S+', '', resumeText)            # remove hashtags
    resumeText = re.sub(r'@\S+', ' ', resumeText)           # remove mentions
    resumeText = re.sub('[%s]' % re.escape(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""), ' ', resumeText)  # remove punctuation
    resumeText = re.sub(r'[^\x00-\x7f]', ' ', resumeText)   # remove non-ASCII characters
    resumeText = re.sub(r'\s+', ' ', resumeText)            # collapse whitespace
    return resumeText
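
# Illustrative example of the cleaning step (input is made up, not from the dataset):
#   cleanResume("Check https://example.com #python @user now")
#   -> "Check now"
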
# Load the labelled resume dataset and clean the raw resume text.
df = pd.read_csv('UpdatedResumeDataSet.csv')
df['cleaned'] = df['Resume'].apply(cleanResume)

# Encode the job-category labels as integers.
label = LabelEncoder()
df['Category'] = label.fit_transform(df['Category'])

text = df['cleaned'].values
target = df['Category'].values

# Fit a TF-IDF vectorizer on the cleaned resumes and train a
# one-vs-rest k-nearest-neighbours classifier on the resulting features.
word_vectorizer = TfidfVectorizer(
    sublinear_tf=True,
    stop_words='english',
    max_features=1500)
word_vectorizer.fit(text)
WordFeatures = word_vectorizer.transform(text)

model = OneVsRestClassifier(KNeighborsClassifier())
model.fit(WordFeatures, target)
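
# Optional sketch (not part of the original app): persist the fitted vectorizer
# and model with pickle so they are not retrained on every Streamlit rerun.
# The file names below are illustrative.
# with open('word_vectorizer.pkl', 'wb') as f:
#     pickle.dump(word_vectorizer, f)
# with open('model.pkl', 'wb') as f:
#     pickle.dump(model, f)
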
def pdf_to_text(file):
    # Use pdfminer.six to extract text from the uploaded PDF file.
    return extract_text(file)

def predict_category(resumes_data, selected_category):
    resumes_df = pd.DataFrame(resumes_data)
    resumes_features = word_vectorizer.transform(resumes_df['ResumeText'])
    predicted_probs = model.predict_proba(resumes_features)

    # Attach one probability column per job category; column i of predict_proba
    # corresponds to encoded label i, whose original name is label.classes_[i].
    for i, category in enumerate(label.classes_):
        resumes_df[category] = predicted_probs[:, i]

    # Rank resumes by their predicted probability for the selected category.
    resumes_df_sorted = resumes_df.sort_values(by=selected_category, ascending=False)
    ranks = []
    for rank, (idx, row) in enumerate(resumes_df_sorted.iterrows(), start=1):
        ranks.append({'Rank': rank, 'FileName': row['FileName']})
    return ranks
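
# predict_category returns a list of dicts, e.g. (file names illustrative):
#   [{'Rank': 1, 'FileName': 'alice.pdf'}, {'Rank': 2, 'FileName': 'bob.pdf'}]
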
def main():
    st.title("Resume Ranking App")
    st.text("Upload resumes and select a category to rank them.")

    resumes_data = []
    selected_category = ""

    # Handle multiple PDF uploads.
    files = st.file_uploader("Upload resumes", type=["pdf"], accept_multiple_files=True)
    if files:
        for file in files:
            text = cleanResume(pdf_to_text(file))
            resumes_data.append({'ResumeText': text, 'FileName': file.name})
        selected_category = st.selectbox("Select a category to rank by", label.classes_)

    if st.button("Rank Resumes"):
        if not resumes_data or not selected_category:
            st.warning("Please upload resumes and select a category to continue.")
        else:
            ranks = predict_category(resumes_data, selected_category)
            st.write(pd.DataFrame(ranks))


if __name__ == '__main__':
    main()