from gliner import GLiNER import re import fitz import gradio as gr model = GLiNER.from_pretrained("gliner-community/gliner_large-v2.5", load_tokenizer=True) def clean_text(text): # Remove all escape characters cleaned_text = re.sub(r'[\n\r\t\f\v]', ' ', text) # Remove any other non-printable characters cleaned_text = re.sub(r'[^\x20-\x7E]', '', cleaned_text) # Replace multiple spaces with a single space cleaned_text = re.sub(r'\s+', ' ', cleaned_text) # Strip leading and trailing whitespace cleaned_text = cleaned_text.strip() return cleaned_text def pdf2text(file_path): with fitz.open(file_path) as doc: text = "" for page in doc: text += page.get_text() return clean_text(text) def ner(text, labels, threshold) : labels = labels.split(",") labels = [label.strip() for label in labels] print(labels) return { "text": text, "entities": [ { "entity": entity["label"], "word": entity["text"], "start": entity["start"], "end": entity["end"], "score": 0, } for entity in model.predict_entities( text, labels, flat_ner=True, threshold=threshold ) ], } def parser(file_path, labels, threshold): text = pdf2text(file_path) return ner(text, labels, threshold) # Define a custom CSS style custom_css = """ body { background-color: #f0f8ff; font-family: 'Arial', sans-serif; } .container { margin: auto; padding: 20px; border-radius: 10px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); } h1 { color: #3d1ad9; text-align: center; } #file_upload { display: flex; justify-content: center; margin-bottom: 20px; } """ with gr.Blocks(css=custom_css) as demo: #home page with gr.Tab("Home"): # Title and Description gr.Markdown(""" # 📝 **Resume Parser with GLiNER Model** 📝 **Welcome!** This project is a **resume parser** designed to make the process of extracting important information from resumes easy and efficient. It uses a **generalized approach** to recognize and parse key details like **name**, **email**, **profession**, and more from different resume formats. By leveraging the **GLiNER model** for **Named Entity Recognition (NER)** and the power of **Hugging Face**, this tool ensures accurate and fast resume data extraction. It's perfect for **recruiters**, **HR professionals**, and **data analysts** who want to streamline their workflows. """) # Use Cases Section gr.Markdown(""" ## 🎯 **Use Cases**: 1. **Recruiters**: Automatically extract and store candidate information from resumes. 2. **HR Professionals**: Parse bulk resumes to organize important information quickly. 3. **Data Analysts**: Analyze trends in candidate attributes for better decision-making. 4. **Job Boards**: Categorize and structure resume submissions automatically. """, elem_classes=["use-cases"]) # Technologies Used Section gr.Markdown(""" ## 💻 **Technologies Used**: - **Python** 🐍: Backend logic and automation. - **Hugging Face** 🤗: Model hosting and integration. - **GLiNER** 🧠: Named Entity Recognition model used to extract structured data. """, elem_classes=["technologies"]) # Another Image Placeholder for Technologies Used #gr.Image("technologies_image_here.png", label="Technologies Used") # Footer or Additional Information gr.Markdown(""" ### 🌟 **Additional Information**: This project is scalable and can be integrated into various systems like **ATS (Applicant Tracking Systems)**, job boards, and recruitment platforms. It's designed to handle **diverse resume formats** and ensures that all critical information is captured with **high accuracy**. Start **automating your workflow** and let this parser do the heavy lifting! 🚀 """, elem_classes=["additional-info"]) # app page with gr.Tab("Resume Parser"): gr.HTML("
This application extracts important data from your resume using innovative NLP methods. This tool's key advantage is that, in contrast to conventional resume parsers, it is generalized(Thanks to GLiNER team), meaning it functions in accordance with your needs. Simply enter the labels (NER) that you wish to extract, then adjust the threshold and submit the resume. Magic will happen in a few seconds.
") with gr.Row() as row: labels = gr.Textbox( label="Labels", placeholder="Enter your labels here (comma separated)", scale=2, ) threshold = gr.Slider( 0, 1, value=0.3, step=0.01, label="Threshold", info="Lower the threshold to increase how many entities get predicted.", scale=0, ) with gr.Row(): file_input = gr.File(label="Upload Resume", file_types=['.pdf'], elem_id="file_upload" ) with gr.Row(): parse_button = gr.Button("Parse Resume") with gr.Row(): output = gr.HighlightedText(label="Parsed Resume", combine_adjacent=True ) parse_button.click(fn=parser, inputs=[file_input,labels, threshold], outputs=output) gr.HTML("Our resume parser can identify and extract important details such as personal information, education, work experience, skills, and more. Simply upload your resume and let our AI do the work!
") # contact us with gr.Tab("Contact"): gr.Markdown(""" # 📧 **Contact Krish Goyani** 📧 I am happy to accept your feedback and suggestions! Feel free to reach out using the details below. """) # Contact Information with gr.Row(): # Changed from gr.Box to gr.Row gr.Markdown(""" ## 🧑 **Krish Goyani** - **Email**: goyani@duck.com - **Portfolio**: [Krish Goyani](https://www.datascienceportfol.io/Krish_Goyani) - **LinkedIn**: [Krish Goyani](https://www.linkedin.com/in/krish-goyani/) - **GitHub**: [github.com/krish-goyani](https://github.com/Krish-Goyani) """) # Message gr.Markdown(""" Thank you for visiting my page. I'm always open to hearing from you. Feel free to share any suggestions or feedback, and I'll get back to you as soon as possible! ✨ """) # Launch the interface demo.queue() demo.launch(share=True, debug=True)