from PyPDF2 import PdfReader
import gradio as gr
from langchain.chat_models import ChatOpenAI
from kor import create_extraction_chain, Object, Text
from langchain.prompts.prompt import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.llms import OpenAI
from dotenv import load_dotenv

load_dotenv()
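
# gen_text: read the uploaded PDF with PyPDF2, concatenate the text of every
# page, and hand the result to context_extracter for structured extraction.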
def gen_text(pdf_file):
    with open(pdf_file.name, "rb") as f:
        reader = PdfReader(f)
        num_pages = len(reader.pages)
        text = ""
        for page in reader.pages:
            text += page.extract_text()
        print(text)
    constraints = context_extracter(text)
    return constraints
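
# The commented-out block below is an earlier prototype that generated
# interview questions from the resume with a plain OpenAI completion model;
# it is not wired into the app.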
# def generate_questions(resume, role='', experience=''):
#     _PROMPT_TEMPLATE = """
#     this is the resume of the user:
#     {resume_details}
#     here is the role they want to join:
#     {role}
#     Based on the following experience:
#     {experience}
#     What are your interview questions for the given resume, the role they want to join, and that experience?
#     generate number of questions = {questions}!
#     """
#     PROMPT = PromptTemplate(
#         input_variables=["resume_details", "role", "experience", "questions"],
#         template=_PROMPT_TEMPLATE,
#     )
#     llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
#     chain = LLMChain(llm=llm1, prompt=PROMPT)
#     prompt = chain.predict_and_parse(
#         resume_details=gen_text(resume),
#         role=role,
#         experience=experience,
#         questions=10,
#     )
#     return prompt.split('\n')
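
# context_extracter: build a Kor extraction schema covering the main resume
# sections and run it over the raw PDF text with the chat model below.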
def context_extracter(text):
    # gpt-3.5-turbo-16k provides enough context window for long resume text.
    # top_p / frequency_penalty / presence_penalty are not first-class
    # ChatOpenAI fields, so they are routed through model_kwargs.
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo-16k",
        temperature=0,
        max_tokens=2000,
        model_kwargs={
            "frequency_penalty": 0,
            "presence_penalty": 0,
            "top_p": 1.0,
        },
    )
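    # Each Text attribute below corresponds to one resume section; many=True
    # tells Kor that an attribute may hold multiple extracted values.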
    schema = Object(
        id="interviewer",
        description=(
            "An interviewer examining resume text; produces the set of attributes "
            "that represent the candidate based on their resume."
        ),
        attributes=[
            Text(
                id="summary_or_objective",
                description="A brief overview of the candidate's professional background, skills, and career goals",
                examples=[],
                many=True,
            ),
            Text(
                id="work_experience",
                description="Details of previous employment positions, including job titles, company names, employment dates, and a description of responsibilities and achievements for each role",
                examples=[],
                many=True,
            ),
            Text(
                id="education",
                description="Information about the candidate's educational qualifications, including degrees, certificates, and the names of institutions attended",
                examples=[],
                many=True,
            ),
            Text(
                id="skills",
                description="A section highlighting the candidate's relevant skills, such as technical skills, languages spoken, software proficiency, or specific tools used",
                examples=[],
                many=True,
            ),
            Text(
                id="achievements_or_awards",
                description="Any notable achievements, awards, or recognition received by the candidate during their education or career",
                examples=[],
                many=True,
            ),
            Text(
                id="certifications_or_licenses",
                description="Information about any professional certifications or licenses held by the candidate that are relevant to the desired position",
                examples=[],
                many=True,
            ),
            Text(
                id="projects",
                description="Details of significant projects the candidate has worked on, including a brief description, their role, and any notable outcomes",
                examples=[],
                many=True,
            ),
            Text(
                id="publications_or_presentations",
                description="If applicable, a list of publications or presentations the candidate has authored or delivered, including the titles, dates, and locations",
                examples=[],
                many=True,
            ),
        ],
        many=True,
    )
    # chain = LLMChain(llm=llm1, prompt=PROMPT)
    chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
    return chain.run(text=text)['data']
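
# chain.run(...)['data'] yields a dict keyed by the schema id, roughly:
#   {"interviewer": [{"summary_or_objective": [...], "work_experience": [...], ...}]}
# (illustrative shape; the exact keys present depend on what the model extracts)
# Minimal Gradio UI: a single PDF file upload in, the extracted JSON out.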
k = gr.Interface(
    fn=gen_text,
    inputs=['file'],
    outputs=['json'],
)

k.launch()
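
# To run locally (assuming this file is saved as app.py, the usual entry point
# for a Hugging Face Space, and OPENAI_API_KEY is set in .env):
#   python app.py
# Gradio prints a local URL where a resume PDF can be uploaded for extraction.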