File size: 4,521 Bytes
7a8431c
 
 
 
 
 
 
 
c7d68d0
7a8431c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ecc5ce5
7a8431c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ecc5ce5
7a8431c
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from PyPDF2 import PdfReader
import gradio as gr
from langchain.chat_models import ChatOpenAI
from kor import create_extraction_chain, Object, Text
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import os

# SECURITY: a previous revision hard-coded a live OpenAI API key here.
# Never embed secrets in source (they leak via version control); require the
# key to be supplied through the environment and fail fast with a clear
# message if it is missing.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "Set the OPENAI_API_KEY environment variable before running this app."
    )
def gen_text(pdf_file):
    """Extract all text from an uploaded PDF and run resume extraction on it.

    Parameters
    ----------
    pdf_file : file-like object with a ``.name`` attribute (e.g. a Gradio
        file upload) pointing at a PDF on disk.

    Returns
    -------
    The structured resume attributes produced by ``context_extracter``.
    """
    with open(pdf_file.name, "rb") as f:
        reader = PdfReader(f)
        # ''.join avoids quadratic string concatenation; extract_text() can
        # return None for image-only pages, hence the `or ""` guard.
        text = "".join((page.extract_text() or "") for page in reader.pages)
    return context_extracter(text)
    

    
def generate_questions(resume, role='', experience=''):
    """Generate a list of interview questions for a candidate.

    Parameters
    ----------
    resume : uploaded PDF file object (passed through to ``gen_text``).
    role : str
        The position the candidate is applying for.
    experience : str
        The candidate's experience level/description.

    Returns
    -------
    list[str]
        The LLM completion split on newlines — roughly one question per
        element (exact formatting depends on the model output).
    """
    _PROMPT_TEMPLATE = """
    this is the resume of user:
    {resume_details}

    here is the role he want to join in :
    {role}

    Based on the following experience:
    {experience}

    What are your interview  questions for the given user resume and role he want to join in with that experience?
    generate no of  questions = {questions}!
    """
    PROMPT = PromptTemplate(
        input_variables=["resume_details", "role", "experience", "questions"],
        template=_PROMPT_TEMPLATE,
    )

    llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
    chain = LLMChain(llm=llm1, prompt=PROMPT)
    # LLMChain.predict_and_parse is deprecated and, with no output parser
    # configured on the prompt, is equivalent to predict(); call predict()
    # directly. Question count is fixed at 10 by design.
    completion = chain.predict(
        resume_details=gen_text(resume),
        role=role,
        experience=experience,
        questions=10,
    )
    return completion.split('\n')
def context_extracter(text):
    """Run a Kor extraction chain over raw resume text.

    Parameters
    ----------
    text : str
        The full plain-text contents of a resume.

    Returns
    -------
    The ``'data'`` payload of the chain's parsed result: the structured
    resume attributes described by the schema below.
    """
    chat_llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        max_tokens=1900,
        frequency_penalty=0,
        presence_penalty=0,
        top_p=1.0,
    )

    # (attribute id, human-readable description) pairs for every resume
    # section the extractor should pull out. Each becomes a repeated (many)
    # free-text attribute with no few-shot examples.
    resume_sections = [
        ("summary_or_objective",
         "A brief overview of the candidate's professional background, skills, and career goals"),
        ("work_experience",
         "Details of previous employment positions, including job titles, company names, employment dates, and a description of responsibilities and achievements for each role "),
        ("education",
         "Information about the candidate's educational qualifications, including degrees, certificates, and the names of institutions attended"),
        ("skills",
         "A section highlighting the candidate's relevant skills, such as technical skills, languages spoken, software proficiency, or specific tools used"),
        ("achievements_or_awards",
         "Any notable achievements, awards, or recognition received by the candidate during their education or career."),
        ("certifications_or_licenses",
         "Information about any professional certifications or licenses held by the candidate that are relevant to the desired position"),
        ("projects",
         "Details of significant projects the candidate has worked on, including a brief description, their role, and any notable outcomes"),
        ("publications_or_presentations",
         " If applicable, a list of publications or presentations the candidate has authored or delivered, including the titles, dates, and locations"),
    ]

    schema = Object(
        id="interviewer",
        description=(
            "interviewer is examining resume text and should produce set of attributes which represents that person by his resume"
        ),
        attributes=[
            Text(id=section_id, description=section_desc, examples=[], many=True)
            for section_id, section_desc in resume_sections
        ],
        many=True,
    )

    extraction_chain = create_extraction_chain(
        chat_llm, schema, encoder_or_encoder_class='json'
    )
    return extraction_chain.predict_and_parse(text=text)['data']
# Gradio UI: a file upload (resume PDF) plus two text boxes (role,
# experience), wired to generate_questions; output is plain text.
k = gr.Interface(
    fn=generate_questions,
    inputs=['file', 'text', 'text'],
    outputs=['text'],
)

if __name__ == "__main__":
    # Launch only when run as a script, so importing this module for reuse
    # (or testing) does not start a web server.
    k.launch()