SujanMidatani committed on
Commit
5d29764
1 Parent(s): 8aba350

update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -24
app.py CHANGED
@@ -3,8 +3,8 @@ import gradio as gr
3
  from langchain.chat_models import ChatOpenAI
4
  from kor import create_extraction_chain, Object, Text
5
  from langchain.prompts.prompt import PromptTemplate
6
- from langchain.chains import LLMChain
7
- from langchain.llms import OpenAI
8
 
9
  from dotenv import load_dotenv
10
 
@@ -21,29 +21,29 @@ def gen_text(pdf_file):
21
 
22
 
23
 
24
- def generate_questions(resume,role='',experience=''):
25
- _PROMPT_TEMPLATE = """
26
- this is the resume of user:
27
- {resume_details}
28
 
29
- here is the role he want to join in :
30
- {role}
31
 
32
- Based on the following experience:
33
- {experience}
34
 
35
- What are your interview questions for the given user resume and role he want to join in with that experience?
36
- generate no of questions = {questions}!
37
- """
38
- PROMPT = PromptTemplate(input_variables=["resume_details", "role", "experience",'questions'], template=_PROMPT_TEMPLATE)
39
 
40
- llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
41
- chain = LLMChain(llm=llm1, prompt=PROMPT)
42
- prompt = chain.predict_and_parse(resume_details= gen_text(resume),
43
- role= role,
44
- experience= experience,
45
- questions=10)
46
- return prompt.split('\n')
47
  def context_extracter(text):
48
 
49
  llm = ChatOpenAI(
@@ -115,10 +115,11 @@ def context_extracter(text):
115
  # chain = LLMChain(llm=llm1, prompt=PROMPT)
116
  chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
117
  return chain.predict_and_parse(text=text)['data']
 
118
  k=gr.Interface(
119
- fn=generate_questions,
120
- inputs=['file','text','text'],
121
- outputs=['text']
122
  )
123
  k.launch()
124
 
 
3
  from langchain.chat_models import ChatOpenAI
4
  from kor import create_extraction_chain, Object, Text
5
  from langchain.prompts.prompt import PromptTemplate
6
+ # from langchain.chains import LLMChain
7
+ # from langchain.llms import OpenAI
8
 
9
  from dotenv import load_dotenv
10
 
 
21
 
22
 
23
 
24
+ # def generate_questions(resume,role='',experience=''):
25
+ # _PROMPT_TEMPLATE = """
26
+ # this is the resume of user:
27
+ # {resume_details}
28
 
29
+ # here is the role he want to join in :
30
+ # {role}
31
 
32
+ # Based on the following experience:
33
+ # {experience}
34
 
35
+ # What are your interview questions for the given user resume and role he want to join in with that experience?
36
+ # generate no of questions = {questions}!
37
+ # """
38
+ # PROMPT = PromptTemplate(input_variables=["resume_details", "role", "experience",'questions'], template=_PROMPT_TEMPLATE)
39
 
40
+ # llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
41
+ # chain = LLMChain(llm=llm1, prompt=PROMPT)
42
+ # prompt = chain.predict_and_parse(resume_details= gen_text(resume),
43
+ # role= role,
44
+ # experience= experience,
45
+ # questions=10)
46
+ # return prompt.split('\n')
47
  def context_extracter(text):
48
 
49
  llm = ChatOpenAI(
 
115
  # chain = LLMChain(llm=llm1, prompt=PROMPT)
116
  chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
117
  return chain.predict_and_parse(text=text)['data']
118
+
119
  k=gr.Interface(
120
+ fn=gen_text,
121
+ inputs=['file'],
122
+ outputs=['json']
123
  )
124
  k.launch()
125