# Source: Hugging Face Space "app.py" by SujanMidatani (commit 0f80ca2, 1.11 kB)
# Generates interview questions from a resume via LangChain + OpenAI, served with Gradio.
from langchain.chains import LLMChain
from langchain.llms import OpenAI
import gradio as gr
from dotenv import load_dotenv
from langchain.prompts.prompt import PromptTemplate
load_dotenv()
def generate_questions(resume, role='', experience='', num_questions=10):
    """Generate interview questions tailored to a resume, target role, and experience.

    Args:
        resume: Free-text resume details for the candidate.
        role: The role the candidate wants to join (may be empty).
        experience: The candidate's experience summary (may be empty).
        num_questions: How many questions to ask the model for (default 10,
            matching the previously hard-coded value).

    Returns:
        A list of strings — the model's completion split on newlines.
        NOTE(review): requires OPENAI_API_KEY in the environment (loaded via
        dotenv at module import); raises from the OpenAI client otherwise.
    """
    _PROMPT_TEMPLATE = """
this is the resume of user:
{resume_details}
here is the role he want to join in :
{role}
Based on the following experience:
{experience}
What are your interview questions for the given user resume and role he want to join in with that experience?
generate no of questions = {questions}!
"""
    PROMPT = PromptTemplate(
        input_variables=["resume_details", "role", "experience", "questions"],
        template=_PROMPT_TEMPLATE,
    )
    # temperature=0 keeps the question list deterministic for a given resume.
    llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
    chain = LLMChain(llm=llm1, prompt=PROMPT)
    # predict() replaces the deprecated predict_and_parse(); with no output
    # parser configured on the prompt the two return the same raw string.
    completion = chain.predict(
        resume_details=resume,
        role=role,
        experience=experience,
        questions=num_questions,
    )
    return completion.split('\n')
# Wire the generator into a simple Gradio UI: three text inputs map
# positionally to (resume, role, experience); output is the question list.
k = gr.Interface(
    fn=generate_questions,
    inputs=['textbox', 'text', 'text'],
    outputs=['text'],
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server; Spaces executes app.py directly, so behavior there
# is unchanged.
if __name__ == "__main__":
    k.launch()