KvrParaskevi committed
Commit 85a3c90 · verified · 1 Parent(s): 1b9d0f6

Update app.py

Files changed (1)
  1. app.py +18 -4
app.py CHANGED
@@ -1,5 +1,5 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import AutoModelForCausalLLM, AutoTokenizer
  from langchain.llms.base import LLM
  from langchain import PromptTemplate, LLMChain

@@ -23,9 +23,23 @@ class CustomLLM(LLM):

  llm = CustomLLM()

- template = """Question: {question}
- Answer: Let's think step by step."""
- prompt = PromptTemplate(template=template, input_variables=["question"])
+ template = """<<SYS>>
+ You are an AI having a conversation with a human. Below is an instruction that describes a task.
+ Write a response that appropriately completes the request.
+ Reply with the most helpful and logical answer. During the conversation you need to ask the user
+ the following questions to complete the hotel booking task.
+ 1) Where would you like to stay and when?
+ 2) How many people are staying in the room?
+ 3) Do you prefer any amenities like breakfast included or gym?
+ 4) What is your name, your email address and phone number?
+ Make sure you receive a logical answer from the user to every question to complete the hotel
+ booking process.
+ <</SYS>>
+ Previous conversation:
+ {history}
+ Human: {input}
+ AI:"""
+ prompt = PromptTemplate(template=template, input_variables=["history", "input"])

  llm_chain = LLMChain(prompt=prompt, llm=llm)
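
Note on the change: the new template expects both a {history} and an {input} variable, so the chain only produces sensible turns if something supplies the running conversation as history; the diff itself still builds LLMChain(prompt=prompt, llm=llm) with no memory attached, and neither the body of CustomLLM._call nor the model checkpoint is visible in this commit. The sketch below is therefore only an illustration of how the pieces could fit together, under stated assumptions: the model id, the generation settings, the condensed template text, and the use of ConversationBufferMemory are not taken from app.py, and the import uses AutoModelForCausalLM, the class name transformers actually provides.

# Sketch only: the commit does not show CustomLLM._call or the model checkpoint,
# so the model id, generation settings, and _call body below are assumptions.
from typing import List, Optional

from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.llms.base import LLM
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory

MODEL_ID = "meta-llama/Llama-2-7b-chat-hf"  # placeholder checkpoint, not taken from the diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)

class CustomLLM(LLM):
    # Minimal LangChain wrapper; the real _call in app.py is not visible in this diff.

    @property
    def _llm_type(self) -> str:
        return "custom-hf-causal-lm"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        inputs = tokenizer(prompt, return_tensors="pt")
        output_ids = model.generate(**inputs, max_new_tokens=256)
        # Decode only the newly generated tokens, not the echoed prompt.
        new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)

# Condensed stand-in for the system prompt added in this commit.
template = """<<SYS>> You are an AI assistant guiding a human through a hotel booking. <</SYS>>
Previous conversation:
{history}
Human: {input}
AI:"""
prompt = PromptTemplate(template=template, input_variables=["history", "input"])

llm = CustomLLM()
# ConversationBufferMemory fills the {history} slot on every turn,
# so each booking question can build on the user's earlier answers.
memory = ConversationBufferMemory(memory_key="history")
llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)

print(llm_chain.predict(input="Hi, I need a room in Athens for next weekend."))

Without a memory object (or an explicit history value passed at call time), LLMChain will refuse to run this prompt, since "history" would be a missing input key.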