Hemanth-thunder committed
Commit bbd6db5
1 Parent(s): 8e3f470

added prompt template

Files changed (1)
  1. app.py +13 -2
app.py CHANGED
@@ -5,11 +5,22 @@ model_name = "Hemanth-thunder/Tamil-Mistral-7B-Instruct-v0.1"
 model_file = "tamil-mistral-7b-instruct-v0.1.Q4_K_M.gguf"
 
 model_path = hf_hub_download(model_name, filename=model_file)
+
+prompt = """<s> சரியான பதிலுடன் வேலையை வெற்றிகரமாக முடிக்க. தேவையான தகவலை உள்ளிடவும்.
+
+### Instruction:
+{}
+
+### Response:
+"""
+
+
 llm = AutoModelForCausalLM.from_pretrained(model_name, model_file=model_file,
                                            model_type="mistral", gpu_layers=0)
 def alternatingly_agree(message, history):
-    result = llm(message)
-    return result
+    prompt = prompt.format(message)
+    result = llm(message,max_new_tokens=50,temperature=0.7)
+    return result #stream=True
 
 
 gr.ChatInterface(alternatingly_agree).launch()
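
For reference, the Tamil system line in the new template translates roughly as "Complete the task successfully with the correct answer. Enter the required information." Note that, as committed, the template is never actually applied: inside alternatingly_agree the assignment prompt = prompt.format(message) rebinds prompt as a local name before it is read (an UnboundLocalError at call time), and the raw message rather than the formatted prompt is passed to llm. The following is a minimal sketch of how the file could wire the template in; the imports are assumptions, since the first four lines of app.py are not shown in this hunk (gradio, huggingface_hub, and ctransformers' AutoModelForCausalLM, which the model_type/gpu_layers arguments suggest).

    # Sketch of app.py with the prompt template wired into the chat function.
    # Imports are assumed; the hunk above does not show the top of the file.
    import gradio as gr
    from huggingface_hub import hf_hub_download
    from ctransformers import AutoModelForCausalLM

    model_name = "Hemanth-thunder/Tamil-Mistral-7B-Instruct-v0.1"
    model_file = "tamil-mistral-7b-instruct-v0.1.Q4_K_M.gguf"

    # Pre-fetch the GGUF file into the local cache (the commit computes this
    # path but does not use it further; from_pretrained also downloads the file).
    model_path = hf_hub_download(model_name, filename=model_file)

    PROMPT_TEMPLATE = """<s> சரியான பதிலுடன் வேலையை வெற்றிகரமாக முடிக்க. தேவையான தகவலை உள்ளிடவும்.

    ### Instruction:
    {}

    ### Response:
    """

    llm = AutoModelForCausalLM.from_pretrained(model_name, model_file=model_file,
                                               model_type="mistral", gpu_layers=0)

    def alternatingly_agree(message, history):
        # Fill the template with the user message and pass the *formatted* prompt
        # to the model, instead of rebinding `prompt` and sending the raw message.
        full_prompt = PROMPT_TEMPLATE.format(message)
        return llm(full_prompt, max_new_tokens=50, temperature=0.7)

    gr.ChatInterface(alternatingly_agree).launch()

If the #stream=True hint is meant to enable token streaming, ctransformers can yield text pieces via llm(full_prompt, stream=True, ...), and gr.ChatInterface accepts a generator function that yields the partially accumulated reply instead of returning it once.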