alimrb committed on
Commit
c416b4a
1 Parent(s): 1e36d4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -7
app.py CHANGED
@@ -2,7 +2,7 @@ import torch
2
  from peft import PeftModel, PeftConfig
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
- peft_model_id = f"alimrb/eff24_farsi"
6
  config = PeftConfig.from_pretrained(peft_model_id)
7
  model = AutoModelForCausalLM.from_pretrained(
8
  config.base_model_name_or_path,
@@ -14,9 +14,9 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
14
  # Load the Lora model
15
  model = PeftModel.from_pretrained(model, peft_model_id)
16
 
17
- def make_inference(product_name, product_description):
18
  batch = tokenizer(
19
- f"### Product and Description:\n{product_name}: {product_description}\n\n### Ad:",
20
  return_tensors="pt",
21
  )
22
 
@@ -35,10 +35,9 @@ if __name__ == "__main__":
35
  gr.Interface(
36
  make_inference,
37
  [
38
- gr.Textbox(lines=2, label="Product Name"),
39
- gr.Textbox(lines=5, label="Product Description"),
40
  ],
41
- gr.Textbox(label="Ad"),
42
  title="EFF24",
43
- description="EFF24 is a generative model that generates ads for products."
44
  ).launch()
 
2
  from peft import PeftModel, PeftConfig
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
+ peft_model_id = f"alimrb/eff24"
6
  config = PeftConfig.from_pretrained(peft_model_id)
7
  model = AutoModelForCausalLM.from_pretrained(
8
  config.base_model_name_or_path,
 
14
  # Load the Lora model
15
  model = PeftModel.from_pretrained(model, peft_model_id)
16
 
17
+ def make_inference(question, answer):
18
  batch = tokenizer(
19
+ f"### Question:\n{question}\n\n### Answer:",
20
  return_tensors="pt",
21
  )
22
 
 
35
  gr.Interface(
36
  make_inference,
37
  [
38
+ gr.Textbox(lines=2, label="Question"),
 
39
  ],
40
+ gr.Textbox(label="Answer"),
41
  title="EFF24",
42
+ description="EFF24 is a generative model that generates Answers for Questions."
43
  ).launch()