Update app.py
app.py CHANGED
```diff
@@ -2,7 +2,7 @@ import torch
 from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-peft_model_id = f"alimrb/
+peft_model_id = f"alimrb/eff24"
 config = PeftConfig.from_pretrained(peft_model_id)
 model = AutoModelForCausalLM.from_pretrained(
     config.base_model_name_or_path,
@@ -14,9 +14,9 @@ tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
 # Load the Lora model
 model = PeftModel.from_pretrained(model, peft_model_id)
 
-def make_inference(
+def make_inference(question, answer):
     batch = tokenizer(
-        f"### 
+        f"### Question:\n{question}\n\n### Answer:",
         return_tensors="pt",
     )
 
@@ -35,10 +35,9 @@ if __name__ == "__main__":
     gr.Interface(
         make_inference,
         [
-            gr.Textbox(lines=2, label="
-            gr.Textbox(lines=5, label="Product Description"),
+            gr.Textbox(lines=2, label="Question"),
         ],
-        gr.Textbox(label="
+        gr.Textbox(label="Answer"),
         title="EFF24",
-        description="EFF24 is a generative model that generates 
+        description="EFF24 is a generative model that generates Answers for Questions."
     ).launch()
```
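For context, here is a minimal sketch of what the full app.py plausibly looks like after this commit. The removed lines are truncated in the diff view above, and the unchanged regions the diff hides (lines 9-13 and 23-34) are assumptions: the extra `from_pretrained` kwargs, the `generate` call, and the decoding step are typical PEFT-plus-Gradio boilerplate, not confirmed by the diff. Note also that the committed `def make_inference(question, answer):` declares two parameters while the `gr.Interface` inputs list supplies only one textbox; the sketch uses a single `question` parameter so the interface actually runs.

```python
import torch
import gradio as gr  # assumed to be imported somewhere in the hidden lines
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "alimrb/eff24"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    # Hidden by the diff (lines 9-13): kwargs such as load_in_8bit=True or
    # device_map="auto" are common in this pattern but are not confirmed.
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the LoRA adapter on top of the base model
model = PeftModel.from_pretrained(model, peft_model_id)

def make_inference(question):  # one parameter, matching the single input textbox
    batch = tokenizer(
        f"### Question:\n{question}\n\n### Answer:",
        return_tensors="pt",
    )
    # Hidden by the diff (lines 23-34): the generation and decoding steps,
    # assumed here to be a plain generate/decode pair.
    with torch.no_grad():
        output_tokens = model.generate(**batch, max_new_tokens=100)
    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

if __name__ == "__main__":
    gr.Interface(
        make_inference,
        [
            gr.Textbox(lines=2, label="Question"),
        ],
        gr.Textbox(label="Answer"),
        title="EFF24",
        description="EFF24 is a generative model that generates Answers for Questions.",
    ).launch()
```

On CPU the sketch runs as written; the commented-out quantization and device-placement kwargs only matter when a GPU is available.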