Update README.md
README.md CHANGED
@@ -26,9 +26,9 @@ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)

-def generate_answer(question, context, max_length=
+def generate_answer(question, context, max_length=512):
     input_text = f"question: {question} context: {context}"
-    inputs = tokenizer(input_text, return_tensors="pt", max_length=
+    inputs = tokenizer(input_text, return_tensors="pt", max_length=max_length, truncation=True, padding="max_length").to(device)

     outputs = model.generate(
         **inputs,
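The hunk above cuts off inside the `model.generate(` call, so the rest of the updated snippet is not visible in this diff. Below is a minimal sketch of how the full function could look after the change; the value of `model_name`, the generation arguments past `**inputs,`, and the decoding/return step are assumptions for illustration, not taken from the commit.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Assumption: any seq2seq QA checkpoint works here; the commit does not show model_name's value.
model_name = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def generate_answer(question, context, max_length=512):
    # Build the "question: ... context: ..." prompt and tokenize it,
    # truncating/padding to max_length as in the updated README.
    input_text = f"question: {question} context: {context}"
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        max_length=max_length,
        truncation=True,
        padding="max_length",
    ).to(device)

    # Assumption: the commit's actual generate() arguments beyond **inputs are not shown.
    outputs = model.generate(
        **inputs,
        max_new_tokens=128,
    )
    # Decode the first (and only) sequence into plain text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```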