update tokenizer in example
README.md CHANGED

@@ -71,7 +71,7 @@ import torch
 from transformers import T5ForConditionalGeneration, T5Tokenizer
 
 model = T5ForConditionalGeneration.from_pretrained("MarioBarbeque/CyberSolve-LinAlg-1.2").to("cuda")
-tokenizer =
+tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large") # CyberSolve uses the same tokenizer as the base FLAN-T5 model
 
 # Pass the model instruction to solve a linear equation in the following simple format
 input_text = "Solve 24 = 1601*c - 1605*c for c."
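For context, here is a minimal sketch of how the README's usage example could continue after the updated tokenizer line. The generation call and the `max_new_tokens` budget are assumptions for illustration, not part of this diff:

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

model = T5ForConditionalGeneration.from_pretrained("MarioBarbeque/CyberSolve-LinAlg-1.2").to("cuda")
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")  # CyberSolve uses the same tokenizer as the base FLAN-T5 model

# Pass the model instruction to solve a linear equation in the following simple format
input_text = "Solve 24 = 1601*c - 1605*c for c."

# Tokenize the prompt and move the tensors to the same device as the model
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")

# Greedy decoding; the token budget here is an assumption, not taken from the README
outputs = model.generate(input_ids, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```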