Generation config is not necessary
Browse files
README.md
CHANGED
@@ -452,11 +452,7 @@ tokenizer = T5Tokenizer.from_pretrained('jbochi/madlad400-10b-mt')
|
|
452 |
text = "<2pt> I love pizza!"
|
453 |
input_ids = tokenizer(text, return_tensors="pt").input_ids
|
454 |
|
455 |
-
outputs = model.generate(
|
456 |
-
input_ids=input_ids,
|
457 |
-
generation_config=GenerationConfig(
|
458 |
-
decoder_start_token_id=0,
|
459 |
-
))
|
460 |
|
461 |
tokenizer.decode(outputs[0], skip_special_tokens=True)
|
462 |
# Eu amo pizza!
|
|
|
452 |
text = "<2pt> I love pizza!"
|
453 |
input_ids = tokenizer(text, return_tensors="pt").input_ids
|
454 |
|
455 |
+
outputs = model.generate(input_ids=input_ids)
|
|
|
|
|
|
|
|
|
456 |
|
457 |
tokenizer.decode(outputs[0], skip_special_tokens=True)
|
458 |
# Eu amo pizza!
|