ibraheemmoosa committed on
Commit
9c00f86
·
1 Parent(s): a57ef9f

Update to latest test_model script

Browse files
Files changed (1) hide show
  1. prev-checkpoint/test_model.py +17 -4
prev-checkpoint/test_model.py CHANGED
from transformers import T5ForConditionalGeneration, T5TokenizerFast

# Load the fine-tuned T5 weights and the tokenizer from local checkpoint paths.
seq2seq = T5ForConditionalGeneration.from_pretrained('./pytorch_model')
tokenizer = T5TokenizerFast.from_pretrained('./')

# Bengali sample sentence used as the generation prompt.
text = 'বাংলার মুখ আমি দেখিয়াছি, তাই আমি পৃথিবীর রূপ খুঁজিতে যাই না'

# Tokenize, generate with default settings, and print the decoded batch.
encoded = tokenizer(text, return_tensors='pt')
generated = seq2seq.generate(encoded.input_ids)
print(tokenizer.batch_decode(generated))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
from transformers import T5ForConditionalGeneration, T5TokenizerFast

# Load the fine-tuned T5 checkpoint and its tokenizer from local paths.
t5_model = T5ForConditionalGeneration.from_pretrained('./pytorch_model')
tokenizer = T5TokenizerFast.from_pretrained('./')

# Bengali sample sentence used as the generation prompt.
text = 'বাংলার মুখ আমি দেখিয়াছি, তাই আমি পৃথিবীর রূপ খুঁজিতে যাই না'

tokenized = tokenizer(text, return_tensors='pt')
input_ids = tokenized.input_ids
attention_mask = tokenized.attention_mask

# Switch off dropout etc. — this script is inference-only.
t5_model.eval()

# no_grad: generation needs no gradients; skipping autograd bookkeeping
# avoids holding activations in memory for a pure-inference run.
with torch.no_grad():
    beam_outputs = t5_model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        max_length=64,
        early_stopping=True,
        num_beams=10,
        num_return_sequences=3,
        no_repeat_ngram_size=2,
    )

# Decode and print each of the 3 returned beam hypotheses.
for beam_output in beam_outputs:
    sent = tokenizer.decode(
        beam_output,
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True,
    )
    print(sent)