>>> from transformers import GPTNeoForCausalLM, GPT2Tokenizer
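>>> # Load the pretrained GPT-Neo 1.3B model and its GPT-2 BPE tokenizer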
>>> model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
>>> tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

>>> prompt = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
...          "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
...          "researchers was the fact that the unicorns spoke perfect English."

>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

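>>> # Sample a continuation with temperature sampling, up to 100 tokens total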
>>> gen_tokens = model.generate(
...     input_ids,
...     do_sample=True,
...     temperature=0.9,
...     max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]