mkocian committed
Commit
9cc7a78
1 Parent(s): 49736c4

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -17,8 +17,8 @@ fake_sentence = "Za hory, za doly, kočka zlaté parohy"
 
 fake_sentence_tokens = ["[CLS]"] + tokenizer.tokenize(fake_sentence) + ["[SEP]"]
 fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
-discriminator_outputs = discriminator(fake_inputs)
-predictions = torch.nn.Sigmoid()(discriminator_outputs[0]).cpu().detach().numpy()
+outputs = discriminator(fake_inputs)
+predictions = torch.nn.Sigmoid()(outputs[0]).cpu().detach().numpy()
 
 for token in fake_sentence_tokens:
     print("{:>7s}".format(token), end="")