ierhon committed
Commit c323e5c · 1 Parent(s): 4fa94e0

Add verbose setting to generate

Files changed (1):
  1. test.py +2 -3
test.py CHANGED
@@ -1,4 +1,3 @@
-import json # TODO: use the responses file after it's done instead of loading the dataset again
 import numpy as np
 from keras.saving import load_model
 from keras.preprocessing.text import Tokenizer
@@ -17,10 +16,10 @@ model = load_model("chatbot.keras", custom_objects={"SeqSelfAttention": SeqSelfA
 def find_line_number(array):
     return sorted(zip(list(array), [x for x in range(len(array))]), key=lambda x:x[0], reverse=True)[0][1] # yeah, one big line, find the biggest value and return the number of the line
 
-def generate(text):
+def generate(text, verbose=1):
     tokens = list(tokenizer.texts_to_sequences([text,])[0]) # text into tokens (almost words)
     tokens = (tokens+[0,]*inp_len)[:inp_len] # cutting off the sentence after inp_len words
-    prediction = model.predict(np.array([tokens,]))[0]
+    prediction = model.predict(np.array([tokens,]), verbose=verbose)[0]
     line = find_line_number(prediction)
     return lines[line]
 
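
With this change, generate() passes Keras's verbose flag through to model.predict(), so callers can silence the prediction progress output. A minimal usage sketch (the input string is hypothetical; assumes the model, tokenizer, inp_len, and lines are already loaded as in test.py):

    # verbose=0 suppresses the Keras prediction progress bar; verbose=1 (the default) keeps it
    reply = generate("hello there", verbose=0)
    print(reply)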