thaboe01 committed on
Commit
4742645
1 Parent(s): 1085827

Update app.py

Files changed (1)
  app.py +2 -2
app.py CHANGED
@@ -26,7 +26,7 @@ def correct_text(text):
         if len(current_chunk) + 1 > MAX_PHRASE_LENGTH:
             input_text = PREFIX + " ".join(current_chunk)
             input_ids = tokenizer(input_text, return_tensors="pt").input_ids
-            outputs = model.generate(**input_ids)
+            outputs = model.generate(input_ids)
             corrected_phrase = tokenizer.decode(outputs[0], skip_special_tokens=True)
             corrected_phrases.append(corrected_phrase)
             current_chunk = []  # Reset the chunk
@@ -35,7 +35,7 @@ def correct_text(text):
     if current_chunk:
         input_text = PREFIX + " ".join(current_chunk)
         input_ids = tokenizer(input_text, return_tensors="pt").input_ids
-        outputs = model.generate(**input_ids)
+        outputs = model.generate(input_ids)
         corrected_phrase = tokenizer.decode(outputs[0], skip_special_tokens=True)
         corrected_phrases.append(corrected_phrase)
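
Context for the change: tokenizer(..., return_tensors="pt").input_ids is a torch.Tensor, so unpacking it with ** raises a TypeError; model.generate(input_ids) takes the tensor positionally. Below is a minimal sketch of the corrected chunk-and-correct loop under stated assumptions: the checkpoint name, PREFIX string, and MAX_PHRASE_LENGTH value are placeholders and may differ from the actual app.py.

    # Minimal sketch, not the exact app.py; assumes a seq2seq checkpoint.
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    MODEL_NAME = "t5-small"        # placeholder checkpoint
    PREFIX = "grammar: "           # placeholder task prefix
    MAX_PHRASE_LENGTH = 64         # placeholder chunk size

    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

    def correct_text(text):
        corrected_phrases = []
        current_chunk = []
        for word in text.split():
            if len(current_chunk) + 1 > MAX_PHRASE_LENGTH:
                input_text = PREFIX + " ".join(current_chunk)
                # .input_ids is a tensor, so pass it positionally (no ** unpacking)
                input_ids = tokenizer(input_text, return_tensors="pt").input_ids
                outputs = model.generate(input_ids)
                corrected_phrases.append(
                    tokenizer.decode(outputs[0], skip_special_tokens=True)
                )
                current_chunk = []  # Reset the chunk
            current_chunk.append(word)
        if current_chunk:
            input_text = PREFIX + " ".join(current_chunk)
            input_ids = tokenizer(input_text, return_tensors="pt").input_ids
            outputs = model.generate(input_ids)
            corrected_phrases.append(
                tokenizer.decode(outputs[0], skip_special_tokens=True)
            )
        return " ".join(corrected_phrases)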