Spaces: Sleeping
Commit: Update app.py
Browse files
app.py CHANGED
@@ -15,14 +15,12 @@ sentence = st.text_input("input your english text")
 button = st.button("translate to Darija")
 
 if button :
-
-    sentence = SPECIAL_WORD+" "+sentence
     sentence = sentence.lower()
+    sentence = SPECIAL_WORD+" "+sentence
     length = len(sentence.split())
     if length < MAX_LENGTH-1:
         inputs = tokenizer(sentence, max_length=MAX_LENGTH, truncation=True, return_tensors="pt")
         outputs =model.generate(**inputs,max_length=MAX_LENGTH)
 
         decoded_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
-        decoded_output = decoded_output.replace(SPECIAL_WORD,'')
         st.text(decoded_output)