Update README.md
README.md CHANGED

````diff
@@ -19,15 +19,15 @@ We support the following languages/graphemaries:
 ```python
 from transformers import NllbTokenizerFast, AutoModelForSeq2SeqLM
 
-tokenizer = NllbTokenizerFast.from_pretrained("CenIA/nllb-200-3.3B-spa-arn"
-model = AutoModelForSeq2SeqLM.from_pretrained("CenIA/nllb-200-3.3B-spa-arn"
+tokenizer = NllbTokenizerFast.from_pretrained("CenIA/nllb-200-3.3B-spa-arn")
+model = AutoModelForSeq2SeqLM.from_pretrained("CenIA/nllb-200-3.3B-spa-arn")
 
 def translate(sentence: str, translate_from="spa_Latn", translate_to="quy_Latn") -> str:
     tokenizer.src_lang = translate_from
     tokenizer.tgt_lang = translate_to
 
     inputs = tokenizer(sentence, return_tensors="pt")
-    result = model.generate(**inputs, forced_bos_token_id=tokenizer.
+    result = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids(translate_to))
     decoded = tokenizer.batch_decode(result, skip_special_tokens=True)[0]
 
     return decoded
````
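The fix closes two unbalanced `from_pretrained(...)` calls and completes the truncated `forced_bos_token_id` argument: NLLB models select the target language by forcing its language token as the first generated token, and `tokenizer.convert_tokens_to_ids(translate_to)` looks up that token's id. As a quick sanity check, a minimal usage sketch of the fixed snippet follows; the Spanish sentences are made-up examples, and the explicit `"arn_Latn"` code is an assumption that the spa-arn fine-tune registered a Mapudungun language token, not something stated in the diff.

```python
# Illustrative usage of the translate() helper from the fixed README snippet.
# Assumes the snippet above has already been executed, so `tokenizer`,
# `model`, and `translate` are defined.

# Default language pair from the snippet: spa_Latn -> quy_Latn.
print(translate("El clima está agradable hoy."))

# Explicit codes; "arn_Latn" is a hypothetical target code, assuming the
# spa-arn fine-tune added a Mapudungun token to the tokenizer's vocabulary.
print(translate("Buenos días.", translate_from="spa_Latn", translate_to="arn_Latn"))
```

Passing `forced_bos_token_id` on each `generate` call keeps a single loaded model usable for any target language the tokenizer knows about.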