Ghanvert committed
Commit bfc254e · verified · 1 Parent(s): cc30d18

Update README.md

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -19,15 +19,15 @@ We support the following languages/graphemaries:
 ```python
 from transformers import NllbTokenizerFast, AutoModelForSeq2SeqLM
 
-tokenizer = NllbTokenizerFast.from_pretrained("CenIA/nllb-200-3.3B-spa-arn", use_auth_token="HF_TOKEN")
-model = AutoModelForSeq2SeqLM.from_pretrained("CenIA/nllb-200-3.3B-spa-arn", use_auth_token="HF_TOKEN")
+tokenizer = NllbTokenizerFast.from_pretrained("CenIA/nllb-200-3.3B-spa-arn")
+model = AutoModelForSeq2SeqLM.from_pretrained("CenIA/nllb-200-3.3B-spa-arn")
 
 def translate(sentence: str, translate_from="spa_Latn", translate_to="quy_Latn") -> str:
     tokenizer.src_lang = translate_from
     tokenizer.tgt_lang = translate_to
 
     inputs = tokenizer(sentence, return_tensors="pt")
-    result = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id[translate_to])
+    result = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids(translate_to))
     decoded = tokenizer.batch_decode(result, skip_special_tokens=True)[0]
 
     return decoded
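
For context on the changes: the `use_auth_token="HF_TOKEN"` placeholder is dropped (it would fail verbatim, since it is not a real token, and `use_auth_token` is deprecated in newer transformers releases in favor of `token`), and the `lang_code_to_id` dictionary lookup is replaced because newer NLLB tokenizer versions no longer expose that mapping. `convert_tokens_to_ids` performs the same lookup, since the language codes are registered as tokens in the vocabulary. A minimal sketch of the equivalence, assuming the tokenizer from the snippet above and the README's default target code `quy_Latn`:

```python
# Resolve a language code to its token id on current transformers versions,
# where the old lang_code_to_id mapping is no longer available:
bos_id = tokenizer.convert_tokens_to_ids("quy_Latn")
# Equivalent to the removed API: tokenizer.lang_code_to_id["quy_Latn"]
```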