# norwegian-xsum / translator.py
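"""Translate the text columns of a Hugging Face dataset with Facebook's NLLB models.

By default, English text is translated into Norwegian Bokmål (nob_Latn) and Nynorsk (nno_Latn).

Example invocation (the dataset name and column names are illustrative):

    python translator.py xsum document,summary --target_langs nob_Latn,nno_Latn --output_dir ./data
"""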
import argparse
import re
from functools import partial
from pathlib import Path
from typing import Optional, Union
import nltk
import torch
from datasets import load_dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
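
# nltk.sent_tokenize needs the "punkt" tokenizer data; this guard fetches it on first run
# if missing (a convenience sketch that assumes network access is available).
try:
    nltk.data.find("tokenizers/punkt")
except LookupError:
    nltk.download("punkt", quiet=True)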
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def split_into_chunks(text, tokenizer, max_tokens=128):
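    """Split `text` into sentence-aligned chunks of roughly `max_tokens` tokens each."""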
    # Split the text into sentences
sentences = nltk.sent_tokenize(text)
# Create chunks based on the maximum number of tokens
chunks = []
current_chunk = []
tokens_count = 0
for sentence in sentences:
sentence_tokens = tokenizer.encode(sentence, add_special_tokens=False)
sentence_token_count = len(sentence_tokens)
if tokens_count + sentence_token_count > max_tokens:
            # Adding this sentence would exceed the token limit, so flush the current chunk first
if current_chunk:
chunk_text = tokenizer.decode(current_chunk)
chunks.append(chunk_text)
current_chunk = []
tokens_count = 0
# Add the sentence to the current chunk
current_chunk.extend(sentence_tokens)
tokens_count += sentence_token_count
# Add any remaining tokens as the last chunk
if current_chunk:
chunk_text = tokenizer.decode(current_chunk)
chunks.append(chunk_text)
return chunks
def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128, sentence_joiner=" "):
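    """Translate a string or a list of strings into `lang_code`, splitting long texts into sentence chunks first."""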
is_string = isinstance(texts, str)
if is_string:
texts = [texts]
batch_size = len(texts)
to_translate = []
lengths = []
for text in texts:
        # Split into chunks of whole sentences and record how many chunks each text produced
chunks = split_into_chunks(text, tokenizer=tokenizer, max_tokens=max_tokens)
lengths.append(len(chunks))
to_translate += chunks
translated_texts = []
    # Split into batches for translation
to_translate_batches = [to_translate[i:i + batch_size] for i in range(0, len(to_translate), batch_size)]
for to_translate_batch in to_translate_batches:
inputs = tokenizer(to_translate_batch, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
translated_tokens = model.generate(
**inputs,
forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
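            # NOTE: newer transformers releases may drop `lang_code_to_id` on NLLB tokenizers;
            # if so, tokenizer.convert_tokens_to_ids(lang_code) is the equivalent call.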
            max_length=512,  # alternatives: max_new_tokens=512, or ~1.25x the input length just in case
)
translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
translated_texts += translated_text
    # Re-join the translated chunks so each input text maps back to a single output string
outputs = []
start = 0
for length in lengths:
outputs.append(sentence_joiner.join(translated_texts[start:start + length]))
start += length
return outputs[0] if is_string else outputs
def main(
    dataset_name: str,
    dataset_columns: Union[list, tuple],
    model_name: Optional[str] = "facebook/nllb-200-1.3B",  # or "facebook/nllb-200-distilled-600M"
    model_revision: Optional[str] = None,
    dataset_splits: Union[list, tuple] = ("train", "validation", "test"),
    dataset_config: Optional[str] = None,
    dataset_revision: Optional[str] = None,
    source_lang: Optional[str] = "eng_Latn",
    target_langs: Optional[Union[list, tuple]] = ("nob_Latn", "nno_Latn"),
    sentence_joiner: Optional[str] = " ",
    max_tokens_per_chunk: Optional[int] = 128,
    batch_size: Optional[int] = 24,
    output_dir: Optional[Path] = Path("./"),
) -> None:
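    """Translate the given columns of every requested split into each target language, saving Arrow and gzipped JSON Lines outputs."""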
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_name, revision=model_revision, use_auth_token=True, torch_dtype=torch.float32,
    )
    model.to(DEVICE, dtype=torch.float32, non_blocking=True)
tokenizer = AutoTokenizer.from_pretrained(
model_name, revision=model_revision, use_auth_token=True, src_lang=source_lang,
)
for lang_code in target_langs:
lang_code_short = re.split(r"[-_ /]", lang_code)[0]
if dataset_config:
output_path = output_dir / dataset_config / lang_code_short
else:
output_path = output_dir / lang_code_short
for split in dataset_splits:
ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision, split=split)
translate = partial(
to_lang_code,
lang_code=lang_code,
model=model,
tokenizer=tokenizer,
sentence_joiner=sentence_joiner,
max_tokens=max_tokens_per_chunk,
)
ds = ds.map(
lambda batch: {
column: translate(batch[column])
for column in dataset_columns
},
batched=True,
batch_size=batch_size,
desc=f"Translating to {lang_code} ({split})",
)
ds.save_to_disk(output_path / split, max_shard_size="1GB")
json_filename = f"{lang_code_short}_{split}.json.gz".lower()
            ds.to_pandas().to_json(
                output_path / json_filename, orient="records", lines=True,
            )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Translate datasets using Facebook's NLLB models")
parser.add_argument('dataset_name')
    parser.add_argument('dataset_columns', help="Comma-separated column names to translate")
    parser.add_argument('--dataset_splits', default="train,validation,test", help="Comma-separated splits to translate")
parser.add_argument('--dataset_config')
parser.add_argument('--dataset_revision')
parser.add_argument('--model_name', default="facebook/nllb-200-1.3B")
parser.add_argument('--model_revision')
parser.add_argument('--source_lang', default="eng_Latn")
    parser.add_argument('--target_langs', default="nob_Latn,nno_Latn", help="Comma-separated target languages to translate to")
    parser.add_argument('--sentence_joiner', default=" ", help="String used to join translated chunks back together")
    parser.add_argument('--max_tokens_per_chunk', default=128, type=int, help="Maximum number of tokens per translation chunk")
parser.add_argument('--batch_size', '-bs', default=24, type=int, help='Number of inputs per batch for prediction')
parser.add_argument('--output_dir', '-o', default="./", type=str)
args = parser.parse_args()
main(
dataset_name=args.dataset_name,
dataset_columns=args.dataset_columns.split(","),
dataset_splits=args.dataset_splits.split(","),
dataset_config=args.dataset_config,
dataset_revision=args.dataset_revision,
model_name=args.model_name,
model_revision=args.model_revision,
source_lang=args.source_lang,
target_langs=args.target_langs.split(","),
sentence_joiner=args.sentence_joiner,
max_tokens_per_chunk=args.max_tokens_per_chunk,
batch_size=args.batch_size,
output_dir=Path(args.output_dir),
)