versae committed
Commit 7cf41ef
1 Parent(s): c076d9b

Adding new translations for nob and nno

nob/nob_test.json.tar.gz → nno/nno_test.json.gz RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e53b90d8579baebfb94b1cfea5695a75633d71c38bac43ad3d5c06586e2204e7
- size 3363690
+ oid sha256:c3d4e9848cf1c5b52a9b7c8b3e4c54ac8a4cf83b76e7d90297aae8ae17ece42b
+ size 9099898
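Note: a Git LFS pointer records only the object's SHA-256 and byte size. A minimal Python sketch (not part of this commit; the local path is assumed) for checking a downloaded copy of nno/nno_test.json.gz against the new pointer above:

import hashlib
import os

path = "nno/nno_test.json.gz"  # assumed local path of the fetched LFS object
expected_oid = "c3d4e9848cf1c5b52a9b7c8b3e4c54ac8a4cf83b76e7d90297aae8ae17ece42b"
expected_size = 9099898

sha = hashlib.sha256()
with open(path, "rb") as fh:
    for block in iter(lambda: fh.read(1 << 20), b""):
        sha.update(block)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_oid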
nno/nno_train.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcebec789f9f7af7a149bedd0b49910115ee12dc053186fbc710dc1f0245abab
+ size 162881864
nob/nob_validation.json.tar.gz → nno/nno_validation.json.gz RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:51ecfd130e61fb1860448427b83a18a64ca6f779f1785517366faeb8d5d790de
- size 3280742
+ oid sha256:95bc60b000790dc4eab2b39cd95ec7d64074b06b148f94a32812aa3efc0ab05d
+ size 8955006
nob/nob_train.json.tar.gz → nno/test/data-00000-of-00001.arrow RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c533b8c8596718c7f3954c0578562efa314e141b3880cb43145d6c7e3ef11053
- size 60171284
+ oid sha256:2271215611a203d8cafac0af05c964e1b597348d396326ee8dd340b167d68f1b
+ size 22634776
nno/test/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nno/test/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "ca9883b8f61f3596",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "test"
+ }
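Note: the data-00000-of-00001.arrow, dataset_info.json and state.json files added under nno/test are the layout that datasets' save_to_disk writes for a single split (see translator.py below). A minimal sketch of loading it back, assuming a local clone of this repository with the LFS objects pulled:

from datasets import load_from_disk

# Path of the saved split inside the local checkout (assumed).
nno_test = load_from_disk("nno/test")
print(nno_test)                # Dataset with 'document', 'summary' and 'id' columns
print(nno_test[0]["summary"])  # first translated summary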
nno/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cfe1eb2e1298cdb75e04c839ed09daa2b91451573cd8f6e3c8b3df8fd81a65c
+ size 405633912
nno/train/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nno/train/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "5bac35fb99243876",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
nno/validation/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b46d5ad6709969f1079b17dd317645af0ec84f00fcf7bb426be678b596cf4eee
+ size 22308800
nno/validation/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nno/validation/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "a6c220ea2a1f3000",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "validation"
+ }
nob/dataset_dict.json DELETED
@@ -1 +0,0 @@
- {"splits": ["train", "validation", "test"]}
nob/nob_test.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0c323dc8b9a5bee9ed424842c3e8b02c889193ecad76aebe3655a0a4f13e8af
+ size 9515838
nob/nob_train.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:923dbc055f3874dd98a89c7e9b22d48bb773af5caa8e9325f01fd410889c0989
+ size 170294159
nob/nob_validation.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de17f72b1e7fdfb611e8aa636f83e1f36b6eea384bf8996a48bb4fa0e9c58b52
+ size 9359670
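Note: the *.json.gz files are gzipped JSON Lines exports written by translator.py via to_pandas().to_json(..., orient='records', lines=True). A minimal sketch of reading one back (local path assumed):

import pandas as pd
from datasets import load_dataset

# pandas infers gzip compression from the .gz extension.
df = pd.read_json("nob/nob_validation.json.gz", lines=True)
print(df.columns.tolist())  # expected: ['document', 'summary', 'id']

# The same file can also be loaded directly as a datasets split.
ds = load_dataset("json", data_files="nob/nob_validation.json.gz", split="train")
print(ds)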
nob/test/data-00000-of-00001.arrow CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2b6b9eb616116b75a8c23eabde80b9ab21227c7f3f09738e9461548a13670c06
- size 48076152
+ oid sha256:786585c00c182a27fe083d8956a81daf0c61a688cf1de8235b38bd9dc3bba153
+ size 23875496
nob/test/state.json CHANGED
@@ -4,7 +4,7 @@
        "filename": "data-00000-of-00001.arrow"
      }
    ],
-   "_fingerprint": "0cd0deb949ec246b",
+   "_fingerprint": "5cce8fd2208d7170",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": null,
nob/train/data-00000-of-00001.arrow CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e799e9bdddcdf2997088f16298df4614f8adb6068f634d52b576b11a66d3e04
- size 833308256
+ oid sha256:67a00f2331a3afe0a984c8939c25a688fc92467ac86363cee5fadf402ac0f4d4
+ size 427840440
nob/train/state.json CHANGED
@@ -4,7 +4,7 @@
        "filename": "data-00000-of-00001.arrow"
      }
    ],
-   "_fingerprint": "09e18f5a398d83ac",
+   "_fingerprint": "b9a9c9cc7aef8975",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": null,
nob/validation/data-00000-of-00001.arrow CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0dccddfe9fc0209ae45e4ccf9279b0330b8a480dd5d1819a2ba8136e3695a320
- size 44194752
+ oid sha256:1c182cfd9445cd9c6c35fbbb5cacd046a8f101d9d988983681da4e7a7a13a35a
+ size 23504448
nob/validation/state.json CHANGED
@@ -4,7 +4,7 @@
        "filename": "data-00000-of-00001.arrow"
      }
    ],
-   "_fingerprint": "3d58f12b55350560",
+   "_fingerprint": "c42300ebe4b92c7b",
    "_format_columns": null,
    "_format_kwargs": {},
    "_format_type": null,
translator.py CHANGED
@@ -12,58 +12,80 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
  DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


- def to_lang_code(texts, lang_code, model, tokenizer, max_words=500):
+
+ def split_into_chunks(text, tokenizer, max_tokens=128):
+     # Split tokenized text into sentences
+     sentences = nltk.sent_tokenize(text)
+
+     # Create chunks based on the maximum number of tokens
+     chunks = []
+     current_chunk = []
+     tokens_count = 0
+
+     for sentence in sentences:
+         sentence_tokens = tokenizer.encode(sentence, add_special_tokens=False)
+         sentence_token_count = len(sentence_tokens)
+
+         if tokens_count + sentence_token_count > max_tokens:
+             # If adding this sentence to the current chunk would exceed the maximum number of tokens, add the current chunk to the list of chunks
+             if current_chunk:
+                 chunk_text = tokenizer.decode(current_chunk)
+                 chunks.append(chunk_text)
+                 current_chunk = []
+                 tokens_count = 0
+
+         # Add the sentence to the current chunk
+         current_chunk.extend(sentence_tokens)
+         tokens_count += sentence_token_count
+
+     # Add any remaining tokens as the last chunk
+     if current_chunk:
+         chunk_text = tokenizer.decode(current_chunk)
+         chunks.append(chunk_text)
+
+     return chunks
+
+
+ def to_lang_code(texts, lang_code, model, tokenizer, max_tokens=128):
      is_string = isinstance(texts, str)
      if is_string:
          texts = [texts]
      batch_size = len(texts)
      to_translate = []
-     merges = []
-     for index, text in enumerate(texts):
-         # Split in sentences if too long
-         merges.append(0)
-         if text.count(" ") > max_words:
-             sentences = nltk.sent_tokenize(text, "norwegian")
-             text_to_translate = ""
-             for sentence in sentences:
-                 spaces = (text_to_translate + " " + sentence).count(" ")
-                 if spaces >= max_words:
-                     to_translate.append(text_to_translate.strip())
-                     merges[-1] += 1
-                 else:
-                     text_to_translate += sentence + " "
-         else:
-             to_translate.append(text)
+     lengths = []
+     for text in texts:
+         # Split in chunks of non-breaking sentences and keep lengths of chunks
+         chunks = split_into_chunks(text, tokenizer=tokenizer, max_tokens=max_tokens)
+         lengths.append(len(chunks))
+         to_translate += chunks
      translated_texts = []
      # Split in batches for translation
-     to_translate_batchs = [to_translate[i:i + batch_size] for i in range(0, len(to_translate), batch_size)]
-     for to_translate_batch in to_translate_batchs:
+     to_translate_batches = [to_translate[i:i + batch_size] for i in range(0, len(to_translate), batch_size)]
+     for to_translate_batch in to_translate_batches:
          inputs = tokenizer(to_translate_batch, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
          translated_tokens = model.generate(
              **inputs,
              forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
-             max_length=int(len(inputs.tokens()) * 1.25) # 25% more tokens for the translation just in case
+             max_new_tokens=512,
+             # max_length=int(len(inputs.tokens()) * 1.25) # 25% more tokens for the translation just in case
          )
-         translated_texts += tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
+         translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
+         translated_texts += translated_text
      # Merge outputs properly
      outputs = []
-     for merge in merges:
-         output = ""
-         if merge:
-             for i in range(len(outputs), len(outputs) + merge):
-                 output += translated_texts[i] + " "
-             outputs.append(output.strip())
-         else:
-             outputs.append(translated_texts[len(outputs)].strip())
+     start = 0
+     for length in lengths:
+         outputs.append(" ".join(translated_texts[start:start + length]))
+         start += length
      return outputs[0] if is_string else outputs


  def main(
      dataset_name: str,
      dataset_columns: Union[list, tuple],
-     model_name: Optional[str]="facebook/nllb-200-3.3B", # "facebook/nllb-200-distilled-600M"
+     model_name: Optional[str]="facebook/nllb-200-1.3B", # "facebook/nllb-200-distilled-600M"
      model_revision: Optional[str]=None,
-     dataset_splits: Union[list, tuple]=("test", "validation", "train"),
+     dataset_splits: Union[list, tuple]=("train", "validation", "test"),
      dataset_config: Optional[str]=None,
      dataset_revision: Optional[str]=None,
      source_lang: Optional[str]="eng_Latn",
@@ -78,21 +100,24 @@ def main(
          model_name, revision=model_revision, use_auth_token=True, src_lang=source_lang,
      )

-     ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision)
-     dss = {}
      for lang_code in target_langs:
-         translate = partial(to_lang_code, lang_code=lang_code, model=model, tokenizer=tokenizer)
-         dss[lang_code] = ds.map(
-             lambda batch: {col: translate(batch[col]) for col in dataset_columns},
-             batched=True,
-             batch_size=batch_size,
-             desc=f"Translating to {lang_code}",
-         )
-         lang_code_short = re.split(r"[-_ /]", lang_code)[0]
-         dss[lang_code].save_to_disk(output_dir / lang_code_short, max_shard_size="1GB")
          for split in dataset_splits:
-             json_filename = f"{lang_code_short}_{split}.json.tar.gz".lower()
-             dss[lang_code][split].to_pandas().to_json(
+             ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision, split=split)
+             translate = partial(to_lang_code, lang_code=lang_code, model=model, tokenizer=tokenizer)
+             ds = ds.map(
+                 lambda batch: {
+                     column: translate(batch[column])
+                     for column in dataset_columns
+                 },
+                 batched=True,
+                 batch_size=batch_size,
+                 desc=f"Translating to {lang_code} ({split})",
+             )
+             lang_code_short = re.split(r"[-_ /]", lang_code)[0]
+             ds.save_to_disk(output_dir / lang_code_short / split, max_shard_size="1GB")
+
+             json_filename = f"{lang_code_short}_{split}.json.gz".lower()
+             ds.to_pandas().to_json(
                  output_dir / lang_code_short / json_filename, orient='records', lines=True
              )

@@ -102,10 +127,10 @@ if __name__ == "__main__":
      parser = argparse.ArgumentParser(description="Translate datasets using Facebook's NLLB models")
      parser.add_argument('dataset_name')
      parser.add_argument('dataset_columns', help="Comma separated column names to translate")
-     parser.add_argument('--dataset_splits', default="test,validation,train", help="Comma separated splits to translate")
+     parser.add_argument('--dataset_splits', default="train,validation,test", help="Comma separated splits to translate")
      parser.add_argument('--dataset_config')
      parser.add_argument('--dataset_revision')
-     parser.add_argument('--model_name', default="facebook/nllb-200-3.3B")
+     parser.add_argument('--model_name', default="facebook/nllb-200-1.3B")
      parser.add_argument('--model_revision')
      parser.add_argument('--source_lang', default="eng_Latn")
      parser.add_argument('--target_langs', default="nob_Latn,nno_Latn", help="Comma separated target languages to translate to")
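Note: the new split_into_chunks greedily packs whole sentences into chunks of at most max_tokens tokens, and to_lang_code rejoins the translated chunks per input document. A minimal sketch of exercising it (not part of the commit; the checkpoint below is a smaller stand-in and the example text is made up):

import nltk
from transformers import AutoTokenizer

from translator import split_into_chunks

nltk.download("punkt")  # sentence model used by nltk.sent_tokenize (newer nltk releases may need "punkt_tab")

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
)

document = " ".join(f"This is sentence number {i} of a long news article." for i in range(100))
chunks = split_into_chunks(document, tokenizer=tokenizer, max_tokens=128)

# Each chunk re-encodes to roughly the 128-token budget or fewer,
# unless a single sentence is itself longer than the budget.
print(len(chunks))
print(max(len(tokenizer.encode(c, add_special_tokens=False)) for c in chunks))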