metricsubs-chunktranslate / generate_chatgpt_varlen.py
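"""Generate ChatGPT fine-tuning data (JSONL) from a directory of paired
English/Chinese subtitle files, chunking time-aligned lines into
variable-length message pairs."""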
import argparse
import copy
import json
import os
import random
import re
from os import listdir
from os.path import isfile, join
# System prompt (kept in Chinese, since it is the literal prompt used in the
# training data). English gloss: "You are a translation expert who specializes
# in tech news. Translate the following content into Chinese, output in the
# same format, and keep the timestamps. Do not drop any information. When
# merging multiple lines, keep the first and the last timestamp."
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。
"""
def break_line(line: str):
    """Parse a subtitle line of the form "[start] text [end]" and return the
    string and float forms of both timestamps along with the stripped text."""
    # Dots are escaped so they match only a literal decimal point.
    pattern = re.compile(r"^\[(\d+\.\d+)\](.*)\[(\d+\.\d+)\]$")
    match = pattern.match(line)
    if match is None:
        raise ValueError(f"Malformed subtitle line: {line!r}")
    start_time = match.group(1)
    text = match.group(2)
    end_time = match.group(3)
    return start_time, text.strip(), end_time, float(start_time), float(end_time)
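# Illustrative example (hypothetical input):
#   break_line("[12.34] Hello world [56.78]")
#   -> ("12.34", "Hello world", "56.78", 12.34, 56.78)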
def get_total_chars(cn_lines: list[str], en_lines: list[str]):
    """Return the combined character count of both line lists."""
    return sum(len(line) for line in cn_lines) + sum(len(line) for line in en_lines)
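# Note: character count is used as a cheap proxy for prompt length; the
# --maxlen help text below recommends roughly max context length / 4 so the
# combined English + Chinese chunk leaves headroom in the model context.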
def chunk_messages(cn_lines: list[str], en_lines: list[str], MAX_LEN: int = 2000):
    """Split aligned Chinese/English subtitle lines into time-aligned chunks of
    roughly MAX_LEN combined characters. A whole aligned group is appended
    before the length check, so the limit can be exceeded slightly."""
    cn_lines_copy = copy.deepcopy(cn_lines)
    en_lines_copy = copy.deepcopy(en_lines)
    final_chunks: list[tuple[list[str], list[str]]] = []
    while True:
        en_current_chunk = []
        cn_current_chunk = []
        while True:
            curr_total_len = get_total_chars(cn_current_chunk, en_current_chunk)
            if len(cn_lines_copy) == 0 or len(en_lines_copy) == 0:
                final_chunks.append((cn_current_chunk, en_current_chunk))
                return final_chunks
            elif curr_total_len > MAX_LEN:
                final_chunks.append((cn_current_chunk, en_current_chunk))
                break
            else:
                # Take the next Chinese line, then consume English lines until
                # their end timestamps line up again.
                latest_cn_line = cn_lines_copy.pop(0)
                cn_start, cn_text, cn_end, cn_start_f, cn_end_f = break_line(latest_cn_line)
                cn_current_chunk.append(latest_cn_line)
                while True:
                    if not en_lines_copy:
                        raise Exception("ERROR: Ran out of English lines while matching: " + latest_cn_line)
                    latest_en_line = en_lines_copy.pop(0)
                    en_start, en_text, en_end, en_start_f, en_end_f = break_line(latest_en_line)
                    en_current_chunk.append(latest_en_line)
                    if en_end == cn_end:
                        break
                    elif en_start_f > cn_end_f:
                        raise Exception("ERROR: English and Chinese lines are not in sync. Offending line: " + latest_cn_line)
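# Worked example (hypothetical data): if the English side has
#   "[0.00] Hello [1.00]" and "[1.00] world [2.00]"
# and the Chinese side merged them into the single line
#   "[0.00] 你好,世界 [2.00]",
# the inner loop keeps consuming English lines until the English end
# timestamp "2.00" matches the Chinese one, so both chunks stay aligned.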
def new_message(eng_in, chs_out, prev_in=None, prev_out=None):
    """Build one chat-format training record; if a previous English/Chinese
    pair is supplied, prepend it as one-shot context."""
    if prev_in is None or prev_out is None:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
    else:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": prev_in},
            {"role": "assistant", "content": prev_out},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out}]
        }
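# Each record follows the OpenAI chat fine-tuning JSONL format:
#   {"messages": [{"role": "system", ...}, {"role": "user", ...},
#                 {"role": "assistant", ...}, ...]}
# With a previous pair supplied, the prior user/assistant turns become
# one-shot context ahead of the current pair.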
def write_jsonl(message_groups, filename):
    """Write one chat record per line to filename and return the JSON lines.
    Every record after the first carries the previous pair as context."""
    json_lines = []
    with open(filename, "w", encoding='utf-8-sig') as fout:
        for i in range(len(message_groups)):
            if i > 0:
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip(),
                    message_groups[i - 1][0].strip(),
                    message_groups[i - 1][1].strip()
                )
            else:
                msg_obj = new_message(
                    message_groups[i][0].strip(),
                    message_groups[i][1].strip()
                )
            json.dump(msg_obj, fout)
            fout.write("\n")
            json_lines.append(json.dumps(msg_obj))
    return json_lines
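# Minimal usage sketch (file name is illustrative):
#   lines = write_jsonl([("[0.00] Hi [1.00]", "[0.00] 嗨 [1.00]")], "out.jsonl")
# writes one JSON object per line and returns the serialized strings.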
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate ChatGPT training data from a directory of subtitle files.')
    # nargs='?' makes the positional optional so its default actually applies.
    parser.add_argument('data_dir', type=str, nargs='?', help='The directory containing the subtitle files.', default="data")
    parser.add_argument('--maxlen', type=int, help='The maximum length of a combined message. \nNote that this limit will be exceeded a little bit, so leave some headroom. \nRecommended value is max context length / 4.', default=2000)
    parser.add_argument('--test-ratio', type=float, help='The ratio of test data to training data.', default=0.2)
    args = parser.parse_args()

    message_groups = []
    DOCUMENT_ROOT = args.data_dir
    files = listdir(DOCUMENT_ROOT)
    files = list(filter(lambda x: x.endswith(".en.txt"), files))
    files.sort()

    for f in files:
        en_fname = join(DOCUMENT_ROOT, f)
        if en_fname.endswith(".en.txt") and isfile(en_fname):
            cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
            if os.path.exists(cn_fname) and isfile(cn_fname):
                print(f"Found data pair: {en_fname} and {cn_fname}")
                with open(en_fname, "r", encoding='utf-8-sig') as enfin:
                    en_messages = enfin.read()
                with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
                    cn_messages = cnfin.read()
                en_messages = [part.strip() for part in en_messages.split("\n") if part.strip() != ""]
                cn_messages = [part.strip() for part in cn_messages.split("\n") if part.strip() != ""]
                try:
                    chunks = chunk_messages(cn_messages, en_messages, MAX_LEN=args.maxlen)
                    en_messages = []
                    cn_messages = []
                    for chunk in chunks:
                        cn_chunk, en_chunk = chunk
                        en_messages.append("\n".join(en_chunk))
                        cn_messages.append("\n".join(cn_chunk))
                except Exception as e:
                    print(f"Error: {e}")
                    continue
                if len(en_messages) != len(cn_messages):
                    print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                    continue  # Actually skip the mismatched pair instead of zipping it anyway.
                message_groups.extend(zip(en_messages, cn_messages))

    jsonl_lines = write_jsonl(message_groups, f"combined-{args.maxlen}.jsonl")

    # Shuffle, then split into train/test sets.
    random.shuffle(jsonl_lines)
    TEST_RATIO = args.test_ratio
    split_index = int(len(jsonl_lines) * TEST_RATIO)
    test = jsonl_lines[:split_index]
    train = jsonl_lines[split_index:]
    with open(f"chatgpt-train-{args.maxlen}.jsonl", "w", encoding='utf-8-sig') as fout:
        for line in train:
            fout.write(line + "\n")
    with open(f"chatgpt-test-{args.maxlen}.jsonl", "w", encoding='utf-8-sig') as fout:
        for line in test:
            fout.write(line + "\n")
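# Example invocation (directory name is illustrative):
#   python generate_chatgpt_varlen.py data --maxlen 2000 --test-ratio 0.2
# emits combined-2000.jsonl plus chatgpt-train-2000.jsonl / chatgpt-test-2000.jsonl.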