import json
import os
import random
from os import listdir
from os.path import isfile, join

# System prompt for every training sample. In English it reads roughly:
# "You are a translation expert who specialises in tech news. Translate the
# following content into Chinese, output it in the same format, and keep the
# timestamps. Do not omit any information. When merging multiple lines of
# text, keep the first and the last timestamp."
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。
"""


def new_message(eng_in, chs_out, prev_in=None, prev_out=None):
    """Build one chat-format sample. When the previous chunk is supplied, it
    is prepended as a prior turn so the model trains with one turn of
    translation context."""
    if prev_in is None or prev_out is None:
        return {"messages": [
            {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
            {"role": "user", "content": eng_in},
            {"role": "assistant", "content": chs_out},
        ]}
    return {"messages": [
        {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
        {"role": "user", "content": prev_in},
        {"role": "assistant", "content": prev_out},
        {"role": "user", "content": eng_in},
        {"role": "assistant", "content": chs_out},
    ]}

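# For illustration only -- the shape of a context-carrying sample, with
# hypothetical transcript text:
# {"messages": [
#     {"role": "system", "content": INSTRUCT_CHUNKED_PROMPT},
#     {"role": "user", "content": "00:12 previous English chunk ..."},
#     {"role": "assistant", "content": "00:12 上一段的中文翻译 ..."},
#     {"role": "user", "content": "00:45 current English chunk ..."},
#     {"role": "assistant", "content": "00:45 当前段的中文翻译 ..."}]}
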
def write_jsonl(message_groups, filename):
    """Write one chat-format JSON object per line (JSONL). Every sample after
    the first carries the preceding pair as conversation history."""
    # Plain UTF-8 on output: a utf-8-sig BOM at the start of a JSONL file can
    # confuse downstream parsers.
    with open(filename, "w", encoding="utf-8") as fout:
        for i, (eng_in, chs_out) in enumerate(message_groups):
            if i > 0:
                prev_in, prev_out = message_groups[i - 1]
                msg_obj = new_message(eng_in.strip(), chs_out.strip(),
                                      prev_in.strip(), prev_out.strip())
            else:
                msg_obj = new_message(eng_in.strip(), chs_out.strip())
            # ensure_ascii=False keeps the Chinese text human-readable.
            json.dump(msg_obj, fout, ensure_ascii=False)
            fout.write("\n")


message_groups = []

DOCUMENT_ROOT = "data"
files = sorted(f for f in listdir(DOCUMENT_ROOT) if f.endswith(".en.txt"))

print(files)

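# Expected layout under DOCUMENT_ROOT (file names are hypothetical): each
# English transcript has a Chinese counterpart, and chunks inside each file
# are separated by blank lines.
#   data/2024-01-05-episode.en.txt
#   data/2024-01-05-episode.cn.txt
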
def collect_pairs(file_list):
    """Load each .en.txt/.cn.txt pair and return aligned (English, Chinese)
    chunk tuples."""
    pairs = []
    for f in file_list:
        en_fname = join(DOCUMENT_ROOT, f)
        if not (en_fname.endswith(".en.txt") and isfile(en_fname)):
            continue
        cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
        if not isfile(cn_fname):
            continue
        print(f"Found data pair: {en_fname} and {cn_fname}")

        with open(en_fname, "r", encoding="utf-8-sig") as enfin:
            en_messages = enfin.read()
        with open(cn_fname, "r", encoding="utf-8-sig") as cnfin:
            cn_messages = cnfin.read()

        # Chunks are separated by blank lines; drop any empty chunks.
        en_messages = [part.strip() for part in en_messages.split("\n\n") if part.strip() != ""]
        cn_messages = [part.strip() for part in cn_messages.split("\n\n") if part.strip() != ""]

        # Misaligned files cannot be zipped chunk-for-chunk, so skip them.
        if len(en_messages) != len(cn_messages):
            print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
            continue

        pairs.extend(zip(en_messages, cn_messages))
    return pairs


message_groups.extend(collect_pairs(files))

write_jsonl(message_groups, "combined.jsonl")

# Shuffle before splitting so train and test draw from the same distribution.
random.shuffle(message_groups)

TEST_RATIO = 0.2

split_index = int(len(message_groups) * TEST_RATIO)

test = message_groups[:split_index]
train = message_groups[split_index:]

# Note: write_jsonl rebuilds the one-turn history from the (now shuffled)
# order, so the "previous" chunk in these samples is a random chunk rather
# than the true preceding one.
write_jsonl(train, "chatgpt-train.jsonl")
write_jsonl(test, "chatgpt-test.jsonl")

# Build a second, smaller dataset from the five most recent transcript pairs.
recent_files = files[-5:]
recent_messages = collect_pairs(recent_files)

write_jsonl(recent_messages, "recent-combined.jsonl")

# Split the recent subset with the same TEST_RATIO as above (this subset is
# kept in file order, not shuffled).
split_index = int(len(recent_messages) * TEST_RATIO)

test = recent_messages[:split_index]
train = recent_messages[split_index:]

write_jsonl(train, "chatgpt-recent-train.jsonl")
write_jsonl(test, "chatgpt-recent-test.jsonl")
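
# Optional sanity check (assumption: run right after the files above are
# written): confirm every line parses back as JSON and report sample counts.
for fname in ["chatgpt-train.jsonl", "chatgpt-test.jsonl",
              "chatgpt-recent-train.jsonl", "chatgpt-recent-test.jsonl"]:
    with open(fname, "r", encoding="utf-8") as fin:
        count = sum(1 for line in fin if json.loads(line))
    print(f"{fname}: {count} samples")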