Datasets:
File size: 4,519 Bytes
f752c8d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
import json
import os
from os import listdir
from os.path import isfile, join
# System prompt (kept verbatim — it is runtime data). It instructs the model, in
# Chinese: "You are an expert translator of tech news. Translate the following
# into Chinese, keep the same format and the timestamps, omit nothing; when
# merging multi-line text keep the first and last timestamp."
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。
"""
def new_message(eng_in, chs_out, prev_in=None, prev_out=None):
    """Build one chat-format training example.

    Args:
        eng_in: English source text (current chunk).
        chs_out: Chinese reference translation (current chunk).
        prev_in: Optional preceding English chunk, used as few-shot context.
        prev_out: Optional preceding Chinese chunk; context is only included
            when BOTH prev_in and prev_out are provided.

    Returns:
        A dict with a "messages" list in OpenAI chat fine-tuning format.
    """
    messages = [{"role": "system", "content": INSTRUCT_CHUNKED_PROMPT}]
    # Only attach the previous turn when both halves are present; a lone half
    # would desynchronize the user/assistant alternation.
    if prev_in is not None and prev_out is not None:
        messages.append({"role": "user", "content": prev_in})
        messages.append({"role": "assistant", "content": prev_out})
    messages.append({"role": "user", "content": eng_in})
    messages.append({"role": "assistant", "content": chs_out})
    return {"messages": messages}
def write_jsonl(message_groups, filename):
    """Serialize (english, chinese) pairs to a JSONL file of chat examples.

    Every record after the first embeds the immediately preceding pair as
    few-shot context (see new_message). Output is UTF-8 with a BOM, one
    JSON object per line.
    """
    with open(filename, "w", encoding='utf-8-sig') as fout:
        prev_pair = None
        for eng, chs in message_groups:
            if prev_pair is None:
                # First record: no preceding pair to use as context.
                record = new_message(eng.strip(), chs.strip())
            else:
                record = new_message(
                    eng.strip(),
                    chs.strip(),
                    prev_pair[0].strip(),
                    prev_pair[1].strip(),
                )
            prev_pair = (eng, chs)
            json.dump(record, fout)
            fout.write("\n")
message_groups = []
DOCUMENT_ROOT = "data"

# Gather all English transcript files; the matching Chinese file shares the
# stem with a ".cn.txt" suffix.
files = listdir(DOCUMENT_ROOT)
files = [name for name in files if name.endswith(".en.txt")]
files.sort()
print(files)

for f in files:
    en_fname = join(DOCUMENT_ROOT, f)
    if en_fname.endswith(".en.txt") and isfile(en_fname):
        cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
        if os.path.exists(cn_fname) and isfile(cn_fname):
            print(f"Found data pair: {en_fname} and {cn_fname}")
            with open(en_fname, "r", encoding='utf-8-sig') as enfin:
                en_messages = enfin.read()
            with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
                cn_messages = cnfin.read()
            # Chunks are separated by blank lines; drop empty segments.
            en_messages = [part.strip() for part in en_messages.split("\n\n") if part.strip() != ""]
            cn_messages = [part.strip() for part in cn_messages.split("\n\n") if part.strip() != ""]
            if len(en_messages) != len(cn_messages):
                print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                # BUG FIX: previously the code fell through and still kept the
                # (zip-truncated) pair despite announcing it was discarded.
                continue
            message_groups.extend(zip(en_messages, cn_messages))

write_jsonl(message_groups, "combined.jsonl")
import random

# Shuffle in place so the held-out split is not biased by file order.
random.shuffle(message_groups)

TEST_RATIO = 0.2
split_index = int(len(message_groups) * TEST_RATIO)
# First TEST_RATIO of the shuffled pairs become the test set.
test, train = message_groups[:split_index], message_groups[split_index:]
write_jsonl(train, "chatgpt-train.jsonl")
write_jsonl(test, "chatgpt-test.jsonl")
# Repeat the collection for only the five most recent files (list is sorted,
# so the last entries are the newest by filename).
recent_files = files[-5:]
recent_messages = []

for f in recent_files:
    en_fname = join(DOCUMENT_ROOT, f)
    if en_fname.endswith(".en.txt") and isfile(en_fname):
        cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
        if os.path.exists(cn_fname) and isfile(cn_fname):
            print(f"Found data pair: {en_fname} and {cn_fname}")
            with open(en_fname, "r", encoding='utf-8-sig') as enfin:
                en_messages = enfin.read()
            with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
                cn_messages = cnfin.read()
            # Chunks are separated by blank lines; drop empty segments.
            en_messages = [part.strip() for part in en_messages.split("\n\n") if part.strip() != ""]
            cn_messages = [part.strip() for part in cn_messages.split("\n\n") if part.strip() != ""]
            if len(en_messages) != len(cn_messages):
                print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                # BUG FIX: previously the code fell through and still kept the
                # (zip-truncated) pair despite announcing it was discarded.
                continue
            recent_messages.extend(zip(en_messages, cn_messages))

write_jsonl(recent_messages, "recent-combined.jsonl")
# Split the recent subset with the same held-out ratio (no shuffle here,
# matching the original behavior).
TEST_RATIO = 0.2
holdout = int(len(recent_messages) * TEST_RATIO)
test = recent_messages[:holdout]
train = recent_messages[holdout:]
write_jsonl(train, "chatgpt-recent-train.jsonl")
write_jsonl(test, "chatgpt-recent-test.jsonl")