Datasets:
File size: 2,184 Bytes
f752c8d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import json
import os
import random
from os import listdir
from os.path import isfile, join
# System prompt attached to every datapoint: instructs the model to translate
# tech-news transcripts to Chinese, keep the chunked format/timestamps, and
# keep only the first and last timestamp when merging multi-line text.
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。"""

# Root directory containing paired transcripts: <name>.en.txt / <name>.cn.txt
DOCUMENT_ROOT = "data"
# Fix: use DOCUMENT_ROOT instead of repeating the "data" literal, so the
# root can be changed in one place.
files = listdir(DOCUMENT_ROOT)
# Accumulates one training record per (en, cn) chunk pair across all files.
all_datapoints = []
# Build instruction-tuning records from each English/Chinese transcript pair.
# Files are chunked by blank lines; chunk i of the .en.txt file must align
# with chunk i of the .cn.txt file. Earlier (en, cn) pairs from the same
# document are carried along as conversation history.
for f in files:
    en_fname = join(DOCUMENT_ROOT, f)
    # Guard clauses: only process regular *.en.txt files that have a
    # matching *.cn.txt sibling.
    if not (en_fname.endswith(".en.txt") and isfile(en_fname)):
        continue
    cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
    if not (os.path.exists(cn_fname) and isfile(cn_fname)):
        continue
    print(f"Found data pair: {en_fname} and {cn_fname}")
    # utf-8-sig tolerates a leading BOM in the source transcripts.
    with open(en_fname, "r", encoding='utf-8-sig') as enfin:
        en_messages = enfin.read()
    with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
        cn_messages = cnfin.read()
    # Split into non-empty chunks separated by blank lines.
    en_messages = [part.strip() for part in en_messages.split("\n\n") if part.strip() != ""]
    cn_messages = [part.strip() for part in cn_messages.split("\n\n") if part.strip() != ""]
    if len(en_messages) != len(cn_messages):
        print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
        # BUG FIX: the original printed "Discarding" but fell through and
        # still zipped the misaligned chunks, producing silently mispaired
        # training data. Actually skip the pair.
        continue
    # Emit one record per chunk; history holds all earlier chunk pairs of
    # this document so the model sees prior context during training.
    history = []
    for en, cn in zip(en_messages, cn_messages):
        all_datapoints.append({
            'system': INSTRUCT_CHUNKED_PROMPT,
            'input': en,
            'output': cn,
            'history': history
        })
        history.append([en, cn])
# Shuffle once, then take an 80/20 train/eval split.
# NOTE: this shuffle is unseeded, so the split differs on every run;
# seed random for reproducible datasets if needed.
# BUG FIX: random.shuffle() shuffles in place and returns None — the
# original bound that None to an unused `shuffled` variable.
random.shuffle(all_datapoints)
COUNT = len(all_datapoints)
TRAIN = int(0.8 * COUNT)  # size of the training partition
train_dataset = all_datapoints[:TRAIN]
evaluation_dataset = all_datapoints[TRAIN:]
def _write_jsonl(path, records):
    """Write `records` to `path` in JSON Lines format (one object per line).

    ensure_ascii=False keeps the Chinese text human-readable instead of
    escaping every character to \\uXXXX (the JSON parsed back is identical
    either way).
    NOTE(review): utf-8-sig prepends a BOM, which some strict JSONL readers
    reject on the first line — kept for compatibility with the original
    output; confirm the training pipeline tolerates it.
    """
    with open(path, "w", encoding='utf-8-sig') as fout:
        for record in records:
            json.dump(record, fout, ensure_ascii=False)
            fout.write('\n')

_write_jsonl("train.json", train_dataset)
_write_jsonl("test.json", evaluation_dataset)
|