# metricsubs-chunktranslate / generate_llamafactory.py
import os
import json
import random
from os import listdir
from os.path import isfile, join
# System prompt (in Chinese): "You are a translation expert skilled at translating tech news.
# Translate the following content into Chinese, output in the same format, and keep the
# timestamps. Do not omit any information. When merging multiple lines of text, keep the
# first and last timestamps."
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。"""
DOCUMENT_ROOT = "data"
files = listdir(DOCUMENT_ROOT)
all_datapoints = []
for f in files:
    en_fname = join(DOCUMENT_ROOT, f)
    if en_fname.endswith(".en.txt") and isfile(en_fname):
        cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
        if os.path.exists(cn_fname) and isfile(cn_fname):
            print(f"Found data pair: {en_fname} and {cn_fname}")

            with open(en_fname, "r", encoding='utf-8-sig') as enfin:
                en_messages = enfin.read()
            with open(cn_fname, "r", encoding='utf-8-sig') as cnfin:
                cn_messages = cnfin.read()

            # Split each file into chunks separated by blank lines.
            en_messages = [part.strip() for part in en_messages.split("\n\n") if part.strip() != ""]
            cn_messages = [part.strip() for part in cn_messages.split("\n\n") if part.strip() != ""]

            if len(en_messages) != len(cn_messages):
                print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
                continue

            # Build one datapoint per chunk; earlier chunks of the same document
            # are carried along as multi-turn history.
            history = []
            for en, cn in zip(en_messages, cn_messages):
                all_datapoints.append({
                    'system': INSTRUCT_CHUNKED_PROMPT,
                    'input': en,
                    'output': cn,
                    'history': list(history)  # copy, so later chunks don't mutate this record's history
                })
                history.append([en, cn])
# random.shuffle() shuffles in place and returns None, so do not assign its result.
random.shuffle(all_datapoints)
# 80/20 split between training and evaluation datapoints.
COUNT = len(all_datapoints)
TRAIN = int(0.8 * COUNT)
train_dataset = all_datapoints[:TRAIN]
evaluation_dataset = all_datapoints[TRAIN:]
with open("train.json", "w", encoding='utf-8-sig') as train_f:
for d in train_dataset:
json.dump(d, train_f)
train_f.write('\n')
with open("test.json", "w", encoding='utf-8-sig') as ev_f:
for d in evaluation_dataset:
json.dump(d, ev_f)
ev_f.write('\n')
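
# Illustrative sketch only (hypothetical data, not taken from the repository): each line of
# train.json / test.json is one JSON object shaped like the dict built above, e.g.
#
#   {
#     "system": "<INSTRUCT_CHUNKED_PROMPT>",
#     "input": "00:00:01 --> 00:00:04\nHello and welcome back to the channel.",
#     "output": "00:00:01 --> 00:00:04\n大家好,欢迎回到本频道。",
#     "history": []
#   }
#
# A record for a later chunk of the same document carries the earlier [en, cn] pairs in "history".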