import os
import json
import random
from os import listdir
from os.path import isfile, join

# System prompt (Chinese). Rough translation: "You are a translation expert
# specializing in tech news. Translate the following into Chinese, keep the
# same output format and the timestamps. Do not drop any information. When
# merging multi-line text, keep the first and the last timestamp."
INSTRUCT_CHUNKED_PROMPT = """你是一个擅长翻译科技新闻的翻译专家。请将以下内容翻译为中文,使用相同格式输出,并保留时间戳。不要漏掉任何信息。合并多行文本时,保留第一个和最后一个时间戳。"""

DOCUMENT_ROOT = "data"   # directory holding paired *.en.txt / *.cn.txt transcripts
TRAIN_FRACTION = 0.8     # fraction of datapoints that go into the train split
SEED = 42                # fixed seed so the shuffle (and thus the split) is reproducible


def split_chunks(text):
    """Split a transcript into its double-newline-separated chunks.

    Returns the non-empty chunks, each with surrounding whitespace stripped.
    """
    return [part.strip() for part in text.split("\n\n") if part.strip() != ""]


def build_datapoints(en_messages, cn_messages, system_prompt=INSTRUCT_CHUNKED_PROMPT):
    """Pair English/Chinese chunks into instruction-tuning datapoints.

    Each datapoint carries the conversation `history` accumulated so far.
    A snapshot copy of the history is stored per datapoint — the original
    code stored a reference to one shared list that kept growing, so every
    datapoint ended up with the full document history instead of its prefix.
    """
    datapoints = []
    history = []
    for en, cn in zip(en_messages, cn_messages):
        datapoints.append({
            'system': system_prompt,
            'input': en,
            'output': cn,
            'history': list(history),  # snapshot, not a live reference
        })
        history.append([en, cn])
    return datapoints


def write_jsonl(path, datapoints):
    """Write datapoints as JSON Lines: one JSON object per line, UTF-8.

    `ensure_ascii=False` keeps the Chinese text readable instead of
    \\uXXXX-escaping every character.
    """
    with open(path, "w", encoding="utf-8") as fout:
        for d in datapoints:
            json.dump(d, fout, ensure_ascii=False)
            fout.write('\n')


# In Jupyter __name__ is "__main__", so the notebook behavior is unchanged;
# the guard only makes the helpers above importable without side effects.
if __name__ == "__main__":
    all_datapoints = []

    for f in listdir(DOCUMENT_ROOT):
        en_fname = join(DOCUMENT_ROOT, f)
        if not (en_fname.endswith(".en.txt") and isfile(en_fname)):
            continue
        cn_fname = join(DOCUMENT_ROOT, f.replace(".en.txt", ".cn.txt"))
        if not (os.path.exists(cn_fname) and isfile(cn_fname)):
            continue
        print(f"Found data pair: {en_fname} and {cn_fname}")

        # Transcripts contain Chinese text; read explicitly as UTF-8 so the
        # result does not depend on the platform's default encoding.
        with open(en_fname, "r", encoding="utf-8") as enfin:
            en_messages = split_chunks(enfin.read())
        with open(cn_fname, "r", encoding="utf-8") as cnfin:
            cn_messages = split_chunks(cnfin.read())

        if len(en_messages) != len(cn_messages):
            # The original printed "Discarding" but then zipped the mismatched
            # lists anyway; actually skip the pair now.
            print(f"English and Chinese version mismatch. Discarding {en_fname} pair.")
            continue

        all_datapoints.extend(build_datapoints(en_messages, cn_messages))

    random.seed(SEED)               # reproducible shuffle / split
    random.shuffle(all_datapoints)  # shuffles in place; returns None

    train_count = int(TRAIN_FRACTION * len(all_datapoints))
    write_jsonl("train.json", all_datapoints[:train_count])
    write_jsonl("evaluation.json", all_datapoints[train_count:])