{ "cells": [ { "cell_type": "code", "execution_count": 108, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import glob\n", "import spacy\n", "from spacy.tokens import Span, Doc\n", "import os\n", "from spacy.training import biluo_tags_to_offsets, biluo_tags_to_spans, iob_to_biluo\n", "import srsly" ] }, { "cell_type": "code", "execution_count": 113, "metadata": {}, "outputs": [], "source": [ "def create_spacy_training_data(file_path):\n", " # Load data from the Excel file\n", " data = pd.read_excel(file_path)\n", " # if \"Line_ID\" in data.columns:\n", " # group_col = \"Line_ID\"\n", " if \"ACT\" in data.columns:\n", " group_col = \"ACT\"\n", " elif \"Original_Act_ID\" in data.columns:\n", " group_col = \"Original_Act_ID\"\n", " else:\n", " \"unknown\"\n", " # data = data[~data['Word_x'].apply(lambda x: isinstance(x, int))]\n", " # data = data[~data['Word_x'].apply(lambda x: isinstance(x, float))]\n", " data['Word_x'] = data['Word_x'].astype(str).str.strip()\n", " \n", " # Combine words into sentences, assumed by unique 'Line_ID'\n", " grouped_data = data.groupby(group_col)\n", " \n", " # Prepare training data in spaCy format\n", " training_data = []\n", " for _, item in grouped_data:\n", " bilo_loc = item[\"LOC_x\"].tolist()\n", " bilo_person = item[\"PERS_x\"].tolist()\n", " tokens = item[\"Word_x\"].tolist()\n", " doc = Doc(nlp.vocab, words=tokens, spaces=[True for i in range(len(tokens))])\n", " # doc = nlp(\" \".join(tokens))\n", "\n", " spans = iob_to_biluo(bilo_person)\n", " spans = biluo_tags_to_spans(doc, spans)\n", "\n", "\n", " loc_spans = iob_to_biluo(bilo_loc)\n", " loc_spans = biluo_tags_to_spans(doc, loc_spans)\n", "\n", " spans = loc_spans + spans\n", " doc.spans[\"sc\"] = spans\n", " span_ents = []\n", " for span in doc.spans[\"sc\"]:\n", " span_ents.append({\"text\": span.text, \"label\": span.label_, \"start\": span.start, \"end\": span.end})\n", " training_data.append({\"text\": doc.text, \"spans\": span_ents})\n", " return training_data" ] }, { "cell_type": "code", "execution_count": 98, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "17\n" ] } ], "source": [ "files = glob.glob(\"Database/*/CONLL/*.xlsx\")\n", "print(len(files))" ] }, { "cell_type": "code", "execution_count": 117, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "0 Notre_Dame_Roche_Paris_BnF_10996\n", "1 Pontoise_Paris_BnF_5657\n", "2 Saint_Denis_Paris_AN_LL_1157\n", "3 Pontigny_Paris_BnF_lat_9887_inner_unmerged.xlsx\n", "4 Navarre_Pau_AD_E513\n", "5 Clairmarais_Troyes_AD_3H3700_inner_unmerged.xlsx\n", "6 Port_Royal_2_Paris_BnF_10998\n", "7 Nesle_Chantilly_GB_Reg12_14F22\n", "8 Fervaques_Paris_BnF_lat_11071\n", "9 Molesme_2_Dijon_ADCO_Cart_143_7H7\n", "10 Saint_Nicaise_Reims_BM_1843_inner_unmerged.xlsx\n", "11 Sommereux_Paris_Bnf_nal_1934\n", "12 Chartres_2_Paris_BnF_lat_10095\n", "13 Chartres_1_Paris_BnF_lat_10094\n", "14 Vauluisant_Paris_BnF_lat_9901\n", "15 Port_Royal_1_Paris_BnF_10997\n", "16 Molesme_1_Dijon_ADCO_Cart_142_7H6\n" ] } ], "source": [ "training_data = []\n", "for i, filename in enumerate(files):\n", " manuscript = os.path.basename(filename).split('_final_version_inner')[0]\n", " print(i, manuscript)\n", " res = create_spacy_training_data(filename)\n", " for r in res:\n", " r[\"ms\"] = manuscript\n", " training_data.append(r)" ] }, { "cell_type": "code", "execution_count": 120, "metadata": {}, "outputs": [], "source": [ "srsly.write_jsonl(\"home-alcar-ner.jsonl\", training_data)" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "bow", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 }