{ "cells": [ { "cell_type": "code", "execution_count": 138, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import glob\n", "import spacy\n", "from spacy.tokens import Span\n", "import os" ] }, { "cell_type": "code", "execution_count": 139, "metadata": {}, "outputs": [], "source": [ "def join_words(group):\n", " return ' '.join(group.astype(str))\n", "\n", "def create_spacy_training_data(file_path):\n", " # Load data from the Excel file\n", " data = pd.read_excel(file_path)\n", " # if \"Line_ID\" in data.columns:\n", " # group_col = \"Line_ID\"\n", " if \"ACT\" in data.columns:\n", " group_col = \"ACT\"\n", " elif \"Original_Act_ID\" in data.columns:\n", " group_col = \"Original_Act_ID\"\n", " else:\n", " \"unknown\"\n", " data = data[~data['Word_x'].apply(lambda x: isinstance(x, int))]\n", " data = data[~data['Word_x'].apply(lambda x: isinstance(x, float))]\n", "\n", "\n", "\n", "\n", " \n", " # Combine words into sentences, assumed by unique 'Line_ID'\n", " grouped_data = data.groupby(group_col)['Word_x'].apply(' '.join).reset_index()\n", " \n", " # Prepare training data in spaCy format\n", " training_data = []\n", " \n", " for _, row in grouped_data.iterrows():\n", " text = row['Word_x']\n", " entities = []\n", " current_position = 0\n", "\n", " # Iterate over words in the current line to build entities\n", " for _, word_data in data[data[group_col] == row[group_col]].iterrows():\n", " start = current_position\n", " end = start + len(word_data['Word_x'])\n", " # Check if there's a named entity\n", " if word_data['LOC_x'] != 'O':\n", " entities.append((start, end, 'LOC'))\n", " if word_data['PERS_x'] != 'O':\n", " entities.append((start, end, 'PER'))\n", " \n", " current_position = end + 1 # Update position, accounting for space\n", "\n", " # Append to training data\n", " training_data.append({\"text\": text, \"entities\": entities})\n", " training_data = convert_to_spacy_docs(training_data)\n", " return training_data\n", "\n", "def convert_to_spacy_docs(training_data):\n", " # Load spaCy model, you can change it to whatever model you'd like to use or just use a blank one\n", " nlp = spacy.blank('en') # Assuming the data is in English; change the language code if needed\n", " \n", " # List to hold the spaCy docs\n", " spacy_docs = []\n", " \n", " for record in training_data:\n", " # Create a doc from the text\n", " doc = nlp(record['text'])\n", " \n", " # Create a list to collect entity spans\n", " spans = []\n", " \n", " for start, end, label in record['entities']:\n", " span = doc.char_span(start, end, label=label)\n", " if span is not None: # Only add the span if it's correctly aligned with token boundaries\n", " spans.append(span)\n", " \n", " # Overwrite the doc's 'ents' with our list of spans\n", " try:\n", " doc.spans[\"sc\"] = spans\n", " except:\n", " ValueError\n", " print(spans)\n", " \n", " span_ents = []\n", " for span in doc.spans[\"sc\"]:\n", " span_ents.append({\"text\": span.text, \"label\": span.label_, \"start\": span.start, \"end\": span.end})\n", " # Append the modified doc to the list\n", " spacy_docs.append({\"text\": doc.text, 'spans': span_ents})\n", " \n", " return spacy_docs" ] }, { "cell_type": "code", "execution_count": 140, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "17\n" ] } ], "source": [ "files = glob.glob(\"Database/*/CONLL/*.xlsx\")\n", "print(len(files))" ] }, { "cell_type": "code", "execution_count": 141, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ 
"0 Notre_Dame_Roche_Paris_BnF_10996\n", "1 Pontoise_Paris_BnF_5657\n", "2 Saint_Denis_Paris_AN_LL_1157\n", "3 Pontigny_Paris_BnF_lat_9887_inner_unmerged.xlsx\n", "4 Navarre_Pau_AD_E513\n", "5 Clairmarais_Troyes_AD_3H3700_inner_unmerged.xlsx\n", "6 Port_Royal_2_Paris_BnF_10998\n", "7 Nesle_Chantilly_GB_Reg12_14F22\n", "8 Fervaques_Paris_BnF_lat_11071\n", "9 Molesme_2_Dijon_ADCO_Cart_143_7H7\n", "10 Saint_Nicaise_Reims_BM_1843_inner_unmerged.xlsx\n", "11 Sommereux_Paris_Bnf_nal_1934\n", "12 Chartres_2_Paris_BnF_lat_10095\n", "13 Chartres_1_Paris_BnF_lat_10094\n", "14 Vauluisant_Paris_BnF_lat_9901\n", "15 Port_Royal_1_Paris_BnF_10997\n", "16 Molesme_1_Dijon_ADCO_Cart_142_7H6\n" ] } ], "source": [ "training_data = []\n", "for i, filename in enumerate(files):\n", " manuscript = os.path.basename(filename).split('_final_version_inner')[0]\n", " print(i, manuscript)\n", " res = create_spacy_training_data(filename)\n", " for r in res:\n", " r[\"ms\"] = manuscript\n", " training_data.append(r)" ] }, { "cell_type": "code", "execution_count": 142, "metadata": {}, "outputs": [], "source": [ "srsly.write_jsonl(\"home-alcar-ner.jsonl\", training_data)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "bow", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 2 }