# BabyLM / tag_data.py
# Commit a966ae1 (Zeb): "Add scripts to tag data and improve cleaning"
""" Script used to tag the data with POS tags. """
import os
import re
from transformers import AutoTokenizer
import nltk, sys
# Maps the raw state labels emitted by the unsupervised HMM tagger (each state
# is identified by a representative word, e.g. "and", "states") to universal
# POS tags.  Used in get_tags_from_file() to convert predicted states into
# comparable universal tags.
# NOTE(review): this table appears hand-built for the specific model loaded in
# __main__ ("10M_train_30_extended", 30-ish states) — presumably it must be
# rebuilt if a different unsupervised model is used; confirm before reusing.
UNSUPERVISED_POS_TAG_MAP = {
    "and" : 'CONJ',
    "|" : 'NOUN',
    "states" : 'NOUN',
    "school" : 'NOUN',
    ".\"" : '.',
    "-" : '.',
    "five" : 'NUM',
    "1" : 'NUM',
    "they" : 'PRON',
    "of" : 'ADP',
    "are" : 'VERB',
    "(" : '.',
    "american" : 'ADJ',
    "'s" : 'VERB',
    "\"" : 'NOUN',
    "the" : 'DET',
    "a" : 'DET',
    "after" : 'ADP',
    "th" : 'NOUN',
    "good" : 'ADJ',
    "her" : 'PRON',
    "night" : 'NOUN',
    "to" : 'PRT',
    "used" : 'VERB',
    "," : '.',
    "sir" : 'NOUN',
    "tell" : 'VERB',
    "lot" : 'NOUN',
    "amp" : 'NOUN',
    "doing" : 'VERB'
}
def tag_with_nltk(text, en_ptb_map):
    """POS-tag each line of *text* with NLTK and map tags to universal tags.

    Each line is split on whitespace, tagged with ``nltk.pos_tag`` (PTB tag
    set), and every PTB tag is converted through *en_ptb_map*.

    Returns a list (one entry per input line) of ``(token, universal_tag)``
    pair lists.
    """
    tagged_lines = []
    for line in text:
        ptb_tagged = nltk.pos_tag(line.split())
        # Translate each PTB tag to its universal equivalent.
        universal = [(word, en_ptb_map[ptb]) for word, ptb in ptb_tagged]
        tagged_lines.append(universal)
    return tagged_lines
def write_to_file(tagged, output_file):
    """Write tagged sentences to *output_file*, one sentence per line.

    Each ``(token, tag)`` pair is rendered as ``token__<label>__tag`` followed
    by a space (the trailing space before the newline is intentional and kept
    for compatibility with the downstream tagger's expected input format).
    """
    with open(output_file, 'w') as f:
        for sentence in tagged:
            rendered = [f'{token}__<label>__{tag} ' for token, tag in sentence]
            f.write(''.join(rendered) + '\n')
def tokenize_lines(text, tokenizer):
    """Pre-tokenize each line with the tokenizer's pre-tokenizer.

    Runs the HF backend pre-tokenizer on every line, strips the byte-level
    word-boundary marker "Ġ" and converts the newline marker "Ċ" back to a
    real newline, then re-joins the pieces with single spaces.

    Returns a list of space-joined token strings, one per input line.
    """
    pre_tokenize = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str
    joined = []
    for line in text:
        pieces = pre_tokenize(line)
        # Each piece is (string, (start, end)); only the string is needed.
        words = [p[0].replace("Ġ", "").replace('Ċ', '\n') for p in pieces]
        joined.append(' '.join(words))
    return joined
def get_tags_from_file(file):
    """Parse the unsupervised tagger's output file and report accuracy.

    Each non-empty line of *file* must hold four space-separated fields:
    ``token gold_tag _ pred_tag``; sentences are separated by blank lines.
    Predicted state labels are mapped to universal POS tags through
    UNSUPERVISED_POS_TAG_MAP, and the token-level accuracy of the mapped
    predictions against the gold tags is printed.

    Returns:
        (gold_tagged_lines, pred_tagged_lines) — two parallel lists of
        sentences, each sentence a list of ``(token, tag)`` pairs.

    Raises:
        KeyError if a predicted state label is missing from the map.
    """
    with open(file, 'r') as f:
        lines = f.read().splitlines()
    gold_tagged_lines = []
    pred_tagged_lines = []
    gold_tagged = []
    pred_tagged = []
    total = 0
    correct = 0
    for line in lines:
        if line == '':
            # Blank line ends the current sentence.
            gold_tagged_lines.append(gold_tagged)
            pred_tagged_lines.append(pred_tagged)
            gold_tagged = []
            pred_tagged = []
        else:
            token, gold_tag, _, pred_tag = line.strip().split(' ')
            gold_tagged.append((token, gold_tag))
            # Map the predicted state label to a universal tag.
            universal = UNSUPERVISED_POS_TAG_MAP[pred_tag]
            pred_tagged.append((token, universal))
            total += 1
            if gold_tag == universal:
                correct += 1
    # Bug fix: a final sentence not followed by a trailing blank line was
    # previously dropped; flush it (no-op when the file ends with a blank).
    if gold_tagged:
        gold_tagged_lines.append(gold_tagged)
        pred_tagged_lines.append(pred_tagged)
    # Bug fix: guard against ZeroDivisionError on an empty input file.
    accuracy = correct / total if total else 0.0
    print(f' Unsupervised Tagging Accuracy: {accuracy}')
    return gold_tagged_lines, pred_tagged_lines
def write_tagged_lines(filename, text, tagged_lines):
    """Interleave original lines with their tagged versions in *filename*.

    For each pair (original line, tagged sentence) the original line is
    written as-is (it is expected to already end with a newline), followed by
    a line of space-separated ``token__<label>__tag`` entries.
    """
    with open(filename, 'w') as f:
        for raw_line, sentence in zip(text, tagged_lines):
            labelled = ' '.join(f'{token}__<label>__{tag}' for token, tag in sentence)
            f.write(raw_line)
            f.write(labelled + '\n')
# Tokenizer whose pre-tokenizer is used to split lines before tagging.
# NOTE(review): loaded at import time, so merely importing this module
# triggers a Hugging Face hub load/download — consider moving under the
# __main__ guard.
tokenizer = AutoTokenizer.from_pretrained("CamBabyTrainers/BabyBERTa-3-8192-tokenizer")
# BabyLM dataset splits to process (subdirectories under clean/).
FOLDERS = ['10M', '100M', 'dev', 'test']
if __name__ == "__main__":
    # Collect every cleaned .txt file from each BabyLM split under clean/.
    all_files = []
    for folder in FOLDERS:
        for root, _dirs, files in os.walk(f"clean/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))
    # Build the PTB -> universal tag map: one "PTB UNIVERSAL" pair per line.
    en_ptb_map = {}
    with open('../pos_tagging/en-ptb.map', 'r') as f:
        # Idiom fix: iterate the file object directly instead of readlines().
        for line in f:
            (key, val) = line.split()
            en_ptb_map[key] = val
    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()
        # 1. Tokenize the lines in the text, tag with universal tags and write to tmp file
        tokenized = tokenize_lines(lines, tokenizer)
        tagged = tag_with_nltk(tokenized, en_ptb_map)
        write_to_file(tagged, 'tmp.txt')
        # 2. Run the unsupervised tagger on the tmp file.
        # NOTE(review): os.system with a fixed command string — acceptable here
        # since nothing user-supplied is interpolated, but the exit status is
        # ignored; a failure surfaces only via the assert below.
        os.system(f'./../anchor/hmm --output ../pos_tagging/10M_train_30_extended --data tmp.txt --pred tmp_tagged.txt')
        # 3. Get the gold tags and predicted tags
        gold_tagged_lines, pred_tagged_lines = get_tags_from_file('tmp_tagged.txt')
        # Sanity check: one tagged sentence per original line.
        assert len(gold_tagged_lines) == len(pred_tagged_lines) == len(lines)
        # 4. Write predicted and gold tagged copies, mirroring the clean/ tree.
        new_file = file.replace('clean', 'tagged')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        write_tagged_lines(new_file, lines, pred_tagged_lines)
        new_file = file.replace('clean', 'tagged_gold')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        write_tagged_lines(new_file, lines, gold_tagged_lines)