""" Script used to tag the data with POS tags. """

import os

import nltk
from transformers import AutoTokenizer
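
# nltk.pos_tag needs the perceptron tagger models: run nltk.download('averaged_perceptron_tagger')
# (or 'averaged_perceptron_tagger_eng' on newer NLTK versions) once if they are not installed.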
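
# Maps the anchor HMM's predicted state labels (surface tokens such as "and" or "five")
# to universal POS tags; the keys are specific to the trained model used below.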
UNSUPERVISED_POS_TAG_MAP = {
    "and": 'CONJ',
    "|": 'NOUN',
    "states": 'NOUN',
    "school": 'NOUN',
    ".\"": '.',
    "-": '.',
    "five": 'NUM',
    "1": 'NUM',
    "they": 'PRON',
    "of": 'ADP',
    "are": 'VERB',
    "(": '.',
    "american": 'ADJ',
    "'s": 'VERB',
    "\"": 'NOUN',
    "the": 'DET',
    "a": 'DET',
    "after": 'ADP',
    "th": 'NOUN',
    "good": 'ADJ',
    "her": 'PRON',
    "night": 'NOUN',
    "to": 'PRT',
    "used": 'VERB',
    ",": '.',
    "sir": 'NOUN',
    "tell": 'VERB',
    "lot": 'NOUN',
    "amp": 'NOUN',
    "doing": 'VERB'
}


def tag_with_nltk(text, en_ptb_map):
    """ Given a list of text, tag each word with its POS tag using NLTK """
    new_lines = []
    for line in text:
        tokens = line.split()
        tagged = nltk.pos_tag(tokens)
        # Convert the Penn Treebank tags returned by nltk.pos_tag to universal POS tags
        tagged = [(token, en_ptb_map[tag]) for (token, tag) in tagged]
        new_lines.append(tagged)
    return new_lines
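

# write_to_file emits the whitespace-separated "token__<label>__TAG" lines that are
# passed to the anchor HMM binary below as its --data file.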
def write_to_file(tagged, output_file):
    """ Given a list of tagged lines, write them to the given output file """
    with open(output_file, 'w') as f:
        for line in tagged:
            for token, tag in line:
                f.write(f'{token}__<label>__{tag} ')
            f.write('\n')
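

# Note: the pre-tokenized pieces are re-joined with single spaces, so the tagged output
# does not preserve the original whitespace of each line.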
def tokenize_lines(text, tokenizer):
    """ Pre-tokenize each line with the tokenizer's pre-tokenizer and re-join the pieces with spaces. """
    new_lines = []
    for line in text:
        tokens = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(line)
        # Each pre-token is a (text, offsets) pair; strip the byte-level space marker ('Ġ')
        # and convert the newline marker ('Ċ') back to a real newline
        tokens = [t[0].replace("Ġ", "").replace('Ċ', '\n') for t in tokens]
        new_lines.append(' '.join(tokens))
    return new_lines
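

# The tagger's output is expected to contain one "token gold_tag <ignored> predicted_label"
# line per token, with blank lines separating sentences; predicted_label is looked up in
# UNSUPERVISED_POS_TAG_MAP to obtain a universal tag.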
def get_tags_from_file(file):
    """ Parse the anchor HMM's output file into per-sentence gold and predicted taggings. """
    with open(file, 'r') as f:
        lines = f.readlines()

    gold_tagged_lines = []
    pred_tagged_lines = []
    gold_tagged = []
    pred_tagged = []
    total = 0
    correct = 0
    for line in lines:
        line = line.strip()
        if line == '':
            # Blank line: end of the current sentence
            gold_tagged_lines.append(gold_tagged)
            pred_tagged_lines.append(pred_tagged)
            gold_tagged = []
            pred_tagged = []
        else:
            token, gold_tag, _, pred_tag = line.split(' ')
            gold_tagged.append((token, gold_tag))
            pred_tagged.append((token, UNSUPERVISED_POS_TAG_MAP[pred_tag]))
            total += 1
            if gold_tag == UNSUPERVISED_POS_TAG_MAP[pred_tag]:
                correct += 1

    # Flush the final sentence in case the file does not end with a blank line
    if gold_tagged:
        gold_tagged_lines.append(gold_tagged)
        pred_tagged_lines.append(pred_tagged)

    print(f' Unsupervised Tagging Accuracy: {correct/total}')

    return gold_tagged_lines, pred_tagged_lines


def write_tagged_lines(filename, text, tagged_lines):
    """ Write each raw line followed by its 'token__<label>__tag' annotation to the given file. """
    with open(filename, 'w') as f:
        # First line of the output file is its own basename, acting as a header
        f.write(filename.split('/')[-1] + '\n')
        for line, tagged in zip(text, tagged_lines):
            f.write(line)
            f.write(' '.join([f'{token}__<label>__{tag}' for token, tag in tagged]) + '\n')
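

# The tokenizer is fetched from the Hugging Face Hub (or local cache); only its
# pre-tokenizer is used, via tokenize_lines above.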
tokenizer = AutoTokenizer.from_pretrained("CamBabyTrainers/CamBabyTokenizer-8192")

FOLDERS = ['10M', '100M', 'dev', 'test']
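

# Pipeline: for every cleaned text file, (1) pre-tokenize the lines, (2) POS-tag them with
# NLTK to get gold tags, (3) run the pre-trained anchor HMM to get unsupervised tags, and
# (4) write both taggings out to parallel 'tagged' and 'tagged_gold' directory trees.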
if __name__ == "__main__":

    # Collect every .txt file under the clean/ data splits
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"clean/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))
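
    # en-ptb.map maps Penn Treebank tags (as produced by nltk.pos_tag) to the universal
    # POS tagset, one "PTB_TAG UNIVERSAL_TAG" pair per line.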
    en_ptb_map = {}
    with open('../pos_tagging/en-ptb.map', 'r') as f:
        for line in f.readlines():
            key, val = line.split()
            en_ptb_map[key] = val

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            # Skip the first line of each file (write_tagged_lines writes its own header line instead)
            lines = f.readlines()[1:]

        # Pre-tokenize, tag with NLTK to get the gold tags, and write the anchor HMM's input file
        tokenized = tokenize_lines(lines, tokenizer)
        tagged = tag_with_nltk(tokenized, en_ptb_map)
        write_to_file(tagged, 'tmp.txt')
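
        # Run the external anchor HMM tagger on tmp.txt; its per-token predictions are read
        # back from tmp_tagged.txt below. The --output path appears to point at a previously
        # trained model (an assumption about the anchor tool's CLI, not verified here).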
        os.system('./../anchor/hmm --output ../pos_tagging/10M_train_30_extended --data tmp.txt --pred tmp_tagged.txt')

        gold_tagged_lines, pred_tagged_lines = get_tags_from_file('tmp_tagged.txt')

        # Every input line should come back as exactly one tagged sentence
        assert len(gold_tagged_lines) == len(pred_tagged_lines) == len(lines)

        # Write the unsupervised (predicted) tags and the NLTK (gold) tags to
        # parallel 'tagged' and 'tagged_gold' directory trees
        new_file = file.replace('clean', 'tagged')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        write_tagged_lines(new_file, lines, pred_tagged_lines)

        new_file = file.replace('clean', 'tagged_gold')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        write_tagged_lines(new_file, lines, gold_tagged_lines)

        # Clean up the temporary files
        os.remove('tmp.txt')
        os.remove('tmp_tagged.txt')