""" Script used to clean the data. """

import os
import re

from nltk import tokenize
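# NOTE: the sentence-splitting helpers below use NLTK's sent_tokenize, which relies on
# the 'punkt' tokenizer data; it may need to be downloaded first (e.g. nltk.download('punkt')).
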
def clean_aochildes(lines):
    """ For aochildes, we just remove the space between the final word and the punctuation mark """
    new_lines = []
    for line in lines:
        # Each line ends with " <punct>\n"; dropping the character at index -3 removes that space.
        new_lines.append(line[:-3] + line[-2:])
    return new_lines


def clean_bnc_spoken(lines):
    """ For bnc_spoken, we lowercase """
    new_lines = []
    for line in lines:
        new_lines.append(line.lower())
    return new_lines


def clean_cbt(lines):
    """ For cbt, we lowercase and normalise punctuation """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", '”', '“', '‘', '’']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Normalise the various quote styles to straight double quotes
        new_line = new_line.replace(": ' ", ": \"")
        new_line = new_line.replace("''", "\"")
        new_line = new_line.replace(" '\n", "\"\n")
        new_line = new_line.replace(" ' ", "\" ")
        new_line = new_line.replace(" `` ", " \"")
        new_line = new_line.replace("` ", " \"")
        new_line = new_line.replace("`", "\"")
        new_line = new_line.replace("”", "\"")
        # Remove the space that precedes each punctuation mark
        for punct in punctuation:
            new_line = new_line.replace(f" {punct}", punct)
        new_lines.append(new_line)
    return new_lines


def clean_children_stories(lines):
    """ For children_stories, we lowercase and split long lines into sentences """
    new_lines = []
    for line in lines:
        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower().strip()) if s != '']
        new_lines.extend(sentences)
    return new_lines


def clean_gutenberg(lines):
    """ For gutenberg, we lowercase, remove italics, group lines into paragraphs and then split into sentences """
    # Group consecutive non-empty lines into paragraphs; underscores mark italics and are dropped
    paragraphs = []
    paragraph = ""
    for line in lines:
        tmp_line = line.lower().strip().replace('_', '')
        if tmp_line == "" and paragraph != "":
            # An empty line ends the current paragraph; drop the trailing space before storing it
            paragraphs.append(paragraph[:-1] + '\n')
            paragraph = ""
        else:
            paragraph += tmp_line + " "

    # Split each paragraph into sentences
    new_lines = []
    for paragraph in paragraphs:
        sentences = [s + '\n' for s in tokenize.sent_tokenize(paragraph) if s != '']
        new_lines.extend(sentences)
    return new_lines


def clean_open_subtitles(lines):
    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", '”', '“', '‘', '’', ' ', '\n']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Remove the leading dash used to mark speaker turns in subtitles
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0] == "-":
            new_line = new_line[1:]
        # Pad with a leading space so word-initial replacements also apply to the first word
        new_line = ' ' + new_line
        # Subtitle OCR often renders a capital 'I' as a lowercase 'l'; map the common cases back to 'i'
        for punct in punctuation:
            new_line = new_line.replace(f" l{punct}", f" i{punct}")
            new_line = new_line.replace(f" lm{punct}", f" im{punct}")
            new_line = new_line.replace(f" lf{punct}", f" if{punct}")
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        new_line = new_line.replace(' isd', ' lsd')  # undo the over-correction for 'lsd'
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')
        new_lines.append(new_line.strip() + '\n')
    return new_lines


def clean_qed(lines):
    """ For qed, we lowercase and normalise punctuation, remove words contained in parentheses,
    remove lines that are just characters' names and fix the lowercase 'l' problem """
    new_lines = []
    for line in lines:
        # Words in all caps with a stray lowercase 'l' are OCR errors for 'I'; fix them before lowercasing
        words = line.split()
        for i, word in enumerate(words):
            if word.replace('l', 'I').isupper() and 'l' in word and word != 'I\'ll':
                words[i] = word.replace('l', 'I')
        new_line = ' '.join(words).lower()
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        new_line = new_line.replace(' isd', ' lsd')  # undo the over-correction for 'lsd'
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')

        # Skip empty lines and lines that are just a character's name (a short line ending in ':')
        if len(new_line.strip()) < 1 or (len(words) <= 3 and new_line.strip()[-1] == ':'):
            continue

        # Remove the leading subtitle dash
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0] == "-":
            new_line = new_line[1:]

        # Remove text contained in parentheses or square brackets, and normalise quotes
        new_line = re.sub(r'\([^)]*\)', '', new_line)
        new_line = re.sub(r'\[[^\]]*\]', '', new_line)
        new_line = new_line.replace('"', '\'')

        # Remove stray '#' and '*' symbols
        new_line = new_line.replace('#', '')
        new_line = new_line.replace('*', '')

        new_line = new_line.strip()
        if new_line != "":
            new_lines.append(new_line + '\n')
    return new_lines


def clean_simple_wikipedia(lines):
    """ For simple_wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences. """
    new_lines = []
    next_line_is_article_name = False
    for line in lines:
        # The line following an empty line is an article title; skip both
        if next_line_is_article_name:
            next_line_is_article_name = False
            continue
        if line.strip() == "":
            next_line_is_article_name = True
            continue
        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower()) if s != '']
        new_lines.extend(sentences)
    return new_lines


def clean_switchboard(lines):
    """ For switchboard, we lowercase """
    new_lines = []
    for line in lines:
        new_lines.append(line.lower())
    return new_lines


def clean_wikipedia(lines):
    """ For wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences.
    We also remove lines that seem to be figure names or table entries. """
    new_lines = []
    for line in lines:
        new_line = line.strip()
        words = new_line.split()

        # Skip empty lines and section headings (e.g. "= History =")
        if new_line == "":
            continue
        if new_line[0] == "=" and new_line[-1] == "=":
            continue

        # Skip lines that are entirely numeric or where every word is capitalised,
        # since these tend to be table entries or figure names
        all_numeric = True
        all_uppercase = True
        for word in words:
            if not word.isnumeric():
                all_numeric = False
            if not word[0].isupper():
                all_uppercase = False
        if all_numeric or all_uppercase:
            continue

        sentences = [s + '\n' for s in tokenize.sent_tokenize(new_line.lower()) if s != '']
        new_lines.extend(sentences)
    return new_lines


CLEAN_FUNCTIONS = {
    'aochildes': clean_aochildes,
    'bnc_spoken': clean_bnc_spoken,
    'cbt': clean_cbt,
    'children_stories': clean_children_stories,
    'gutenberg': clean_gutenberg,
    'open_subtitles': clean_open_subtitles,
    'qed': clean_qed,
    'simple_wikipedia': clean_simple_wikipedia,
    'switchboard': clean_switchboard,
    'wikipedia': clean_wikipedia,
}
FOLDERS = ['10M', '100M', 'dev', 'test']

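# The script expects the raw corpora under original/<folder>/<corpus>.txt for each folder
# in FOLDERS, and writes the cleaned copies to the mirrored clean/<folder>/ paths.
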
if __name__ == "__main__":

    # Collect all the .txt files in the original data folders
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"original/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()

        # The corpus name is the file name without its extension
        corpus_name = os.path.basename(file).split('.')[0]

        # Clean the lines if a cleaning function is defined for this corpus
        clean_function = CLEAN_FUNCTIONS.get(corpus_name)
        if clean_function is not None:
            lines = clean_function(lines)

        # Write the cleaned lines to the mirrored path under clean/
        new_file = file.replace('original', 'clean')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        with open(new_file, 'w') as f:
            f.writelines(lines)