# BabyLM / clean_data.py
""" Script used to clean the data. """
import os
import re
def clean_aochildes(lines):
    """ For aochildes, we remove the space between the final word and the punctuation mark and join together every 5 lines """
    new_lines = []
    joined = []
    for i, line in enumerate(lines):
        # Drop the space before the final punctuation mark (lines end in e.g. " ?\n")
        new_line = line[:-3] + line[-2:]
        joined.append(new_line.strip())
        if (i + 1) % 5 == 0:
            new_lines.append(" ".join(joined) + "\n")
            joined = []
    return new_lines

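# Illustrative example for clean_aochildes (made-up input, not from the corpus):
#   >>> clean_aochildes(["do you want it ?\n"] * 5)
#   ["do you want it? do you want it? do you want it? do you want it? do you want it?\n"]
# The space before the final "?" is dropped and the five utterances are joined into one line.
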
def clean_bnc_spoken(lines):
    """ For bnc_spoken, we lowercase and remove empty lines """
    new_lines = []
    for line in lines:
        new_line = line.lower()
        if new_line != '\n':
            new_lines.append(new_line)
    return new_lines

def clean_cbt(lines):
    """ For cbt, we lowercase and normalise punctuation """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", '“', '”', '—', '–']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Normalise the various quote markers to plain double quotes
        new_line = new_line.replace(": ' ", ": \"")
        new_line = new_line.replace("''", "\"")
        new_line = new_line.replace(" '\n", "\"\n")
        new_line = new_line.replace(" ' ", "\" ")
        new_line = new_line.replace(" `` ", " \"")
        new_line = new_line.replace("` ", " \"")
        new_line = new_line.replace("`", "\"")
        new_line = new_line.replace("’", "\"")
        # Remove the space before each punctuation mark
        for punct in punctuation:
            new_line = new_line.replace(f" {punct}", punct)
        new_lines.append(new_line)
    return new_lines

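# Illustrative example for clean_cbt (made-up input): a line such as
#   he said , `` hello ! ''
# becomes
#   he said,"hello!"
# The `` and '' quote markers are turned into plain double quotes, and the spaces before
# punctuation (including the opening quote, since '"' is in the punctuation list) are removed.
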
def clean_children_stories(lines):
    """ For children_stories, we lowercase and remove empty lines """
    new_lines = []
    for line in lines:
        new_line = line.lower().strip()
        if new_line != '':
            new_lines.append(new_line + "\n")
    return new_lines

def clean_gutenberg(lines):
    """ For gutenberg, we lowercase, remove italics markers and group lines into paragraphs.
    We also remove any paragraphs containing markers such as '*' or 'p.' (figures, footnotes, page references). """
    # Get paragraphs
    paragraphs = []
    paragraph = ""
    for line in lines:
        # Remove italics markers
        tmp_line = line.lower().strip().replace('_', '')
        if tmp_line == "" and paragraph != "":
            # Remove paragraphs with fewer than 3 words and those that end in a number (probably part of a bibliography)
            if len(paragraph.split()) > 2 and not paragraph.split()[-1][-1].isnumeric():
                paragraphs.append(paragraph[:-1] + '\n')
            paragraph = ""
        else:
            paragraph += tmp_line + " "
    # Bad characters - gutenberg has a lot of figures, footnotes, chapter names etc. that we want to remove.
    # The multi-space entries are assumed; they catch table- and figure-like layouts where columns
    # are separated by runs of spaces.
    bad_chars = ['*', 'p.', '=', '|', '[', ']', '    ', '  ', 'v.']
    new_lines = [p.strip() + '\n' for p in paragraphs
                 if not any(c in p for c in bad_chars) and p != '' and p != '\n' and p[0] != '(']
    return new_lines

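# Illustrative example for clean_gutenberg (made-up input): the lines
#   ["it was a dark and\n", "stormy night.\n", "\n"]
# are grouped into the single paragraph "it was a dark and stormy night.", while paragraphs
# of fewer than three words, paragraphs ending in a number, and paragraphs containing any of
# the bad_chars markers are dropped.
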
def clean_open_subtitles(lines):
    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem. We also join every 5 lines. """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", '“', '”', '—', '–', ' ', '\n']
    new_lines = []
    joined = []
    count = 0
    for line in lines:
        new_line = line.lower()
        # Skip music lines and lines containing brackets or the left-to-right mark
        if '♪' in new_line or '[' in new_line or ']' in new_line or '\u200e' in new_line:
            continue
        # Remove subtitle dashes
        if new_line[0:2] in ["- ", "– ", "— "]:
            new_line = new_line[2:]
        if new_line[0] in ["-", "–", "—"]:
            new_line = new_line[1:]
        # Fix the lowercase 'l' problem (an OCR-style confusion of 'I' and 'l')
        new_line = ' ' + new_line
        for punct in punctuation:
            new_line = new_line.replace(f" l{punct}", f" i{punct}")
            new_line = new_line.replace(f" lm{punct}", f" im{punct}")
            new_line = new_line.replace(f" lf{punct}", f" if{punct}")
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')
        if new_line.strip() != '':
            joined.append(new_line.strip())
            count += 1
            if count % 5 == 0:
                new_lines.append(" ".join(joined) + '\n')
                joined = []
    return new_lines

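# Illustrative example for clean_open_subtitles (made-up input):
#   >>> clean_open_subtitles(["- l'm sorry.\n"] * 5)
#   ["i'm sorry. i'm sorry. i'm sorry. i'm sorry. i'm sorry.\n"]
# The leading subtitle dash is stripped, the OCR-style " l'" is corrected to " i'",
# and the five cleaned lines are joined into one output line.
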
def clean_qed(lines):
    """ For qed, we lowercase and normalise punctuation, remove words contained in parentheses,
    remove lines that are just character names and fix the lowercase 'l' problem. We also join every 5 lines. """
    new_lines = []
    count = 0
    joined = []
    for line in lines:
        # Before lowercasing, check if the words in the line are uppercase containing lowercase 'l' instead of 'I' and fix accordingly
        words = line.split()
        for i, word in enumerate(words):
            if word.replace('l', 'I').isupper() and 'l' in word and word != 'I\'ll':
                words[i] = word.replace('l', 'I')
        new_line = ' '.join(words).lower()
        # Fix the lowercase 'l' problem
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')
        # Strip leftover HTML markup and entities
        new_line = new_line.replace('>', '')
        new_line = new_line.replace('<i', '')
        new_line = new_line.replace('</i', '')
        new_line = new_line.replace('>i', '')
        new_line = new_line.replace('>/i', '')
        new_line = new_line.replace('&gt', '')
        new_line = new_line.replace('&lt', '')
        new_line = new_line.replace('&', '')
        # Skip lines that are just character names, e.g. "AMY GOODMAN:"
        if len(new_line.strip()) < 1 or (len(words) <= 3 and new_line.strip()[-1] == ':'):
            continue
        # Remove subtitle dashes
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0] == "-":
            new_line = new_line[1:]
        # Remove substrings contained within round or square parentheses (screen descriptions)
        new_line = re.sub(r'\([^)]*\)', '', new_line)
        new_line = re.sub(r'\[[^\]]*\]', '', new_line)
        new_line = new_line.replace('"', '\'')
        # Remove strange characters
        new_line = new_line.replace('#', '')
        new_line = new_line.replace('*', '')
        new_line = new_line.strip()
        if new_line != "":
            joined.append(new_line)
            count += 1
            if count % 5 == 0:
                new_lines.append(" ".join(joined) + '\n')
                joined = []
    return new_lines

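# Illustrative example for clean_qed (made-up input): in the line
#   DAVID: Well, l think (APPLAUSE) we should go.
# the lone word "l" is repaired to "I" before lowercasing, the parenthesised screen
# description "(applause)" is removed, and the line is kept because it has more than
# three words; a line consisting only of "DAVID:" would be skipped as a character name.
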
def clean_simple_wikipedia(lines):
    """ For simple_wikipedia, we lowercase, remove empty lines and article names. """
    new_lines = []
    next_line_is_article_name = False
    for line in lines:
        if next_line_is_article_name:
            next_line_is_article_name = False
            continue
        if line.strip() == "":
            next_line_is_article_name = True
            continue
        if len(line.split()) > 2:
            new_lines.append(line.lower())
    return new_lines

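# Note: clean_simple_wikipedia assumes the dump puts each article title on the line after a
# blank line, so that line is dropped as an article name, along with any line of two words
# or fewer.
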
def clean_switchboard(lines):
    """ For switchboard, we lowercase and join every 5 lines. """
    new_lines = []
    count = 0
    joined = []
    for line in lines:
        new_line = line.lower().strip()
        joined.append(new_line)
        count += 1
        if count % 5 == 0:
            new_lines.append(" ".join(joined) + '\n')
            joined = []
    return new_lines

def clean_wikipedia(lines):
    """ For wikipedia, we lowercase and remove empty lines and article names.
    We also remove lines that seem to be figure names or table entries. """
    new_lines = []
    for line in lines:
        new_line = line.strip()
        words = new_line.split()
        # Remove empty lines and article names
        if new_line == "":
            continue
        if new_line[0] == "=" and new_line[-1] == "=":
            continue
        # Filter out lines that seem to be figure names or table entries
        all_numeric = True
        all_uppercase = True
        for word in words:
            if not word.isnumeric():
                all_numeric = False
            if not word[0].isupper():
                all_uppercase = False
        if all_numeric or all_uppercase:
            continue
        new_lines.append(new_line.lower().strip() + '\n')
    return new_lines

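# Illustrative example for clean_wikipedia (made-up input): "= = Early life = =" (a section
# heading), "1990 1991 1992" (all numeric, probably a table row) and "Figure Three The Battle"
# (every word capitalised, probably a caption) are all dropped; ordinary sentences are
# lowercased and kept.
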
CLEAN_FUNCTIONS = {
    'aochildes': clean_aochildes,
    'bnc_spoken': clean_bnc_spoken,
    'cbt': clean_cbt,
    'children_stories': clean_children_stories,
    'gutenberg': clean_gutenberg,
    'open_subtitles': clean_open_subtitles,
    'qed': clean_qed,
    'simple_wikipedia': clean_simple_wikipedia,
    'switchboard': clean_switchboard,
    'wikipedia': clean_wikipedia,
}
FOLDERS = ['10M', '100M', 'dev', 'test']

if __name__ == "__main__":
    # Gather all text files from the "original" directory
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"original/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()
        # Get the corpus name
        corpus_name = os.path.basename(file).split('.')[0]
        # Clean the data
        if CLEAN_FUNCTIONS[corpus_name] is not None:
            lines = CLEAN_FUNCTIONS[corpus_name](lines)
        # Replace multiple spaces with a single space and drop empty lines
        lines = [re.sub(' +', ' ', line) for line in lines if line.strip() != '']
        # Write the new file
        new_file = file.replace('original', 'clean')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        with open(new_file, 'w') as f:
            # Save the file name as the first line, so we can later recover the original file names
            f.write(new_file.split('/')[-1] + '\n')
            f.writelines(lines)
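
# Usage sketch (assumed layout, mirroring the paths used above): run this script from a
# directory containing original/10M, original/100M, original/dev and original/test, each
# holding one .txt file per corpus whose stem matches a CLEAN_FUNCTIONS key, e.g.
# original/10M/qed.txt. The cleaned copies are written to the same structure under clean/,
# with the file name saved as the first line of each cleaned file:
#   python clean_data.py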