""" Script used to clean the data. """

import os
import re
from nltk import tokenize
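
# NOTE: tokenize.sent_tokenize (used below) requires the NLTK 'punkt' model.
# If it is missing, download it once with nltk.download('punkt').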

def clean_aochildes(lines):
    """ For aochildes, we just remove the space between the punctuation mark and the final word """
    new_lines = []
    for line in lines:
        new_lines.append(line[:-3] + line[-2:])
    return new_lines
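
# Illustrative example (hypothetical input, assuming lines end in " <punct>\n"):
#   clean_aochildes(["where is the ball ?\n"]) -> ["where is the ball?\n"]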

def clean_bnc_spoken(lines):
    """ For bnc_spoken, we lowercase """
    new_lines = []
    for line in lines:
        new_lines.append(line.lower())
    return new_lines

def clean_cbt(lines):
    """ For cbt, we lowercase and normalise punctuation """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", 'β€œ', '”', 'β€”', '–']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Normalise the various back-tick and straight-quote conventions to plain double quotes
        new_line = new_line.replace(": ' ", ":  \"")
        new_line = new_line.replace("''", "\"")
        new_line = new_line.replace(" '\n", "\"\n")
        new_line = new_line.replace(" ' ", "\" ")
        new_line = new_line.replace(" `` ", "  \"")
        new_line = new_line.replace("` ", " \"")
        new_line = new_line.replace("`", "\"")
        new_line = new_line.replace("’", "\"")
        # Remove the space before each punctuation mark
        for punct in punctuation:
            new_line = new_line.replace(f" {punct}", punct)
        new_lines.append(new_line)
    return new_lines
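
# Illustrative example (hypothetical input):
#   clean_cbt(["Where is it ?\n"]) -> ["where is it?\n"]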

def clean_children_stories(lines):
    """ For children_stories, we lowercase and split long lines into sentences """
    new_lines = []
    for line in lines:
        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower().strip()) if s != '']
        new_lines.extend(sentences)
    return new_lines
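
# Illustrative example (hypothetical input):
#   clean_children_stories(["Once upon a time. The end.\n"])
#   -> ["once upon a time.\n", "the end.\n"]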

def clean_gutenberg(lines):
    """ For gutenberg, we lowercase, remove italics, group lines into paragraphs and then split into sentences """
    # Get paragraphs
    paragraphs = []
    paragraph = ""
    for line in lines:
        # Remove italics markers
        tmp_line = line.lower().strip().replace('_','')
        if tmp_line == "" and paragraph != "":
            paragraphs.append(paragraph[:-1] + '\n')
            paragraph = ""
        else:
            paragraph += tmp_line + " "
    # Flush the final paragraph in case the file does not end with a blank line
    if paragraph.strip() != "":
        paragraphs.append(paragraph[:-1] + '\n')

    # Split into sentences using NLTK
    new_lines = []
    for paragraph in paragraphs:
        sentences = [s + '\n' for s in tokenize.sent_tokenize(paragraph) if s != '']
        new_lines.extend(sentences)
    return new_lines
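
# Illustrative example (hypothetical input): consecutive non-blank lines form a
# paragraph, italics markers are stripped, and the paragraph is re-split:
#   clean_gutenberg(["_It_ was dark.\n", "Very dark.\n", "\n"])
#   -> ["it was dark.\n", "very dark.\n"]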

def clean_open_subtitles(lines):
    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem """
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", 'β€œ', '”', 'β€”', '–', ' ', '\n']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Remove subtitle dashes
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0:1] == "-":
            new_line = new_line[1:]
        # Pad with a leading space so the first word is matched like any other
        new_line = ' ' + new_line
        # Fix word-initial lowercase 'l' standing in for 'i' (an OCR-style artefact)
        for punct in punctuation:
            new_line = new_line.replace(f" l{punct}", f" i{punct}")
            new_line = new_line.replace(f" lm{punct}", f" im{punct}")
            new_line = new_line.replace(f" lf{punct}", f" if{punct}")
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        # ' ls' -> ' is' wrongly turns 'lsd' into 'isd'; undo that case
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')
        new_lines.append(new_line.strip() + '\n')
    return new_lines
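
# Illustrative example (hypothetical input):
#   clean_open_subtitles(["- l think so\n"]) -> ["i think so\n"]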

def clean_qed(lines):
    """ For qed, we lowercase and normalise punctuation, remove words contained in parentheses,
    remove lines that arejust character's names and fix the lowercase 'l' problem"""

    new_lines = []
    for line in lines:
        # Before lowercasing, fix all-uppercase words where a lowercase 'l' stands in for 'I'
        words = line.split()
        for i, word in enumerate(words):
            if word.replace('l','I').isupper() and 'l' in word and word != 'I\'ll':
                words[i] = word.replace('l', 'I')
        new_line = ' '.join(words).lower()
        # Fix the word-initial lowercase 'l' problem, as in clean_open_subtitles
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        # ' ls' -> ' is' wrongly turns 'lsd' into 'isd'; undo that case
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')

        # Skip lines that are just character names, e.g. "AMY GOODMAN:"
        if len(new_line.strip()) < 1 or (len(words) <= 3 and new_line.strip()[-1] == ':'):
            continue

        # Remove subtitle dashes
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0:1] == "-":
            new_line = new_line[1:]

        # Remove substrings within round or square parentheses (screen descriptions)
        new_line = re.sub(r'\([^)]*\)', '', new_line)
        new_line = re.sub(r'\[[^\]]*\]', '', new_line)
        new_line = new_line.replace('"', '\'')

        # Remove strange characters
        new_line = new_line.replace('#','')
        new_line = new_line.replace('*','')

        new_line = new_line.strip()
        if new_line != "":
            new_lines.append(new_line + '\n')
    return new_lines
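
# Illustrative example (hypothetical input):
#   clean_qed(["(applause) THANK YOU\n"]) -> ["thank you\n"]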

def clean_simple_wikipedia(lines):
    """ For simple_wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences."""
    new_lines = []
    next_line_is_article_name = False
    for line in lines:
        if next_line_is_article_name:
            next_line_is_article_name = False
            continue
        if line.strip() == "":
            next_line_is_article_name = True
            continue
        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower()) if s != '']
        new_lines.extend(sentences)
    return new_lines
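
# Illustrative example (hypothetical input): the line after a blank line is
# treated as an article title and dropped:
#   clean_simple_wikipedia(["\n", "Dogs\n", "Dogs are mammals. They bark.\n"])
#   -> ["dogs are mammals.\n", "they bark.\n"]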

def clean_switchboard(lines):
    """ For switchboard, we lowercase """
    new_lines = []
    for line in lines:
        new_line = line.lower()
        new_lines.append(new_line)
    return new_lines

def clean_wikipedia(lines):
    """ For wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences.
     We also remove lines that seem to be figure names or table entries. """
    new_lines = []
    for line in lines:
        new_line = line.strip()
        words = new_line.split()
        
        # Remove empty lines and article names
        if new_line == "":
            continue
        if new_line[0] == "=" and new_line[-1] == "=":
            continue

        # Filter out lines that seem to be figure names or table entries:
        # every word numeric, or every word starting with a capital (title-case)
        all_numeric = True
        all_uppercase = True
        for word in words:
            if not word.isnumeric():
                all_numeric = False
            if not word[0].isupper():
                all_uppercase = False
        if all_numeric or all_uppercase:
            continue

        # Split into sentences using NLTK
        sentences = [s + '\n' for s in tokenize.sent_tokenize(new_line.lower()) if s != '']
        new_lines.extend(sentences)
    return new_lines
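
# Illustrative example (hypothetical input): headings, numeric rows and
# title-case lines are dropped, everything else is sentence-split:
#   clean_wikipedia(["= History =\n", "1 2 3\n", "The city grew. It prospered.\n"])
#   -> ["the city grew.\n", "it prospered.\n"]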

CLEAN_FUNCTIONS = {
    'aochildes': clean_aochildes,
    'bnc_spoken': clean_bnc_spoken,
    'cbt': clean_cbt,
    'children_stories': clean_children_stories,
    'gutenberg': clean_gutenberg,
    'open_subtitles': clean_open_subtitles,
    'qed': clean_qed,
    'simple_wikipedia': clean_simple_wikipedia,
    'switchboard': clean_switchboard,
    'wikipedia': clean_wikipedia,
}
FOLDERS = ['10M', '100M', 'dev', 'test']
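
# Usage sketch (the script filename is an assumption): run from the directory
# that contains the BabyLM data, e.g.
#   python clean_data.py
# Reads .txt corpora from original/{10M,100M,dev,test} and writes cleaned
# copies to the mirrored paths under clean/.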

if __name__ == "__main__":

    # Collect all .txt files from the original BabyLM folders
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"original/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()

        # Get the corpus name from the file name, e.g. aochildes.txt -> aochildes
        corpus_name = os.path.basename(file).split('.')[0]

        # Clean the data, skipping corpora without a registered cleaning function
        clean_fn = CLEAN_FUNCTIONS.get(corpus_name)
        if clean_fn is not None:
            lines = clean_fn(lines)

        # Write the cleaned file to the mirrored path under clean/
        new_file = file.replace('original', 'clean')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        with open(new_file, 'w') as f:
            f.writelines(lines)