# leaves-of-grass / generate_data.py
import re
import json


def clean_text_file(input_file):
    """
    Cleans the text file by:
    - Removing lines that start with 'BOOK' or contain only numbers,
    - Substituting two leading spaces with a single tab,
    - Normalizing all leading whitespace to no more than two tabs of indentation,
    - Splitting the text into chunks separated by at least 3 newlines.
    """
    with open(input_file, "r", encoding="utf-8") as infile:
        lines = infile.readlines()

    cleaned_lines = []
    for line in lines:
        stripped_line = line.strip()
        # Skip book headings and lines that contain only a number
        if stripped_line.startswith("BOOK") or stripped_line.isdigit():
            continue
        # Substitute two leading spaces with a single tab
        line = re.sub(r"^  ", "\t", line)
        # Normalize all remaining leading whitespace to at most two tabs
        line = re.sub(r"^[\t ]+", normalize_whitespace, line)
        # Remove a single leading tab if present
        if line.startswith("\t"):
            line = line[1:]
        cleaned_lines.append(line)

    cleaned_text = "".join(cleaned_lines)
    # Split the text into poems on runs of at least three newlines
    poems = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]
    # In each poem, convert any double newlines to single newlines
    poems = [re.sub(r"\n\n", "\n", poem) for poem in poems]
    return poems


def normalize_whitespace(match):
    # Treat each tab as two spaces, then emit one tab per two spaces of
    # indentation, capped at two tabs.
    whitespace = match.group(0).replace("\t", "  ")
    tab_count = len(whitespace) // 2
    return "\t" * min(tab_count, 2)


def create_training_data(poems, max_lines=7):
    """Builds input/output pairs: each line of a poem is paired with up to
    max_lines of the lines that follow it."""
    training_pairs = []
    for poem in poems:
        lines = poem.split("\n")
        for i in range(len(lines) - 1):
            prompt = lines[i]
            continuation = "\n".join(lines[i + 1:i + 1 + max_lines])
            if continuation.strip():  # Skip empty continuations
                training_pairs.append({
                    "input": prompt,
                    "output": continuation,
                })
    return training_pairs
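
# Shape of a single training pair produced above (illustrative values, assuming
# the poem begins with the opening lines of "Song of Myself"):
# {
#     "input": "I celebrate myself, and sing myself,",
#     "output": "And what I assume you shall assume,\nFor every atom belonging to me as good belongs to you."
# }
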
# Example usage
poems = clean_text_file("leaves-of-grass-original.txt")
training_pairs = create_training_data(poems, max_lines=10)
print(f"Number of training pairs: {len(training_pairs)}")
with open("train.json", "w", encoding="utf-8") as outfile:
    json.dump(training_pairs, outfile, indent=4, ensure_ascii=False)
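
# Optional sanity check (a minimal sketch; assumes train.json was just written
# above): reload the file and print the first pair to verify the structure.
with open("train.json", "r", encoding="utf-8") as infile:
    pairs = json.load(infile)
print(f"First pair: {pairs[0]}")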