File size: 2,157 Bytes
847b6c5
 
 
 
619727c
847b6c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
619727c
847b6c5
619727c
 
 
847b6c5
 
 
 
 
 
 
 
619727c
 
847b6c5
619727c
 
 
 
 
 
 
 
 
 
847b6c5
 
619727c
 
847b6c5
619727c
 
847b6c5
619727c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import re
import json


def clean_text_file(input_file):
    """
    Clean a poetry text file and split it into poem chunks.

    Processing steps, in order:
    - Drop lines that (after stripping) start with "BOOK" or consist only
      of digits (section headers and standalone numbers).
    - Replace a single leading space with a tab.  NOTE(review): the original
      docstring said "two leading spaces", but the regex replaces exactly one;
      combined with the normalization below this rounds an odd leading-space
      count up to the next indentation level (e.g. 3 spaces end up one level
      deeper than 2 spaces) — presumably intentional for this source text.
    - Normalize each remaining leading-whitespace run to at most two tabs
      (a tab counts as two spaces; depth = spaces // 2, capped at 2).
    - Strip one leading tab, so the final indent is zero or one tab.
    - Split the cleaned text into chunks on runs of three newlines, then
      collapse double newlines inside each chunk.

    Parameters
    ----------
    input_file : str
        Path to the UTF-8 text file to clean.

    Returns
    -------
    list[str]
        Non-empty, stripped poem chunks.
    """

    def _leading_ws_to_tabs(match):
        # Expand tabs to two spaces so mixed runs are measured uniformly,
        # then emit one tab per two spaces, capped at two tabs.
        expanded = match.group(0).replace("\t", "  ")
        return "\t" * min(len(expanded) // 2, 2)

    with open(input_file, "r", encoding="utf-8") as infile:
        lines = infile.readlines()

    cleaned_lines = []

    for line in lines:
        stripped_line = line.strip()
        # Skip "BOOK ..." headers and number-only lines.
        if stripped_line.startswith("BOOK") or stripped_line.isdigit():
            continue

        line = re.sub(r"^ ", "\t", line)
        line = re.sub(r"^[\t ]+", _leading_ws_to_tabs, line)

        # Remove a single leading tab if present
        if line.startswith("\t"):
            line = line[1:]
        cleaned_lines.append(line)

    cleaned_text = "".join(cleaned_lines)
    # Poems are separated by at least three consecutive newlines.
    poems = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]

    # in each poem convert any double newlines to single newlines
    poems = [re.sub(r"\n\n", "\n", poem) for poem in poems]
    return poems


def normalize_whitespace(match):
    """
    Regex-replacement callback: collapse a leading-whitespace run into tabs.

    A tab is counted as two spaces; the run yields one tab per two spaces
    of total width, capped at two tabs.

    Parameters
    ----------
    match : re.Match
        Match whose group(0) is the run of spaces/tabs to normalize.

    Returns
    -------
    str
        Zero, one, or two tab characters.
    """
    expanded = match.group(0).replace("\t", "  ")
    depth = min(len(expanded) // 2, 2)
    return "\t" * depth


def create_training_data(poems, max_lines=7):
    """
    Build prompt/continuation pairs from a list of poems.

    For every line of every poem (except the last), the line becomes the
    prompt and up to ``max_lines`` following lines, joined by newlines,
    become the continuation.  Pairs whose continuation is blank are skipped.

    Parameters
    ----------
    poems : list[str]
        Poems as newline-separated strings.
    max_lines : int
        Maximum number of continuation lines per pair (default 7).

    Returns
    -------
    list[dict]
        Dicts with "input" (prompt line) and "output" (continuation) keys.
    """
    pairs = []
    for poem in poems:
        poem_lines = poem.split("\n")
        for idx, prompt in enumerate(poem_lines[:-1]):
            window = poem_lines[idx + 1:idx + 1 + max_lines]
            completion = "\n".join(window)
            if not completion.strip():
                continue  # skip empty continuations
            pairs.append({"input": prompt, "output": completion})
    return pairs


def main():
    """
    Script entry point: clean the Leaves of Grass source text, build
    prompt/continuation training pairs, and write them to train.json.
    """
    poems = clean_text_file("leaves-of-grass-original.txt")

    training_pairs = create_training_data(poems, max_lines=10)
    print(f"Number of training pairs: {len(training_pairs)}")

    with open("train.json", "w", encoding="utf-8") as outfile:
        json.dump(training_pairs, outfile, indent=4, ensure_ascii=False)


# Guard so importing this module no longer runs the pipeline as a side effect.
if __name__ == "__main__":
    main()