diversen committed
Commit 847b6c5 · 1 Parent(s): e8ffb3e
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv
+ prompt.md
README.md CHANGED
@@ -1,3 +1,17 @@
- ---
- license: mit
- ---
+ # leaves of grass
+
+ This is a dataset for training a model to generate text in the style of Walt Whitman's "Leaves of Grass".
+
+ Each training example pairs one or more lines from a poem with the line that follows them, so the model learns to predict the next line of a poem given the previous lines.
+
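+ For illustration, a pair in the generated `train.json` looks roughly like this (the keys come from `generate_data.py`; the lines shown are the opening of "Song of Myself" and are indicative only, since the exact pairs depend on the chunking settings):
+
+ ```json
+ {
+   "input": "I celebrate myself, and sing myself,\nAnd what I assume you shall assume,\n",
+   "output": "For every atom belonging to me as good belongs to you.\n"
+ }
+ ```
+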
+ The [generate_data.py](generate_data.py) script generates the dataset: it takes a text file of the poem and produces pairs in the format described above.
+
+ The script keeps some of the original formatting. After each poem title there is a double newline character, and each block of a poem is also separated by a double newline character.
+
+ Every line in a block is separated by a single newline character, and tabs are used for poetic indentation.
+ Overall, the original formatting is kept but made more machine-readable.
+
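+ Schematically, the cleaned file therefore looks like this (placeholder lines, not verbatim text):
+
+ ```text
+ A POEM TITLE
+
+ First line of the first block,
+ 	an indented continuation line,
+ last line of the first block.
+
+ First line of the second block...
+ ```
+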
+ ## Usage
+
+     python generate_data.py
+
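+ Once generated, `train.json` can be loaded for training. A minimal sketch (it only assumes `generate_data.py` has been run in the current directory):
+
+ ```python
+ import json
+
+ # Each entry has an "input" (context lines) and an "output" (the next line).
+ with open("train.json", "r", encoding="utf-8") as f:
+     pairs = json.load(f)
+
+ print(pairs[0]["input"])
+ print(pairs[0]["output"])
+ ```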
generate_data.py ADDED
@@ -0,0 +1,127 @@
+ import re
+ import json
+
+
+ def clean_text_file(input_file, output_file):
+     """
+     Cleans the text file by:
+     - Removing lines that start with 'BOOK' or contain only numbers,
+     - Substituting two leading spaces with a single tab,
+     - Normalizing all leading whitespace to no more than two tabs of indentation,
+     - Splitting the text into chunks separated by at least 3 newlines.
+     """
+     with open(input_file, "r", encoding="utf-8") as infile:
+         lines = infile.readlines()
+
+     cleaned_lines = []
+
+     for line in lines:
+         stripped_line = line.strip()
+         # Skip book headings and bare stanza numbers
+         if stripped_line.startswith("BOOK") or stripped_line.isdigit():
+             continue
+
+         line = re.sub(r"^  ", "\t", line)
+         line = re.sub(r"^[\t ]+", normalize_whitespace, line)
+
+         # Remove a single leading tab if present
+         if line.startswith("\t"):
+             line = line[1:]
+         cleaned_lines.append(line)
+
+     cleaned_text = "".join(cleaned_lines)
+     chunks = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]
+
+     with open(output_file, "w", encoding="utf-8") as outfile:
+         outfile.writelines(cleaned_lines)
+
+     return chunks
+
+
+ def normalize_whitespace(match):
+     # Treat one tab as two spaces, then emit at most two tabs of indentation
+     whitespace = match.group(0).replace("\t", "  ")
+     tab_count = len(whitespace) // 2
+     return "\t" * min(tab_count, 2)
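+
+ # Illustrative: a leading "\t  " (one tab plus two spaces) expands to four
+ # spaces, so tab_count is 2 and the indentation is normalized to two tabs.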
+
+
+ def chunk_poem_by_lines(lines, max_chunk_lines, overlap_lines):
+     """
+     Split a poem into chunks of manageable size, preserving lines and
+     overlapping consecutive chunks for continuity.
+     """
+     chunks = []
+     # Step by (max_chunk_lines - overlap_lines) so consecutive chunks share lines
+     for i in range(0, len(lines), max_chunk_lines - overlap_lines):
+         chunk = lines[i : i + max_chunk_lines]
+         chunks.append(chunk)
+     return chunks
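+
+ # Illustrative: with max_chunk_lines=4 and overlap_lines=2, a 6-line poem
+ # yields the chunks lines[0:4], lines[2:6] and lines[4:6].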
+
+
+ def generate_training_pairs(
+     poems, max_context_lines=10, max_chunk_lines=20, overlap_lines=10
+ ):
+     """
+     Generate input-output training pairs for poetry generation, using line-based chunking.
+     max_context_lines: The maximum number of lines to use as context for the next line.
+     max_chunk_lines: The maximum number of lines in a single chunk.
+     overlap_lines: The number of lines to overlap between chunks for continuity.
+     """
+     training_data = []
+
+     for poem in poems:
+         lines = poem.splitlines(keepends=True)
+
+         # Chunk the poem into manageable pieces
+         chunks = chunk_poem_by_lines(
+             lines, max_chunk_lines=max_chunk_lines, overlap_lines=overlap_lines
+         )
+
+         for chunk in chunks:
+             for i in range(1, len(chunk)):
+                 input_lines = "".join(chunk[max(0, i - max_context_lines) : i])
+                 output_line = chunk[i]  # The next line to predict
+                 training_data.append({"input": input_lines, "output": output_line})
+
+     return training_data
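+
+ # Illustrative: with max_context_lines=2, a chunk [a, b, c] yields the pairs
+ # (a -> b) and (a + b -> c).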
+
+
+ # Example usage
+ poems = clean_text_file("leaves-of-grass-original.txt", "leaves-of-grass-cleaned.txt")
+
+ # 400 poems
+ print(f"Number of poems: {len(poems)}")
+
+ # poems = poems[25:26]  # For testing purposes. Long poem.
+
+ # Compact Poetry Model
+ max_context_lines = 5
+ max_chunk_lines = 10
+ overlap_lines = 2
+
+ """
+ # Narrative Poetry Model
+ max_context_lines = 10
+ max_chunk_lines = 20
+ overlap_lines = 5
+ """
+
+ """
+ # Epic or Free Verse Poetry
+ max_context_lines = 20
+ max_chunk_lines = 50
+ overlap_lines = 10
+ """
+
+ training_data = generate_training_pairs(
+     poems,
+     max_context_lines=max_context_lines,
+     max_chunk_lines=max_chunk_lines,
+     overlap_lines=overlap_lines,
+ )
+
+ print(f"Number of training pairs: {len(training_data)}")
+
+ # Write the training data to a file
+ with open("train.json", "w", encoding="utf-8") as outfile:
+     json.dump(training_data, outfile, indent=2, ensure_ascii=False)
leaves-of-grass-cleaned.txt ADDED
The diff for this file is too large to render. See raw diff
 
leaves-of-grass-original.txt ADDED
The diff for this file is too large to render. See raw diff
 
train.json ADDED
The diff for this file is too large to render. See raw diff