alter input and output
- generate_data.py +23 -83
- leaves-of-grass-cleaned.txt +0 -0
- leaves-of-grass-original.txt +1 -1
- train.json +0 -0
generate_data.py
CHANGED
@@ -2,7 +2,7 @@ import re
 import json
 
 
-def clean_text_file(input_file, output_file):
+def clean_text_file(input_file):
     """
     Cleans the text file by:
     - Removing lines that start with 'BOOK' or contain only numbers,
@@ -29,12 +29,11 @@ def clean_text_file(input_file, output_file):
         cleaned_lines.append(line)
 
     cleaned_text = "".join(cleaned_lines)
-    …
+    poems = [chunk.strip() for chunk in cleaned_text.split("\n\n\n") if chunk.strip()]
 
-    …
-    …
-    …
-    return chunks
+    # in each poem convert any double newlines to single newlines
+    poems = [re.sub(r"\n\n", "\n", poem) for poem in poems]
+    return poems
 
 
 def normalize_whitespace(match):
@@ -43,85 +42,26 @@ def normalize_whitespace(match):
     return "\t" * min(tab_count, 2)
 
 
-def chunk_poem_by_lines(lines, max_chunk_lines, overlap_lines):
-    """
-    Split a poem into chunks of manageable size, preserving lines and overlap for continuity.
-    """
-    chunks = []
-    for i in range(0, len(lines), max_chunk_lines - overlap_lines):
-        chunk = lines[i : i + max_chunk_lines]
-        chunks.append(chunk)
-    return chunks
-
-
-def generate_training_pairs(
-    poems, max_context_lines=10, max_chunk_lines=20, overlap_lines=10
-):
-    """
-    Generate input-output training pairs for poetry generation, using line-based chunking.
-    max_context_lines: The maximum number of lines to consider as context for the next line.
-    max_chunk_lines: The maximum number of lines to consider in a single chunk.
-    overlap_lines: The number of lines to overlap between chunks for continuity.
-
-
-    """
-    training_data = []
-
+def create_training_data(poems, max_lines=7):
+    training_pairs = []
     for poem in poems:
-        lines = poem.…
+        lines = poem.split("\n")
+        for i in range(len(lines) - 1):
+            prompt = lines[i]
+            continuation = "\n".join(lines[i + 1:i + 1 + max_lines])
+            if continuation.strip():  # Skip empty continuations
+                training_pairs.append({
+                    "input": prompt,
+                    "output": continuation
+                })
+    return training_pairs
 
-        # Chunk the poem into manageable pieces
-        chunks = chunk_poem_by_lines(
-            lines, max_chunk_lines=max_chunk_lines, overlap_lines=overlap_lines
-        )
 
-        for chunk in chunks:
-            for i in range(1, len(chunk)):
-                input_lines = "".join(chunk[max(0, i - max_context_lines) : i])
-                output_line = chunk[i]  # The next line to predict
-                training_data.append({"input": input_lines, "output": output_line})
-
-    return training_data
+# Example usage
+poems = clean_text_file("leaves-of-grass-original.txt")
 
+training_pairs = create_training_data(poems, max_lines=10)
+print(f"Number of training pairs: {len(training_pairs)}")
 
-…
-…
-…
-# 400 poems
-print(f"Number of poems: {len(poems)}")
-
-# TODO: Implement generate_training_pairs
-# poems = poems[25:26]  # For testing purposes. Long poem.
-
-
-# Compact Poetry Model
-max_context_lines = 5
-max_chunk_lines = 10
-overlap_lines = 2
-
-"""
-# Narrative Poetry Model
-max_context_lines = 10
-max_chunk_lines = 20
-overlap_lines = 5
-"""
-
-"""
-# Epic or Free Verse Poetry
-max_context_lines = 20
-max_chunk_lines = 50
-overlap_lines = 10
-"""
-
-training_data = generate_training_pairs(
-    poems,
-    max_context_lines=max_context_lines,
-    max_chunk_lines=max_chunk_lines,
-    overlap_lines=overlap_lines,
-)
-
-print(f"Number of training pairs: {len(training_data)}")
-
-# write the training data to a file
-with open("train.json", "w") as outfile:
-    json.dump(training_data, outfile, indent=2, ensure_ascii=False)
+with open("train.json", "w", encoding="utf-8") as outfile:
+    json.dump(training_pairs, outfile, indent=4, ensure_ascii=False)
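For a quick sanity check, here is a minimal sketch (not part of the commit) that inlines a tiny sample in place of leaves-of-grass-original.txt and replays the new pipeline: the poem split added to the end of clean_text_file, then create_training_data as committed. The sample strings are invented for illustration.

import json
import re

# Stand-in for the cleaned text that clean_text_file builds just before
# its new tail: poems separated by triple newlines, with a stray double
# newline left inside the second poem.
raw = (
    "Come, said my soul,\n"
    "Such verses for my Body let us write, (for we are one,)"
    "\n\n\n"
    "I celebrate myself, and sing myself,\n\n"
    "And what I assume you shall assume,"
)

# The two list comprehensions added to clean_text_file:
poems = [chunk.strip() for chunk in raw.split("\n\n\n") if chunk.strip()]
poems = [re.sub(r"\n\n", "\n", poem) for poem in poems]

# create_training_data as committed: each line becomes an "input" whose
# "output" is up to max_lines following lines of the same poem.
def create_training_data(poems, max_lines=7):
    training_pairs = []
    for poem in poems:
        lines = poem.split("\n")
        for i in range(len(lines) - 1):
            prompt = lines[i]
            continuation = "\n".join(lines[i + 1:i + 1 + max_lines])
            if continuation.strip():  # Skip empty continuations
                training_pairs.append({"input": prompt, "output": continuation})
    return training_pairs

pairs = create_training_data(poems, max_lines=10)
print(f"Number of training pairs: {len(pairs)}")  # 2: one per two-line poem
print(json.dumps(pairs, indent=4, ensure_ascii=False))

Note the shape change this commit makes: the old generate_training_pairs predicted one next line from a multi-line context window, while the new create_training_data inverts that, pairing a single line of input with a multi-line continuation.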
leaves-of-grass-cleaned.txt
CHANGED
The diff for this file is too large to render.
leaves-of-grass-original.txt
CHANGED
@@ -1,4 +1,4 @@
-
+Leaves of Grass
 
 Come, said my soul,
 Such verses for my Body let us write, (for we are one,)
train.json
CHANGED
The diff for this file is too large to render.
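The regenerated train.json is too large to render, but the json.dump call in the new script fixes each record's shape. An illustrative entry (values taken from the opening lines shown in the leaves-of-grass-original.txt diff; a real record's "output" can run to ten lines with max_lines=10):

[
    {
        "input": "Come, said my soul,",
        "output": "Such verses for my Body let us write, (for we are one,)"
    }
]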