projectlosangeles commited on
Commit
0d8675a
·
verified ·
1 Parent(s): 46d536e

Upload 14 files

Browse files
Monster-MIDI-Dataset-main.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a16ad46342c4cdafbedabc9606f06c7a934737888e213afc6f021098302a055
3
- size 5811367
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e021400efc79006c854784a2739f31686373d42c916589169f1351b92f86a85
3
+ size 5812266
SyllablesSearch.py ADDED
The diff for this file is too large to render. See raw diff
 
TMELODIES.py ADDED
@@ -0,0 +1,932 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #! /usr/bin/python3
2
+
3
+ r'''############################################################################
4
+ ################################################################################
5
+ #
6
+ #
7
+ # Tegridy MELODIES Python Module (TMELODIES)
8
+ # Version 1.0
9
+ #
10
+ # Project Los Angeles
11
+ #
12
+ # Tegridy Code 2024
13
+ #
14
+ # https://github.com/asigalov61/tegridy-tools
15
+ #
16
+ #
17
+ ################################################################################
18
+ #
19
+ # All melodies in this module are licensed CC BY-NC-SA
20
+ #
21
+ ################################################################################
22
+ #
23
+ # Copyright 2024 Project Los Angeles / Tegridy Code
24
+ #
25
+ # Licensed under the Apache License, Version 2.0 (the "License");
26
+ # you may not use this file except in compliance with the License.
27
+ # You may obtain a copy of the License at
28
+ #
29
+ # http://www.apache.org/licenses/LICENSE-2.0
30
+ #
31
+ # Unless required by applicable law or agreed to in writing, software
32
+ # distributed under the License is distributed on an "AS IS" BASIS,
33
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
34
+ # See the License for the specific language governing permissions and
35
+ # limitations under the License.
36
+ #
37
+ ################################################################################
38
+ ################################################################################
39
+ #
40
+ # You can decode all melodies easily with TMIDIX Python Module like so...
41
+ #
42
+ ################################################################################
43
+
44
+ import random
45
+ import TMELODIES
46
+ import TMIDIX
47
+
48
+ melody = random.choice(TMELODIES.ALL_MELODIES)
49
+
50
+ mel_chords = TMELODIES.harmonize_melody(melody)
51
+
52
+ name, part, key, output_score = TMELODIES.melody_to_enhanced_score_notes(melody,
53
+ harmonized_tones_chords=mel_chords
54
+ )
55
+
56
+ print('=' * 70)
57
+ print('Song:', name+' '+part+' in '+key)
58
+ print('=' * 70)
59
+
60
+ midi_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(output_score,
61
+ output_signature = name+' '+part+' in '+key,
62
+ output_file_name = name+' '+part+' in '+key,
63
+ track_name='Project Los Angeles',
64
+ timings_multiplier=16
65
+ )
66
+
67
+ print('=' * 70)
68
+
69
+ ################################################################################
70
+ '''
71
+
72
+ ALL_MELODIES = [['Arabian Nights', 'Verse', 'A', 'Minor',
73
+ [[0, 13, 48], [13, 13, 50], [14, 27, 52], [28, 13, 48], [13, 13, 50],
74
+ [14, 27, 52], [28, 13, 48], [13, 13, 52], [14, 19, 51], [20, 14, 47],
75
+ [15, 19, 49], [19, 28, 51], [29, 13, 47], [14, 13, 51], [14, 20, 50],
76
+ [20, 19, 50], [20, 18, 50], [18, 24, 50], [24, 27, 52], [28, 68, 48],
77
+ [82, 13, 52], [14, 13, 53], [14, 20, 55], [20, 20, 51], [21, 14, 53],
78
+ [14, 20, 55], [21, 20, 51], [20, 13, 55], [14, 20, 54], [21, 20, 50],
79
+ [20, 13, 52], [14, 27, 54], [28, 13, 50], [14, 13, 54], [13, 20, 53],
80
+ [21, 20, 53], [21, 13, 53], [13, 27, 53], [42, 13, 55], [14, 68, 52],
81
+ [82, 13, 57], [14, 13, 59], [14, 27, 60], [27, 13, 57], [14, 13, 59],
82
+ [14, 27, 60], [28, 13, 57], [13, 13, 59], [14, 27, 60], [28, 13, 57],
83
+ [13, 13, 59], [14, 27, 60], [28, 13, 57], [14, 13, 59], [14, 27, 60],
84
+ [27, 13, 57], [13, 13, 59], [14, 27, 60], [28, 27, 57], [28, 82, 52],
85
+ [82, 13, 48], [14, 13, 50], [14, 27, 52], [28, 13, 48], [13, 13, 50],
86
+ [14, 27, 52], [27, 13, 48], [14, 13, 52], [14, 19, 51], [19, 14, 47],
87
+ [15, 19, 49], [20, 28, 51], [29, 13, 47], [14, 13, 51], [14, 20, 52],
88
+ [20, 19, 52], [19, 18, 52], [19, 17, 60], [17, 20, 59], [21, 20, 57],
89
+ [20, 62, 57]]],
90
+ ['Beard Barb', 'Chorus', 'F', 'Major',
91
+ [[0, 35, 81], [41, 42, 81], [42, 20, 83], [18, 20, 81], [19, 20, 79],
92
+ [20, 21, 81], [20, 61, 79], [61, 12, 76], [10, 11, 74], [11, 65, 72],
93
+ [82, 31, 81], [37, 43, 81], [42, 23, 83], [22, 20, 81], [20, 18, 79],
94
+ [17, 22, 81], [22, 94, 79], [163, 32, 81], [39, 40, 81], [39, 23, 83],
95
+ [21, 20, 81], [21, 20, 79], [18, 21, 81], [21, 82, 79], [83, 60, 72],
96
+ [81, 30, 77], [29, 11, 76], [10, 21, 77], [22, 18, 81], [19, 22, 79],
97
+ [21, 35, 81], [41, 11, 76], [9, 10, 74], [9, 77, 72]]],
98
+ ['Beard Barb', 'Verse', 'A', 'Major',
99
+ [[0, 27, 76], [39, 28, 74], [40, 28, 72], [39, 21, 69], [20, 10, 72],
100
+ [12, 35, 67], [132, 20, 69], [19, 10, 72], [12, 29, 67], [49, 20, 69],
101
+ [20, 10, 72], [9, 32, 67], [72, 20, 76], [19, 25, 77], [125, 35, 77],
102
+ [42, 35, 74], [41, 34, 71], [40, 18, 76], [20, 11, 71], [11, 57, 72],
103
+ [69, 18, 76], [20, 12, 69], [22, 11, 69], [12, 8, 71], [8, 11, 72],
104
+ [13, 7, 71], [6, 11, 72], [13, 10, 69], [9, 17, 67], [17, 35, 76],
105
+ [82, 12, 69], [13, 7, 71], [7, 13, 72], [12, 8, 71], [8, 11, 72],
106
+ [12, 10, 69], [9, 16, 67], [18, 30, 76], [83, 13, 69], [13, 7, 71],
107
+ [8, 10, 72], [10, 8, 71], [8, 10, 72], [11, 10, 69], [10, 17, 67],
108
+ [19, 32, 76], [81, 5, 76], [13, 3, 76], [9, 5, 76], [13, 6, 76], [8, 5, 74],
109
+ [14, 7, 74], [6, 13, 72]]],
110
+ ['Blue Railcart', 'Chorus', 'B-', 'Minor',
111
+ [[0, 23, 70], [26, 10, 65], [13, 10, 70], [13, 12, 68], [14, 12, 66],
112
+ [13, 25, 63], [26, 16, 66], [13, 13, 65], [15, 24, 60], [25, 13, 66],
113
+ [14, 13, 65], [14, 12, 61], [40, 12, 70], [12, 15, 69], [14, 13, 70],
114
+ [13, 14, 72], [14, 12, 70], [13, 23, 66], [27, 13, 65], [13, 12, 73],
115
+ [13, 12, 72], [13, 13, 73], [15, 34, 70], [56, 23, 70], [27, 13, 65],
116
+ [13, 13, 70], [12, 14, 68], [13, 15, 66], [14, 26, 63], [26, 16, 66],
117
+ [14, 13, 65], [14, 27, 60], [27, 14, 66], [12, 12, 65], [15, 13, 61],
118
+ [25, 24, 70], [27, 15, 69], [13, 13, 70], [13, 14, 72], [14, 13, 70],
119
+ [14, 24, 66], [26, 13, 65], [13, 11, 73], [13, 14, 72], [13, 12, 73],
120
+ [14, 38, 70]]],
121
+ ['Blue Railcart', 'Verse', 'E-', 'Minor',
122
+ [[0, 11, 65], [13, 13, 70], [12, 13, 69], [13, 13, 70], [15, 14, 72],
123
+ [12, 13, 70], [14, 13, 68], [13, 13, 70], [14, 24, 68], [26, 23, 66],
124
+ [27, 36, 66], [56, 12, 63], [12, 13, 68], [13, 13, 67], [12, 13, 68],
125
+ [15, 13, 70], [12, 12, 68], [14, 13, 63], [13, 14, 66], [15, 46, 65],
126
+ [107, 11, 65], [12, 14, 70], [15, 14, 69], [12, 13, 70], [14, 15, 72],
127
+ [13, 12, 70], [13, 14, 68], [13, 14, 66], [15, 29, 65], [27, 24, 63],
128
+ [26, 14, 70], [55, 13, 65], [12, 13, 73], [13, 13, 72], [13, 12, 70],
129
+ [13, 15, 69], [14, 13, 70], [14, 12, 72], [13, 13, 69], [14, 57, 70]]],
130
+ ['Bremen Musicians', 'Verse', 'C', 'Major',
131
+ [[0, 15, 60], [16, 15, 60], [15, 15, 60], [15, 15, 62], [15, 15, 64],
132
+ [16, 15, 64], [15, 15, 60], [15, 15, 64], [16, 30, 67], [30, 30, 64],
133
+ [31, 60, 67], [60, 15, 65], [16, 15, 65], [15, 15, 65], [15, 15, 64],
134
+ [15, 15, 62], [16, 15, 62], [15, 15, 65], [15, 15, 69], [15, 30, 67],
135
+ [31, 30, 65], [30, 61, 67], [61, 45, 64], [46, 15, 64], [15, 30, 67],
136
+ [31, 30, 64], [31, 15, 69], [15, 15, 69], [15, 15, 69], [15, 15, 71],
137
+ [16, 30, 72], [30, 30, 69], [31, 15, 74], [15, 15, 74], [15, 15, 74],
138
+ [15, 15, 72], [16, 15, 71], [15, 15, 67], [15, 15, 71], [15, 15, 74],
139
+ [16, 30, 72], [30, 30, 71], [31, 61, 69], [61, 45, 65], [45, 15, 65],
140
+ [16, 30, 69], [30, 30, 72], [31, 30, 71], [30, 30, 69], [30, 30, 71],
141
+ [31, 30, 74], [30, 15, 76], [16, 15, 74], [15, 45, 72]]],
142
+ ['Chunga Changa', 'Chorus', 'G', 'Major',
143
+ [[0, 7, 67], [16, 6, 72], [16, 15, 71], [20, 7, 69], [9, 7, 67], [17, 6, 69],
144
+ [14, 19, 71], [24, 6, 69], [6, 6, 67], [16, 6, 69], [15, 21, 69],
145
+ [22, 5, 67], [9, 6, 67], [14, 6, 69], [15, 21, 69], [23, 5, 67], [9, 6, 67],
146
+ [15, 5, 69], [15, 21, 69], [23, 4, 67], [7, 6, 67], [15, 6, 69],
147
+ [15, 21, 69], [22, 5, 67], [8, 5, 69], [15, 7, 71], [16, 19, 69],
148
+ [22, 7, 67], [70, 7, 67], [15, 6, 72], [15, 20, 71], [21, 6, 69], [8, 5, 67],
149
+ [16, 6, 69], [16, 21, 71], [23, 6, 69], [7, 5, 67], [14, 6, 69],
150
+ [16, 21, 69], [24, 4, 67], [6, 6, 67], [15, 6, 69], [16, 19, 69],
151
+ [22, 3, 67], [8, 6, 67], [15, 6, 69], [15, 22, 69], [24, 5, 67], [9, 7, 67],
152
+ [15, 5, 69], [13, 22, 69], [23, 6, 67], [7, 7, 65], [17, 7, 64],
153
+ [14, 20, 62], [24, 8, 60]]],
154
+ ['Chunga Changa', 'Verse', 'C', 'Minor',
155
+ [[0, 4, 60], [15, 5, 63], [14, 6, 67], [16, 5, 67], [46, 20, 67], [23, 6, 65],
156
+ [6, 6, 67], [16, 7, 68], [14, 14, 67], [77, 23, 72], [30, 6, 63],
157
+ [15, 6, 62], [16, 6, 62], [47, 16, 62], [20, 6, 63], [7, 7, 65], [17, 6, 67],
158
+ [15, 13, 63], [91, 6, 60], [16, 6, 63], [14, 6, 67], [16, 6, 67],
159
+ [47, 17, 67], [21, 6, 65], [7, 6, 67], [16, 7, 68], [17, 28, 67],
160
+ [71, 22, 72], [32, 7, 63], [15, 7, 62], [16, 7, 62], [47, 15, 62],
161
+ [22, 6, 63], [6, 9, 65], [17, 7, 67], [16, 12, 60]]],
162
+ ['Clouds', 'Chorus', 'E', 'Minor',
163
+ [[0, 15, 71], [13, 7, 76], [17, 6, 79], [15, 110, 83], [162, 5, 83],
164
+ [17, 15, 83], [13, 16, 84], [15, 17, 83], [16, 14, 81], [14, 13, 79],
165
+ [15, 45, 83], [45, 51, 78], [137, 16, 71], [14, 5, 75], [15, 6, 78],
166
+ [17, 123, 81], [165, 16, 83], [15, 6, 81], [15, 15, 83], [15, 6, 84],
167
+ [15, 5, 83], [15, 5, 81], [15, 38, 84], [45, 72, 83], [105, 16, 84],
168
+ [15, 16, 83], [15, 16, 84], [15, 15, 83], [15, 16, 81], [15, 15, 80],
169
+ [15, 30, 83], [30, 15, 81], [15, 76, 72], [135, 29, 83], [30, 30, 78],
170
+ [30, 60, 81], [60, 59, 79], [90, 12, 81], [15, 18, 79], [15, 17, 81],
171
+ [15, 17, 79], [15, 16, 78], [15, 16, 76], [15, 31, 83], [30, 15, 79],
172
+ [15, 72, 76], [135, 31, 79], [30, 28, 75], [30, 85, 76]]],
173
+ ['Clouds', 'Verse', 'E', 'Minor',
174
+ [[0, 16, 71], [15, 15, 76], [15, 30, 79], [31, 16, 78], [14, 15, 76],
175
+ [14, 15, 75], [14, 17, 76], [15, 18, 78], [17, 15, 75], [14, 71, 76],
176
+ [89, 15, 71], [15, 17, 72], [17, 23, 74], [30, 5, 74], [14, 13, 74],
177
+ [13, 15, 76], [15, 15, 74], [16, 16, 72], [15, 17, 74], [15, 31, 72],
178
+ [32, 43, 71], [57, 17, 72], [17, 13, 71], [15, 27, 69], [28, 15, 72],
179
+ [17, 15, 76], [15, 16, 81], [15, 17, 79], [15, 16, 78], [14, 14, 76],
180
+ [15, 55, 79], [92, 17, 78], [15, 13, 76], [15, 30, 71], [30, 14, 76],
181
+ [13, 17, 78], [16, 13, 79], [13, 16, 78], [16, 14, 81], [14, 17, 79],
182
+ [16, 33, 78], [31, 20, 76]]],
183
+ ['Daddys Song', 'Chorus', 'A-', 'Major',
184
+ [[0, 7, 73], [15, 6, 68], [14, 15, 70], [14, 5, 68], [15, 7, 73], [14, 5, 68],
185
+ [15, 14, 70], [14, 7, 68], [14, 15, 72], [29, 9, 72], [14, 4, 68],
186
+ [15, 27, 70], [29, 13, 68], [28, 6, 72], [15, 5, 68], [14, 14, 70],
187
+ [14, 6, 68], [15, 6, 72], [14, 5, 68], [15, 15, 70], [14, 6, 68],
188
+ [29, 15, 70], [14, 14, 68], [14, 14, 66], [15, 29, 65], [29, 19, 68],
189
+ [28, 6, 73], [15, 5, 68], [14, 16, 70], [14, 4, 68], [15, 5, 73],
190
+ [14, 5, 68], [14, 14, 70], [15, 8, 68], [14, 18, 73], [29, 15, 73],
191
+ [14, 14, 72], [15, 27, 75], [28, 19, 70], [29, 6, 73], [14, 16, 73],
192
+ [15, 17, 72], [14, 17, 70], [14, 6, 72], [15, 18, 72], [14, 20, 70],
193
+ [15, 11, 68], [28, 13, 66], [15, 15, 65], [14, 17, 63], [14, 34, 68],
194
+ [58, 4, 73], [14, 16, 73], [14, 15, 72], [15, 15, 70], [14, 7, 72],
195
+ [15, 16, 72], [14, 18, 70], [14, 13, 68], [29, 17, 68], [15, 17, 70],
196
+ [14, 20, 72], [14, 16, 73]]],
197
+ ['Daddys Song', 'Verse', 'C#', 'Minor',
198
+ [[0, 23, 68], [29, 28, 68], [29, 30, 73], [29, 17, 68], [14, 13, 66],
199
+ [14, 30, 64], [29, 27, 66], [29, 31, 64], [28, 29, 63], [29, 29, 66],
200
+ [29, 31, 69], [29, 27, 68], [29, 13, 63], [14, 17, 64], [14, 31, 66],
201
+ [29, 31, 68], [29, 35, 64], [57, 20, 68], [29, 24, 68], [29, 31, 73],
202
+ [29, 10, 68], [14, 15, 69], [14, 27, 71], [29, 27, 73], [29, 28, 69],
203
+ [29, 18, 66], [28, 27, 73], [29, 29, 69], [29, 28, 68], [28, 12, 66],
204
+ [15, 13, 64], [14, 31, 66], [29, 27, 68], [29, 20, 61]]],
205
+ ['Dreidel', 'Chorus', 'C', 'Major',
206
+ [[0, 6, 64], [14, 8, 67], [17, 6, 64], [14, 7, 67], [15, 6, 64], [14, 7, 67],
207
+ [17, 22, 64], [32, 6, 64], [15, 6, 67], [16, 6, 67], [14, 8, 65],
208
+ [16, 6, 64], [16, 35, 62], [49, 6, 62], [14, 7, 65], [16, 6, 62],
209
+ [14, 6, 65], [17, 6, 62], [15, 6, 65], [17, 20, 62], [30, 6, 62],
210
+ [15, 7, 67], [15, 7, 65], [16, 7, 64], [15, 7, 62], [15, 26, 60]]],
211
+ ['Dreidel', 'Verse', 'C', 'Major',
212
+ [[0, 22, 55], [32, 6, 60], [15, 8, 60], [14, 8, 62], [15, 8, 62],
213
+ [15, 12, 64], [17, 25, 60], [31, 8, 64], [15, 7, 67], [15, 8, 67],
214
+ [15, 9, 65], [15, 8, 64], [17, 36, 62], [50, 6, 62], [14, 7, 62],
215
+ [15, 7, 62], [15, 6, 64], [16, 6, 64], [15, 7, 65], [15, 21, 62],
216
+ [32, 7, 62], [16, 7, 67], [16, 8, 65], [15, 8, 64], [15, 8, 62],
217
+ [16, 41, 60]]],
218
+ ['Duck Tales', 'Chorus', 'A', 'Major',
219
+ [[0, 12, 68], [20, 11, 68], [31, 8, 59], [7, 22, 71], [21, 11, 68],
220
+ [75, 25, 64], [28, 12, 62], [21, 14, 60], [19, 13, 62], [21, 14, 64],
221
+ [20, 14, 62], [20, 14, 60], [20, 10, 62], [20, 13, 68], [22, 12, 68],
222
+ [32, 8, 59], [7, 23, 71], [24, 13, 68], [73, 25, 64], [29, 13, 62],
223
+ [19, 14, 60], [19, 12, 62], [20, 15, 64], [21, 12, 62], [20, 13, 60],
224
+ [20, 12, 62], [22, 12, 68], [21, 12, 68]]],
225
+ ['Duck Tales', 'Verse', 'F#', 'Major',
226
+ [[0, 12, 52], [20, 13, 56], [22, 13, 59], [19, 11, 61], [20, 8, 62],
227
+ [14, 10, 61], [20, 23, 61], [29, 10, 59], [20, 24, 57], [31, 30, 56],
228
+ [51, 23, 57], [32, 24, 56], [48, 15, 52], [21, 17, 56], [21, 14, 59],
229
+ [20, 14, 61], [20, 9, 62], [14, 12, 61], [19, 16, 61], [22, 16, 59],
230
+ [28, 24, 62], [30, 28, 61], [49, 26, 62], [34, 26, 61], [69, 16, 54],
231
+ [21, 13, 57], [21, 12, 61], [20, 26, 61], [34, 28, 59], [67, 15, 61],
232
+ [21, 14, 64], [20, 13, 66], [21, 26, 68], [32, 7, 66], [12, 15, 66]]],
233
+ ['Eagles Learn to Fly', 'Verse', 'F#', 'Minor',
234
+ [[0, 28, 68], [29, 17, 73], [14, 15, 68], [43, 12, 66], [21, 9, 66],
235
+ [8, 16, 64], [14, 14, 63], [14, 33, 61], [73, 14, 61], [14, 15, 64],
236
+ [14, 17, 68], [14, 40, 73], [44, 14, 68], [14, 48, 73], [43, 14, 68],
237
+ [15, 27, 71], [28, 36, 66], [43, 16, 59], [15, 15, 62], [14, 5, 66],
238
+ [15, 41, 71], [43, 14, 66], [14, 27, 71], [29, 29, 73], [29, 29, 69],
239
+ [28, 34, 64], [44, 7, 73], [14, 8, 73], [14, 15, 73], [15, 67, 71],
240
+ [72, 15, 71], [14, 14, 69], [14, 14, 68], [15, 13, 66], [28, 26, 68],
241
+ [29, 14, 69], [29, 26, 68], [29, 15, 73], [14, 20, 68], [43, 14, 66],
242
+ [22, 8, 66], [7, 14, 64], [14, 15, 63], [15, 28, 61]]],
243
+ ['Freak', 'Chorus', 'D', 'Minor',
244
+ [[0, 17, 76], [19, 6, 81], [19, 20, 81], [37, 4, 69], [8, 9, 69],
245
+ [11, 23, 77], [28, 14, 74], [28, 17, 74], [19, 6, 79], [17, 18, 79],
246
+ [20, 18, 74], [19, 21, 77], [19, 37, 76], [39, 6, 73], [17, 20, 79],
247
+ [18, 19, 77], [18, 12, 74], [59, 17, 76], [19, 14, 72], [56, 17, 76],
248
+ [16, 6, 74], [19, 6, 71], [20, 18, 72], [17, 25, 69]]],
249
+ ['Freak', 'Verse', 'C', 'Minor',
250
+ [[0, 19, 76], [18, 20, 77], [19, 21, 76], [21, 17, 72], [17, 18, 69],
251
+ [20, 33, 72], [35, 19, 71], [20, 21, 74], [19, 20, 76], [19, 20, 74],
252
+ [20, 17, 71], [17, 20, 74], [20, 35, 77], [36, 17, 76], [18, 19, 79],
253
+ [19, 18, 81], [18, 20, 79], [21, 19, 76], [20, 17, 72], [18, 34, 76],
254
+ [37, 6, 74], [17, 18, 74], [20, 19, 76], [19, 18, 74], [20, 17, 76],
255
+ [17, 20, 77], [19, 37, 79]]],
256
+ ['Friend Song', 'Chorus', 'A', 'Minor',
257
+ [[0, 14, 71], [13, 15, 69], [14, 18, 62], [28, 15, 62], [13, 11, 65],
258
+ [14, 27, 69], [28, 14, 67], [14, 11, 65], [14, 6, 64], [13, 6, 64],
259
+ [14, 16, 64], [14, 14, 69], [14, 29, 72], [41, 15, 72], [14, 5, 71],
260
+ [14, 3, 71], [13, 12, 71], [14, 12, 67], [14, 27, 69], [28, 26, 71],
261
+ [27, 40, 72], [56, 26, 71], [27, 27, 69], [28, 16, 62], [27, 12, 62],
262
+ [14, 12, 65], [14, 27, 69], [28, 14, 67], [13, 13, 65], [14, 6, 64],
263
+ [14, 6, 64], [14, 14, 64], [14, 12, 69], [14, 27, 72], [41, 13, 72],
264
+ [14, 4, 71], [14, 3, 71], [14, 13, 71], [14, 12, 74], [13, 17, 72],
265
+ [28, 24, 71], [28, 11, 69]]],
266
+ ['Friend Song', 'Verse', 'A', 'Minor',
267
+ [[0, 3, 69], [14, 3, 71], [14, 13, 72], [13, 4, 74], [14, 4, 72], [14, 4, 71],
268
+ [14, 4, 69], [55, 54, 64], [55, 23, 60], [27, 4, 69], [14, 4, 71],
269
+ [14, 54, 72], [55, 51, 74], [56, 24, 72], [27, 31, 71], [28, 25, 59],
270
+ [27, 5, 67], [14, 4, 69], [14, 13, 71], [14, 3, 72], [14, 5, 71],
271
+ [13, 5, 69], [14, 6, 68], [55, 55, 65], [55, 18, 64], [28, 3, 68],
272
+ [14, 4, 69], [13, 52, 71], [56, 52, 72], [55, 22, 71], [27, 26, 69],
273
+ [28, 26, 57]]],
274
+ ['Gang Dance', 'Chorus', 'D', 'Minor',
275
+ [[0, 11, 64], [21, 23, 64], [25, 7, 70], [49, 6, 69], [23, 23, 69],
276
+ [24, 12, 64], [68, 43, 65], [45, 6, 64], [23, 8, 65], [14, 7, 64],
277
+ [10, 6, 65], [11, 8, 67], [12, 13, 69], [50, 7, 67], [24, 25, 67],
278
+ [22, 11, 74], [48, 6, 73], [23, 25, 73], [24, 10, 69], [47, 6, 74],
279
+ [23, 54, 74]]],
280
+ ['Gang Dance', 'Verse', 'D', 'Minor',
281
+ [[0, 27, 69], [35, 5, 68], [10, 27, 69], [35, 6, 68], [10, 9, 69],
282
+ [24, 11, 65], [24, 12, 64], [22, 11, 62], [49, 43, 67], [47, 6, 62],
283
+ [25, 5, 67], [11, 5, 67], [12, 6, 67], [10, 7, 69], [12, 13, 67],
284
+ [72, 43, 67], [46, 7, 60], [24, 7, 67], [20, 23, 70], [25, 11, 69],
285
+ [22, 15, 67], [24, 9, 65], [24, 8, 65], [48, 4, 65], [11, 4, 65],
286
+ [11, 5, 65], [12, 5, 65], [11, 6, 65], [12, 9, 67], [13, 12, 65]]],
287
+ ['Gang Song', 'Chorus', 'C', 'Minor',
288
+ [[0, 24, 62], [38, 22, 62], [38, 25, 62], [38, 23, 63], [37, 24, 60],
289
+ [38, 24, 55], [38, 9, 63], [19, 11, 63], [19, 12, 62], [19, 9, 63],
290
+ [19, 30, 65], [38, 25, 65], [38, 32, 65], [38, 27, 65], [38, 28, 67],
291
+ [37, 14, 55], [39, 6, 67], [18, 12, 67], [19, 14, 65], [19, 10, 67],
292
+ [19, 31, 68], [38, 10, 70], [19, 22, 72], [38, 13, 72], [19, 9, 70],
293
+ [19, 9, 68], [19, 30, 67], [38, 10, 68], [19, 22, 70], [37, 11, 70],
294
+ [19, 10, 68], [20, 11, 67], [18, 20, 65], [19, 10, 64], [19, 12, 65],
295
+ [19, 11, 68], [19, 30, 67], [38, 13, 67], [19, 11, 67], [19, 34, 65],
296
+ [38, 71, 63]]],
297
+ ['Gang Song', 'Verse', 'C', 'Minor',
298
+ [[0, 9, 55], [19, 13, 60], [19, 12, 62], [19, 12, 63], [19, 10, 60],
299
+ [19, 15, 59], [19, 10, 60], [19, 11, 62], [19, 11, 59], [19, 32, 60],
300
+ [95, 11, 60], [18, 11, 63], [19, 10, 67], [20, 41, 72], [56, 12, 72],
301
+ [19, 13, 70], [19, 14, 68], [19, 12, 67], [19, 13, 65], [19, 19, 68],
302
+ [19, 41, 67], [114, 9, 67], [19, 14, 70], [18, 10, 68], [19, 11, 65],
303
+ [20, 13, 62], [18, 12, 60], [19, 10, 59], [19, 14, 67], [19, 13, 65],
304
+ [19, 27, 63], [133, 13, 60], [19, 14, 62], [19, 9, 62], [19, 15, 62],
305
+ [56, 8, 62], [20, 14, 62], [18, 11, 63], [19, 13, 62], [19, 17, 60]]],
306
+ ['Gang Stop', 'Chorus', 'F', 'Minor',
307
+ [[0, 13, 72], [25, 3, 67], [13, 13, 63], [25, 2, 62], [13, 14, 60],
308
+ [27, 14, 72], [25, 2, 67], [12, 12, 63], [26, 3, 62], [12, 14, 60],
309
+ [26, 4, 72], [13, 2, 72], [5, 2, 72], [6, 2, 72], [13, 3, 72], [13, 24, 73],
310
+ [26, 19, 72], [27, 12, 73], [14, 4, 72], [13, 3, 68], [12, 3, 67],
311
+ [12, 46, 65], [54, 23, 68], [25, 2, 65], [14, 24, 68], [27, 2, 65],
312
+ [12, 9, 68], [11, 3, 65], [12, 24, 68], [27, 2, 65], [14, 22, 68],
313
+ [25, 3, 65], [13, 10, 68], [12, 2, 65], [13, 3, 67], [13, 1, 67], [6, 2, 67],
314
+ [5, 2, 67], [14, 2, 67], [12, 24, 68], [27, 17, 67], [27, 24, 63],
315
+ [27, 21, 62], [26, 32, 60]]],
316
+ ['Gang Stop', 'Verse', 'F', 'Minor',
317
+ [[0, 14, 66], [26, 3, 67], [14, 12, 68], [25, 3, 67], [13, 3, 66],
318
+ [14, 2, 67], [12, 12, 70], [24, 4, 69], [15, 13, 68], [26, 3, 67],
319
+ [13, 2, 66], [13, 2, 67], [11, 22, 70], [26, 2, 69], [13, 12, 68],
320
+ [26, 3, 67], [11, 2, 66], [13, 2, 67], [12, 18, 67], [28, 22, 60],
321
+ [25, 42, 59], [54, 21, 68], [25, 3, 65], [14, 13, 68], [25, 9, 65],
322
+ [12, 9, 68], [12, 3, 65], [13, 22, 68], [26, 2, 65], [15, 12, 68],
323
+ [24, 8, 65], [12, 11, 68], [14, 3, 65], [12, 23, 68], [25, 3, 65],
324
+ [14, 23, 68], [25, 11, 65], [14, 10, 68], [11, 10, 65], [12, 24, 62],
325
+ [27, 22, 67], [26, 23, 63], [26, 16, 60]]],
326
+ ['Grasshopper', 'Chorus', 'B-', 'Minor',
327
+ [[0, 6, 70], [19, 6, 72], [19, 4, 72], [9, 5, 72], [10, 6, 72], [18, 5, 72],
328
+ [19, 6, 73], [19, 4, 73], [9, 5, 73], [10, 6, 73], [18, 6, 73], [19, 10, 73],
329
+ [19, 7, 72], [19, 7, 70], [18, 7, 69], [19, 7, 70], [19, 9, 70], [37, 6, 70],
330
+ [19, 6, 72], [19, 4, 72], [9, 6, 72], [10, 7, 72], [18, 6, 72], [19, 6, 73],
331
+ [19, 4, 73], [9, 5, 73], [10, 7, 73], [18, 5, 73], [19, 8, 73], [19, 7, 72],
332
+ [19, 7, 70], [18, 7, 69], [19, 9, 70]]],
333
+ ['Grasshopper', 'Verse', 'F', 'Minor',
334
+ [[0, 8, 70], [19, 10, 65], [19, 9, 70], [19, 9, 65], [18, 7, 70], [19, 9, 69],
335
+ [19, 10, 69], [37, 8, 69], [19, 11, 65], [19, 9, 69], [19, 9, 65],
336
+ [18, 9, 69], [19, 9, 70], [19, 10, 70], [37, 7, 70], [19, 11, 65],
337
+ [19, 8, 70], [19, 9, 65], [18, 8, 70], [19, 9, 69], [19, 9, 69], [37, 8, 69],
338
+ [19, 9, 65], [19, 8, 69], [19, 9, 65], [18, 8, 69], [19, 14, 70]]],
339
+ ['Gummy Bears', 'Chorus', 'B-', 'Major',
340
+ [[0, 18, 74], [24, 8, 75], [14, 36, 77], [40, 26, 77], [35, 9, 77],
341
+ [15, 8, 77], [13, 7, 77], [13, 10, 77], [13, 8, 75], [13, 7, 74],
342
+ [13, 8, 72], [13, 20, 74], [23, 10, 77], [14, 33, 70], [38, 11, 70],
343
+ [26, 4, 70], [14, 7, 77], [12, 7, 77], [13, 7, 77], [14, 9, 77], [13, 8, 75],
344
+ [13, 8, 74], [12, 10, 72], [12, 5, 70], [26, 5, 77], [18, 8, 74], [8, 5, 70],
345
+ [25, 4, 74], [26, 4, 70], [14, 10, 75], [13, 11, 74], [15, 22, 72],
346
+ [27, 23, 74], [35, 10, 70], [27, 9, 70], [23, 10, 70], [26, 10, 70],
347
+ [26, 9, 70], [26, 10, 70], [25, 11, 70], [26, 11, 70]]],
348
+ ['Gummy Bears', 'Verse', 'B-', 'Major',
349
+ [[0, 12, 62], [19, 12, 58], [18, 10, 62], [16, 12, 65], [18, 11, 62],
350
+ [16, 10, 65], [16, 14, 67], [17, 12, 69], [17, 11, 70], [17, 21, 65],
351
+ [26, 9, 62], [25, 12, 67], [18, 14, 65], [19, 10, 63], [16, 12, 65],
352
+ [18, 12, 63], [17, 8, 62], [16, 12, 63], [18, 10, 62], [17, 10, 60],
353
+ [16, 27, 65], [50, 14, 62], [21, 12, 58], [17, 11, 62], [16, 13, 65],
354
+ [18, 11, 62], [17, 12, 65], [16, 14, 67], [17, 16, 69], [16, 13, 70],
355
+ [17, 13, 65], [18, 12, 62], [16, 10, 58], [17, 13, 67], [18, 13, 65],
356
+ [17, 14, 63], [18, 11, 65], [16, 11, 62], [19, 11, 65], [16, 13, 67],
357
+ [17, 12, 69], [16, 11, 70], [17, 32, 72]]],
358
+ ['Hava Nagilah', 'Chorus', 'C', 'Minor',
359
+ [[0, 9, 66], [24, 35, 66], [50, 10, 63], [24, 10, 62], [25, 8, 62],
360
+ [25, 46, 62], [49, 7, 63], [25, 35, 63], [49, 12, 62], [12, 10, 63],
361
+ [13, 10, 60], [24, 5, 60], [25, 29, 60], [49, 41, 60], [50, 30, 63],
362
+ [37, 8, 62], [12, 9, 60], [23, 12, 60], [23, 34, 67], [46, 42, 66],
363
+ [46, 8, 63], [11, 9, 66], [11, 9, 63], [12, 11, 62], [11, 69, 66]]],
364
+ ['Hava Nagilah', 'Verse', 'D', 'Minor',
365
+ [[0, 34, 62], [49, 69, 62], [74, 18, 66], [25, 18, 63], [25, 17, 62],
366
+ [24, 36, 66], [50, 66, 66], [74, 22, 69], [24, 14, 67], [25, 17, 66],
367
+ [25, 36, 67], [49, 69, 67], [74, 21, 70], [25, 20, 69], [25, 17, 67],
368
+ [24, 48, 66], [50, 10, 63], [12, 8, 66], [12, 11, 63], [25, 83, 62]]],
369
+ ['It Too Shall Pass', 'Verse', 'C', 'Minor',
370
+ [[0, 4, 75], [18, 4, 72], [18, 4, 79], [36, 4, 75], [18, 4, 72], [18, 6, 79],
371
+ [36, 4, 72], [18, 4, 74], [18, 3, 75], [18, 4, 77], [18, 4, 75], [36, 4, 74],
372
+ [36, 4, 74], [18, 4, 71], [18, 4, 79], [36, 4, 74], [18, 6, 71], [18, 4, 79],
373
+ [36, 4, 74], [18, 3, 75], [18, 4, 77], [18, 4, 79], [18, 4, 77], [36, 3, 75],
374
+ [36, 4, 79], [18, 6, 76], [18, 4, 84], [36, 4, 79], [18, 6, 76], [18, 4, 84],
375
+ [36, 6, 79], [18, 4, 80], [18, 4, 82], [18, 4, 84], [18, 4, 82],
376
+ [36, 12, 80], [36, 4, 80], [18, 6, 84], [1, 12, 84], [17, 6, 82],
377
+ [18, 4, 80], [19, 3, 80], [35, 6, 79], [36, 4, 77], [1, 17, 77], [17, 5, 80],
378
+ [18, 9, 79], [18, 6, 74], [18, 4, 75], [72, 4, 80], [18, 6, 84], [18, 4, 82],
379
+ [18, 4, 80], [18, 4, 80], [36, 4, 79], [36, 4, 77], [18, 6, 80], [18, 4, 79],
380
+ [18, 4, 71], [18, 3, 72]]],
381
+ ['Learn at School', 'Chorus', 'E', 'Minor',
382
+ [[0, 14, 64], [14, 8, 74], [15, 15, 74], [14, 7, 72], [15, 17, 72],
383
+ [15, 9, 71], [14, 24, 71], [29, 12, 64], [15, 9, 72], [14, 18, 72],
384
+ [15, 8, 71], [14, 16, 71], [15, 6, 69], [15, 30, 69], [29, 16, 67],
385
+ [14, 18, 69], [15, 14, 72], [14, 17, 71], [15, 13, 67], [14, 14, 64],
386
+ [15, 16, 67], [15, 16, 66], [14, 18, 71], [15, 16, 63], [14, 29, 66],
387
+ [29, 36, 64]]],
388
+ ['Learn at School', 'Verse', 'E', 'Minor',
389
+ [[0, 15, 59], [15, 9, 67], [14, 18, 67], [15, 8, 66], [14, 16, 66],
390
+ [15, 7, 64], [14, 19, 64], [29, 14, 59], [15, 8, 67], [15, 15, 67],
391
+ [14, 8, 66], [15, 17, 66], [14, 7, 64], [15, 19, 64], [29, 14, 64],
392
+ [15, 15, 67], [14, 8, 71], [15, 13, 71], [14, 17, 69], [15, 18, 67],
393
+ [14, 8, 71], [15, 17, 71], [15, 14, 69], [14, 18, 67], [15, 31, 69],
394
+ [29, 46, 71]]],
395
+ ['Let Them Run', 'Chorus', 'E', 'Minor',
396
+ [[0, 7, 61], [15, 11, 61], [17, 9, 64], [15, 30, 64], [32, 28, 63],
397
+ [59, 10, 63], [14, 8, 66], [15, 26, 66], [31, 32, 64], [61, 12, 64],
398
+ [14, 8, 68], [15, 30, 68], [33, 28, 66], [60, 12, 69], [14, 12, 68],
399
+ [17, 45, 71], [90, 11, 68], [14, 10, 71], [18, 30, 71], [33, 24, 69],
400
+ [59, 11, 66], [16, 8, 69], [16, 29, 69], [31, 23, 68], [62, 11, 64],
401
+ [13, 10, 68], [15, 54, 66], [61, 56, 68], [62, 14, 61]]],
402
+ ['Let Them Run', 'Verse', 'C#', 'Minor',
403
+ [[0, 7, 68], [15, 7, 69], [16, 13, 68], [30, 9, 61], [14, 7, 63], [16, 9, 64],
404
+ [15, 6, 61], [16, 8, 68], [15, 7, 69], [14, 17, 68], [29, 8, 63],
405
+ [14, 8, 64], [18, 10, 66], [15, 5, 63], [16, 8, 68], [15, 9, 69],
406
+ [15, 20, 68], [31, 8, 63], [15, 6, 64], [16, 18, 66], [30, 7, 68],
407
+ [13, 10, 69], [16, 38, 68], [93, 7, 73], [14, 9, 74], [17, 19, 73],
408
+ [31, 8, 68], [14, 9, 69], [16, 11, 71], [15, 7, 68], [16, 8, 73],
409
+ [15, 8, 74], [15, 19, 73], [32, 10, 66], [14, 8, 68], [14, 10, 69],
410
+ [17, 10, 73], [16, 11, 71], [16, 8, 69], [16, 19, 73], [29, 10, 68],
411
+ [16, 8, 64], [15, 20, 63], [30, 10, 66], [16, 10, 64], [15, 60, 61]]],
412
+ ['Lullaby', 'Chorus', 'E-', 'Major',
413
+ [[0, 22, 70], [30, 31, 70], [30, 30, 72], [30, 29, 70], [30, 25, 75],
414
+ [29, 32, 75], [31, 31, 72], [30, 25, 70], [44, 21, 63], [30, 16, 63],
415
+ [16, 27, 68], [29, 31, 70], [30, 25, 72], [29, 32, 72], [31, 30, 70],
416
+ [30, 30, 68], [32, 20, 67], [29, 32, 67], [30, 30, 68], [30, 33, 67],
417
+ [31, 87, 70], [120, 59, 68], [59, 57, 65], [60, 80, 63]]],
418
+ ['Lullaby', 'Verse', 'E-', 'Major',
419
+ [[0, 27, 70], [25, 19, 67], [16, 45, 70], [44, 16, 67], [15, 26, 70],
420
+ [30, 32, 70], [31, 28, 68], [29, 31, 67], [32, 30, 65], [30, 30, 67],
421
+ [30, 67, 70], [180, 46, 70], [45, 15, 65], [14, 45, 70], [46, 16, 65],
422
+ [14, 32, 70], [32, 30, 68], [29, 29, 67], [28, 31, 65], [30, 29, 63],
423
+ [30, 32, 67], [31, 66, 70]]],
424
+ ['Mamonthy Song', 'Chorus', 'F', 'Minor',
425
+ [[0, 15, 72], [14, 15, 77], [14, 7, 79], [15, 5, 80], [14, 25, 79],
426
+ [28, 14, 77], [29, 15, 72], [14, 15, 77], [14, 5, 79], [15, 5, 80],
427
+ [14, 30, 79], [28, 11, 77], [29, 14, 72], [14, 15, 77], [14, 5, 79],
428
+ [14, 7, 80], [15, 29, 79], [28, 24, 77], [29, 15, 72], [14, 16, 75],
429
+ [14, 11, 73], [14, 8, 72], [14, 27, 75], [29, 19, 73], [28, 14, 73],
430
+ [15, 16, 82], [14, 15, 80], [14, 16, 82], [14, 30, 80], [29, 26, 79],
431
+ [28, 7, 77], [14, 29, 77], [29, 24, 72], [71, 6, 72], [14, 17, 72],
432
+ [15, 15, 76], [14, 15, 79], [14, 30, 82], [29, 26, 80], [29, 15, 79],
433
+ [15, 15, 80], [16, 12, 79], [12, 31, 77]]],
434
+ ['Mamonthy Song', 'Verse', 'A-', 'Major',
435
+ [[0, 15, 72], [14, 4, 75], [14, 4, 75], [14, 3, 75], [15, 28, 75],
436
+ [28, 30, 77], [29, 15, 72], [14, 5, 75], [14, 4, 75], [14, 4, 75],
437
+ [15, 29, 75], [57, 13, 68], [14, 9, 77], [14, 4, 77], [14, 4, 77],
438
+ [14, 27, 77], [29, 28, 80], [28, 14, 79], [15, 16, 77], [14, 5, 75],
439
+ [14, 3, 75], [14, 20, 75], [57, 16, 75], [15, 16, 80], [14, 15, 79],
440
+ [14, 11, 80], [14, 30, 79], [29, 30, 77], [28, 16, 79], [15, 30, 77],
441
+ [28, 27, 75], [71, 13, 75], [14, 15, 80], [15, 14, 70], [14, 15, 72],
442
+ [14, 30, 73], [29, 28, 77], [28, 16, 75], [14, 27, 73], [29, 34, 72]]],
443
+ ['New Year Song', 'Verse', 'C', 'Major',
444
+ [[0, 49, 79], [53, 8, 76], [27, 25, 76], [27, 50, 79], [53, 7, 76],
445
+ [27, 13, 76], [27, 23, 79], [26, 26, 77], [27, 22, 76], [27, 25, 74],
446
+ [27, 80, 72], [107, 45, 81], [53, 19, 84], [27, 18, 81], [26, 45, 79],
447
+ [54, 8, 76], [27, 17, 76], [26, 27, 79], [27, 26, 77], [27, 23, 76],
448
+ [26, 28, 74], [27, 87, 72], [108, 46, 81], [53, 16, 84], [27, 17, 81],
449
+ [27, 46, 79], [53, 7, 76], [27, 16, 76], [26, 28, 79], [27, 27, 77],
450
+ [27, 29, 76], [27, 28, 74], [26, 52, 72]]],
451
+ ['Open Secret', 'Chorus', 'G', 'Minor',
452
+ [[0, 4, 79], [12, 3, 84], [13, 13, 83], [26, 4, 77], [13, 4, 80],
453
+ [13, 11, 79], [25, 4, 79], [13, 4, 84], [13, 13, 83], [26, 4, 77],
454
+ [12, 3, 80], [13, 14, 79], [25, 5, 79], [14, 4, 84], [13, 4, 83],
455
+ [13, 4, 83], [13, 4, 79], [12, 3, 83], [13, 2, 84], [11, 4, 84], [13, 4, 84],
456
+ [13, 6, 86], [13, 10, 87], [12, 6, 86], [9, 4, 86], [3, 7, 84], [15, 4, 83],
457
+ [13, 8, 84]]],
458
+ ['Open Secret', 'Verse', 'E-', 'Minor',
459
+ [[0, 15, 79], [51, 13, 79], [52, 4, 79], [12, 4, 79], [13, 14, 80],
460
+ [13, 3, 75], [13, 14, 79], [13, 3, 77], [13, 12, 77], [26, 15, 77],
461
+ [51, 15, 77], [52, 4, 77], [13, 5, 80], [13, 12, 79], [13, 5, 74],
462
+ [12, 12, 77], [13, 3, 75], [13, 11, 75], [26, 15, 75], [52, 15, 75],
463
+ [51, 3, 75], [13, 4, 75], [13, 12, 77], [13, 4, 75], [13, 4, 74],
464
+ [13, 4, 72], [12, 51, 82], [52, 14, 80], [39, 5, 84], [13, 3, 82],
465
+ [13, 4, 80], [13, 10, 79], [13, 4, 77], [13, 3, 75], [13, 5, 74],
466
+ [12, 7, 72]]],
467
+ ['Righteous Road', 'Chorus', 'E-', 'Minor',
468
+ [[0, 20, 67], [18, 23, 68], [19, 19, 70], [17, 36, 71], [35, 37, 68],
469
+ [35, 23, 64], [20, 38, 65], [36, 19, 71], [19, 4, 70], [18, 5, 70],
470
+ [16, 4, 66], [19, 5, 66], [18, 9, 63], [17, 28, 63], [38, 6, 63],
471
+ [17, 21, 63], [18, 17, 62], [16, 21, 63], [18, 22, 65], [18, 21, 66],
472
+ [18, 20, 68], [18, 23, 70], [18, 22, 68], [18, 7, 66], [19, 6, 66],
473
+ [19, 5, 58], [19, 18, 58], [17, 9, 63]]],
474
+ ['Righteous Road', 'Verse', 'E-', 'Minor',
475
+ [[0, 5, 66], [18, 21, 66], [20, 6, 65], [17, 5, 65], [18, 5, 65],
476
+ [18, 22, 65], [19, 19, 66], [35, 21, 66], [20, 20, 68], [16, 20, 66],
477
+ [18, 21, 65], [18, 6, 63], [18, 24, 63], [20, 21, 58], [36, 19, 58],
478
+ [18, 19, 63], [18, 18, 62], [17, 20, 63], [17, 21, 65], [19, 21, 66],
479
+ [19, 20, 65], [16, 21, 66], [19, 20, 68], [18, 20, 70], [18, 21, 71],
480
+ [17, 19, 70], [16, 22, 68], [20, 23, 70]]],
481
+ ['Snow Maiden', 'Chorus', 'G', 'Major',
482
+ [[0, 17, 62], [26, 26, 62], [27, 10, 70], [54, 25, 67], [29, 21, 60],
483
+ [24, 12, 69], [54, 22, 65], [23, 25, 58], [30, 12, 67], [13, 11, 65],
484
+ [27, 10, 64], [13, 26, 67], [14, 12, 65], [13, 15, 65], [105, 14, 60],
485
+ [27, 6, 58], [13, 6, 58], [13, 18, 58], [54, 16, 62], [26, 6, 60],
486
+ [14, 9, 60], [13, 17, 60], [54, 19, 64], [26, 6, 62], [14, 13, 62],
487
+ [13, 21, 61], [26, 12, 59], [14, 10, 61], [13, 12, 62]]],
488
+ ['Snow Maiden', 'Verse', 'D', 'Major',
489
+ [[0, 6, 66], [13, 5, 66], [14, 4, 69], [13, 5, 69], [13, 28, 69], [25, 5, 67],
490
+ [15, 5, 67], [12, 17, 71], [27, 27, 71], [28, 26, 69], [53, 9, 66],
491
+ [14, 8, 66], [13, 3, 69], [12, 6, 69], [14, 25, 69], [27, 6, 67],
492
+ [14, 6, 67], [13, 19, 71], [27, 15, 71], [13, 29, 69], [67, 5, 66],
493
+ [14, 4, 66], [13, 4, 66], [13, 4, 66], [13, 23, 66], [27, 5, 62],
494
+ [14, 4, 62], [13, 12, 64], [27, 22, 64], [26, 15, 62], [53, 4, 62],
495
+ [13, 4, 62], [14, 5, 62], [13, 7, 64], [13, 20, 66], [27, 5, 62],
496
+ [13, 5, 62], [14, 17, 62], [26, 20, 61], [27, 12, 62]]],
497
+ ['Sun Circle', 'Chorus', 'G', 'Major',
498
+ [[0, 17, 62], [30, 16, 62], [29, 51, 67], [60, 20, 69], [30, 21, 71],
499
+ [30, 21, 69], [30, 12, 67], [30, 14, 62], [29, 16, 62], [30, 51, 67],
500
+ [60, 21, 69], [30, 21, 71], [29, 19, 71], [30, 14, 69], [30, 18, 62],
501
+ [30, 20, 62], [30, 51, 69], [59, 21, 71], [30, 20, 72], [30, 22, 74],
502
+ [30, 18, 69], [30, 19, 69], [30, 18, 71], [30, 54, 72], [59, 20, 67],
503
+ [30, 18, 69], [30, 49, 71]]],
504
+ ['Sun Circle', 'Verse', 'G', 'Minor',
505
+ [[0, 29, 62], [29, 20, 63], [20, 9, 62], [9, 58, 67], [59, 31, 69],
506
+ [31, 22, 70], [21, 8, 69], [8, 44, 62], [60, 27, 62], [29, 17, 70],
507
+ [21, 4, 70], [7, 31, 70], [30, 21, 69], [22, 2, 69], [8, 30, 67],
508
+ [32, 67, 66], [91, 28, 65], [29, 21, 67], [21, 7, 65], [8, 55, 70],
509
+ [60, 30, 72], [29, 23, 74], [22, 6, 72], [7, 48, 65], [62, 28, 65],
510
+ [29, 17, 69], [18, 8, 70], [10, 30, 72], [31, 15, 70], [17, 13, 72],
511
+ [15, 45, 74]]],
512
+ ['Sunbath', 'Verse', 'F', 'Major',
513
+ [[0, 11, 72], [19, 10, 70], [11, 9, 69], [31, 8, 65], [31, 7, 62],
514
+ [29, 7, 64], [19, 105, 65], [106, 12, 72], [20, 12, 70], [10, 11, 69],
515
+ [32, 9, 65], [30, 10, 62], [32, 8, 64], [19, 95, 65], [105, 9, 69],
516
+ [18, 13, 72], [12, 87, 67], [96, 10, 69], [18, 11, 72], [10, 86, 65],
517
+ [94, 11, 67], [20, 8, 69], [11, 8, 70], [32, 9, 67], [30, 10, 64],
518
+ [31, 10, 67], [17, 101, 65], [107, 10, 69], [22, 11, 72], [13, 86, 67],
519
+ [91, 10, 69], [21, 12, 72], [11, 80, 65], [91, 9, 67], [20, 8, 69],
520
+ [11, 7, 70], [32, 7, 67], [31, 9, 64], [31, 9, 67], [18, 79, 65]]],
521
+ ['Tail Spin', 'Chorus', 'G', 'Major',
522
+ [[0, 6, 67], [16, 8, 71], [16, 6, 62], [16, 5, 65], [8, 5, 67], [15, 4, 67],
523
+ [8, 6, 71], [16, 5, 62], [16, 5, 65], [16, 5, 67], [16, 7, 71], [16, 5, 62],
524
+ [15, 5, 65], [8, 6, 67], [19, 7, 67], [16, 7, 67], [15, 14, 67], [21, 6, 67],
525
+ [16, 7, 71], [16, 6, 62], [16, 5, 65], [8, 5, 67], [16, 3, 67], [8, 6, 71],
526
+ [15, 5, 62], [16, 5, 65], [16, 5, 67], [16, 7, 71], [16, 5, 62], [15, 5, 65],
527
+ [8, 6, 67]]],
528
+ ['Tail Spin', 'Verse', 'C', 'Major',
529
+ [[0, 14, 62], [14, 17, 67], [16, 17, 64], [32, 3, 64], [8, 6, 64],
530
+ [16, 25, 66], [41, 15, 60], [15, 17, 64], [15, 17, 62], [30, 2, 62],
531
+ [10, 6, 62], [16, 22, 64], [71, 4, 60], [8, 5, 60], [15, 7, 60], [16, 6, 64],
532
+ [17, 7, 64], [17, 6, 62], [16, 7, 62], [21, 2, 62], [8, 3, 62], [8, 7, 64],
533
+ [14, 3, 64], [10, 6, 66], [13, 5, 66], [9, 13, 67]]],
534
+ ['Thirty Three Cows', 'Chorus', 'A', 'Major',
535
+ [[0, 8, 76], [20, 6, 76], [9, 8, 76], [20, 5, 76], [10, 18, 76], [20, 28, 72],
536
+ [39, 8, 74], [19, 4, 74], [10, 8, 74], [20, 6, 74], [10, 17, 74],
537
+ [19, 26, 71], [40, 7, 72], [19, 4, 72], [10, 8, 72], [19, 5, 72],
538
+ [10, 16, 72], [20, 29, 69], [39, 18, 70], [20, 12, 72], [10, 20, 70],
539
+ [19, 13, 69], [10, 23, 67], [59, 19, 69], [19, 4, 72], [10, 17, 72],
540
+ [20, 11, 69], [10, 19, 72], [19, 28, 74], [40, 8, 67], [20, 13, 67],
541
+ [9, 18, 69], [20, 11, 68], [10, 17, 67], [19, 23, 76], [40, 7, 76],
542
+ [19, 5, 76], [10, 8, 76], [20, 4, 76], [10, 15, 76], [19, 5, 72],
543
+ [10, 18, 72], [20, 12, 76], [10, 17, 74]]],
544
+ ['Thirty Three Cows', 'Verse', 'A', 'Major',
545
+ [[0, 9, 64], [19, 8, 62], [10, 12, 60], [28, 17, 60], [31, 13, 64],
546
+ [28, 16, 67], [17, 18, 72], [31, 14, 72], [72, 11, 71], [19, 13, 69],
547
+ [11, 12, 67], [26, 12, 69], [30, 11, 67], [30, 16, 64], [17, 14, 62],
548
+ [102, 11, 64], [19, 10, 62], [11, 13, 60], [27, 13, 60], [31, 11, 64],
549
+ [28, 16, 67], [17, 17, 72], [31, 16, 72], [72, 11, 71], [18, 11, 72],
550
+ [10, 15, 74], [29, 12, 71], [28, 13, 69], [31, 17, 71], [17, 17, 67],
551
+ [100, 18, 64], [19, 10, 62], [10, 13, 60], [29, 15, 60], [30, 12, 64],
552
+ [29, 17, 67], [18, 18, 72], [30, 16, 72], [71, 9, 71], [20, 13, 69],
553
+ [12, 13, 67], [27, 12, 69], [28, 13, 67], [30, 19, 64], [18, 19, 62],
554
+ [101, 11, 64], [18, 10, 62], [10, 15, 60], [28, 13, 60], [29, 13, 64],
555
+ [31, 18, 72], [17, 19, 71], [31, 18, 69], [71, 12, 64], [18, 12, 62],
556
+ [10, 14, 60], [29, 15, 60], [30, 12, 64], [29, 18, 72], [19, 17, 71],
557
+ [29, 17, 69], [69, 19, 69], [18, 12, 69], [12, 13, 74], [30, 12, 74],
558
+ [29, 14, 74], [31, 16, 76], [15, 59, 77]]],
559
+ ['USSR Anthem', 'Chorus', 'C', 'Major',
560
+ [[0, 23, 67], [23, 46, 76], [47, 35, 74], [35, 11, 72], [12, 46, 74],
561
+ [47, 23, 71], [23, 23, 67], [24, 46, 72], [46, 35, 71], [36, 11, 69],
562
+ [11, 46, 71], [47, 23, 64], [24, 23, 64], [23, 46, 69], [47, 23, 67],
563
+ [23, 23, 65], [24, 46, 67], [47, 23, 60], [23, 23, 60], [24, 46, 72],
564
+ [46, 35, 71], [36, 11, 69], [11, 93, 67]]],
565
+ ['USSR Anthem', 'Verse', 'C', 'Major',
566
+ [[0, 23, 67], [23, 46, 72], [47, 35, 67], [35, 11, 69], [12, 46, 71],
567
+ [47, 23, 64], [23, 23, 64], [24, 46, 69], [46, 35, 67], [36, 11, 65],
568
+ [11, 46, 67], [47, 23, 60], [24, 23, 60], [23, 46, 62], [47, 23, 62],
569
+ [23, 23, 64], [24, 46, 65], [47, 23, 65], [23, 23, 67], [24, 46, 69],
570
+ [46, 23, 71], [24, 23, 72], [23, 70, 74]]],
571
+ ['White Ships', 'Chorus', 'G', 'Minor',
572
+ [[0, 9, 81], [18, 9, 81], [19, 9, 81], [19, 9, 83], [19, 9, 81], [9, 9, 83],
573
+ [9, 9, 81], [19, 18, 80], [19, 9, 76], [19, 9, 79], [18, 9, 79], [19, 9, 79],
574
+ [19, 9, 81], [19, 9, 79], [9, 9, 81], [9, 9, 79], [19, 18, 78], [19, 9, 74],
575
+ [19, 18, 74], [37, 28, 74], [38, 9, 73], [18, 9, 74], [19, 9, 76],
576
+ [19, 9, 78], [19, 75, 71]]],
577
+ ['White Ships', 'Verse', 'B', 'Minor',
578
+ [[0, 18, 71], [28, 9, 71], [9, 9, 71], [19, 9, 74], [19, 28, 78],
579
+ [37, 18, 76], [19, 9, 74], [19, 18, 76], [28, 9, 76], [9, 9, 76],
580
+ [19, 9, 78], [19, 28, 76], [37, 18, 74], [19, 9, 73], [19, 28, 74],
581
+ [37, 28, 71], [38, 28, 79], [37, 28, 76], [38, 112, 78]]],
582
+ ['Winged Swing', 'Chorus', 'D', 'Major',
583
+ [[0, 27, 57], [22, 37, 66], [43, 70, 66], [86, 25, 66], [22, 23, 67],
584
+ [21, 46, 66], [43, 79, 64], [108, 21, 64], [22, 95, 71], [107, 25, 71],
585
+ [22, 20, 69], [22, 23, 68], [21, 91, 69], [130, 54, 62], [43, 89, 74],
586
+ [107, 20, 74], [22, 23, 72], [22, 14, 70], [21, 42, 70], [43, 88, 69],
587
+ [108, 23, 69], [22, 23, 71], [21, 22, 69], [22, 83, 71], [84, 42, 73],
588
+ [43, 106, 74]]],
589
+ ['Winged Swing', 'Verse', 'D', 'Minor',
590
+ [[0, 27, 69], [32, 8, 70], [11, 7, 69], [11, 11, 65], [10, 9, 64],
591
+ [11, 14, 62], [11, 20, 65], [22, 36, 64], [64, 30, 69], [33, 11, 70],
592
+ [10, 7, 69], [11, 10, 65], [11, 10, 64], [11, 16, 62], [10, 59, 64],
593
+ [86, 29, 66], [33, 12, 67], [11, 3, 69], [10, 12, 69], [11, 11, 70],
594
+ [11, 5, 72], [11, 23, 72], [21, 28, 70], [43, 12, 70], [11, 5, 69],
595
+ [11, 18, 69], [21, 29, 67], [43, 12, 62], [11, 5, 64], [11, 68, 64]]],
596
+ ['With a Smile', 'Chorus', 'C#', 'Minor',
597
+ [[0, 14, 61], [17, 7, 64], [17, 7, 68], [17, 7, 68], [17, 7, 68], [17, 7, 68],
598
+ [17, 20, 68], [35, 17, 73], [17, 17, 61], [17, 17, 64], [17, 8, 63],
599
+ [17, 10, 63], [17, 10, 63], [17, 21, 63], [34, 18, 63], [17, 17, 64],
600
+ [17, 20, 68], [17, 18, 66], [17, 16, 68], [18, 18, 69], [17, 17, 68],
601
+ [17, 18, 66], [17, 17, 68], [17, 18, 69], [17, 34, 73], [34, 49, 71],
602
+ [68, 15, 73], [17, 18, 68], [17, 18, 71], [17, 7, 69], [17, 5, 69],
603
+ [17, 7, 69], [18, 23, 69], [34, 17, 73], [17, 17, 66], [17, 17, 69],
604
+ [17, 8, 68], [17, 7, 68], [17, 7, 68], [17, 25, 68], [34, 15, 73],
605
+ [17, 19, 64], [17, 17, 63], [17, 19, 64], [17, 18, 66], [17, 17, 69],
606
+ [17, 15, 68], [18, 18, 66], [17, 17, 64], [17, 17, 66], [17, 37, 69],
607
+ [34, 51, 68]]],
608
+ ['With a Smile', 'Verse', 'B-', 'Major',
609
+ [[0, 20, 68], [17, 20, 65], [18, 32, 70], [34, 33, 68], [34, 20, 63],
610
+ [17, 18, 66], [17, 19, 65], [17, 19, 63], [17, 72, 61], [102, 21, 65],
611
+ [17, 20, 68], [17, 13, 70], [18, 11, 70], [17, 18, 70], [17, 18, 72],
612
+ [17, 20, 75], [17, 19, 73], [17, 21, 72], [17, 20, 70], [17, 38, 73],
613
+ [34, 51, 68], [68, 19, 70], [17, 19, 72], [17, 51, 73], [51, 20, 72],
614
+ [17, 18, 70], [17, 20, 65], [18, 19, 68], [17, 20, 66], [17, 82, 70],
615
+ [102, 20, 72], [17, 19, 70], [17, 18, 73], [17, 18, 68], [17, 19, 72],
616
+ [17, 10, 70], [17, 23, 70], [17, 18, 68], [17, 19, 63], [18, 19, 65],
617
+ [17, 37, 63], [34, 41, 61]]],
618
+ ['Wizard of Oz', 'Verse', 'F', 'Major',
619
+ [[0, 25, 69], [30, 6, 72], [17, 5, 72], [15, 5, 77], [16, 9, 77],
620
+ [16, 28, 81], [30, 6, 79], [16, 6, 79], [16, 15, 81], [17, 5, 79],
621
+ [16, 5, 76], [16, 7, 72], [15, 29, 79], [31, 13, 77], [15, 14, 69],
622
+ [17, 5, 72], [15, 6, 72], [14, 6, 77], [17, 6, 77], [16, 28, 81],
623
+ [32, 5, 79], [15, 6, 79], [15, 12, 81], [15, 6, 79], [17, 5, 76],
624
+ [15, 10, 72], [14, 37, 77], [48, 10, 81], [16, 24, 84], [31, 24, 84],
625
+ [31, 24, 84], [32, 24, 84], [31, 21, 77], [28, 12, 77], [17, 62, 77],
626
+ [62, 12, 82], [18, 22, 86], [31, 23, 86], [31, 25, 86], [31, 22, 86],
627
+ [31, 30, 86], [33, 38, 84], [76, 8, 81], [16, 9, 84], [14, 8, 82],
628
+ [15, 8, 77], [16, 8, 74], [15, 27, 82], [32, 27, 81], [141, 7, 72],
629
+ [16, 19, 79], [31, 21, 79], [30, 44, 79], [48, 5, 77], [16, 63, 77]]]]
630
+
631
+ ################################################################################
632
+
633
+ FILTERED_CHORDS = [[0], [0, 3], [0, 3, 5], [0, 3, 5, 8], [0, 3, 5, 9], [0, 3, 5, 10], [0, 3, 7],
634
+ [0, 3, 7, 10], [0, 3, 8], [0, 3, 9], [0, 3, 10], [0, 4], [0, 4, 6],
635
+ [0, 4, 6, 9], [0, 4, 6, 10], [0, 4, 7], [0, 4, 7, 10], [0, 4, 8], [0, 4, 9],
636
+ [0, 4, 10], [0, 5], [0, 5, 8], [0, 5, 9], [0, 5, 10], [0, 6], [0, 6, 9],
637
+ [0, 6, 10], [0, 7], [0, 7, 10], [0, 8], [0, 9], [0, 10], [1], [1, 4],
638
+ [1, 4, 6], [1, 4, 6, 9], [1, 4, 6, 10], [1, 4, 6, 11], [1, 4, 7],
639
+ [1, 4, 7, 10], [1, 4, 7, 11], [1, 4, 8], [1, 4, 8, 11], [1, 4, 9], [1, 4, 10],
640
+ [1, 4, 11], [1, 5], [1, 5, 8], [1, 5, 8, 11], [1, 5, 9], [1, 5, 10],
641
+ [1, 5, 11], [1, 6], [1, 6, 9], [1, 6, 10], [1, 6, 11], [1, 7], [1, 7, 10],
642
+ [1, 7, 11], [1, 8], [1, 8, 11], [1, 9], [1, 10], [1, 11], [2], [2, 5],
643
+ [2, 5, 8], [2, 5, 8, 11], [2, 5, 9], [2, 5, 10], [2, 5, 11], [2, 6], [2, 6, 9],
644
+ [2, 6, 10], [2, 6, 11], [2, 7], [2, 7, 10], [2, 7, 11], [2, 8], [2, 8, 11],
645
+ [2, 9], [2, 10], [2, 11], [3], [3, 5], [3, 5, 8], [3, 5, 8, 11], [3, 5, 9],
646
+ [3, 5, 10], [3, 5, 11], [3, 7], [3, 7, 10], [3, 7, 11], [3, 8], [3, 8, 11],
647
+ [3, 9], [3, 10], [3, 11], [4], [4, 6], [4, 6, 9], [4, 6, 10], [4, 6, 11],
648
+ [4, 7], [4, 7, 10], [4, 7, 11], [4, 8], [4, 8, 11], [4, 9], [4, 10], [4, 11],
649
+ [5], [5, 8], [5, 8, 11], [5, 9], [5, 10], [5, 11], [6], [6, 9], [6, 10],
650
+ [6, 11], [7], [7, 10], [7, 11], [8], [8, 11], [9], [10], [11]]
651
+
652
+ ################################################################################
653
+
654
+ import copy
655
+ from collections import Counter
656
+ from itertools import groupby
657
+
658
+ ################################################################################
659
+
660
+ def ordered_set(seq):
661
+ dic = {}
662
+ return [k for k, v in dic.fromkeys(seq).items()]
663
+
664
+ ################################################################################
665
+
666
+ def grouped_set(seq):
667
+ return [k for k, v in groupby(seq)]
668
+
669
+ ################################################################################
670
+
671
+ def melody_pitches(melody):
672
+ return [p[2] for p in melody[4]]
673
+
674
+ ################################################################################
675
+
676
+ def melody_tones(melody):
677
+ return [t[2] % 12 for t in melody[4]]
678
+
679
+ ################################################################################
680
+
681
+ def melody_pitches_counts(melody):
682
+ return [list(c) for c in Counter(melody_pitches(melody)).most_common()]
683
+
684
+ ################################################################################
685
+
686
+ def melody_tones_counts(melody):
687
+ return [list(c) for c in Counter(melody_tones(melody)).most_common()]
688
+
689
+ ################################################################################
690
+
691
+ def transpose_melody(melody, transpose_value):
692
+
693
+ mel = copy.deepcopy(melody)
694
+
695
+ score = mel[4]
696
+
697
+ for note in score:
698
+ note[2] += transpose_value
699
+
700
+ return mel[:4] + [score]
701
+
702
+ ################################################################################
703
+
704
+ def adjust_melody_average_timings(melody, average_time):
705
+
706
+ mel = copy.deepcopy(melody)
707
+
708
+ score = mel[4]
709
+
710
+ dtimes = [d[1] for d in score]
711
+ old_avg_dtime = sum(dtimes) / len(dtimes)
712
+
713
+ tadjk = old_avg_dtime / average_time
714
+
715
+ for note in score:
716
+ note[1] = int(note[1] / tadjk)
717
+ note[2] = int(note[2] / tadjk)
718
+
719
+ dtimes = [d[1] for d in score]
720
+ new_avg_dtime = sum(dtimes) / len(dtimes)
721
+
722
+ return [mel[:4] + [score], new_avg_dtime, old_avg_dtime]
723
+
724
+ ################################################################################
725
+
726
+ def most_common_melody_pitch_and_tone(melody):
727
+
728
+ mel_pitches = melody_pitches(melody)
729
+ mel_tones = [t % 12 for t in mel_pitches]
730
+
731
+ return [list(Counter(mel_pitches).most_common()[0]), list(Counter(mel_tones).most_common()[0])]
732
+
733
+ ################################################################################
734
+
735
+ def melody_dtimes_counts(melody):
736
+ return [list(c) for c in Counter([n[0] for n in melody[4]]).most_common()]
737
+
738
+ ################################################################################
739
+
740
+ def melody_durations_counts(melody):
741
+ return [list(c) for c in Counter([n[1] for n in melody[4]]).most_common()]
742
+
743
+ ################################################################################
744
+
745
+ def melody_notes_count(melody):
746
+ return len(melody[4])
747
+
748
+ ################################################################################
749
+
750
+ def most_common_melody_dtime_and_duration(melody):
751
+ return [melody_dtimes_counts(melody)[0], melody_durations_counts(melody)[0]]
752
+
753
+ ################################################################################
754
+
755
+ def melody_run_time(melody):
756
+ dtimes = [n[0] for n in melody[4]]
757
+ last_dur = melody[4][-1][1]
758
+
759
+ rel_run_time = sum(dtimes)+last_dur
760
+ ms_run_time = rel_run_time * 16
761
+ sec_run_time = ms_run_time / 1000
762
+ min_run_time = sec_run_time / 60
763
+
764
+ return [rel_run_time, ms_run_time, sec_run_time, min_run_time]
765
+
766
+ ################################################################################
767
+
768
+ def harmonize_melody(melody):
769
+
770
+ mel_tones = melody_tones(melody)
771
+
772
+ cur_chord = []
773
+
774
+ harmonized_chords = []
775
+
776
+ for i, m in enumerate(mel_tones):
777
+ cur_chord.append(m)
778
+ cc = sorted(set(cur_chord))
779
+
780
+ if cc in FILTERED_CHORDS:
781
+ harmonized_chords.append(cc)
782
+
783
+ else:
784
+ while sorted(set(cur_chord)) not in FILTERED_CHORDS:
785
+ cur_chord.pop(0)
786
+ cc = sorted(set(cur_chord))
787
+ harmonized_chords.append(cc)
788
+
789
+ return harmonized_chords
790
+
791
+ ################################################################################
792
+
793
+ def melody_range(melody):
794
+
795
+ mel_pitches = melody_pitches(melody)
796
+
797
+ max_pitch = max(mel_pitches)
798
+ avg_pitch = sum(mel_pitches) / len(mel_pitches)
799
+ min_pitch = min(mel_pitches)
800
+
801
+ pitch_range = max_pitch - min_pitch
802
+
803
+ return [max_pitch, avg_pitch, min_pitch, pitch_range]
804
+
805
+ ################################################################################
806
+
807
+ def melody_octave(melody):
808
+ return int(melody_range(melody)[1] // 12)
809
+
810
+ ################################################################################
811
+
812
+ def melody_to_enhanced_score_notes(melody,
813
+ melody_channel=3,
814
+ melody_velocity=-1,
815
+ melody_patch=40,
816
+ harmonized_tones_chords=[],
817
+ harmonized_tones_chords_base_octave=-1,
818
+ harmonized_tones_chords_channel=0,
819
+ harmonized_tones_chords_velocity=-1,
820
+ harmonized_tones_chords_patch=0
821
+ ):
822
+
823
+ name = melody[0]
824
+ part = melody[1]
825
+ key1 = melody[2]
826
+ key2 = melody[3]
827
+
828
+ if harmonized_tones_chords_base_octave > -1:
829
+ mel_base_octave = harmonized_tones_chords_base_octave
830
+
831
+ else:
832
+ mel_base_octave = melody_octave(melody) - 1
833
+
834
+ escore_notes = []
835
+
836
+ time = 0
837
+
838
+ for i, note in enumerate(melody[4]):
839
+
840
+ time += note[0]
841
+ dur = note[1]
842
+ ptc = note[2]
843
+
844
+ if melody_velocity == -1:
845
+ vel = int(110 + ((ptc % 12) * 1.5))
846
+ else:
847
+ vel = melody_velocity
848
+
849
+ escore_notes.append(['note', time, dur, melody_channel, ptc, vel, melody_patch])
850
+
851
+ if harmonized_tones_chords and i < len(harmonized_tones_chords):
852
+
853
+ for t in harmonized_tones_chords[i]:
854
+
855
+ ptc = (mel_base_octave * 12) + t
856
+
857
+ if harmonized_tones_chords_velocity == -1:
858
+ vel = int(80 + ((ptc % 12) * 1.5))
859
+ else:
860
+ vel = harmonized_tones_chords_velocity
861
+
862
+ escore_notes.append(['note', time, dur, harmonized_tones_chords_channel, ptc, vel, harmonized_tones_chords_patch])
863
+
864
+ return [name, part, key1 + ' ' + key2, escore_notes]
865
+
866
+ ################################################################################
867
+
868
+ def flip_melody(melody):
869
+
870
+ mel = copy.deepcopy(melody)
871
+
872
+ old_mel_range = melody_range(melody)
873
+
874
+ for note in mel[4]:
875
+ note[2] = 127 - note[2]
876
+
877
+ new_mel_range = melody_range(mel)
878
+
879
+ transpose_value = int(old_mel_range[1] - new_mel_range[1])
880
+
881
+ new_melody = transpose_melody(mel, transpose_value)
882
+
883
+ return melody[:4] + [new_melody[4]]
884
+
885
+ ################################################################################
886
+
887
+ def reverse_melody(melody, full_reverse=True):
888
+
889
+ mel = copy.deepcopy(melody)
890
+
891
+ if full_reverse:
892
+
893
+ abs_times = []
894
+
895
+ atime = 0
896
+
897
+ for t in mel[4]:
898
+ atime += t[0]
899
+ abs_times.append(atime)
900
+
901
+ abs_dtimes = []
902
+
903
+ for i, t in enumerate(mel[4]):
904
+ abs_dtimes.append(abs_times[i]+t[1])
905
+
906
+ new_dtimes = []
907
+ pt = abs_dtimes[-1]
908
+
909
+ for t in abs_dtimes[::-1]:
910
+ new_dtimes.append(pt-t)
911
+ pt = t
912
+
913
+ new_mel = copy.deepcopy(mel[4][::-1])
914
+
915
+ for i, t in enumerate(new_mel):
916
+ t[0] = new_dtimes[i]
917
+
918
+ return melody[:4] + [new_mel]
919
+
920
+ else:
921
+ mel_pitches = melody_pitches(melody)[::-1]
922
+
923
+ for i, note in enumerate(mel[4]):
924
+ note[2] = mel_pitches[i]
925
+
926
+ return melody[:4] + [mel[4]]
927
+
928
+ ################################################################################
929
+ #
930
+ # This is the end of TMELODIES Python module
931
+ #
932
+ ################################################################################
TMIDIX.py CHANGED
@@ -9381,6 +9381,150 @@ def advanced_add_drums_to_escore_notes(escore_notes,
9381
 
9382
  return delta_score_to_abs_score(drums_score)
9383
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9384
  ###################################################################################
9385
  #
9386
  # This is the end of the TMIDI X Python module
 
9381
 
9382
  return delta_score_to_abs_score(drums_score)
9383
 
9384
+ ###################################################################################
9385
+
9386
+ MIDI_TEXT_EVENTS = ['text_event',
9387
+ 'copyright_text_event',
9388
+ 'track_name',
9389
+ 'instrument_name',
9390
+ 'lyric',
9391
+ 'marker',
9392
+ 'cue_point',
9393
+ 'text_event_08',
9394
+ 'text_event_09',
9395
+ 'text_event_0a',
9396
+ 'text_event_0b',
9397
+ 'text_event_0c',
9398
+ 'text_event_0d',
9399
+ 'text_event_0e',
9400
+ 'text_event_0f'
9401
+ ]
9402
+
9403
+ ###################################################################################
9404
+
9405
+ import hashlib
9406
+ import re
9407
+
9408
+ ###################################################################################
9409
+
9410
+ def get_md5_hash(data):
9411
+ return hashlib.md5(data).hexdigest()
9412
+
9413
+ ###################################################################################
9414
+
9415
+ def is_valid_md5_hash(string):
9416
+ return bool(re.match(r'^[a-fA-F0-9]{32}$', string))
9417
+
9418
+ ###################################################################################
9419
+
9420
+ def clean_string(original_string,
9421
+ regex=r'[^a-zA-Z0-9 ]',
9422
+ remove_duplicate_spaces=True,
9423
+ title=False
9424
+ ):
9425
+
9426
+ cstr1 = re.sub(regex, '', original_string)
9427
+
9428
+ if title:
9429
+ cstr1 = cstr1.title()
9430
+
9431
+ if remove_duplicate_spaces:
9432
+ return re.sub(r'\s+', ' ', cstr1).strip()
9433
+
9434
+ else:
9435
+ return cstr1
9436
+
9437
+ ###################################################################################
9438
+
9439
+ def encode_to_ord(text, chars_range=[], sub_char='', chars_shift=0):
9440
+
9441
+ if not chars_range:
9442
+ chars_range = [32] + list(range(65, 91)) + list(range(97, 123))
9443
+
9444
+ if sub_char:
9445
+ chars_range.append(ord(sub_char))
9446
+
9447
+ chars_range = sorted(set(chars_range))
9448
+
9449
+ encoded = []
9450
+
9451
+ for char in text:
9452
+ if ord(char) in chars_range:
9453
+ encoded.append(chars_range.index(ord(char)) + chars_shift)
9454
+
9455
+ else:
9456
+ if sub_char:
9457
+ encoded.append(chars_range.index(ord(sub_char)) + chars_shift)
9458
+
9459
+
9460
+ return [encoded, chars_range]
9461
+
9462
+ ###################################################################################
9463
+
9464
+ def decode_from_ord(ord_list, chars_range=[], sub_char='', chars_shift=0):
9465
+
9466
+ if not chars_range:
9467
+ chars_range = [32] + list(range(65, 91)) + list(range(97, 123))
9468
+
9469
+ if sub_char:
9470
+ chars_range.append(ord(sub_char))
9471
+
9472
+ chars_range = sorted(set(chars_range))
9473
+
9474
+ return ''.join(chr(chars_range[num-chars_shift]) if 0 <= num-chars_shift < len(chars_range) else sub_char for num in ord_list)
9475
+
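+ # A quick round-trip sketch: with the default chars_range only spaces and
+ # ASCII letters survive, so plain text encodes and decodes losslessly.
+ enc, used_range = encode_to_ord('Hello World', chars_shift=1)
+ assert decode_from_ord(enc, chars_shift=1) == 'Hello World'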
9476
+ ###################################################################################
9477
+
9478
+ def lists_similarity(list1, list2, by_elements=True, by_sum=True):
9479
+
9480
+ if len(list1) != len(list2) or len(list1) % 2 != 0:
9481
+ return -1
9482
+
9483
+ element_ratios = []
9484
+ total_counts1 = sum(list1)
9485
+ total_counts2 = sum(list2)
9486
+
9487
+ for a, b in zip(list1, list2):
9488
+ if a == 0 and b == 0:
9489
+ element_ratios.append(1)
9490
+ elif a == 0 or b == 0:
9491
+ element_ratios.append(0)
9492
+ else:
9493
+ element_ratios.append(min(a, b) / max(a, b))
9494
+
9495
+ average_element_ratio = sum(element_ratios) / len(element_ratios)
9496
+
9497
+ total_counts_ratio = min(total_counts1, total_counts2) / max(total_counts1, total_counts2)
9498
+
9499
+ if by_elements and by_sum:
9500
+ return (average_element_ratio + total_counts_ratio) / 2
9501
+
9502
+ elif by_elements and not by_sum:
9503
+ return average_element_ratio
9504
+
9505
+ elif not by_elements and by_sum:
9506
+ return total_counts_ratio
9507
+
9508
+ else:
9509
+ return -1
9510
+
9511
+ ###################################################################################
9512
+
9513
+ def find_indexes(lst, value, mode='equal', dual_mode=True):
9514
+
9515
+ indexes = []
9516
+
9517
+ if mode == 'equal' or dual_mode:
9518
+ indexes.extend([index for index, elem in enumerate(lst) if elem == value])
9519
+
9520
+ if mode == 'smaller':
9521
+ indexes.extend([index for index, elem in enumerate(lst) if elem < value])
9522
+
9523
+ if mode == 'larger':
9524
+ indexes.extend([index for index, elem in enumerate(lst) if elem > value])
9525
+
9526
+ return sorted(set(indexes))
9527
+
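+ # Toy sketches for the two helpers above (values chosen for illustration).
+ sim = lists_similarity([1, 2, 3, 4], [2, 2, 3, 4])  # element ratios 0.5, 1, 1, 1,
+                                                     # averaged with the 10/11 sums ratio
+ assert find_indexes([5, 1, 5, 9], 5) == [0, 2]
+ assert find_indexes([5, 1, 5, 9], 5, mode='smaller', dual_mode=False) == [1]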
9528
  ###################################################################################
9529
  #
9530
  # This is the end of the TMIDI X Python module
TPLOTS.py ADDED
@@ -0,0 +1,1369 @@
1
+ #! /usr/bin/python3
2
+
3
+ r'''############################################################################
4
+ ################################################################################
5
+ #
6
+ #
7
+ # Tegridy Plots Python Module (TPLOTS)
8
+ # Version 1.0
9
+ #
10
+ # Project Los Angeles
11
+ #
12
+ # Tegridy Code 2024
13
+ #
14
+ # https://github.com/asigalov61/tegridy-tools
15
+ #
16
+ #
17
+ ################################################################################
18
+ #
19
+ # Copyright 2024 Project Los Angeles / Tegridy Code
20
+ #
21
+ # Licensed under the Apache License, Version 2.0 (the "License");
22
+ # you may not use this file except in compliance with the License.
23
+ # You may obtain a copy of the License at
24
+ #
25
+ # http://www.apache.org/licenses/LICENSE-2.0
26
+ #
27
+ # Unless required by applicable law or agreed to in writing, software
28
+ # distributed under the License is distributed on an "AS IS" BASIS,
29
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
30
+ # See the License for the specific language governing permissions and
31
+ # limitations under the License.
32
+ #
33
+ ################################################################################
34
+ ################################################################################
35
+ #
36
+ # Critical dependencies
37
+ #
38
+ # !pip install numpy
39
+ # !pip install scipy
40
+ # !pip install matplotlib
41
+ # !pip install networkx
42
+ # !pip3 install scikit-learn
43
+ #
44
+ ################################################################################
45
+ #
46
+ # Future critical dependencies
47
+ #
48
+ # !pip install umap-learn
49
+ # !pip install alphashape
50
+ #
51
+ ################################################################################
52
+ '''
53
+
54
+ ################################################################################
55
+ # Modules imports
56
+ ################################################################################
57
+
58
+ import os
59
+ from collections import Counter
60
+ from itertools import groupby
61
+
62
+ import numpy as np
63
+
64
+ import networkx as nx
65
+
66
+ from sklearn.manifold import TSNE
67
+ from sklearn import metrics
68
+ from sklearn.preprocessing import MinMaxScaler
69
+ from sklearn.decomposition import PCA
70
+
71
+ from scipy.ndimage import zoom
72
+ from scipy.spatial import distance_matrix
73
+ from scipy.sparse.csgraph import minimum_spanning_tree
74
+ from scipy.stats import zscore
75
+
76
+ import matplotlib.pyplot as plt
77
+ from PIL import Image
78
+
79
+ ################################################################################
80
+ # Constants
81
+ ################################################################################
82
+
83
+ ALL_CHORDS_FULL = [[0], [0, 3], [0, 3, 5], [0, 3, 5, 8], [0, 3, 5, 9], [0, 3, 5, 10], [0, 3, 6],
84
+ [0, 3, 6, 9], [0, 3, 6, 10], [0, 3, 7], [0, 3, 7, 10], [0, 3, 8], [0, 3, 9],
85
+ [0, 3, 10], [0, 4], [0, 4, 6], [0, 4, 6, 9], [0, 4, 6, 10], [0, 4, 7],
86
+ [0, 4, 7, 10], [0, 4, 8], [0, 4, 9], [0, 4, 10], [0, 5], [0, 5, 8], [0, 5, 9],
87
+ [0, 5, 10], [0, 6], [0, 6, 9], [0, 6, 10], [0, 7], [0, 7, 10], [0, 8], [0, 9],
88
+ [0, 10], [1], [1, 4], [1, 4, 6], [1, 4, 6, 9], [1, 4, 6, 10], [1, 4, 6, 11],
89
+ [1, 4, 7], [1, 4, 7, 10], [1, 4, 7, 11], [1, 4, 8], [1, 4, 8, 11], [1, 4, 9],
90
+ [1, 4, 10], [1, 4, 11], [1, 5], [1, 5, 8], [1, 5, 8, 11], [1, 5, 9],
91
+ [1, 5, 10], [1, 5, 11], [1, 6], [1, 6, 9], [1, 6, 10], [1, 6, 11], [1, 7],
92
+ [1, 7, 10], [1, 7, 11], [1, 8], [1, 8, 11], [1, 9], [1, 10], [1, 11], [2],
93
+ [2, 5], [2, 5, 8], [2, 5, 8, 11], [2, 5, 9], [2, 5, 10], [2, 5, 11], [2, 6],
94
+ [2, 6, 9], [2, 6, 10], [2, 6, 11], [2, 7], [2, 7, 10], [2, 7, 11], [2, 8],
95
+ [2, 8, 11], [2, 9], [2, 10], [2, 11], [3], [3, 5], [3, 5, 8], [3, 5, 8, 11],
96
+ [3, 5, 9], [3, 5, 10], [3, 5, 11], [3, 6], [3, 6, 9], [3, 6, 10], [3, 6, 11],
97
+ [3, 7], [3, 7, 10], [3, 7, 11], [3, 8], [3, 8, 11], [3, 9], [3, 10], [3, 11],
98
+ [4], [4, 6], [4, 6, 9], [4, 6, 10], [4, 6, 11], [4, 7], [4, 7, 10], [4, 7, 11],
99
+ [4, 8], [4, 8, 11], [4, 9], [4, 10], [4, 11], [5], [5, 8], [5, 8, 11], [5, 9],
100
+ [5, 10], [5, 11], [6], [6, 9], [6, 10], [6, 11], [7], [7, 10], [7, 11], [8],
101
+ [8, 11], [9], [10], [11]]
102
+
103
+ ################################################################################
104
+
105
+ CHORDS_TYPES = ['WHITE', 'BLACK', 'UNKNOWN', 'MIXED WHITE', 'MIXED BLACK', 'MIXED GRAY']
106
+
107
+ ################################################################################
108
+
109
+ WHITE_NOTES = [0, 2, 4, 5, 7, 9, 11]
110
+
111
+ ################################################################################
112
+
113
+ BLACK_NOTES = [1, 3, 6, 8, 10]
114
+
115
+ ################################################################################
116
+ # Helper functions
117
+ ################################################################################
118
+
119
+ def tones_chord_type(tones_chord,
120
+ return_chord_type_index=True,
121
+ ):
122
+
123
+ """
124
+ Returns tones chord type
125
+ """
126
+
127
+ WN = WHITE_NOTES
128
+ BN = BLACK_NOTES
129
+ MX = WHITE_NOTES + BLACK_NOTES
130
+
131
+
132
+ CHORDS = ALL_CHORDS_FULL
133
+
134
+ tones_chord = sorted(tones_chord)
135
+
136
+ ctype = 'UNKNOWN'
137
+
138
+ if tones_chord in CHORDS:
139
+
140
+ if sorted(set(tones_chord) & set(WN)) == tones_chord:
141
+ ctype = 'WHITE'
142
+
143
+ elif sorted(set(tones_chord) & set(BN)) == tones_chord:
144
+ ctype = 'BLACK'
145
+
146
+ if len(tones_chord) > 1 and sorted(set(tones_chord) & set(MX)) == tones_chord:
147
+
148
+ if len(sorted(set(tones_chord) & set(WN))) == len(sorted(set(tones_chord) & set(BN))):
149
+ ctype = 'MIXED GRAY'
150
+
151
+ elif len(sorted(set(tones_chord) & set(WN))) > len(sorted(set(tones_chord) & set(BN))):
152
+ ctype = 'MIXED WHITE'
153
+
154
+ elif len(sorted(set(tones_chord) & set(WN))) < len(sorted(set(tones_chord) & set(BN))):
155
+ ctype = 'MIXED BLACK'
156
+
157
+ if return_chord_type_index:
158
+ return CHORDS_TYPES.index(ctype)
159
+
160
+ else:
161
+ return ctype
162
+
163
+ ###################################################################################
164
+
165
+ def tone_type(tone,
166
+ return_tone_type_index=True
167
+ ):
168
+
169
+ """
170
+ Returns tone type
171
+ """
172
+
173
+ tone = tone % 12
174
+
175
+ if tone in BLACK_NOTES:
176
+ if return_tone_type_index:
177
+ return CHORDS_TYPES.index('BLACK')
178
+ else:
179
+ return "BLACK"
180
+
181
+ else:
182
+ if return_tone_type_index:
183
+ return CHORDS_TYPES.index('WHITE')
184
+ else:
185
+ return "WHITE"
186
+
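+ # Quick sanity checks: [0, 6] mixes one white and one black tone, so the
+ # classifier reports 'MIXED GRAY', while 61 % 12 == 1 is a black key.
+ assert tones_chord_type([0, 6], return_chord_type_index=False) == 'MIXED GRAY'
+ assert tone_type(61, return_tone_type_index=False) == 'BLACK'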
187
+ ###################################################################################
188
+
189
+ def find_closest_points(points, return_points=True):
190
+
191
+ """
192
+ Find closest 2D points
193
+ """
194
+
195
+ coords = np.array(points)
196
+
197
+ num_points = coords.shape[0]
198
+ closest_matches = np.zeros(num_points, dtype=int)
199
+ distances = np.zeros((num_points, num_points))
200
+
201
+ for i in range(num_points):
202
+ for j in range(num_points):
203
+ if i != j:
204
+ distances[i, j] = np.linalg.norm(coords[i] - coords[j])
205
+ else:
206
+ distances[i, j] = np.inf
207
+
208
+ closest_matches = np.argmin(distances, axis=1)
209
+
210
+ if return_points:
211
+ points_matches = coords[closest_matches].tolist()
212
+ return points_matches
213
+
214
+ else:
215
+ return closest_matches.tolist()
216
+
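+ # A tiny sketch: for each 2D point, its nearest neighbour (O(n^2) search).
+ pts = [[0, 0], [0, 1], [10, 10]]
+ assert find_closest_points(pts) == [[0, 1], [0, 0], [0, 1]]
+ assert find_closest_points(pts, return_points=False) == [1, 0, 1]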
217
+ ################################################################################
218
+
219
+ def reduce_dimensionality_tsne(list_of_valies,
220
+ n_comp=2,
221
+ n_iter=5000,
222
+ verbose=True
223
+ ):
224
+
225
+ """
226
+ Reduces the dimensionality of the values using t-SNE.
227
+ """
228
+
229
+ vals = np.array(list_of_valies)
230
+
231
+ tsne = TSNE(n_components=n_comp,
232
+ n_iter=n_iter,
233
+ verbose=verbose)
234
+
235
+ reduced_vals = tsne.fit_transform(vals)
236
+
237
+ return reduced_vals.tolist()
238
+
239
+ ################################################################################
240
+
241
+ def compute_mst_edges(similarity_scores_list):
242
+
243
+ """
244
+ Computes the Minimum Spanning Tree (MST) edges based on the similarity scores.
245
+ """
246
+
247
+ num_tokens = len(similarity_scores_list[0])
248
+
249
+ graph = nx.Graph()
250
+
251
+ for i in range(num_tokens):
252
+ for j in range(i + 1, num_tokens):
253
+ weight = 1 - similarity_scores_list[i][j]
254
+ graph.add_edge(i, j, weight=weight)
255
+
256
+ mst = nx.minimum_spanning_tree(graph)
257
+
258
+ mst_edges = list(mst.edges(data=False))
259
+
260
+ return mst_edges
261
+
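+ # A minimal sketch with a hand-made 3x3 similarity matrix (1.0 = identical);
+ # the pairs (0, 1) and (1, 2) are the cheapest edges, so they form the MST.
+ sims = [[1.0, 0.9, 0.1],
+         [0.9, 1.0, 0.2],
+         [0.1, 0.2, 1.0]]
+ print(compute_mst_edges(sims))  # -> [(0, 1), (1, 2)] (edge order may vary)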
262
+ ################################################################################
263
+
264
+ def square_binary_matrix(binary_matrix,
265
+ matrix_size=128,
266
+ interpolation_order=5,
267
+ return_square_matrix_points=False
268
+ ):
269
+
270
+ """
271
+ Reduces an arbitrary binary matrix to a square binary matrix
272
+ """
273
+
274
+ zoom_factors = (matrix_size / len(binary_matrix), 1)
275
+
276
+ resized_matrix = zoom(binary_matrix, zoom_factors, order=interpolation_order)
277
+
278
+ resized_matrix = (resized_matrix > 0.5).astype(int)
279
+
280
+ final_matrix = np.zeros((matrix_size, matrix_size), dtype=int)
281
+ final_matrix[:, :resized_matrix.shape[1]] = resized_matrix
282
+
283
+ points = np.column_stack(np.where(final_matrix == 1)).tolist()
284
+
285
+ if return_square_matrix_points:
286
+ return points
287
+
288
+ else:
289
+ return final_matrix  # zero-padded to matrix_size x matrix_size, per the docstring
290
+
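+ # A small sketch: squash a 256-step, 128-pitch binary roll down to 128x128.
+ # The random input is purely illustrative.
+ roll = (np.random.rand(256, 128) > 0.9).astype(int).tolist()
+ sq = square_binary_matrix(roll)  # 128 x 128 binary matrix
+ pts = square_binary_matrix(roll, return_square_matrix_points=True)  # [row, col] pairs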
291
+ ################################################################################
292
+
293
+ def square_matrix_points_colors(square_matrix_points):
294
+
295
+ """
296
+ Returns colors for square matrix points
297
+ """
298
+
299
+ cmap = generate_colors(12)
300
+
301
+ chords = []
302
+ chords_dict = set()
303
+ counts = []
304
+
305
+ for k, v in groupby(square_matrix_points, key=lambda x: x[0]):
306
+ pgroup = [vv[1] for vv in v]
307
+ chord = sorted(set(pgroup))
308
+ tchord = sorted(set([p % 12 for p in chord]))
309
+ chords_dict.add(tuple(tchord))
310
+ chords.append(tuple(tchord))
311
+ counts.append(len(pgroup))
312
+
313
+ chords_dict = sorted(chords_dict)
314
+
315
+ colors = []
316
+
317
+ for i, c in enumerate(chords):
318
+ colors.extend([cmap[round(sum(c) / len(c))]] * counts[i])
319
+
320
+ return colors
321
+
322
+ ################################################################################
323
+
324
+ def hsv_to_rgb(h, s, v):
325
+
326
+ if s == 0.0:
327
+ return v, v, v
328
+
329
+ i = int(h*6.0)
330
+ f = (h*6.0) - i
331
+ p = v*(1.0 - s)
332
+ q = v*(1.0 - s*f)
333
+ t = v*(1.0 - s*(1.0-f))
334
+ i = i%6
335
+
336
+ return [(v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q)][i]
337
+
338
+ ################################################################################
339
+
340
+ def generate_colors(n):
341
+ return [hsv_to_rgb(i/n, 1, 1) for i in range(n)]
342
+
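+ # generate_colors(n) walks the HSV hue wheel, returning n RGB triples in [0, 1]:
+ palette = generate_colors(4)
+ # -> red, chartreuse, cyan and violet as (r, g, b) tuples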
343
+ ################################################################################
344
+
345
+ def add_arrays(a, b):
346
+ return [sum(pair) for pair in zip(a, b)]
347
+
348
+ ################################################################################
349
+
350
+ def calculate_similarities(lists_of_values, metric='cosine'):
351
+ return metrics.pairwise_distances(lists_of_values, metric=metric).tolist()
352
+
353
+ ################################################################################
354
+
355
+ def get_tokens_embeddings(x_transformer_model):
356
+ return x_transformer_model.net.token_emb.emb.weight.detach().cpu().tolist()
357
+
358
+ ################################################################################
359
+
360
+ def minkowski_distance_matrix(X, p=3):
361
+
362
+ X = np.array(X)
363
+
364
+ n = X.shape[0]
365
+ dist_matrix = np.zeros((n, n))
366
+
367
+ for i in range(n):
368
+ for j in range(n):
369
+ dist_matrix[i, j] = np.sum(np.abs(X[i] - X[j])**p)**(1/p)
370
+
371
+ return dist_matrix.tolist()
372
+
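+ # Sketch: pairwise Minkowski (p=3) distances for three collinear 2D points;
+ # dm[0][1] == (1**3 + 1**3) ** (1/3) ~= 1.26.
+ pts = [[0, 0], [1, 1], [2, 2]]
+ dm = minkowski_distance_matrix(pts)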
373
+ ################################################################################
374
+
375
+ def robust_normalize(values):
376
+
377
+ values = np.array(values)
378
+ q1 = np.percentile(values, 25)
379
+ q3 = np.percentile(values, 75)
380
+ iqr = q3 - q1
381
+
382
+ filtered_values = values[(values >= q1 - 1.5 * iqr) & (values <= q3 + 1.5 * iqr)]
383
+
384
+ min_val = np.min(filtered_values)
385
+ max_val = np.max(filtered_values)
386
+ normalized_values = (values - min_val) / (max_val - min_val)
387
+
388
+ normalized_values = np.clip(normalized_values, 0, 1)
389
+
390
+ return normalized_values.tolist()
391
+
392
+ ################################################################################
393
+
394
+ def min_max_normalize(values):
395
+
396
+ scaler = MinMaxScaler()
397
+
398
+ return scaler.fit_transform(values).tolist()
399
+
400
+ ################################################################################
401
+
402
+ def remove_points_outliers(points, z_score_threshold=3):
403
+
404
+ points = np.array(points)
405
+
406
+ z_scores = np.abs(zscore(points, axis=0))
407
+
408
+ return points[(z_scores < z_score_threshold).all(axis=1)].tolist()
409
+
410
+ ################################################################################
411
+
412
+ def generate_labels(lists_of_values,
413
+ return_indices_labels=False
414
+ ):
415
+
416
+ ordered_indices = list(range(len(lists_of_values)))
417
+ ordered_indices_labels = [str(i) for i in ordered_indices]
418
+ ordered_values_labels = [str(lists_of_values[i]) for i in ordered_indices]
419
+
420
+ if return_indices_labels:
421
+ return ordered_indices_labels
422
+
423
+ else:
424
+ return ordered_values_labels
425
+
426
+ ################################################################################
427
+
428
+ def reduce_dimensionality_pca(list_of_values, n_components=2):
429
+
430
+ """
431
+ Reduces the dimensionality of the values using PCA.
432
+ """
433
+
434
+ pca = PCA(n_components=n_components)
435
+ pca_data = pca.fit_transform(list_of_values)
436
+
437
+ return pca_data.tolist()
438
+
439
+ def reduce_dimensionality_simple(list_of_values,
440
+ return_means=True,
441
+ return_std_devs=True,
442
+ return_medians=False,
443
+ return_vars=False
444
+ ):
445
+
446
+ '''
447
+ Reduces dimensionality of the values in a simple way
448
+ '''
449
+
450
+ array = np.array(list_of_values)
451
+ results = []
452
+
453
+ if return_means:
454
+ means = np.mean(array, axis=1)
455
+ results.append(means)
456
+
457
+ if return_std_devs:
458
+ std_devs = np.std(array, axis=1)
459
+ results.append(std_devs)
460
+
461
+ if return_medians:
462
+ medians = np.median(array, axis=1)
463
+ results.append(medians)
464
+
465
+ if return_vars:
466
+ vars = np.var(array, axis=1)
467
+ results.append(vars)
468
+
469
+ merged_results = np.column_stack(results)
470
+
471
+ return merged_results.tolist()
472
+
473
+ ################################################################################
474
+
475
+ def reduce_dimensionality_2d_distance(list_of_values, p=5):
476
+
477
+ '''
478
+ Reduces the dimensionality of the values using 2d distance
479
+ '''
480
+
481
+ values = np.array(list_of_values)
482
+
483
+ dist_matrix = distance_matrix(values, values, p=p)
484
+
485
+ mst = minimum_spanning_tree(dist_matrix).toarray()
486
+
487
+ points = []
488
+
489
+ for i in range(len(values)):
490
+ for j in range(len(values)):
491
+ if mst[i, j] > 0:
492
+ points.append([i, j])
493
+
494
+ return points
495
+
496
+ ################################################################################
497
+
498
+ def normalize_to_range(values, n):
499
+
500
+ min_val = min(values)
501
+ max_val = max(values)
502
+
503
+ range_val = max_val - min_val
504
+
505
+ normalized_values = [((value - min_val) / range_val * 2 * n) - n for value in values]
506
+
507
+ return normalized_values
508
+
509
+ ################################################################################
510
+
511
+ def reduce_dimensionality_simple_pca(list_of_values, n_components=2):
512
+
513
+ '''
514
+ Reduces the dimensionality of the values using simple PCA
515
+ '''
516
+
517
+ reduced_values = []
518
+
519
+ for l in list_of_values:
520
+
521
+ norm_values = [round(v * len(l)) for v in normalize_to_range(l, (n_components+1) // 2)]
522
+
523
+ pca_values = Counter(norm_values).most_common()
524
+ pca_values = [vv[0] / len(l) for vv in pca_values]
525
+ pca_values = pca_values[:n_components]
526
+ pca_values = pca_values + [0] * (n_components - len(pca_values))
527
+
528
+ reduced_values.append(pca_values)
529
+
530
+ return reduced_values
531
+
532
+ ################################################################################
533
+
534
+ def filter_and_replace_values(list_of_values,
535
+ threshold,
536
+ replace_value,
537
+ replace_above_threshold=False
538
+ ):
539
+
540
+ array = np.array(list_of_values)
541
+
542
+ modified_array = np.copy(array)
543
+
544
+ if replace_above_threshold:
545
+ modified_array[modified_array > threshold] = replace_value
546
+
547
+ else:
548
+ modified_array[modified_array < threshold] = replace_value
549
+
550
+ return modified_array.tolist()
551
+
552
+ ################################################################################
553
+
554
+ def find_shortest_constellation_path(points,
555
+ start_point_idx,
556
+ end_point_idx,
557
+ p=5,
558
+ return_path_length=False,
559
+ return_path_points=False,
560
+ ):
561
+
562
+ """
563
+ Finds the shortest path between two points of the points constellation
564
+ """
565
+
566
+ points = np.array(points)
567
+
568
+ dist_matrix = distance_matrix(points, points, p=p)
569
+
570
+ mst = minimum_spanning_tree(dist_matrix).toarray()
571
+
572
+ G = nx.Graph()
573
+
574
+ for i in range(len(points)):
575
+ for j in range(len(points)):
576
+ if mst[i, j] > 0:
577
+ G.add_edge(i, j, weight=mst[i, j])
578
+
579
+ path = nx.shortest_path(G,
580
+ source=start_point_idx,
581
+ target=end_point_idx,
582
+ weight='weight'
583
+ )
584
+
585
+ path_length = nx.shortest_path_length(G,
586
+ source=start_point_idx,
587
+ target=end_point_idx,
588
+ weight='weight')
589
+
590
+ path_points = points[np.array(path)].tolist()
591
+
592
+
593
+ if return_path_points:
594
+ return path_points
595
+
596
+ if return_path_length:
597
+ return path_length
598
+
599
+ return path
600
+
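+ # Sketch: walk the MST of a tiny collinear constellation from point 0 to 3.
+ pts = [[0, 0], [1, 0], [2, 0], [3, 0]]
+ print(find_shortest_constellation_path(pts, 0, 3))                           # -> [0, 1, 2, 3]
+ print(find_shortest_constellation_path(pts, 0, 3, return_path_length=True))  # -> 3.0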
601
+ ################################################################################
602
+ # Core functions
603
+ ################################################################################
604
+
605
+ def plot_ms_SONG(ms_song,
606
+ preview_length_in_notes=0,
607
+ block_lines_times_list = None,
608
+ plot_title='ms Song',
609
+ max_num_colors=129,
610
+ drums_color_num=128,
611
+ plot_size=(11,4),
612
+ note_height = 0.75,
613
+ show_grid_lines=False,
614
+ return_plt = False,
615
+ timings_multiplier=1,
616
+ save_plt='',
617
+ save_only_plt_image=True,
618
+ save_transparent=False
619
+ ):
620
+
621
+ '''ms SONG plot'''
622
+
623
+ notes = [s for s in ms_song if s[0] == 'note']
624
+
625
+ if (len(max(notes, key=len)) != 7) or (len(min(notes, key=len)) != 7):
626
+ print('The song notes do not have patches information')
627
+ print('Please add patches to the notes in the song')
628
+
629
+ else:
630
+
631
+ start_times = [(s[1] * timings_multiplier) / 1000 for s in notes]
632
+ durations = [(s[2] * timings_multiplier) / 1000 for s in notes]
633
+ pitches = [s[4] for s in notes]
634
+ patches = [s[6] for s in notes]
635
+
636
+ colors = generate_colors(max_num_colors)
637
+ colors[drums_color_num] = (1, 1, 1)
638
+
639
+ pbl = (notes[preview_length_in_notes][1] * timings_multiplier) / 1000
640
+
641
+ fig, ax = plt.subplots(figsize=plot_size)
642
+
643
+ for start, duration, pitch, patch in zip(start_times, durations, pitches, patches):
644
+ rect = plt.Rectangle((start, pitch), duration, note_height, facecolor=colors[patch])
645
+ ax.add_patch(rect)
646
+
647
+ ax.set_xlim([min(start_times), max(add_arrays(start_times, durations))])
648
+ ax.set_ylim([min(pitches)-1, max(pitches)+1])
649
+
650
+ ax.set_facecolor('black')
651
+ fig.patch.set_facecolor('white')
652
+
653
+ if preview_length_in_notes > 0:
654
+ ax.axvline(x=pbl, c='white')
655
+
656
+ if block_lines_times_list:
657
+ for bl in block_lines_times_list:
658
+ ax.axvline(x=bl, c='white')
659
+
660
+ if show_grid_lines:
661
+ ax.grid(color='white')
662
+
663
+ plt.xlabel('Time (s)', c='black')
664
+ plt.ylabel('MIDI Pitch', c='black')
665
+
666
+ plt.title(plot_title)
667
+
668
+ if save_plt != '':
669
+ if save_only_plt_image:
670
+ plt.axis('off')
671
+ plt.title('')
672
+ plt.savefig(save_plt,
673
+ transparent=save_transparent,
674
+ bbox_inches='tight',
675
+ pad_inches=0,
676
+ facecolor='black'
677
+ )
678
+ plt.close()
679
+
680
+ else:
681
+ plt.savefig(save_plt)
682
+ plt.close()
683
+
684
+ if return_plt:
685
+ return fig
686
+
687
+ plt.show()
688
+ plt.close()
689
+
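+ # A minimal call sketch; demo_song is a hypothetical score in the expected
+ # format ['note', start_ms, dur_ms, channel, pitch, velocity, patch].
+ demo_song = [['note', 0, 500, 0, 60, 90, 0],
+              ['note', 500, 500, 0, 64, 90, 0],
+              ['note', 1000, 1000, 0, 67, 90, 0]]
+ plot_ms_SONG(demo_song, plot_title='Demo C Major Arpeggio')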
690
+ ################################################################################
691
+
692
+ def plot_square_matrix_points(list_of_points,
693
+ list_of_points_colors,
694
+ plot_size=(7, 7),
695
+ point_size = 10,
696
+ show_grid_lines=False,
697
+ plot_title = 'Square Matrix Points Plot',
698
+ return_plt=False,
699
+ save_plt='',
700
+ save_only_plt_image=True,
701
+ save_transparent=False
702
+ ):
703
+
704
+ '''Square matrix points plot'''
705
+
706
+ fig, ax = plt.subplots(figsize=plot_size)
707
+
708
+ ax.set_facecolor('black')
709
+
710
+ if show_grid_lines:
711
+ ax.grid(color='white')
712
+
713
+ plt.xlabel('Time Step', c='black')
714
+ plt.ylabel('MIDI Pitch', c='black')
715
+
716
+ plt.title(plot_title)
717
+
718
+ plt.scatter([p[0] for p in list_of_points],
719
+ [p[1] for p in list_of_points],
720
+ c=list_of_points_colors,
721
+ s=point_size
722
+ )
723
+
724
+ if save_plt != '':
725
+ if save_only_plt_image:
726
+ plt.axis('off')
727
+ plt.title('')
728
+ plt.savefig(save_plt,
729
+ transparent=save_transparent,
730
+ bbox_inches='tight',
731
+ pad_inches=0,
732
+ facecolor='black'
733
+ )
734
+ plt.close()
735
+
736
+ else:
737
+ plt.savefig(save_plt)
738
+ plt.close()
739
+
740
+ if return_plt:
741
+ return fig
742
+
743
+ plt.show()
744
+ plt.close()
745
+
746
+ ################################################################################
747
+
748
+ def plot_cosine_similarities(lists_of_values,
749
+ plot_size=(7, 7),
750
+ save_plot=''
751
+ ):
752
+
753
+ """
754
+ Cosine similarities plot
755
+ """
756
+
757
+ cos_sim = metrics.pairwise_distances(lists_of_values, metric='cosine')
758
+
759
+ plt.figure(figsize=plot_size)
760
+
761
+ plt.imshow(cos_sim, cmap="inferno", interpolation="nearest")
762
+
763
+ im_ratio = cos_sim.shape[0] / cos_sim.shape[1]
764
+
765
+ plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)
766
+
767
+ plt.xlabel("Index")
768
+ plt.ylabel("Index")
769
+
770
+ plt.tight_layout()
771
+
772
+ if save_plot != '':
773
+ plt.savefig(save_plot, bbox_inches="tight")
774
+ plt.close()
775
+
776
+ plt.show()
777
+ plt.close()
778
+
779
+ ################################################################################
780
+
781
+ def plot_points_with_mst_lines(points,
782
+ points_labels,
783
+ points_mst_edges,
784
+ plot_size=(20, 20),
785
+ labels_size=24,
786
+ save_plot=''
787
+ ):
788
+
789
+ """
790
+ Plots 2D points with labels and MST lines.
791
+ """
792
+
793
+ plt.figure(figsize=plot_size)
794
+
795
+ for i, label in enumerate(points_labels):
796
+ plt.scatter(points[i][0], points[i][1])
797
+ plt.annotate(label, (points[i][0], points[i][1]), fontsize=labels_size)
798
+
799
+ for edge in points_mst_edges:
800
+ i, j = edge
801
+ plt.plot([points[i][0], points[j][0]], [points[i][1], points[j][1]], 'k-', alpha=0.5)
802
+
803
+ plt.title('Points Map with MST Lines', fontsize=labels_size)
804
+ plt.xlabel('X-axis', fontsize=labels_size)
805
+ plt.ylabel('Y-axis', fontsize=labels_size)
806
+
807
+ if save_plot != '':
808
+ plt.savefig(save_plot, bbox_inches="tight")
809
+ plt.close()
810
+
811
+ plt.show()
812
+
813
+ plt.close()
814
+
815
+ ################################################################################
816
+
817
+ def plot_points_constellation(points,
818
+ points_labels,
819
+ p=5,
820
+ plot_size=(15, 15),
821
+ labels_size=12,
822
+ show_grid=False,
823
+ save_plot=''
824
+ ):
825
+
826
+ """
827
+ Plots 2D points constellation
828
+ """
829
+
830
+ points = np.array(points)
831
+
832
+ dist_matrix = distance_matrix(points, points, p=p)
833
+
834
+ mst = minimum_spanning_tree(dist_matrix).toarray()
835
+
836
+ plt.figure(figsize=plot_size)
837
+
838
+ plt.scatter(points[:, 0], points[:, 1], color='blue')
839
+
840
+ for i, label in enumerate(points_labels):
841
+ plt.annotate(label, (points[i, 0], points[i, 1]),
842
+ textcoords="offset points",
843
+ xytext=(0, 10),
844
+ ha='center',
845
+ fontsize=labels_size
846
+ )
847
+
848
+ for i in range(len(points)):
849
+ for j in range(len(points)):
850
+ if mst[i, j] > 0:
851
+ plt.plot([points[i, 0], points[j, 0]], [points[i, 1], points[j, 1]], 'k--')
852
+
853
+ plt.xlabel('X-axis', fontsize=labels_size)
854
+ plt.ylabel('Y-axis', fontsize=labels_size)
855
+ plt.title('2D Coordinates with Minimum Spanning Tree', fontsize=labels_size)
856
+
857
+ plt.grid(show_grid)
858
+
859
+ if save_plot != '':
860
+ plt.savefig(save_plot, bbox_inches="tight")
861
+ plt.close()
862
+
863
+ plt.show()
864
+
865
+ plt.close()
866
+
867
+ ################################################################################
868
+
869
+ def binary_matrix_to_images(matrix,
870
+ step,
871
+ overlap,
872
+ output_folder='./Dataset/',
873
+ output_img_prefix='image',
874
+ output_img_ext='.png',
875
+ save_to_array=False,
876
+ verbose=True
877
+ ):
878
+
879
+ if not save_to_array:
880
+
881
+ if verbose:
882
+ print('=' * 70)
883
+ print('Checking output folder dir...')
884
+
885
+ os.makedirs(os.path.dirname(output_folder), exist_ok=True)
886
+
887
+ if verbose:
888
+ print('Done!')
889
+
890
+ if verbose:
891
+ print('=' * 70)
892
+ print('Writing images...')
893
+
894
+ matrix = np.array(matrix, dtype=np.uint8)
895
+
896
+ image_array = []
897
+
898
+ for i in range(0, max(1, matrix.shape[0]), overlap):
899
+
900
+ submatrix = matrix[i:i+step, :]
901
+
902
+ if submatrix.shape[0] < 128:
903
+ zeros_array = np.zeros((128-submatrix.shape[0], 128))
904
+ submatrix = np.vstack((submatrix, zeros_array))
905
+
906
+ img = Image.fromarray(submatrix * 255).convert('1')
907
+
908
+ if save_to_array:
909
+ image_array.append(np.array(img))
910
+
911
+ else:
912
+ img.save(output_folder + output_img_prefix + '_' + str(matrix.shape[1]) + '_' + str(i).zfill(7) + output_img_ext)
913
+
914
+ if verbose:
915
+ print('Done!')
916
+ print('=' * 70)
917
+ print('Saved', (matrix.shape[0] // min(step, overlap))+1, 'images!')
918
+ print('=' * 70)
919
+
920
+ if save_to_array:
921
+ return np.array(image_array).tolist()
922
+
923
+ ################################################################################
924
+
925
+ def images_to_binary_matrix(list_of_images):
926
+
927
+ image_array = np.array(list_of_images)
928
+
929
+ original_matrix = []
930
+
931
+ for img in image_array:
932
+
933
+ submatrix = np.array(img)
934
+ original_matrix.extend(submatrix.tolist())
935
+
936
+ return original_matrix
937
+
938
+ ################################################################################
939
+
940
+ def square_image_matrix(image_matrix,
941
+ matrix_size=128,
942
+ num_pca_components=5,
943
+ filter_out_zero_rows=False,
944
+ return_square_matrix_points=False
945
+ ):
946
+
947
+ """
948
+ Reduces an arbitrary image matrix to a square image matrix
949
+ """
950
+
951
+ matrix = np.array(image_matrix)
952
+
953
+ if filter_out_zero_rows:
954
+ matrix = matrix[~np.all(matrix == 0, axis=1)]
955
+
956
+ target_rows = matrix_size
957
+
958
+ rows_per_group = matrix.shape[0] // target_rows
959
+
960
+ compressed_matrix = np.zeros((target_rows, matrix.shape[1]), dtype=np.int32)
961
+
962
+ for i in range(target_rows):
963
+ start_row = i * rows_per_group
964
+ end_row = (i + 1) * rows_per_group
965
+ group = matrix[start_row:end_row, :]
966
+
967
+ pca = PCA(n_components=num_pca_components)
968
+ pca.fit(group)
969
+
970
+ principal_component = np.mean(pca.components_, axis=0)
971
+ contributions = np.dot(group, principal_component)
972
+ selected_row_index = np.argmax(contributions)
973
+
974
+ compressed_matrix[i, :] = group[selected_row_index, :]
975
+
976
+ if return_square_matrix_points:
977
+ filtered_matrix = compressed_matrix[~np.all(compressed_matrix == 0, axis=1)]
978
+
979
+ row_indexes, col_indexes = np.where(filtered_matrix != 0)
980
+ points = np.column_stack((row_indexes, filtered_matrix[row_indexes, col_indexes])).tolist()
981
+
982
+ return points
983
+
984
+ else:
985
+ return compressed_matrix.tolist()
986
+
987
+ ################################################################################
988
+
989
+ def image_matrix_to_images(image_matrix,
990
+ step,
991
+ overlap,
992
+ num_img_channels=3,
993
+ output_folder='./Dataset/',
994
+ output_img_prefix='image',
995
+ output_img_ext='.png',
996
+ save_to_array=False,
997
+ verbose=True
998
+ ):
999
+
1000
+ if num_img_channels > 1:
1001
+ n_mat_channels = 3
1002
+
1003
+ else:
1004
+ n_mat_channels = 1
1005
+
1006
+ if not save_to_array:
1007
+
1008
+ if verbose:
1009
+ print('=' * 70)
1010
+ print('Checking output folder dir...')
1011
+
1012
+ os.makedirs(os.path.dirname(output_folder), exist_ok=True)
1013
+
1014
+ if verbose:
1015
+ print('Done!')
1016
+
1017
+ if verbose:
1018
+ print('=' * 70)
1019
+ print('Writing images...')
1020
+
1021
+ matrix = np.array(image_matrix)
1022
+
1023
+ image_array = []
1024
+
1025
+ for i in range(0, max(1, matrix.shape[0]), overlap):
1026
+
1027
+ submatrix = matrix[i:i+step, :]
1028
+
1029
+ if submatrix.shape[0] < 128:
1030
+ zeros_array = np.zeros((128-submatrix.shape[0], 128))
1031
+ submatrix = np.vstack((submatrix, zeros_array))
1032
+
1033
+ if n_mat_channels == 3:
1034
+
1035
+ r = (submatrix // (256*256)) % 256
1036
+ g = (submatrix // 256) % 256
1037
+ b = submatrix % 256
1038
+
1039
+ rgb_image = np.stack((r, g, b), axis=-1).astype(np.uint8)
1040
+ img = Image.fromarray(rgb_image, 'RGB')
1041
+
1042
+ else:
1043
+ grayscale_image = submatrix.astype(np.uint8)
1044
+ img = Image.fromarray(grayscale_image, 'L')
1045
+
1046
+ if save_to_array:
1047
+ image_array.append(np.array(img))
1048
+
1049
+ else:
1050
+ img.save(output_folder + output_img_prefix + '_' + str(matrix.shape[1]) + '_' + str(i).zfill(7) + output_img_ext)
1051
+
1052
+ if verbose:
1053
+ print('Done!')
1054
+ print('=' * 70)
1055
+ print('Saved', (matrix.shape[0] // min(step, overlap))+1, 'images!')
1056
+ print('=' * 70)
1057
+
1058
+ if save_to_array:
1059
+ return np.array(image_array).tolist()
1060
+
1061
+ ################################################################################
1062
+
1063
+ def images_to_image_matrix(list_of_images,
1064
+ num_img_channels=3
1065
+ ):
1066
+
1067
+ if num_img_channels > 1:
1068
+ n_mat_channels = 3
1069
+
1070
+ else:
1071
+ n_mat_channels = 1
1072
+
1073
+ image_array = np.array(list_of_images)
1074
+
1075
+ original_matrix = []
1076
+
1077
+ for img in image_array:
1078
+
1079
+ if num_img_channels == 3:
1080
+
1081
+ rgb_array = np.array(img)
1082
+
1083
+ matrix = (rgb_array[..., 0].astype(np.int64) * 256*256 +
1084
+ rgb_array[..., 1].astype(np.int64) * 256 +
1085
+ rgb_array[..., 2].astype(np.int64))
1086
+
1087
+ else:
1088
+ matrix = np.array(img)
1089
+
1090
+ original_matrix.extend(matrix)
1091
+
1092
+ return original_matrix
1093
+
1094
+ ################################################################################
1095
+
1096
+ def square_matrix_to_RGB_matrix(square_matrix):
1097
+
1098
+ smatrix = np.array(square_matrix)
1099
+ sq_matrix = smatrix[:smatrix.shape[1]]
1100
+
1101
+ r = (sq_matrix // (256 ** 2)) % 256
1102
+ g = (sq_matrix // 256) % 256
1103
+ b = sq_matrix % 256
1104
+
1105
+ rgb_array = np.stack((r, g, b), axis=-1)
1106
+
1107
+ return rgb_array.tolist()
1108
+
1109
+ ################################################################################
1110
+
1111
+ def upsample_square_matrix(square_matrix, upsampling_factor=4):
1112
+
1113
+ smatrix = np.array(square_matrix)
1114
+ sq_matrix = smatrix[:smatrix.shape[1]]
1115
+
1116
+ scaling_array = np.ones((upsampling_factor, upsampling_factor))
1117
+ scaled_array = np.kron(sq_matrix, scaling_array)
1118
+ scaled_array = scaled_array.astype('int')
1119
+
1120
+ return scaled_array.tolist()
1121
+
1122
+ ################################################################################
1123
+
1124
+ def downsample_square_matrix(square_matrix, downsampling_factor=4):
1125
+
1126
+ smatrix = np.array(square_matrix)
1127
+ sq_matrix = smatrix[:smatrix.shape[1]]
1128
+
1129
+ dmatrix = sq_matrix[::downsampling_factor, ::downsampling_factor]
1130
+ dmatrix = dmatrix.astype('int')
1131
+
1132
+ return dmatrix.tolist()
1133
+
1134
+ ################################################################################
1135
+
1136
+ def plot_parsons_code(parsons_code,
1137
+ start_pitch=60,
1138
+ return_plot_dict=False,
1139
+ return_plot_string=False,
1140
+ plot_size=(10, 10),
1141
+ labels_size=16,
1142
+ save_plot=''
1143
+ ):
1144
+
1145
+ '''
1146
+ Plot parsons code string
1147
+ '''
1148
+
1149
+ if parsons_code[0] != "*":
1150
+ return None
1151
+
1152
+ contour_dict = {}
1153
+ pitch = 0
1154
+ index = 0
1155
+
1156
+ maxp = 0
1157
+ minp = 0
1158
+
1159
+ contour_dict[(pitch, index)] = "*"
1160
+
1161
+ for point in parsons_code:
1162
+ if point == "R":
1163
+ index += 1
1164
+ contour_dict[(pitch, index)] = "-"
1165
+
1166
+ index += 1
1167
+ contour_dict[(pitch, index)] = "*"
1168
+
1169
+ elif point == "U":
1170
+ index += 1
1171
+ pitch -= 1
1172
+ contour_dict[(pitch, index)] = "/"
1173
+
1174
+ index += 1
1175
+ pitch -= 1
1176
+ contour_dict[(pitch, index)] = "*"
1177
+
1178
+ if pitch < maxp:
1179
+ maxp = pitch
1180
+
1181
+ elif point == "D":
1182
+ index += 1
1183
+ pitch += 1
1184
+ contour_dict[(pitch, index)] = "\\"
1185
+
1186
+ index += 1
1187
+ pitch += 1
1188
+ contour_dict[(pitch, index)] = "*"
1189
+
1190
+ if pitch > minp:
1191
+ minp = pitch
1192
+
1193
+ if return_plot_dict:
1194
+ return contour_dict
1195
+
1196
+ if return_plot_string:
1197
+
1198
+ plot_string = ''
1199
+
1200
+ for pitch in range(maxp, minp+1):
1201
+ line = [" " for _ in range(index + 1)]
1202
+ for pos in range(index + 1):
1203
+ if (pitch, pos) in contour_dict:
1204
+ line[pos] = contour_dict[(pitch, pos)]
1205
+
1206
+ plot_string = "".join(line)
1207
+
1208
+ return plot_string
1209
+
1210
+ labels = []
1211
+ pitches = []
1212
+ positions = []
1213
+ cur_pitch = start_pitch
1214
+ pitch_idx = 0
1215
+
1216
+ for k, v in contour_dict.items():
1217
+
1218
+ if v != '*':
1219
+
1220
+ pitches.append(cur_pitch)
1221
+ positions.append(pitch_idx)
1222
+
1223
+ if v == '/':
1224
+ cur_pitch += 1
1225
+ labels.append('U')
1226
+
1227
+ elif v == '\\':
1228
+ cur_pitch -= 1
1229
+ labels.append('D')
1230
+
1231
+ elif v == '-':
1232
+ labels.append('R')
1233
+
1234
+ pitch_idx += 1
1235
+
1236
+ plt.figure(figsize=plot_size)
1237
+
1238
+
1239
+ plt.plot(pitches)
1240
+
1241
+ for i, point in enumerate(zip(positions, pitches)):
1242
+ plt.annotate(labels[i], point, fontsize=labels_size)
1243
+
1244
+
1245
+ plt.title('Parsons Code with Labels', fontsize=labels_size)
1246
+ plt.xlabel('Position', fontsize=labels_size)
1247
+ plt.ylabel('Pitch', fontsize=labels_size)
1248
+
1249
+ if save_plot != '':
1250
+ plt.savefig(save_plot, bbox_inches="tight")
1251
+ plt.close()
1252
+
1253
+ plt.show()
1254
+
1255
+ plt.close()
1256
+
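+ # Sketch: plot a short Parsons code string ('*' start, U=up, D=down, R=repeat),
+ # or grab the ASCII-art contour instead of a matplotlib figure.
+ plot_parsons_code('*UUDDR', start_pitch=60)
+ print(plot_parsons_code('*UUDDR', return_plot_string=True))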
1257
+ ################################################################################
1258
+ # [WIP] Future dev functions
1259
+ ################################################################################
1260
+
1261
+ '''
1262
+ import umap
1263
+
1264
+ def reduce_dimensionality_umap(list_of_values,
1265
+ n_comp=2,
1266
+ n_neighbors=15,
1267
+ ):
1268
+
1269
+ """
1270
+ Reduces the dimensionality of the values using UMAP.
1271
+ """
1272
+
1273
+ vals = np.array(list_of_values)
1274
+
1275
+ umap_reducer = umap.UMAP(n_components=n_comp,
1276
+ n_neighbors=n_neighbors,
1277
+ n_epochs=5000,
1278
+ verbose=True
1279
+ )
1280
+
1281
+ reduced_vals = umap_reducer.fit_transform(vals)
1282
+
1283
+ return reduced_vals.tolist()
1284
+ '''
1285
+
1286
+ ################################################################################
1287
+
1288
+ '''
1289
+ import alphashape
1290
+ from shapely.geometry import Point
1291
+ from matplotlib.tri import Triangulation, LinearTriInterpolator
1292
+ from scipy.stats import zscore
1293
+
1294
+ #===============================================================================
1295
+
1296
+ coordinates = points
1297
+
1298
+ dist_matrix = minkowski_distance_matrix(coordinates, p=3) # You can change the value of p as needed
1299
+
1300
+ # Centering matrix
1301
+ n = dist_matrix.shape[0]
1302
+ H = np.eye(n) - np.ones((n, n)) / n
1303
+
1304
+ # Apply double centering
1305
+ B = -0.5 * H @ dist_matrix**2 @ H
1306
+
1307
+ # Eigen decomposition
1308
+ eigvals, eigvecs = np.linalg.eigh(B)
1309
+
1310
+ # Sort eigenvalues and eigenvectors
1311
+ idx = np.argsort(eigvals)[::-1]
1312
+ eigvals = eigvals[idx]
1313
+ eigvecs = eigvecs[:, idx]
1314
+
1315
+ # Select the top 2 eigenvectors
1316
+ X_transformed = eigvecs[:, :2] * np.sqrt(eigvals[:2])
1317
+
1318
+ #===============================================================================
1319
+
1320
+ src_points = X_transformed
1321
+ src_values = np.array([[p[1]] for p in points]) #np.random.rand(X_transformed.shape[0])
1322
+
1323
+ #===============================================================================
1324
+
1325
+ # Normalize the points to the range [0, 1]
1326
+ scaler = MinMaxScaler()
1327
+ points_normalized = scaler.fit_transform(src_points)
1328
+
1329
+ values_normalized = custom_normalize(src_values)
1330
+
1331
+ # Remove outliers based on z-score
1332
+ z_scores = np.abs(zscore(points_normalized, axis=0))
1333
+ filtered_points = points_normalized[(z_scores < 3).all(axis=1)]
1334
+ filtered_values = values_normalized[(z_scores < 3).all(axis=1)]
1335
+
1336
+ # Compute the concave hull (alpha shape)
1337
+ alpha = 8 # Adjust alpha as needed
1338
+ hull = alphashape.alphashape(filtered_points, alpha)
1339
+
1340
+ # Create a triangulation
1341
+ tri = Triangulation(filtered_points[:, 0], filtered_points[:, 1])
1342
+
1343
+ # Interpolate the values on the triangulation
1344
+ interpolator = LinearTriInterpolator(tri, filtered_values[:, 0])
1345
+ xi, yi = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))
1346
+ zi = interpolator(xi, yi)
1347
+
1348
+ # Mask out points outside the concave hull
1349
+ mask = np.array([hull.contains(Point(x, y)) for x, y in zip(xi.flatten(), yi.flatten())])
1350
+ zi = np.ma.array(zi, mask=~mask.reshape(zi.shape))
1351
+
1352
+ # Plot the filled contour based on the interpolated values
1353
+ plt.contourf(xi, yi, zi, levels=50, cmap='viridis')
1354
+
1355
+ # Plot the original points
1356
+ #plt.scatter(filtered_points[:, 0], filtered_points[:, 1], c=filtered_values, edgecolors='k')
1357
+
1358
+ plt.title('Filled Contour Plot with Original Values')
1359
+ plt.xlabel('X-axis')
1360
+ plt.ylabel('Y-axis')
1361
+ plt.colorbar(label='Value')
1362
+ plt.show()
1363
+ '''
1364
+
1365
+ ################################################################################
1366
+ #
1367
+ # This is the end of TPLOTS Python modules
1368
+ #
1369
+ ################################################################################
tegridy-tools-main.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86cec8475a0e0cd4ec7ddaf3dee37b892611afb6d3f17dac7f3411f64ae79960
3
+ size 108223519
x_transformer_1_23_2.py ADDED
@@ -0,0 +1,2481 @@
1
+ #===================================================================================================================
2
+ #
3
+ # X Transformer Module
4
+ #
5
+ # Partial x-transformers code With useful modifications
6
+ #
7
+ # Version 1.0
8
+ #
9
+ # Original source code courtesy of lucidrains
10
+ # https://github.com/lucidrains/x-transformers
11
+ #
12
+ # Original source code retrieved on 10/10/2023
13
+ #
14
+ # Project Los Angeles
15
+ # Tegridy Code 2023
16
+
17
+ #===================================================================================================================
18
+
19
+ # Critical dependencies
20
+ #
21
+ # !pip install torch
22
+ # !pip install einops
23
+
24
+ #===================================================================================================================
25
+
26
+ from functools import partial
27
+ from typing import Optional, Tuple
28
+
29
+ import os
30
+ os.environ['USE_FLASH_ATTENTION'] = '1'
31
+
32
+ import torch
33
+ from torch import nn, einsum, Tensor
34
+ import torch.nn.functional as F
35
+
36
+ # Flash attention
37
+ from torch.nn.attention import SDPBackend, sdpa_kernel
38
+ torch.backends.cuda.enable_flash_sdp(True)
39
+
40
+ from collections import namedtuple
41
+ from functools import wraps
42
+ from packaging import version
43
+ from dataclasses import dataclass
44
+
45
+ from einops import rearrange, repeat
46
+
47
+ # constants
48
+
49
+ EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
50
+
51
+ @dataclass
52
+ class Intermediates:
53
+ qk_similarities: Optional[Tensor] = None
54
+ pre_softmax_attn: Optional[Tensor] = None
55
+ post_softmax_attn: Optional[Tensor] = None
56
+ cached_kv: Optional[Tuple[Tensor, Tensor]] = None
57
+
58
+ def to_tuple(self):
59
+ return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn)
60
+
61
+ # helpers
62
+
63
+ def exists(val):
64
+ return val is not None
65
+
66
+ def default(val, d):
67
+ return val if exists(val) else d
68
+
69
+ def compact(arr):
70
+ return [*filter(exists, arr)]
71
+
72
+ def once(fn):
73
+ called = False
74
+ @wraps(fn)
75
+ def inner(x):
76
+ nonlocal called
77
+ if called:
78
+ return
79
+ called = True
80
+ return fn(x)
81
+ return inner
82
+
83
+ print_once = once(print)
84
+
85
+ # functions for creating causal mask
86
+ # need a special one for onnx cpu (no support for .triu)
87
+
88
+ def create_causal_mask(i, j, device):
89
+ return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1)
90
+
91
+ def onnx_create_causal_mask(i, j, device):
92
+ r = torch.arange(i, device = device)
93
+ causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j')
94
+ causal_mask = F.pad(causal_mask, (j - i, 0), value = False)
95
+ return causal_mask
96
+
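+ # Quick sanity sketch (CPU): both mask builders should agree; True marks
+ # positions that get masked out.
+ _m1 = create_causal_mask(3, 5, device = torch.device('cpu'))
+ _m2 = onnx_create_causal_mask(3, 5, device = torch.device('cpu'))
+ assert torch.equal(_m1, _m2)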
97
+ # main class
98
+
99
+ class Attend(nn.Module):
100
+ def __init__(
101
+ self,
102
+ *,
103
+ dropout = 0.,
104
+ causal = False,
105
+ heads = None,
106
+ talking_heads = False,
107
+ sparse_topk = None,
108
+ scale = None,
109
+ qk_norm = False,
110
+ flash = False,
111
+ add_zero_kv = False,
112
+ onnxable = False
113
+ ):
114
+ super().__init__()
115
+ self.scale = scale
116
+ self.qk_norm = qk_norm
117
+
118
+ self.causal = causal
119
+ self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask
120
+
121
+ self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax
122
+
123
+ self.dropout = dropout
124
+ self.attn_dropout = nn.Dropout(dropout)
125
+
126
+ # talking heads
127
+
128
+ assert not (flash and talking_heads), 'talking heads not compatible with flash attention'
129
+
130
+ self.talking_heads = talking_heads
131
+ if talking_heads:
132
+ self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
133
+ self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False)
134
+
135
+ # sparse topk
136
+
137
+ assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention'
138
+ self.sparse_topk = sparse_topk
139
+
140
+ # add a key / value token composed of zeros
141
+ # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html
142
+
143
+ self.add_zero_kv = add_zero_kv
144
+
145
+ # flash attention
146
+
147
+ self.flash = flash
148
+ assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
149
+
150
+ # determine efficient attention configs for cuda and cpu
151
+
152
+ self.cpu_config = EfficientAttentionConfig(True, True, True)
153
+ self.cuda_config = None
154
+
155
+ if not torch.cuda.is_available() or not flash:
156
+ return
157
+
158
+ device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
159
+
160
+ major, minor = device_properties.major, device_properties.minor
161
+
162
+ if (major, minor) == (8, 0):
163
+ print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
164
+ self.cuda_config = EfficientAttentionConfig(True, False, False)
165
+ elif (major, minor) == (9, 0):
166
+ print_once('H100 GPU detected, using flash attention')
167
+ self.cuda_config = EfficientAttentionConfig(True, False, False)
168
+ else:
169
+ print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
170
+ self.cuda_config = EfficientAttentionConfig(False, True, True)
171
+
172
+ def flash_attn(
173
+ self,
174
+ q, k, v,
175
+ mask = None,
176
+ attn_bias = None
177
+ ):
178
+ batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
179
+
180
+ # Recommended for multi-query single-key-value attention by Tri Dao
181
+ # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
182
+
183
+ if k.ndim == 3:
184
+ k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
185
+
186
+ if v.ndim == 3:
187
+ v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
188
+
189
+ # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention
190
+
191
+ if self.qk_norm:
192
+ default_scale = q.shape[-1] ** -0.5
193
+ q = q * (self.scale / default_scale)
194
+
195
+ # Check if mask exists and expand to compatible shape
196
+ # The mask is B L, so it would have to be expanded to B H N L
197
+
198
+ causal = self.causal
199
+
200
+ # in the case of kv caching with one token (q_len == 1), just turn off causal masking
201
+ # in speculative decoding, this may go up to 5-6, so right aligned causal mask will be needed there
202
+
203
+ if q_len == 1 and causal:
204
+ causal = False
205
+
206
+ # expand key padding mask
207
+
208
+ if exists(mask):
209
+ assert mask.ndim == 4
210
+ mask = mask.expand(batch, heads, q_len, k_len)
211
+
212
+ # handle kv cache - this should be bypassable in updated flash attention 2
213
+
214
+ if k_len > q_len and causal:
215
+ causal_mask = self.create_causal_mask(q_len, k_len, device = device)
216
+ if not exists(mask):
217
+ mask = ~causal_mask
218
+ else:
219
+ mask = mask & ~causal_mask
220
+ causal = False
221
+
222
+ # manually handle causal mask, if another mask was given
223
+
224
+ row_is_entirely_masked = None
225
+
226
+ if exists(mask) and causal:
227
+ causal_mask = self.create_causal_mask(q_len, k_len, device = device)
228
+ mask = mask & ~causal_mask
229
+
230
+ # protect against an entire row being masked out
231
+
232
+ row_is_entirely_masked = ~mask.any(dim = -1)
233
+ mask[..., 0] = mask[..., 0] | row_is_entirely_masked
234
+
235
+ causal = False
236
+
237
+ # handle alibi positional bias
238
+ # convert from bool to float
239
+
240
+ if exists(attn_bias):
241
+ attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1)
242
+
243
+ # if mask given, the mask would already contain the causal mask from above logic
244
+ # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number
245
+
246
+ mask_value = -torch.finfo(q.dtype).max
247
+
248
+ if exists(mask):
249
+ attn_bias = attn_bias.masked_fill(~mask, mask_value // 2)
250
+ elif causal:
251
+ causal_mask = self.create_causal_mask(q_len, k_len, device = device)
252
+ attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2)
253
+ causal = False
254
+
255
+ # scaled_dot_product_attention handles attn_mask either as bool or additive bias
256
+ # make it an additive bias here
257
+
258
+ mask = attn_bias
259
+
260
+ # Check if there is a compatible device for flash attention
261
+
262
+ config = self.cuda_config if is_cuda else self.cpu_config
263
+
264
+ # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
265
+
266
+ # Legacy (pre-PyTorch 2.3) SDPA context manager code, kept for reference...
267
+ # with torch.backends.cuda.sdp_kernel(enable_math=True, enable_mem_efficient=True):
268
+ # with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]):
269
+
270
+ # PyTorch 2.3-2.4 SDPA backend code...
271
+ with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.FLASH_ATTENTION, SDPBackend.CUDNN_ATTENTION]):
272
+ # with sdpa_kernel([SDPBackend.FLASH_ATTENTION]):
273
+
274
+ # New PyTorch 2.5 SDPA backend code:
275
+ # with sdpa_kernel(SDPBackend.CUDNN_ATTENTION):
276
+
277
+ out = F.scaled_dot_product_attention(
278
+ q, k, v,
279
+ attn_mask = mask,
280
+ dropout_p = self.dropout if self.training else 0.,
281
+ is_causal = causal
282
+ )
283
+
284
+ # for a row that is entirely masked out, zero out the output for that row's token
285
+
286
+ if exists(row_is_entirely_masked):
287
+ out = out.masked_fill(row_is_entirely_masked[..., None], 0.)
288
+
289
+ return out, Intermediates()
290
+
291
+ def forward(
292
+ self,
293
+ q, k, v,
294
+ mask = None,
295
+ attn_bias = None,
296
+ prev_attn = None
297
+ ):
298
+ """
299
+ einstein notation
300
+ b - batch
301
+ h - heads
302
+ n, i, j - sequence length (base sequence length, source, target)
303
+ d - feature dimension
304
+ """
305
+
306
+ n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device
307
+
308
+ scale = default(self.scale, q.shape[-1] ** -0.5)
309
+
310
+ causal = self.causal
311
+
312
+ # handle kv cached decoding
313
+
314
+ if n == 1 and causal:
315
+ causal = False
316
+
317
+ # handle grouped multi-query attention
318
+
319
+ if kv_heads == 1:
320
+ k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v))
321
+ elif kv_heads < heads:
322
+ k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v))
323
+
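+ # illustrative note (not part of the original code): with heads = 8 and kv_heads = 2,
+ # k and v of shape (b, 2, n, d) are repeated to (b, 8, n, d), so each group of four
+ # query heads shares one key / value head; with kv_heads = 1 the head axis is dropped
+ # entirely and the einsum path below broadcasts the single kv head across all query heads
+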
324
+ # handle zero kv, as means for allowing network to attend to nothing
325
+
326
+ if self.add_zero_kv:
327
+ k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v))
328
+
329
+ if exists(mask):
330
+ mask = F.pad(mask, (1, 0), value = True)
331
+
332
+ if exists(attn_bias):
333
+ attn_bias = F.pad(attn_bias, (1, 0), value = 0.)
334
+
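+ # illustrative note (not part of the original code): F.pad(t, (0, 0, 1, 0)) prepends a
+ # single all-zero position along the key / value length axis (dim -2), so k goes from
+ # (b, h, n, d) to (b, h, n + 1, d); the extra zero logit gives softmax an always-available
+ # "attend to nothing" slot, which is the attention-is-off-by-one fix referenced above
+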
335
+ if self.flash:
336
+ assert not exists(prev_attn), 'residual attention not compatible with flash attention'
337
+ return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias)
338
+
339
+ kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
340
+
341
+ dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale
342
+
343
+ if exists(prev_attn):
344
+ dots = dots + prev_attn
345
+
346
+ qk_similarities = dots.clone()
347
+
348
+ if self.talking_heads:
349
+ dots = self.pre_softmax_talking_heads(dots)
350
+
351
+ if exists(attn_bias):
352
+ dots = dots + attn_bias
353
+
354
+ i, j, dtype = *dots.shape[-2:], dots.dtype
355
+
356
+ mask_value = -torch.finfo(dots.dtype).max
357
+
358
+ if exists(self.sparse_topk) and self.sparse_topk < j:
359
+ top_values, _ = dots.topk(self.sparse_topk, dim = -1)
360
+ sparse_topk_mask = dots < top_values[..., -1:]
361
+ mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask
362
+
363
+ if exists(mask):
364
+ dots = dots.masked_fill(~mask, mask_value)
365
+
366
+ if causal:
367
+ causal_mask = self.create_causal_mask(i, j, device = device)
368
+ dots = dots.masked_fill(causal_mask, mask_value)
369
+
370
+ pre_softmax_attn = dots.clone()
371
+
372
+ attn = self.attn_fn(dots, dim = -1)
373
+ attn = attn.type(dtype)
374
+
375
+ post_softmax_attn = attn.clone()
376
+
377
+ attn = self.attn_dropout(attn)
378
+
379
+ if self.talking_heads:
380
+ attn = self.post_softmax_talking_heads(attn)
381
+
382
+ out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v)
383
+
384
+ intermediates = Intermediates(
385
+ qk_similarities = qk_similarities,
386
+ pre_softmax_attn = pre_softmax_attn,
387
+ post_softmax_attn = post_softmax_attn
388
+ )
389
+
390
+ return out, intermediates
391
+
392
+ #===================================================================================================================
393
+
394
+ from math import ceil, log
395
+ from typing import Optional, Union, Tuple, Callable
396
+
397
+ import torch
398
+ from torch import nn, Tensor
399
+ from torch.nn import Module
400
+ import torch.nn.functional as F
401
+
402
+ from einops import rearrange, pack, unpack
403
+
404
+ def exists(val):
405
+ return val is not None
406
+
407
+ def default(val, d):
408
+ return val if exists(val) else d
409
+
410
+ def identity(t, *args, **kwargs):
411
+ return t
412
+
413
+ def cast_tuple(t, length = 1):
414
+ return t if isinstance(t, tuple) else (t,) * length
415
+
416
+ def eval_decorator(fn):
417
+ def inner(self, *args, **kwargs):
418
+ was_training = self.training
419
+ self.eval()
420
+ out = fn(self, *args, **kwargs)
421
+ self.train(was_training)
422
+ return out
423
+ return inner
424
+
425
+ # for variable-length prefixes
426
+
427
+ def align_right(t, lens, pad_id = 0):
428
+ batch, seq_len, device, dtype = *t.shape, t.device, t.dtype
429
+
430
+ assert lens.ndim == 1 and lens.shape[0] == batch
431
+ assert lens.amax() <= seq_len
432
+
433
+ pad_lens = seq_len - lens
434
+ max_pad_len = pad_lens.amax()
435
+
436
+ batch_arange = torch.arange(batch, device = device, dtype = torch.long)[..., None]
437
+ prompt_len_arange = torch.arange(seq_len, device = device, dtype = torch.long)
438
+
439
+ t = F.pad(t, (max_pad_len, 0), value = pad_id) # pad with the requested pad id rather than a hardcoded 0
440
+ offset = max_pad_len - pad_lens
441
+
442
+ aligned = t[batch_arange, prompt_len_arange + offset[..., None]]
443
+ return aligned
444
+
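+ # a minimal usage sketch (illustrative only; `_demo_align_right` is not part of the
+ # original module): two prompts of true lengths 2 and 3 are shifted so both end at
+ # the last column, which is the layout `generate` expects for variable-length prefixes
+
+ def _demo_align_right():
+     t = torch.tensor([[5, 6, 0], [7, 8, 9]]) # two prompts, right-padded to length 3
+     lens = torch.tensor([2, 3]) # true prompt lengths
+     aligned = align_right(t, lens, pad_id = 0)
+     # aligned -> tensor([[0, 5, 6], [7, 8, 9]]) : the shorter prompt is now left-padded
+     return aligned
+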
445
+ # nucleus
446
+
447
+ def top_p(logits, thres = 0.9):
448
+ sorted_logits, sorted_indices = torch.sort(logits, descending = True)
449
+ cum_probs = torch.cumsum(F.softmax(sorted_logits, dim = -1), dim = -1)
450
+
451
+ sorted_indices_to_remove = cum_probs > thres
452
+ sorted_indices_to_remove = F.pad(sorted_indices_to_remove, (1, -1), value = False)
453
+
454
+ sorted_logits[sorted_indices_to_remove] = float('-inf')
455
+ return sorted_logits.scatter(1, sorted_indices, sorted_logits)
456
+
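+ # a small worked example (illustrative only; `_demo_top_p` is not part of the original
+ # module): tokens are kept greedily by probability until the cumulative mass exceeds
+ # thres, and everything outside that nucleus is pushed to -inf before sampling
+
+ def _demo_top_p():
+     logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
+     filtered = top_p(logits, thres = 0.9)
+     # sampling from softmax(filtered) can now only pick tokens inside the
+     # top-0.9 cumulative probability mass; the rest are -inf
+     return filtered
+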
457
+ # topk
458
+
459
+ def top_k(logits, frac_num_tokens = 0.1, k = None):
460
+ num_tokens = logits.shape[-1]
461
+
462
+ k = default(k, ceil(frac_num_tokens * num_tokens))
463
+ k = min(k, num_tokens)
464
+
465
+ val, ind = torch.topk(logits, k)
466
+ probs = torch.full_like(logits, float('-inf'))
467
+ probs.scatter_(1, ind, val)
468
+ return probs
469
+
470
+ # top_a
471
+
472
+ def top_a(logits, min_p_pow = 2.0, min_p_ratio = 0.02):
473
+ probs = F.softmax(logits, dim = -1)
474
+ max_probs = torch.amax(probs, dim = -1, keepdim = True)
475
+ limit = torch.pow(max_probs, min_p_pow) * min_p_ratio
476
+ return torch.where(probs < limit, float('-inf'), logits)
477
+
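+ # a quick sanity sketch (illustrative only; `_demo_top_a` is not part of the original
+ # module): top_a keeps only tokens with probability >= max_prob ** min_p_pow * min_p_ratio,
+ # so the cutoff adapts to how peaked the distribution is
+
+ def _demo_top_a():
+     logits = torch.tensor([[3.0, 1.0, -2.0]])
+     filtered = top_a(logits, min_p_pow = 2.0, min_p_ratio = 0.02)
+     # tokens far below the (squared) top probability are masked to -inf
+     return filtered
+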
478
+ # contrastive decoding function
479
+
480
+ def contrastive_decode_fn(
481
+ expert_logits,
482
+ amateur_logits,
483
+ alpha = 0.1,
484
+ beta = 0.5
485
+ ):
486
+ """
487
+ Appendix A Algorithm 2
488
+ https://arxiv.org/abs/2309.09117
489
+ """
490
+
491
+ cutoff = log(alpha) + expert_logits.amax(dim = -1, keepdim = True)
492
+ diffs = (1 + beta) * expert_logits - beta * amateur_logits
493
+ contrastive_decode_logits = diffs.masked_fill(expert_logits < cutoff, -torch.finfo(expert_logits.dtype).max)
494
+ return contrastive_decode_logits
495
+
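+ # a toy example (illustrative only; `_demo_contrastive_decode` is not part of the
+ # original module): only tokens the expert itself rates within alpha of its best
+ # token survive the cutoff, and the expert / amateur logit difference re-ranks the
+ # survivors
+
+ def _demo_contrastive_decode():
+     expert = torch.tensor([[4.0, 3.5, 0.0]])
+     amateur = torch.tensor([[4.0, 1.0, 0.0]])
+     out = contrastive_decode_fn(expert, amateur, alpha = 0.1, beta = 0.5)
+     # token 1 now outranks token 0 (the amateur also favors token 0), and token 2
+     # falls below the log(alpha) plausibility cutoff, so it is masked to -inf
+     return out
+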
496
+ # autoregressive wrapper class
497
+
498
+ class AutoregressiveWrapper(Module):
499
+ def __init__(
500
+ self,
501
+ net,
502
+ ignore_index = -100,
503
+ pad_value = 0,
504
+ mask_prob = 0.,
505
+ add_attn_z_loss = False,
506
+ return_cache=False
507
+ ):
508
+ super().__init__()
509
+ self.pad_value = pad_value
510
+ self.ignore_index = ignore_index
511
+
512
+ self.net = net
513
+ self.max_seq_len = net.max_seq_len
514
+
515
+ # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
516
+ assert mask_prob < 1.
517
+ self.mask_prob = mask_prob
518
+
519
+ # whether to add router z-loss
520
+ self.add_attn_z_loss = add_attn_z_loss
521
+ self.return_cache = return_cache
522
+
523
+ @torch.inference_mode()
524
+ @eval_decorator
525
+ def generate(
526
+ self,
527
+ prompts,
528
+ seq_len,
529
+ eos_token = None,
530
+ temperature = 1.,
531
+ prompt_lens: Optional[Tensor] = None,
532
+ filter_logits_fn: Callable = top_k,
533
+ restrict_to_max_seq_len = True,
534
+ amateur_model: Optional[Union[Module, Tuple[Module]]] = None,
535
+ filter_kwargs: dict = dict(),
536
+ contrastive_decode_kwargs: Union[dict, Tuple[dict]] = dict(
537
+ beta = 0.5,
538
+ alpha = 0.1
539
+ ),
540
+ cache_kv = True,
541
+ verbose=True,
542
+ return_prime=False,
543
+ **kwargs
544
+ ):
545
+ max_seq_len, device = self.max_seq_len, prompts.device
546
+
547
+ prompts, ps = pack([prompts], '* n')
548
+
549
+ b, t = prompts.shape
550
+
551
+ # handle variable-length prompts (prefixes)
552
+
553
+ seq_start_pos = None
554
+ if exists(prompt_lens):
555
+ prompts = align_right(prompts, prompt_lens, pad_id = self.pad_value)
556
+ seq_start_pos = t - prompt_lens
557
+
558
+ # output buffer to which sampled tokens are appended
559
+
560
+ out = prompts
561
+
562
+ if verbose:
563
+ print("Generating sequence of max length:", seq_len)
564
+
565
+ # kv caches
566
+
567
+ cache = None
568
+
569
+ # if doing contrastive decoding, turn off filter automatically
570
+
571
+ if exists(amateur_model):
572
+ amateur_model = cast_tuple(amateur_model)
573
+ contrastive_decode_kwargs = cast_tuple(contrastive_decode_kwargs)
574
+
575
+ assert len(amateur_model) == len(contrastive_decode_kwargs)
576
+
577
+ amateur_caches = [None] * len(amateur_model)
578
+ filter_logits_fn = identity
579
+
580
+ for i, module in enumerate(amateur_model):
581
+ if isinstance(module, AutoregressiveWrapper):
582
+ amateur_model[i] = module.net
583
+
584
+ module.eval()
585
+
586
+ # sampling up to seq_len
587
+
588
+ for sl in range(seq_len):
589
+
590
+ if restrict_to_max_seq_len:
591
+ x = out[:, -max_seq_len:]
592
+
593
+ if exists(cache):
594
+ for inter in cache.attn_intermediates:
595
+ inter.cached_kv = [t[..., -(max_seq_len - 1):, :] for t in inter.cached_kv]
+ else:
+ x = out # when not restricting to max_seq_len, feed the full sequence
596
+
597
+ logits, new_cache = self.net(
598
+ x,
599
+ return_intermediates = True,
600
+ cache = cache,
601
+ seq_start_pos = seq_start_pos,
602
+ **kwargs
603
+ )
604
+
605
+ if cache_kv and self.net.can_cache_kv:
606
+ cache = new_cache
607
+
608
+ logits = logits[:, -1]
609
+
610
+ # handle contrastive decoding, Li et al.
611
+ # https://arxiv.org/abs/2210.15097
612
+
613
+ if exists(amateur_model):
614
+ for i, (amateur, amateur_cache, amateur_contrastive_decode_kwargs) in enumerate(zip(amateur_model, amateur_caches, contrastive_decode_kwargs)):
615
+ amateur_logits, next_amateur_cache = amateur(
616
+ x,
617
+ return_intermediates = True,
618
+ cache = amateur_cache,
619
+ seq_start_pos = seq_start_pos,
620
+ **kwargs
621
+ )
622
+
623
+ amateur_logits = amateur_logits[:, -1]
624
+
625
+ assert amateur_logits.shape == logits.shape, 'logits dimensions are not the same between amateur and expert model'
626
+ logits = contrastive_decode_fn(logits, amateur_logits, **amateur_contrastive_decode_kwargs)
627
+
628
+ if cache_kv and amateur.can_cache_kv:
629
+ amateur_caches[i] = next_amateur_cache
630
+
631
+ # filter by top_k, top_p (nucleus), top_a, or custom
632
+
633
+ filtered_logits = filter_logits_fn(logits, **filter_kwargs)
634
+
635
+ probs = F.softmax(filtered_logits / temperature, dim=-1)
636
+
637
+ sample = torch.multinomial(probs, 1)
638
+
639
+ out = torch.cat((out, sample), dim=-1)
640
+
641
+ if verbose:
642
+ if sl % 32 == 0:
643
+ print(sl, '/', seq_len)
644
+
645
+ if exists(eos_token):
646
+ is_eos_tokens = (out == eos_token)
647
+
648
+ if is_eos_tokens.any(dim = -1).all():
649
+ # mask out everything after the eos tokens
650
+ shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
651
+ mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
652
+ out = out.masked_fill(mask, self.pad_value)
653
+
654
+ if verbose:
655
+ print('Model called the end of sequence at:', sl, '/', seq_len)
656
+
657
+ break
658
+
659
+ if return_prime:
660
+ return out[:, :]
661
+
662
+ else:
663
+ return out[:, t:]
664
+
665
+ # out, = unpack(out, ps, '* n')
666
+
667
+ # return out
668
+
669
+ def compute_accuracy(self, logits, labels):
670
+ out = torch.argmax(logits, dim=-1)
671
+ out = out.flatten()
672
+ labels = labels.flatten()
673
+
674
+ mask = (labels != self.ignore_index) # can also be self.pad_value (your choice)
675
+ out = out[mask]
676
+ labels = labels[mask]
677
+
678
+ num_right = (out == labels)
679
+ num_right = torch.sum(num_right).type(torch.float32)
680
+
681
+ acc = num_right / len(labels)
682
+ return acc
683
+
684
+ def forward(self, x, **kwargs):
685
+ seq, ignore_index, add_attn_z_loss = x.shape[1], self.ignore_index, self.add_attn_z_loss
686
+
687
+ inp, target = x[:, :-1], x[:, 1:]
688
+ inp = torch.where(inp == ignore_index, self.pad_value, inp)
689
+
690
+ if self.mask_prob > 0.:
691
+ rand = torch.randn(inp.shape, device = x.device)
692
+ rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out
693
+ num_mask = min(int(seq * self.mask_prob), seq - 1)
694
+ indices = rand.topk(num_mask, dim = -1).indices
695
+ mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
696
+ kwargs.update(self_attn_kv_mask = mask)
697
+
698
+ logits, cache = self.net(
699
+ inp,
700
+ return_intermediates = True,
701
+ return_attn_z_loss = add_attn_z_loss,
702
+ **kwargs
703
+ )
704
+
705
+ acc = self.compute_accuracy(logits, target)
706
+
707
+ loss = F.cross_entropy(
708
+ rearrange(logits, 'b n c -> b c n'),
709
+ target,
710
+ ignore_index = ignore_index
711
+ )
712
+
713
+ if add_attn_z_loss:
714
+ loss = loss + cache.attn_z_loss
715
+
716
+ if self.return_cache:
717
+ return loss, acc, cache
718
+
719
+ else:
720
+ return loss, acc
721
+
722
+ #===============================================================================
723
+
724
+ import math
725
+ from random import random
726
+
727
+ import torch
728
+ from torch import nn, einsum, Tensor
729
+ import torch.nn.functional as F
730
+
731
+ from functools import partial, wraps
732
+ from inspect import isfunction
733
+ from collections import namedtuple
734
+ from dataclasses import dataclass
735
+ from typing import List, Callable, Optional
736
+
737
+ from einops import rearrange, repeat, reduce, pack, unpack
738
+ from einops.layers.torch import Rearrange
739
+
740
+ # constants
741
+
742
+ DEFAULT_DIM_HEAD = 64
743
+
744
+ @dataclass
745
+ class LayerIntermediates:
746
+ hiddens: Optional[List[Tensor]] = None
747
+ attn_intermediates: Optional[List[Intermediates]] = None
748
+ layer_hiddens: Optional[List[Tensor]] = None
749
+ attn_z_loss: Optional[Tensor] = None
750
+ mems: Optional[Tensor] = None
751
+
752
+ # helpers
753
+
754
+ def exists(val):
755
+ return val is not None
756
+
757
+ def default(val, d):
758
+ if exists(val):
759
+ return val
760
+ return d() if isfunction(d) else d
761
+
762
+ def cast_tuple(val, depth):
763
+ return val if isinstance(val, tuple) else (val,) * depth
764
+
765
+ def divisible_by(num, den):
766
+ return (num % den) == 0
767
+
768
+ def maybe(fn):
769
+ @wraps(fn)
770
+ def inner(x, *args, **kwargs):
771
+ if not exists(x):
772
+ return x
773
+ return fn(x, *args, **kwargs)
774
+ return inner
775
+
776
+ class always():
777
+ def __init__(self, val):
778
+ self.val = val
779
+ def __call__(self, *args, **kwargs):
780
+ return self.val
781
+
782
+ class not_equals():
783
+ def __init__(self, val):
784
+ self.val = val
785
+ def __call__(self, x, *args, **kwargs):
786
+ return x != self.val
787
+
788
+ class equals():
789
+ def __init__(self, val):
790
+ self.val = val
791
+ def __call__(self, x, *args, **kwargs):
792
+ return x == self.val
793
+
794
+ def Sequential(*modules):
795
+ return nn.Sequential(*filter(exists, modules))
796
+
797
+ # tensor helpers
798
+
799
+ def max_neg_value(tensor):
800
+ return -torch.finfo(tensor.dtype).max
801
+
802
+ def l2norm(t, groups = 1):
803
+ t = rearrange(t, '... (g d) -> ... g d', g = groups)
804
+ t = F.normalize(t, p = 2, dim = -1)
805
+ return rearrange(t, '... g d -> ... (g d)')
806
+
807
+ def pad_at_dim(t, pad, dim = -1, value = 0.):
808
+ dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
809
+ zeros = ((0, 0) * dims_from_right)
810
+ return F.pad(t, (*zeros, *pad), value = value)
811
+
812
+ def or_reduce(masks):
813
+ head, *body = masks
814
+ for rest in body:
815
+ head = head | rest
816
+ return head
817
+
818
+ # auxiliary loss helpers
819
+
820
+ def calc_z_loss(
821
+ pre_softmax_attns: List[Tensor],
822
+ mask = None,
823
+ weight = 1.
824
+ ):
825
+ # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906
826
+ # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects
827
+ # also used in PaLM as one of the measures
828
+
829
+ lse = 0.
830
+
831
+ for attn in pre_softmax_attns:
832
+ lse = lse + attn.logsumexp(dim = -1)
833
+
834
+ loss = torch.square(lse)
835
+ loss = reduce(loss, 'b h n -> b n', 'sum')
836
+
837
+ if not exists(mask):
838
+ return loss.mean() * weight
839
+
840
+ loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5)
841
+ return loss * weight
842
+
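+ # a shape-level sketch (illustrative only; `_demo_calc_z_loss` is not part of the
+ # original module): the z-loss is the squared logsumexp of each row of attention
+ # logits, summed over heads and averaged over positions, which discourages the
+ # logits from drifting to large magnitudes
+
+ def _demo_calc_z_loss():
+     attn_logits = [torch.randn(2, 8, 16, 16)] # (batch, heads, query len, key len)
+     return calc_z_loss(attn_logits, weight = 1e-4)
+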
843
+ # init helpers
844
+
845
+ def init_zero_(layer):
846
+ nn.init.constant_(layer.weight, 0.)
847
+ if exists(layer.bias):
848
+ nn.init.constant_(layer.bias, 0.)
849
+
850
+ # keyword argument helpers
851
+
852
+ def pick_and_pop(keys, d):
853
+ values = list(map(lambda key: d.pop(key), keys))
854
+ return dict(zip(keys, values))
855
+
856
+ def group_dict_by_key(cond, d):
857
+ return_val = [dict(),dict()]
858
+ for key in d.keys():
859
+ match = bool(cond(key))
860
+ ind = int(not match)
861
+ return_val[ind][key] = d[key]
862
+ return (*return_val,)
863
+
864
+ def string_begins_with(prefix, str):
865
+ return str.startswith(prefix)
866
+
867
+ def group_by_key_prefix(prefix, d):
868
+ return group_dict_by_key(partial(string_begins_with, prefix), d)
869
+
870
+ def groupby_prefix_and_trim(prefix, d):
871
+ kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
872
+ kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
873
+ return kwargs_without_prefix, kwargs
874
+
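+ # a tiny example (illustrative only; `_demo_groupby_prefix` is not part of the original
+ # module) of how the attention layers below split their kwargs by prefix
+
+ def _demo_groupby_prefix():
+     kwargs = dict(attn_dropout = 0.1, ff_mult = 4, depth = 6)
+     attn_kwargs, rest = groupby_prefix_and_trim('attn_', kwargs)
+     # attn_kwargs -> {'dropout': 0.1} ; rest -> {'ff_mult': 4, 'depth': 6}
+     return attn_kwargs, rest
+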
875
+ # structured dropout, more effective than traditional attention dropouts
876
+
877
+ def dropout_seq(seq, mask, dropout):
878
+ b, n, *_, device = *seq.shape, seq.device
879
+ logits = torch.randn(b, n, device = device)
880
+
881
+ if exists(mask):
882
+ mask_value = max_neg_value(logits)
883
+ logits = logits.masked_fill(~mask, mask_value)
884
+
885
+ keep_prob = 1. - dropout
886
+ num_keep = max(1, int(keep_prob * n))
887
+ keep_indices = logits.topk(num_keep, dim = 1).indices
888
+
889
+ batch_indices = torch.arange(b, device = device)
890
+ batch_indices = rearrange(batch_indices, 'b -> b 1')
891
+
892
+ seq = seq[batch_indices, keep_indices]
893
+
894
+ if exists(mask):
895
+ seq_counts = mask.sum(dim = -1)
896
+ seq_keep_counts = torch.ceil(seq_counts * keep_prob).int()
897
+ keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1')
898
+
899
+ mask = mask[batch_indices, keep_indices] & keep_mask
900
+
901
+ return seq, mask
902
+
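+ # a minimal sketch (illustrative only; `_demo_dropout_seq` is not part of the original
+ # module): dropping 25% of a sequence keeps a random subset of positions per batch
+ # element and trims the mask to match, which is how cross-attended context tokens
+ # are dropped during training
+
+ def _demo_dropout_seq():
+     seq = torch.randn(2, 8, 16)
+     mask = torch.ones(2, 8, dtype = torch.bool)
+     seq, mask = dropout_seq(seq, mask, dropout = 0.25)
+     # seq is now (2, 6, 16) - int(8 * 0.75) = 6 kept positions per sequence
+     return seq, mask
+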
903
+ # activations
904
+
905
+ class ReluSquared(nn.Module):
906
+ def forward(self, x):
907
+ return F.relu(x) ** 2
908
+
909
+ # embedding
910
+
911
+ class TokenEmbedding(nn.Module):
912
+ def __init__(self, dim, num_tokens, l2norm_embed = False):
913
+ super().__init__()
914
+ self.l2norm_embed = l2norm_embed
915
+ self.emb = nn.Embedding(num_tokens, dim)
916
+
917
+ def forward(self, x):
918
+ token_emb = self.emb(x)
919
+ return l2norm(token_emb) if self.l2norm_embed else token_emb
920
+
921
+ # positional embeddings
922
+
923
+ class AbsolutePositionalEmbedding(nn.Module):
924
+ def __init__(self, dim, max_seq_len, l2norm_embed = False):
925
+ super().__init__()
926
+ self.scale = dim ** -0.5 if not l2norm_embed else 1.
927
+ self.max_seq_len = max_seq_len
928
+ self.l2norm_embed = l2norm_embed
929
+ self.emb = nn.Embedding(max_seq_len, dim)
930
+
931
+ def forward(self, x, pos = None, seq_start_pos = None):
932
+ seq_len, device = x.shape[1], x.device
933
+ assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}'
934
+
935
+ if not exists(pos):
936
+ pos = torch.arange(seq_len, device = device)
937
+
938
+ if exists(seq_start_pos):
939
+ pos = (pos - seq_start_pos[..., None]).clamp(min = 0)
940
+
941
+ pos_emb = self.emb(pos)
942
+ pos_emb = pos_emb * self.scale
943
+ return l2norm(pos_emb) if self.l2norm_embed else pos_emb
944
+
945
+ class ScaledSinusoidalEmbedding(nn.Module):
946
+ def __init__(self, dim, theta = 10000):
947
+ super().__init__()
948
+ assert divisible_by(dim, 2)
949
+ self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5)
950
+
951
+ half_dim = dim // 2
952
+ freq_seq = torch.arange(half_dim).float() / half_dim
953
+ inv_freq = theta ** -freq_seq
954
+ self.register_buffer('inv_freq', inv_freq, persistent = False)
955
+
956
+ def forward(self, x, pos = None, seq_start_pos = None):
957
+ seq_len, device = x.shape[1], x.device
958
+
959
+ if not exists(pos):
960
+ pos = torch.arange(seq_len, device = device)
961
+
962
+ if exists(seq_start_pos):
963
+ pos = pos - seq_start_pos[..., None]
964
+
965
+ emb = einsum('i, j -> i j', pos, self.inv_freq)
966
+ emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
967
+ return emb * self.scale
968
+
969
+ class RelativePositionBias(nn.Module):
970
+ def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8):
971
+ super().__init__()
972
+ self.scale = scale
973
+ self.causal = causal
974
+ self.num_buckets = num_buckets
975
+ self.max_distance = max_distance
976
+ self.relative_attention_bias = nn.Embedding(num_buckets, heads)
977
+
978
+ @staticmethod
979
+ def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128):
980
+ ret = 0
981
+ n = -relative_position
982
+ if not causal:
983
+ num_buckets //= 2
984
+ ret += (n < 0).long() * num_buckets
985
+ n = torch.abs(n)
986
+ else:
987
+ n = torch.max(n, torch.zeros_like(n))
988
+
989
+ max_exact = num_buckets // 2
990
+ is_small = n < max_exact
991
+
992
+ val_if_large = max_exact + (
993
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
994
+ ).long()
995
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
996
+
997
+ ret += torch.where(is_small, n, val_if_large)
998
+ return ret
999
+
1000
+ @property
1001
+ def device(self):
1002
+ return next(self.parameters()).device
1003
+
1004
+ def forward(self, i, j):
1005
+ device = self.device
1006
+ q_pos = torch.arange(j - i, j, dtype = torch.long, device = device)
1007
+ k_pos = torch.arange(j, dtype = torch.long, device = device)
1008
+ rel_pos = k_pos[None, :] - q_pos[:, None]
1009
+ rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
1010
+ values = self.relative_attention_bias(rp_bucket)
1011
+ bias = rearrange(values, 'i j h -> h i j')
1012
+ return bias * self.scale
1013
+
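+ # a usage sketch (illustrative only; `_demo_relative_position_bias` is not part of the
+ # original module): the T5-style bias is an (h, i, j) tensor added to attention logits,
+ # with distances bucketed exactly up to num_buckets // 2 and logarithmically out to
+ # max_distance beyond that
+
+ def _demo_relative_position_bias():
+     rel_pos = RelativePositionBias(scale = 8.0, causal = True, heads = 8)
+     bias = rel_pos(16, 16) # query len 16, key len 16 -> shape (8, 16, 16)
+     return bias
+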
1014
+ class DynamicPositionBias(nn.Module):
1015
+ def __init__(self, dim, *, heads, depth, log_distance = False, norm = False):
1016
+ super().__init__()
1017
+ assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1'
1018
+ self.log_distance = log_distance
1019
+
1020
+ self.mlp = nn.ModuleList([])
1021
+
1022
+ self.mlp.append(Sequential(
1023
+ nn.Linear(1, dim),
1024
+ nn.LayerNorm(dim) if norm else None,
1025
+ nn.SiLU()
1026
+ ))
1027
+
1028
+ for _ in range(depth - 1):
1029
+ self.mlp.append(Sequential(
1030
+ nn.Linear(dim, dim),
1031
+ nn.LayerNorm(dim) if norm else None,
1032
+ nn.SiLU()
1033
+ ))
1034
+
1035
+ self.mlp.append(nn.Linear(dim, heads))
1036
+
1037
+ @property
1038
+ def device(self):
1039
+ return next(self.parameters()).device
1040
+
1041
+ def forward(self, i, j):
1042
+ assert i == j
1043
+ n, device = j, self.device
1044
+
1045
+ # get the (n x n) matrix of distances
1046
+ seq_arange = torch.arange(n, device = device)
1047
+ context_arange = torch.arange(n, device = device)
1048
+ indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j')
1049
+ indices += (n - 1)
1050
+
1051
+ # input to continuous positions MLP
1052
+ pos = torch.arange(-n + 1, n, device = device).float()
1053
+ pos = rearrange(pos, '... -> ... 1')
1054
+
1055
+ if self.log_distance:
1056
+ pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1)
1057
+
1058
+ for layer in self.mlp:
1059
+ pos = layer(pos)
1060
+
1061
+ # get position biases
1062
+ bias = pos[indices]
1063
+ bias = rearrange(bias, 'i j h -> h i j')
1064
+ return bias
1065
+
1066
+ class AlibiPositionalBias(nn.Module):
1067
+ def __init__(self, heads, total_heads, **kwargs):
1068
+ super().__init__()
1069
+ self.heads = heads
1070
+ self.total_heads = total_heads
1071
+
1072
+ slopes = Tensor(self._get_slopes(heads))
1073
+ slopes = rearrange(slopes, 'h -> h 1 1')
1074
+ self.register_buffer('slopes', slopes, persistent = False)
1075
+ self.register_buffer('bias', None, persistent = False)
1076
+
1077
+ def get_bias(self, i, j, device):
1078
+ i_arange = torch.arange(j - i, j, device = device)
1079
+ j_arange = torch.arange(j, device = device)
1080
+ bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
1081
+ return bias
1082
+
1083
+ @staticmethod
1084
+ def _get_slopes(heads):
1085
+ def get_slopes_power_of_2(n):
1086
+ start = (2**(-2**-(math.log2(n)-3)))
1087
+ ratio = start
1088
+ return [start*ratio**i for i in range(n)]
1089
+
1090
+ if math.log2(heads).is_integer():
1091
+ return get_slopes_power_of_2(heads)
1092
+
1093
+ closest_power_of_2 = 2 ** math.floor(math.log2(heads))
1094
+ return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
1095
+
1096
+ @property
1097
+ def device(self):
1098
+ return next(self.buffers()).device
1099
+
1100
+ def forward(self, i, j):
1101
+ h, device = self.total_heads, self.device
1102
+
1103
+ if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
1104
+ return self.bias[..., -i:, -j:]
1105
+
1106
+ bias = self.get_bias(i, j, device)
1107
+ bias = bias * self.slopes
1108
+
1109
+ num_heads_unalibied = h - bias.shape[0]
1110
+ bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0)
1111
+ self.register_buffer('bias', bias, persistent = False)
1112
+
1113
+ return self.bias
1114
+
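+ # a quick sketch (illustrative only; `_demo_alibi_bias` is not part of the original
+ # module): slopes halve geometrically per head (1/2, 1/4, ... for 8 heads), and each
+ # head's bias is -|i - j| times its slope, so distant keys are penalized linearly
+
+ def _demo_alibi_bias():
+     alibi = AlibiPositionalBias(heads = 8, total_heads = 8)
+     bias = alibi(16, 16) # -> shape (8, 16, 16), zero along the diagonal
+     return bias
+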
1115
+ class RotaryEmbedding(nn.Module):
1116
+ def __init__(
1117
+ self,
1118
+ dim,
1119
+ use_xpos = False,
1120
+ scale_base = 512,
1121
+ interpolation_factor = 1.,
1122
+ base = 10000,
1123
+ base_rescale_factor = 1.
1124
+ ):
1125
+ super().__init__()
1126
+ # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
1127
+ # has some connection to NTK literature
1128
+ # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
1129
+ base *= base_rescale_factor ** (dim / (dim - 2))
1130
+
1131
+ inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
1132
+ self.register_buffer('inv_freq', inv_freq)
1133
+
1134
+ assert interpolation_factor >= 1.
1135
+ self.interpolation_factor = interpolation_factor
1136
+
1137
+ if not use_xpos:
1138
+ self.register_buffer('scale', None)
1139
+ return
1140
+
1141
+ scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
1142
+
1143
+ self.scale_base = scale_base
1144
+ self.register_buffer('scale', scale)
1145
+
1146
+ def forward(self, seq_len):
1147
+ device = self.inv_freq.device
1148
+ t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
1149
+
1150
+ t = t / self.interpolation_factor
1151
+
1152
+ freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
1153
+ freqs = torch.cat((freqs, freqs), dim = -1)
1154
+
1155
+ if not exists(self.scale):
1156
+ return freqs, 1.
1157
+
1158
+ power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
1159
+ scale = self.scale ** rearrange(power, 'n -> n 1')
1160
+ scale = torch.cat((scale, scale), dim = -1)
1161
+
1162
+ return freqs, scale
1163
+
1164
+
1165
+ def rotate_half(x):
1166
+ x = rearrange(x, '... (j d) -> ... j d', j = 2)
1167
+ x1, x2 = x.unbind(dim = -2)
1168
+ return torch.cat((-x2, x1), dim = -1)
1169
+
1170
+ def apply_rotary_pos_emb(t, freqs, scale = 1):
1171
+ rot_dim, seq_len = freqs.shape[-1], t.shape[-2]
1172
+ freqs = freqs[-seq_len:, :]
1173
+
1174
+ if t.ndim == 4 and freqs.ndim == 3:
1175
+ freqs = rearrange(freqs, 'b n d -> b 1 n d')
1176
+
1177
+ # partial rotary embeddings, Wang et al. GPT-J
1178
+ t, t_unrotated = t[..., :rot_dim], t[..., rot_dim:]
1179
+ t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
1180
+ return torch.cat((t, t_unrotated), dim = -1)
1181
+
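+ # a minimal end-to-end sketch (illustrative only; `_demo_rotary` is not part of the
+ # original module): rotating queries and keys by position-dependent angles makes
+ # their dot products depend only on relative offset; dims past rot_dim pass through
+ # unrotated (partial rotary embeddings)
+
+ def _demo_rotary():
+     rotary = RotaryEmbedding(dim = 32)
+     freqs, xpos_scale = rotary(128) # freqs: (128, 32); xpos_scale is 1. when xpos is off
+     q = torch.randn(1, 8, 128, 64) # only the first 32 of 64 dims get rotated
+     return apply_rotary_pos_emb(q, freqs, xpos_scale)
+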
1182
+ # norms
1183
+
1184
+ class Scale(nn.Module):
1185
+ def __init__(self, value, fn):
1186
+ super().__init__()
1187
+ self.value = value
1188
+ self.fn = fn
1189
+
1190
+ def forward(self, x, **kwargs):
1191
+ out = self.fn(x, **kwargs)
1192
+ scale_fn = lambda t: t * self.value
1193
+
1194
+ if not isinstance(out, tuple):
1195
+ return scale_fn(out)
1196
+
1197
+ return (scale_fn(out[0]), *out[1:])
1198
+
1199
+ class ScaleNorm(nn.Module):
1200
+ def __init__(self, dim, eps = 1e-5):
1201
+ super().__init__()
1202
+ self.eps = eps
1203
+ self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5))
1204
+
1205
+ def forward(self, x):
1206
+ norm = torch.norm(x, dim = -1, keepdim = True)
1207
+ return x / norm.clamp(min = self.eps) * self.g
1208
+
1209
+ class RMSNorm(nn.Module):
1210
+ def __init__(self, dim):
1211
+ super().__init__()
1212
+ self.scale = dim ** 0.5
1213
+ self.g = nn.Parameter(torch.ones(dim))
1214
+
1215
+ def forward(self, x):
1216
+ return F.normalize(x, dim = -1) * self.scale * self.g
1217
+
1218
+ class SimpleRMSNorm(nn.Module):
1219
+ def __init__(self, dim):
1220
+ super().__init__()
1221
+ self.scale = dim ** 0.5
1222
+
1223
+ def forward(self, x):
1224
+ return F.normalize(x, dim = -1) * self.scale
1225
+
1226
+ # residual and residual gates
1227
+
1228
+ class Residual(nn.Module):
1229
+ def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.):
1230
+ super().__init__()
1231
+ self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
1232
+ self.scale_residual_constant = scale_residual_constant
1233
+
1234
+ def forward(self, x, residual):
1235
+ if exists(self.residual_scale):
1236
+ residual = residual * self.residual_scale
1237
+
1238
+ if self.scale_residual_constant != 1:
1239
+ residual = residual * self.scale_residual_constant
1240
+
1241
+ return x + residual
1242
+
1243
+ class GRUGating(nn.Module):
1244
+ def __init__(self, dim, scale_residual = False, **kwargs):
1245
+ super().__init__()
1246
+ self.gru = nn.GRUCell(dim, dim)
1247
+ self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None
1248
+
1249
+ def forward(self, x, residual):
1250
+ if exists(self.residual_scale):
1251
+ residual = residual * self.residual_scale
1252
+
1253
+ gated_output = self.gru(
1254
+ rearrange(x, 'b n d -> (b n) d'),
1255
+ rearrange(residual, 'b n d -> (b n) d')
1256
+ )
1257
+
1258
+ return gated_output.reshape_as(x)
1259
+
1260
+ # token shifting
1261
+
1262
+ def shift(t, amount, mask = None):
1263
+ if amount == 0:
1264
+ return t
1265
+ else:
1266
+ amount = min(amount, t.shape[1])
1267
+
1268
+ if exists(mask):
1269
+ t = t.masked_fill(~mask[..., None], 0.)
1270
+
1271
+ return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.)
1272
+
1273
+ class ShiftTokens(nn.Module):
1274
+ def __init__(self, shifts, fn):
1275
+ super().__init__()
1276
+ self.fn = fn
1277
+ self.shifts = tuple(shifts)
1278
+
1279
+ def forward(self, x, **kwargs):
1280
+ mask = kwargs.get('mask', None)
1281
+ shifts = self.shifts
1282
+ segments = len(shifts)
1283
+ feats_per_shift = x.shape[-1] // segments
1284
+ splitted = x.split(feats_per_shift, dim = -1)
1285
+ segments_to_shift, rest = splitted[:segments], splitted[segments:]
1286
+ segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
1287
+ x = torch.cat((*segments_to_shift, *rest), dim = -1)
1288
+ return self.fn(x, **kwargs)
1289
+
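+ # a concrete sketch (illustrative only; `_demo_shift` is not part of the original
+ # module): shifting by 1 delays every position's features by one step, zero-filling
+ # the front, so a feature segment can "see" the previous token before the wrapped
+ # block runs (ShiftTokens applies one such shift per feature segment)
+
+ def _demo_shift():
+     t = torch.arange(4.).reshape(1, 4, 1) # positions 0..3 as features
+     shifted = shift(t, amount = 1)
+     # shifted -> [[[0.], [0.], [1.], [2.]]] : features delayed by one position
+     return shifted
+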
1290
+ # feedforward
1291
+
1292
+ class GLU(nn.Module):
1293
+ def __init__(
1294
+ self,
1295
+ dim_in,
1296
+ dim_out,
1297
+ activation: Callable,
1298
+ mult_bias = False
1299
+ ):
1300
+ super().__init__()
1301
+ self.act = activation
1302
+ self.proj = nn.Linear(dim_in, dim_out * 2)
1303
+ self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1.
1304
+
1305
+ def forward(self, x):
1306
+ x, gate = self.proj(x).chunk(2, dim = -1)
1307
+ return x * self.act(gate) * self.mult_bias
1308
+
1309
+ class FeedForward(nn.Module):
1310
+ def __init__(
1311
+ self,
1312
+ dim,
1313
+ dim_out = None,
1314
+ mult = 4,
1315
+ glu = False,
1316
+ glu_mult_bias = False,
1317
+ swish = False,
1318
+ relu_squared = False,
1319
+ post_act_ln = False,
1320
+ dropout = 0.,
1321
+ no_bias = False,
1322
+ zero_init_output = False
1323
+ ):
1324
+ super().__init__()
1325
+ inner_dim = int(dim * mult)
1326
+ dim_out = default(dim_out, dim)
1327
+
1328
+ if relu_squared:
1329
+ activation = ReluSquared()
1330
+ elif swish:
1331
+ activation = nn.SiLU()
1332
+ else:
1333
+ activation = nn.GELU()
1334
+
1335
+ if glu:
1336
+ project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias)
1337
+ else:
1338
+ project_in = nn.Sequential(
1339
+ nn.Linear(dim, inner_dim, bias = not no_bias),
1340
+ activation
1341
+ )
1342
+
1343
+ self.ff = Sequential(
1344
+ project_in,
1345
+ nn.LayerNorm(inner_dim) if post_act_ln else None,
1346
+ nn.Dropout(dropout),
1347
+ nn.Linear(inner_dim, dim_out, bias = not no_bias)
1348
+ )
1349
+
1350
+ # init last linear layer to 0
1351
+ if zero_init_output:
1352
+ init_zero_(self.ff[-1])
1353
+
1354
+ def forward(self, x):
1355
+ return self.ff(x)
1356
+
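+ # a usage sketch (illustrative only; `_demo_feedforward` is not part of the original
+ # module): glu = True with swish = True yields the SwiGLU variant used in PaLM-style
+ # blocks
+
+ def _demo_feedforward():
+     ff = FeedForward(dim = 512, mult = 4, glu = True, swish = True, dropout = 0.1)
+     x = torch.randn(1, 16, 512)
+     return ff(x) # -> (1, 16, 512)
+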
1357
+ # attention. it is all we need
1358
+
1359
+ class Attention(nn.Module):
1360
+ def __init__(
1361
+ self,
1362
+ dim,
1363
+ dim_head = DEFAULT_DIM_HEAD,
1364
+ heads = 8,
1365
+ causal = False,
1366
+ flash = False,
1367
+ talking_heads = False,
1368
+ head_scale = False,
1369
+ sparse_topk = None,
1370
+ num_mem_kv = 0,
1371
+ dropout = 0.,
1372
+ on_attn = False,
1373
+ gate_value_heads = False,
1374
+ gate_values = False,
1375
+ zero_init_output = False,
1376
+ max_attend_past = None,
1377
+ qk_norm = False,
1378
+ qk_norm_groups = 1,
1379
+ qk_norm_scale = 10,
1380
+ qk_norm_dim_scale = False,
1381
+ one_kv_head = False,
1382
+ kv_heads = None,
1383
+ shared_kv = False,
1384
+ value_dim_head = None,
1385
+ tensor_product = False, # https://arxiv.org/abs/2208.06061
1386
+ add_zero_kv = False, # same as add_zero_attn in pytorch
1387
+ rotary_embed_values = False,
1388
+ onnxable = False
1389
+ ):
1390
+ super().__init__()
1391
+ self.scale = dim_head ** -0.5
1392
+
1393
+ self.heads = heads
1394
+ self.causal = causal
1395
+ self.max_attend_past = max_attend_past
1396
+
1397
+ assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both'
1398
+
1399
+ value_dim_head = default(value_dim_head, dim_head)
1400
+ kv_heads = default(kv_heads, heads)
1401
+
1402
+ kv_heads = 1 if one_kv_head else kv_heads
1403
+ assert divisible_by(heads, kv_heads)
1404
+
1405
+ self.kv_heads = kv_heads
1406
+
1407
+ q_dim = dim_head * heads
1408
+ k_dim = dim_head * kv_heads
1409
+ v_dim = value_dim_head * kv_heads
1410
+ out_dim = value_dim_head * heads
1411
+
1412
+ self.to_q = nn.Linear(dim, q_dim, bias = False)
1413
+ self.to_k = nn.Linear(dim, k_dim, bias = False)
1414
+
1415
+ # shared key / values, for further memory savings during inference
1416
+ assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values'
1417
+ self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None
1418
+
1419
+ # relations projection from tp-attention
1420
+ self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None
1421
+
1422
+ # add GLU gating for aggregated values, from alphafold2
1423
+ self.to_v_gate = None
1424
+ if gate_values:
1425
+ self.to_v_gate = nn.Linear(dim, out_dim)
1426
+ nn.init.constant_(self.to_v_gate.weight, 0)
1427
+ nn.init.constant_(self.to_v_gate.bias, 10)
1428
+
1429
+ # add per head gating of the output values, from 'Attend to nothing' paper
1430
+ self.to_v_head_gate = None
1431
+ if gate_value_heads:
1432
+ self.to_v_head_gate = nn.Linear(dim, heads)
1433
+ nn.init.constant_(self.to_v_head_gate.weight, 0)
1434
+ nn.init.constant_(self.to_v_head_gate.bias, 10)
1435
+
1436
+ # cosine sim attention
1437
+ self.qk_norm = qk_norm
1438
+ self.qk_norm_groups = qk_norm_groups
1439
+ self.qk_norm_scale = qk_norm_scale
1440
+
1441
+ # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442
1442
+ self.qk_norm_dim_scale = qk_norm_dim_scale
1443
+
1444
+ self.qk_norm_q_scale = self.qk_norm_k_scale = 1
1445
+ if qk_norm and qk_norm_dim_scale:
1446
+ self.qk_norm_q_scale = nn.Parameter(torch.ones(heads, 1, dim_head))
1447
+ self.qk_norm_k_scale = nn.Parameter(torch.ones(heads, 1, dim_head))
1448
+
1449
+ assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm groups'
1450
+ assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)'
1451
+
1452
+ # attend class - includes core attention algorithm + talking heads
1453
+
1454
+ self.attend = Attend(
1455
+ heads = heads,
1456
+ causal = causal,
1457
+ talking_heads = talking_heads,
1458
+ dropout = dropout,
1459
+ sparse_topk = sparse_topk,
1460
+ qk_norm = qk_norm,
1461
+ scale = qk_norm_scale if qk_norm else self.scale,
1462
+ add_zero_kv = add_zero_kv,
1463
+ flash = flash,
1464
+ onnxable = onnxable
1465
+ )
1466
+
1467
+ # head scaling
1468
+ self.head_scale = head_scale
1469
+ if head_scale:
1470
+ self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1))
1471
+
1472
+ # explicit topk sparse attention
1473
+ self.sparse_topk = sparse_topk
1474
+
1475
+ # add memory key / values
1476
+ self.num_mem_kv = num_mem_kv
1477
+ if num_mem_kv > 0:
1478
+ self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
1479
+ self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
1480
+
1481
+ # attention on attention
1482
+ self.attn_on_attn = on_attn
1483
+ self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False)
1484
+
1485
+ # whether to rotate positions into values, for absolute positions in addition to relative
1486
+ self.rotary_embed_values = rotary_embed_values
1487
+
1488
+ # init output projection 0
1489
+ if zero_init_output:
1490
+ init_zero_(self.to_out)
1491
+
1492
+ def forward(
1493
+ self,
1494
+ x,
1495
+ context = None,
1496
+ mask = None,
1497
+ context_mask = None,
1498
+ attn_mask = None,
1499
+ rel_pos = None,
1500
+ rotary_pos_emb = None,
1501
+ prev_attn = None,
1502
+ mem = None,
1503
+ return_intermediates = False,
1504
+ cache: Optional[Intermediates] = None,
1505
+ ):
1506
+ b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context)
1507
+ kv_input = default(context, x)
1508
+
1509
+ q_input = x
1510
+ k_input = kv_input
1511
+ v_input = kv_input
1512
+ r_input = x
1513
+
1514
+ if exists(mem):
1515
+ k_input, mem_packed_shape = pack([mem, k_input], 'b * d')
1516
+ v_input, _ = pack([mem, v_input], 'b * d')
1517
+
1518
+ q = self.to_q(q_input)
1519
+ k = self.to_k(k_input)
1520
+ v = self.to_v(v_input) if exists(self.to_v) else k
1521
+ r = self.to_r(r_input) if exists(self.to_r) else None
1522
+
1523
+ q = rearrange(q, 'b n (h d) -> b h n d', h = h)
1524
+
1525
+ k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r))
1526
+
1527
+ if exists(cache) and not has_context:
1528
+ ck, cv = cache.cached_kv
1529
+
1530
+ if exists(mem):
1531
+ mk, k = unpack(k, mem_packed_shape, 'b h * d')
1532
+ mv, v = unpack(v, mem_packed_shape, 'b h * d')
1533
+
1534
+ k = torch.cat((ck, k), dim = -2)
1535
+ v = torch.cat((cv, v), dim = -2)
1536
+
1537
+ if exists(mem):
1538
+ k = torch.cat((mk, k), dim = -2)
1539
+ v = torch.cat((mv, v), dim = -2)
1540
+
1541
+ if return_intermediates:
1542
+ mem_len = mem.shape[-2] if exists(mem) else 0
1543
+ cached_kv = (k[..., mem_len:, :], v[..., mem_len:, :])
1544
+
1545
+ if self.qk_norm:
1546
+ qk_l2norm = partial(l2norm, groups = self.qk_norm_groups)
1547
+ q, k = map(qk_l2norm, (q, k))
1548
+ scale = self.qk_norm_scale
1549
+
1550
+ q = q * self.qk_norm_q_scale
1551
+ k = k * self.qk_norm_k_scale
1552
+
1553
+ if exists(rotary_pos_emb) and not has_context:
1554
+ freqs, xpos_scale = rotary_pos_emb
1555
+ q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.)
1556
+
1557
+ q = apply_rotary_pos_emb(q, freqs, q_xpos_scale)
1558
+ k = apply_rotary_pos_emb(k, freqs, k_xpos_scale)
1559
+
1560
+ if self.rotary_embed_values:
1561
+ v = apply_rotary_pos_emb(v, freqs, k_xpos_scale)
1562
+
1563
+ input_mask = context_mask
1564
+
1565
+ if not exists(input_mask) and not has_context:
1566
+ input_mask = mask
1567
+
1568
+ if self.num_mem_kv > 0:
1569
+ mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v))
1570
+
1571
+ if self.qk_norm:
1572
+ mem_k = l2norm(mem_k)
1573
+ mem_k = mem_k * self.qk_norm_k_scale
1574
+
1575
+ k = torch.cat((mem_k, k), dim = -2)
1576
+ v = torch.cat((mem_v, v), dim = -2)
1577
+
1578
+ if exists(input_mask):
1579
+ input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True)
1580
+
1581
+ i, j = map(lambda t: t.shape[-2], (q, k))
1582
+
1583
+ # determine masking
1584
+
1585
+ mask_value = max_neg_value(q)
1586
+ masks = []
1587
+ final_attn_mask = None
1588
+
1589
+ if exists(input_mask):
1590
+ input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
1591
+ masks.append(~input_mask)
1592
+
1593
+ if exists(attn_mask):
1594
+ assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4'
1595
+ if attn_mask.ndim == 2:
1596
+ attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j')
1597
+ elif attn_mask.ndim == 3:
1598
+ attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j')
1599
+ masks.append(~attn_mask)
1600
+
1601
+ if exists(self.max_attend_past):
1602
+ range_q = torch.arange(j - i, j, device = device)
1603
+ range_k = torch.arange(j, device = device)
1604
+ dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j')
1605
+ max_attend_past_mask = dist > self.max_attend_past
1606
+ masks.append(max_attend_past_mask)
1607
+
1608
+ if len(masks) > 0:
1609
+ final_attn_mask = ~or_reduce(masks)
1610
+
1611
+ # prepare relative positional bias, if needed
1612
+
1613
+ attn_bias = None
1614
+ if exists(rel_pos):
1615
+ attn_bias = rel_pos(i, j)
1616
+
1617
+ # attention is all we need
1618
+
1619
+ out, intermediates = self.attend(
1620
+ q, k, v,
1621
+ mask = final_attn_mask,
1622
+ attn_bias = attn_bias,
1623
+ prev_attn = prev_attn
1624
+ )
1625
+
1626
+ # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients
1627
+
1628
+ if exists(r):
1629
+ out = out * r + out
1630
+
1631
+ # normformer scaling of heads
1632
+
1633
+ if head_scale:
1634
+ out = out * self.head_scale_params
1635
+
1636
+ # per head gating, from https://arxiv.org/abs/2306.12929
1637
+
1638
+ if exists(self.to_v_head_gate):
1639
+ head_gate = self.to_v_head_gate(x)
1640
+ out = out * rearrange(head_gate, 'b n h -> b h n 1').sigmoid()
1641
+
1642
+ # merge heads
1643
+
1644
+ out = rearrange(out, 'b h n d -> b n (h d)')
1645
+
1646
+ # alphafold2 styled gating of the values
1647
+
1648
+ if exists(self.to_v_gate):
1649
+ gates = self.to_v_gate(x)
1650
+ out = out * gates.sigmoid()
1651
+
1652
+ # combine the heads
1653
+
1654
+ out = self.to_out(out)
1655
+
1656
+ if exists(mask):
1657
+ mask = rearrange(mask, 'b n -> b n 1')
1658
+ out = out.masked_fill(~mask, 0.)
1659
+
1660
+ if not return_intermediates:
1661
+ return out
1662
+
1663
+ intermediates.cached_kv = cached_kv
1664
+
1665
+ return out, intermediates
1666
+
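+ # a usage sketch (illustrative only; `_demo_attention` is not part of the original
+ # module, and it assumes the Attend class from the section above is in scope): a
+ # causal self-attention layer with grouped kv heads, flash disabled so the einsum
+ # path is exercised
+
+ def _demo_attention():
+     attn = Attention(dim = 512, heads = 8, kv_heads = 2, causal = True, flash = False)
+     x = torch.randn(1, 16, 512)
+     return attn(x) # -> (1, 16, 512)
+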
1667
+ class AttentionLayers(nn.Module):
1668
+ def __init__(
1669
+ self,
1670
+ dim,
1671
+ depth,
1672
+ heads = 8,
1673
+ causal = False,
1674
+ cross_attend = False,
1675
+ only_cross = False,
1676
+ use_scalenorm = False,
1677
+ use_rmsnorm = False,
1678
+ use_simple_rmsnorm = False,
1679
+ alibi_pos_bias = False,
1680
+ alibi_num_heads = None,
1681
+ rel_pos_bias = False,
1682
+ rel_pos_num_buckets = 32,
1683
+ rel_pos_max_distance = 128,
1684
+ dynamic_pos_bias = False,
1685
+ dynamic_pos_bias_log_distance = False,
1686
+ dynamic_pos_bias_mlp_depth = 2,
1687
+ dynamic_pos_bias_norm = False,
1688
+ rotary_pos_emb = False,
1689
+ rotary_emb_dim = None,
1690
+ rotary_xpos = False,
1691
+ rotary_interpolation_factor = 1.,
1692
+ rotary_xpos_scale_base = 512,
1693
+ rotary_base_rescale_factor = 1.,
1694
+ custom_layers = None,
1695
+ sandwich_coef = None,
1696
+ par_ratio = None,
1697
+ weight_tie_layers = False, # Albert - https://arxiv.org/abs/1909.11942
1698
+ layers_execute_order = None, # generalizes weight tying, can do arbitrary layer execution orders
1699
+ residual_attn = False,
1700
+ cross_residual_attn = False,
1701
+ macaron = False,
1702
+ pre_norm = True,
1703
+ pre_norm_has_final_norm = True,
1704
+ gate_residual = False,
1705
+ scale_residual = False,
1706
+ scale_residual_constant = 1.,
1707
+ shift_tokens = 0,
1708
+ sandwich_norm = False,
1709
+ resi_dual = False,
1710
+ resi_dual_scale = 1.,
1711
+ zero_init_branch_output = False,
1712
+ layer_dropout = 0.,
1713
+ cross_attn_tokens_dropout = 0.,
1714
+ **kwargs
1715
+ ):
1716
+ super().__init__()
1717
+ rotary_pos_emb = rotary_pos_emb or rotary_xpos
1718
+
1719
+ ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
1720
+ attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs)
1721
+
1722
+ dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
1723
+
1724
+ self.dim = dim
1725
+ self.depth = depth
1726
+ self.causal = causal
1727
+ self.layers = nn.ModuleList([])
1728
+
1729
+ self.has_pos_emb = rel_pos_bias or rotary_pos_emb
1730
+
1731
+ rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32)
1732
+
1733
+ assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention'
1734
+ self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None
1735
+
1736
+ assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both'
1737
+ assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
1738
+
1739
+ # relative positional bias
1740
+
1741
+ flash_attn = attn_kwargs.get('flash', False)
1742
+ assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias'
1743
+
1744
+ self.rel_pos = None
1745
+ if rel_pos_bias:
1746
+ assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
1747
+ self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
1748
+ elif dynamic_pos_bias:
1749
+ assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
1750
+ self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
1751
+ elif alibi_pos_bias:
1752
+ alibi_num_heads = default(alibi_num_heads, heads)
1753
+ assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
1754
+ self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
1755
+
1756
+ assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or resiDual is selected, but not both'
1757
+ assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
1758
+
1759
+ if resi_dual:
1760
+ pre_norm = False
1761
+
1762
+ self.pre_norm = pre_norm
1763
+ self.sandwich_norm = sandwich_norm
1764
+
1765
+ self.resi_dual = resi_dual
1766
+ assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.'
1767
+ self.resi_dual_scale = resi_dual_scale
1768
+
1769
+ self.residual_attn = residual_attn
1770
+ self.cross_residual_attn = cross_residual_attn
1771
+ assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention'
1772
+
1773
+ self.cross_attend = cross_attend
1774
+
1775
+ assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm'
1776
+
1777
+ if use_scalenorm:
1778
+ norm_class = ScaleNorm
1779
+ elif use_rmsnorm:
1780
+ norm_class = RMSNorm
1781
+ elif use_simple_rmsnorm:
1782
+ norm_class = SimpleRMSNorm
1783
+ else:
1784
+ norm_class = nn.LayerNorm
1785
+
1786
+ norm_fn = partial(norm_class, dim)
1787
+
1788
+ if cross_attend and not only_cross:
1789
+ default_block = ('a', 'c', 'f')
1790
+ elif cross_attend and only_cross:
1791
+ default_block = ('c', 'f')
1792
+ else:
1793
+ default_block = ('a', 'f')
1794
+
1795
+ if macaron:
1796
+ default_block = ('f',) + default_block
1797
+
1798
+ # zero init
1799
+
1800
+ if zero_init_branch_output:
1801
+ attn_kwargs = {**attn_kwargs, 'zero_init_output': True}
1802
+ ff_kwargs = {**ff_kwargs, 'zero_init_output': True}
1803
+
1804
+ # setup weight tying, which is a special case of `layer_execute_order`
1805
+
1806
+ assert not (weight_tie_layers and any([*map(exists, (custom_layers, par_ratio, sandwich_coef))]))
1807
+
1808
+ if weight_tie_layers:
1809
+ assert not exists(layers_execute_order)
1810
+ layers_execute_order = tuple(range(len(default_block))) * depth
1811
+ depth = 1
1812
+
1813
+ # calculate layer block order
1814
+
1815
+ if exists(custom_layers):
1816
+ layer_types = custom_layers
1817
+ elif exists(par_ratio):
1818
+ par_depth = depth * len(default_block)
1819
+ assert 1 < par_ratio <= par_depth, 'par ratio out of range'
1820
+ default_block = tuple(filter(not_equals('f'), default_block))
1821
+ par_attn = par_depth // par_ratio
1822
+ depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
1823
+ par_width = (depth_cut + depth_cut // par_attn) // par_attn
1824
+ assert len(default_block) <= par_width, 'default block is too large for par_ratio'
1825
+ par_block = default_block + ('f',) * (par_width - len(default_block))
1826
+ par_head = par_block * par_attn
1827
+ layer_types = par_head + ('f',) * (par_depth - len(par_head))
1828
+ elif exists(sandwich_coef):
1829
+ assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be positive and no greater than the depth'
1830
+ layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
1831
+ else:
1832
+ layer_types = default_block * depth
1833
+
1834
+ self.layer_types = layer_types
1835
+ self.layers_execute_order = default(layers_execute_order, tuple(range(len(layer_types))))
1836
+
1837
+ assert all([i < len(self.layer_types) for i in self.layers_execute_order])
1838
+
1839
+ self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
1840
+
1841
+ # stochastic depth
1842
+
1843
+ self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types))
1844
+
1845
+ # structured dropout for cross attending
1846
+
1847
+ self.cross_attn_tokens_dropout = cross_attn_tokens_dropout
1848
+
1849
+ # calculate token shifting
1850
+
1851
+ shift_tokens = cast_tuple(shift_tokens, len(layer_types))
1852
+
1853
+ # whether it has post norm
1854
+
1855
+ self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
1856
+
1857
+ # iterate and construct layers
1858
+
1859
+ for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)):
1860
+ is_last_layer = ind == (len(self.layer_types) - 1)
1861
+
1862
+ if layer_type == 'a':
1863
+ layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs)
1864
+ elif layer_type == 'c':
1865
+ layer = Attention(dim, heads = heads, **attn_kwargs)
1866
+ elif layer_type == 'f':
1867
+ layer = FeedForward(dim, **ff_kwargs)
1868
+ layer = layer if not macaron else Scale(0.5, layer)
1869
+ else:
1870
+ raise Exception(f'invalid layer type {layer_type}')
1871
+
1872
+ if layer_shift_tokens > 0:
1873
+ shift_range_upper = layer_shift_tokens + 1
1874
+ shift_range_lower = -layer_shift_tokens if not causal else 0
1875
+ layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer)
1876
+
1877
+ residual_fn = GRUGating if gate_residual else Residual
1878
+ residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant)
1879
+
1880
+ pre_branch_norm = norm_fn() if pre_norm else None
1881
+ post_branch_norm = norm_fn() if sandwich_norm else None
1882
+ post_main_norm = norm_fn() if not pre_norm else None
1883
+
1884
+ norms = nn.ModuleList([
1885
+ pre_branch_norm,
1886
+ post_branch_norm,
1887
+ post_main_norm
1888
+ ])
1889
+
1890
+ self.layers.append(nn.ModuleList([
1891
+ norms,
1892
+ layer,
1893
+ residual
1894
+ ]))
1895
+
1896
+ def forward(
1897
+ self,
1898
+ x,
1899
+ context = None,
1900
+ mask = None,
1901
+ context_mask = None,
1902
+ attn_mask = None,
1903
+ self_attn_kv_mask = None,
1904
+ mems = None,
1905
+ seq_start_pos: Optional[Tensor] = None,
1906
+ cache: Optional[LayerIntermediates] = None,
1907
+ cache_age = 1,
1908
+ return_hiddens = False
1909
+ ):
1910
+ assert not (self.cross_attend ^ exists(context)), 'context must be passed in if and only if cross_attend is set to True'
1911
+
1912
+ # initialize accums
1913
+
1914
+ hiddens = []
1915
+ layer_hiddens = []
1916
+ intermediates = []
1917
+
1918
+ prev_attn = None
1919
+ prev_cross_attn = None
1920
+
1921
+ mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
1922
+
1923
+ # handle left padded sequences
1924
+
1925
+ if exists(seq_start_pos):
1926
+ seq_arange = torch.arange(x.shape[-2], device = x.device, dtype = torch.long)
1927
+ left_pad_mask = seq_arange >= seq_start_pos[..., None]
1928
+
1929
+ if exists(self_attn_kv_mask):
1930
+ self_attn_kv_mask = self_attn_kv_mask & left_pad_mask
1931
+ else:
1932
+ self_attn_kv_mask = left_pad_mask
1933
+
1934
+ # rotary positions
1935
+
1936
+ rotary_pos_emb = None
1937
+
1938
+ if exists(self.rotary_pos_emb):
1939
+ max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems)))
1940
+ rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length)
1941
+
1942
+ # assume cached key / values
1943
+
1944
+ attn_cache = []
1945
+
1946
+ if exists(cache):
1947
+ assert not self.training and self.causal and not any([*map(exists, (mask, attn_mask))])
1948
+
1949
+ if cache_age > 0:
1950
+ x = x[:, -cache_age:] # for spec decoding, may be greater than 1
1951
+
1952
+ attn_cache = cache.attn_intermediates
1953
+
1954
+ iter_attn_cache = iter(attn_cache)
1955
+
1956
+ # outer residual - for resiDual paper
1957
+
1958
+ outer_residual = x * self.resi_dual_scale
1959
+
1960
+ # get layers to be executed
1961
+
1962
+ layer_variables = (
1963
+ self.layer_types,
1964
+ self.layers,
1965
+ self.layer_dropouts
1966
+ )
1967
+
1968
+ layer_variables = tuple(tuple(layer_variable[i] for i in self.layers_execute_order) for layer_variable in layer_variables)
1969
+
1970
+ # go through the attention and feedforward layers
1971
+
1972
+ for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(*layer_variables)):
1973
+ is_last = ind == (len(self.layers) - 1)
1974
+
1975
+ if self.training and layer_dropout > 0. and random() < layer_dropout:
1976
+ continue
1977
+
1978
+ if layer_type == 'a':
1979
+ if return_hiddens:
1980
+ hiddens.append(x)
1981
+ layer_mem = mems.pop(0) if mems else None
1982
+
1983
+ if layer_type == 'c':
1984
+ if self.training and self.cross_attn_tokens_dropout > 0.:
1985
+ context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout)
1986
+
1987
+ inner_residual = x
1988
+
1989
+ if return_hiddens:
1990
+ layer_hiddens.append(x)
1991
+
1992
+ pre_norm, post_branch_norm, post_main_norm = norm
1993
+
1994
+ if exists(pre_norm):
1995
+ x = pre_norm(x)
1996
+
1997
+ if layer_type == 'a':
1998
+ out, inter = block(x, mask = mask, context_mask = self_attn_kv_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, cache = next(iter_attn_cache, None), mem = layer_mem, return_intermediates = True)
1999
+ elif layer_type == 'c':
2000
+ out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn, cache = next(iter_attn_cache, None), return_intermediates = True)
2001
+ elif layer_type == 'f':
2002
+ out = block(x)
2003
+
2004
+ if self.resi_dual:
2005
+ outer_residual = outer_residual + out * self.resi_dual_scale
2006
+
2007
+ if exists(post_branch_norm):
2008
+ out = post_branch_norm(out)
2009
+
2010
+ x = residual_fn(out, inner_residual)
2011
+
2012
+ if layer_type in ('a', 'c') and return_hiddens:
2013
+ intermediates.append(inter)
2014
+
2015
+ if layer_type == 'a' and self.residual_attn:
2016
+ prev_attn = inter.pre_softmax_attn
2017
+ elif layer_type == 'c' and self.cross_residual_attn:
2018
+ prev_cross_attn = inter.pre_softmax_attn
2019
+
2020
+ if exists(post_main_norm):
2021
+ x = post_main_norm(x)
2022
+
2023
+ if return_hiddens:
2024
+ layer_hiddens.append(x)
2025
+
2026
+ if self.resi_dual:
2027
+ x = x + self.final_norm(outer_residual)
2028
+ else:
2029
+ x = self.final_norm(x)
2030
+
2031
+ if not return_hiddens:
2032
+ return x
2033
+
2034
+ intermediates = LayerIntermediates(
2035
+ hiddens = hiddens,
2036
+ attn_intermediates = intermediates,
2037
+ layer_hiddens = layer_hiddens
2038
+ )
2039
+
2040
+ return x, intermediates
2041
+
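+# A hedged sketch of cached decoding with the intermediates returned above
+# (assumes a causal stack in eval mode with no masks; names are illustrative):
+#
+#   out, inter = attn_layers(tok_embs, return_hiddens = True)      # prime the cache
+#   out = attn_layers(new_tok_emb, cache = inter, cache_age = 1)   # reuse cached k/v
+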
+class Encoder(AttentionLayers):
+    def __init__(self, **kwargs):
+        assert 'causal' not in kwargs, 'cannot set causality on encoder'
+        super().__init__(causal = False, **kwargs)
+
+class Decoder(AttentionLayers):
+    def __init__(self, **kwargs):
+        assert 'causal' not in kwargs, 'cannot set causality on decoder'
+        super().__init__(causal = True, **kwargs)
+
+class CrossAttender(AttentionLayers):
+    def __init__(self, **kwargs):
+        super().__init__(cross_attend = True, only_cross = True, **kwargs)
+
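+# Minimal instantiation sketch (the hyperparameter values are illustrative only):
+#
+#   enc = Encoder(dim = 512, depth = 6, heads = 8)
+#   dec = Decoder(dim = 512, depth = 6, heads = 8, cross_attend = True)
+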
+class ViTransformerWrapper(nn.Module):
+    def __init__(
+        self,
+        *,
+        image_size,
+        patch_size,
+        attn_layers,
+        channels = 3,
+        num_classes = None,
+        post_emb_norm = False,
+        num_register_tokens = 0,
+        emb_dropout = 0.
+    ):
+        super().__init__()
+        assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder'
+        assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size'
+        dim = attn_layers.dim
+        num_patches = (image_size // patch_size) ** 2
+        patch_dim = channels * patch_size ** 2
+
+        self.patch_size = patch_size
+
+        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
+
+        has_register_tokens = num_register_tokens > 0
+        self.has_register_tokens = has_register_tokens
+
+        if has_register_tokens:
+            self.register_tokens = nn.Parameter(torch.randn(num_register_tokens, dim))
+
+        self.patch_to_embedding = nn.Sequential(
+            nn.LayerNorm(patch_dim),
+            nn.Linear(patch_dim, dim),
+            nn.LayerNorm(dim)
+        )
+
+        self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
+        self.dropout = nn.Dropout(emb_dropout)
+
+        self.attn_layers = attn_layers
+
+        self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity()
+
+    def forward(
+        self,
+        img,
+        return_embeddings = False
+    ):
+        b, p = img.shape[0], self.patch_size
+
+        x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
+        x = self.patch_to_embedding(x)
+        n = x.shape[1]
+
+        x = x + self.pos_embedding[:, :n]
+
+        x = self.post_emb_norm(x)
+        x = self.dropout(x)
+
+        if self.has_register_tokens:
+            r = repeat(self.register_tokens, 'n d -> b n d', b = b)
+            x, ps = pack((x, r), 'b * d')
+
+        x = self.attn_layers(x)
+
+        if self.has_register_tokens:
+            x, _ = unpack(x, ps, 'b * d')
+
+        if not exists(self.mlp_head) or return_embeddings:
+            return x
+
+        x = x.mean(dim = -2)
+        return self.mlp_head(x)
+
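+# Illustrative sketch: classifying 256x256 RGB images with an Encoder backbone
+# (all sizes below are assumptions, not fixed by this module):
+#
+#   vit = ViTransformerWrapper(image_size = 256, patch_size = 32, num_classes = 1000,
+#                              attn_layers = Encoder(dim = 512, depth = 6, heads = 8))
+#   logits = vit(img)  # img: (batch, 3, 256, 256) -> logits: (batch, 1000)
+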
+class TransformerWrapper(nn.Module):
+    def __init__(
+        self,
+        *,
+        num_tokens,
+        max_seq_len,
+        attn_layers,
+        emb_dim = None,
+        max_mem_len = 0,
+        shift_mem_down = 0,
+        emb_dropout = 0.,
+        post_emb_norm = False,
+        num_memory_tokens = None,
+        memory_tokens_interspersed_every = None,
+        tie_embedding = False,
+        logits_dim = None,
+        use_abs_pos_emb = True,
+        scaled_sinu_pos_emb = False,
+        l2norm_embed = False,
+        emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1
+        attn_z_loss_weight = 1e-4,
+    ):
+        super().__init__()
+        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
+
+        dim = attn_layers.dim
+        emb_dim = default(emb_dim, dim)
+        self.emb_dim = emb_dim
+        self.num_tokens = num_tokens
+
+        self.max_seq_len = max_seq_len
+        self.max_mem_len = max_mem_len
+        self.shift_mem_down = shift_mem_down
+
+        self.l2norm_embed = l2norm_embed
+        self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed)
+
+        if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
+            self.pos_emb = always(0)
+        elif scaled_sinu_pos_emb:
+            self.pos_emb = ScaledSinusoidalEmbedding(emb_dim)
+        else:
+            self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed)
+
+        self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290
+
+        self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity()
+        self.emb_dropout = nn.Dropout(emb_dropout)
+
+        self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
+        self.attn_layers = attn_layers
+
+        self.init_()
+
+        logits_dim = default(logits_dim, num_tokens)
+        self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t()
+
+        # memory tokens (like [cls]) from Memory Transformers paper
+
+        num_memory_tokens = default(num_memory_tokens, 0)
+        self.num_memory_tokens = num_memory_tokens
+        if num_memory_tokens > 0:
+            self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
+
+        self.memory_tokens_interspersed_every = memory_tokens_interspersed_every
+
+        # whether can do cached kv decoding
+
+        self.can_cache_kv = self.num_memory_tokens == 0
+
+    def init_(self):
+        if self.l2norm_embed:
+            nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
+            if not isinstance(self.pos_emb, always):
+                nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
+            return
+
+        nn.init.kaiming_normal_(self.token_emb.emb.weight)
+
+    def forward(
+        self,
+        x,
+        return_embeddings = False,
+        return_logits_and_embeddings = False,
+        return_intermediates = False,
+        mask = None,
+        return_mems = False,
+        return_attn = False,
+        mems = None,
+        pos = None,
+        prepend_embeds = None,
+        sum_embeds = None,
+        return_attn_z_loss = False,
+        attn_z_loss_weight = 1e-4,
+        seq_start_pos = None,
+        cache: Optional[LayerIntermediates] = None,
+        **kwargs
+    ):
+        b, n, device, num_mems, has_memory_tokens, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.num_memory_tokens > 0, self.emb_frac_gradient
+        return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss
+
+        # absolute positional embedding
+
+        external_pos_emb = exists(pos) and pos.dtype != torch.long
+        pos_emb = self.pos_emb(x, pos = pos, seq_start_pos = seq_start_pos) if not external_pos_emb else pos
+        x = self.token_emb(x) + pos_emb
+
+        # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training
+
+        if exists(sum_embeds):
+            x = x + sum_embeds
+
+        # post embedding norm, purportedly leads to greater stabilization
+
+        x = self.post_emb_norm(x)
+
+        # whether to append embeds, as in PaLI, for image embeddings
+
+        if exists(prepend_embeds):
+            prepend_seq, prepend_dim = prepend_embeds.shape[1:]
+            assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions'
+
+            x = torch.cat((prepend_embeds, x), dim = -2)
+
+        # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model
+
+        if emb_frac_gradient < 1:
+            assert emb_frac_gradient > 0
+            x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient)
+
+        # embedding dropout
+
+        x = self.emb_dropout(x)
+
+        x = self.project_emb(x)
+
+        if has_memory_tokens:
+            mem_every = self.memory_tokens_interspersed_every
+
+            if exists(mem_every):
+                assert mem_every > 0
+                assert isinstance(self.attn_layers, Decoder), 'only for decoder'
+                next_seq_len = math.ceil(n / mem_every) * mem_every
+
+                x = pad_at_dim(x, (0, next_seq_len - n), dim = -2, value = 0.)
+                x = rearrange(x, 'b (n m) d -> (b n) m d', m = mem_every)
+
+            mem = repeat(self.memory_tokens, 'n d -> b n d', b = x.shape[0])
+            x, mem_packed_shape = pack((mem, x), 'b * d')
+
+            # auto-handle masking after appending memory tokens
+            if not exists(mem_every) and exists(mask):
+                mask = pad_at_dim(mask, (num_mems, 0), dim = -1, value = True)
+
+            if exists(mem_every):
+                x = rearrange(x, '(b n) m d -> b (n m) d', b = b)
+
+        if self.shift_mem_down and exists(mems):
+            mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:]
+            mems = [*mems_r, *mems_l]
+
+        x, intermediates = self.attn_layers(x, mask = mask, mems = mems, cache = cache, return_hiddens = True, seq_start_pos = seq_start_pos, **kwargs)
+
+        if has_memory_tokens:
+            if exists(mem_every):
+                x = rearrange(x, 'b (n m) d -> (b n) m d', m = (mem_every + num_mems))
+
+            mem, x = unpack(x, mem_packed_shape, 'b * d')
+
+            if exists(mem_every):
+                x = rearrange(x, '(b n) m d -> b (n m) d', b = b)
+
+            x = x[:, :n]
+
+        if return_logits_and_embeddings:
+            out = (self.to_logits(x), x)
+        elif return_embeddings:
+            out = x
+        else:
+            out = self.to_logits(x)
+
+        if return_attn_z_loss:
+            pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates))
+            intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight)
+            return_intermediates = True
+
+        if return_mems:
+            hiddens = intermediates.hiddens
+            new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens
+            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
+
+            if not return_intermediates:
+                return out, new_mems
+
+            intermediates.mems = new_mems
+
+        if return_intermediates:
+            return out, intermediates
+
+        if return_attn:
+            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
+            return out, attn_maps
+
+        return out
+
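+# Illustrative sketch of an autoregressive language model built on this wrapper
+# (token count, sequence length and dimensions are assumptions):
+#
+#   model = TransformerWrapper(num_tokens = 20000, max_seq_len = 1024,
+#                              attn_layers = Decoder(dim = 512, depth = 6, heads = 8))
+#   logits = model(x)  # x: (batch, seq_len) token ids -> (batch, seq_len, 20000)
+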
+class ContinuousTransformerWrapper(nn.Module):
+    def __init__(
+        self,
+        *,
+        max_seq_len,
+        attn_layers,
+        dim_in = None,
+        dim_out = None,
+        emb_dim = None,
+        max_mem_len = 0,
+        post_emb_norm = False,
+        emb_dropout = 0.,
+        use_abs_pos_emb = True,
+        scaled_sinu_pos_emb = False
+    ):
+        super().__init__()
+        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
+
+        dim = attn_layers.dim
+
+        self.max_seq_len = max_seq_len
+
+        self.max_mem_len = max_mem_len
+
+        if not (use_abs_pos_emb and not attn_layers.has_pos_emb):
+            self.pos_emb = always(0)
+        elif scaled_sinu_pos_emb:
+            self.pos_emb = ScaledSinusoidalEmbedding(dim)
+        else:
+            self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
+
+        self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity()
+        self.emb_dropout = nn.Dropout(emb_dropout)
+
+        self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity()
+
+        self.attn_layers = attn_layers
+
+        self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity()
+
+    def forward(
+        self,
+        x,
+        return_embeddings = False,
+        return_intermediates = False,
+        return_mems = False,
+        mask = None,
+        return_attn = False,
+        mems = None,
+        pos = None,
+        prepend_embeds = None,
+        **kwargs
+    ):
+        x = self.project_in(x)
+        x = x + self.pos_emb(x, pos = pos)
+
+        x = self.post_emb_norm(x)
+
+        # whether to append embeds, as in PaLI, for image embeddings
+
+        if exists(prepend_embeds):
+            _, prepend_dim = prepend_embeds.shape[1:]
+            assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as model dimensions'
+
+            x = torch.cat((prepend_embeds, x), dim = -2)
+
+        x = self.emb_dropout(x)
+
+        x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs)
+
+        out = self.project_out(x) if not return_embeddings else x
+
+        if return_intermediates:
+            return out, intermediates
+
+        if return_mems:
+            hiddens = intermediates.hiddens
+            new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), hiddens))
+            return out, new_mems
+
+        if return_attn:
+            attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
+            return out, attn_maps
+
+        return out
+
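+# Illustrative sketch for continuous (non-token) inputs such as feature vectors
+# (the dim_in / dim_out values are assumptions):
+#
+#   model = ContinuousTransformerWrapper(max_seq_len = 1024, dim_in = 32, dim_out = 32,
+#                                        attn_layers = Encoder(dim = 512, depth = 6, heads = 8))
+#   out = model(x)  # x: (batch, seq_len, 32) -> (batch, seq_len, 32)
+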
+class XTransformer(nn.Module):
+    def __init__(
+        self,
+        *,
+        dim,
+        tie_token_emb = False,
+        ignore_index = -100,
+        pad_value = 0,
+        cross_attn_tokens_dropout = 0.,
+        **kwargs
+    ):
+        super().__init__()
+        enc_kwargs, kwargs = groupby_prefix_and_trim('enc_', kwargs)
+        dec_kwargs, kwargs = groupby_prefix_and_trim('dec_', kwargs)
+
+        assert 'dim' not in enc_kwargs and 'dim' not in dec_kwargs, 'dimension of either encoder or decoder must be set with `dim` keyword'
+        enc_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], enc_kwargs)
+        enc_transformer_kwargs['emb_dropout'] = enc_kwargs.pop('emb_dropout', 0)
+        enc_transformer_kwargs['num_memory_tokens'] = enc_kwargs.pop('num_memory_tokens', None)
+        enc_transformer_kwargs['scaled_sinu_pos_emb'] = enc_kwargs.pop('scaled_sinu_pos_emb', False)
+        enc_transformer_kwargs['use_abs_pos_emb'] = enc_kwargs.pop('use_abs_pos_emb', True)
+
+        dec_transformer_kwargs = pick_and_pop(['num_tokens', 'max_seq_len'], dec_kwargs)
+        dec_transformer_kwargs['emb_dropout'] = dec_kwargs.pop('emb_dropout', 0)
+        dec_transformer_kwargs['scaled_sinu_pos_emb'] = dec_kwargs.pop('scaled_sinu_pos_emb', False)
+        dec_transformer_kwargs['use_abs_pos_emb'] = dec_kwargs.pop('use_abs_pos_emb', True)
+
+        self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # how many tokens from the encoder to dropout when cross attending from decoder - seen in a couple papers, including Perceiver AR - this will also be very effective regularization when cross attending to very long memories
+
+        self.encoder = TransformerWrapper(
+            **enc_transformer_kwargs,
+            attn_layers = Encoder(dim = dim, **enc_kwargs)
+        )
+
+        self.decoder = TransformerWrapper(
+            **dec_transformer_kwargs,
+            attn_layers = Decoder(dim = dim, cross_attend = True, **dec_kwargs)
+        )
+
+        if tie_token_emb:
+            self.decoder.token_emb = self.encoder.token_emb
+
+        self.decoder = AutoregressiveWrapper(self.decoder, ignore_index=ignore_index, pad_value=pad_value)
+
+    @torch.no_grad()
+    def generate(self, seq_in, seq_out_start, seq_len, mask = None, attn_mask = None, **kwargs):
+        encodings = self.encoder(seq_in, mask = mask, attn_mask = attn_mask, return_embeddings = True)
+        return self.decoder.generate(seq_out_start, seq_len, context = encodings, context_mask = mask, **kwargs)
+
+    def forward(self, src, tgt, mask = None, attn_mask = None, src_prepend_embeds = None):
+
+        if exists(src_prepend_embeds) and exists(mask):
+            mask = pad_at_dim(mask, (src_prepend_embeds.shape[-2], 0), dim = -1, value = True)
+
+        enc = self.encoder(src, mask = mask, attn_mask = attn_mask, prepend_embeds = src_prepend_embeds, return_embeddings = True)
+
+        if self.training and self.cross_attn_tokens_dropout > 0:
+            enc, mask = dropout_seq(enc, mask, self.cross_attn_tokens_dropout)
+
+        out = self.decoder(tgt, context = enc, context_mask = mask)
+        return out
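+
+# Illustrative encoder-decoder sketch (token counts, lengths and dims are
+# assumptions; `enc_` / `dec_` prefixed kwargs are routed to each side):
+#
+#   model = XTransformer(dim = 512,
+#                        enc_num_tokens = 256, enc_depth = 6, enc_heads = 8, enc_max_seq_len = 1024,
+#                        dec_num_tokens = 256, dec_depth = 6, dec_heads = 8, dec_max_seq_len = 1024)
+#   loss = model(src, tgt)                                # training objective via AutoregressiveWrapper
+#   out = model.generate(src, tgt[:, :1], seq_len = 1024) # sampling from a start token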
x_transformer_1_27_16.py CHANGED
@@ -21,7 +21,16 @@ r'''############################################################################
 #
 # !pip install torch
 # !pip install einops
-# !pip install matplotlib
+#
+#===============================================================================
+#
+# Basic use example
+#
+# from x_transformer_1_27_16 import *
+#
+# model = instantiate_x_transformer_model()
+# save_x_transformer_model(model)
+# load_x_transformer_model('model_checkpoint_1_epochs_1_steps_0_loss_1_acc.pth')
 #
 #===============================================================================
 '''
@@ -3046,11 +3055,7 @@ class AutoregressiveWrapper(Module):
 
         # sampling up to seq_len
 
-
-
         for sl in range(seq_len):
-
-            try:
 
             if restrict_to_max_seq_len:
                 max_len_exceeded = out.shape[-1] > max_seq_len
@@ -3124,14 +3129,6 @@ class AutoregressiveWrapper(Module):
                 print('Model called the end of sequence at:', sl, '/', seq_len)
                 break
 
-            except KeyboardInterrupt:
-                print('Stopping generation...')
-                break
-
-            except Exception as e:
-                print('Error:', e)
-                break
-
         if exists(eos_token):
             # mask out everything after the eos tokens
             shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
@@ -3659,11 +3656,11 @@ import importlib
 
 #===============================================================================
 
-def instantiate_x_transformer_model(max_seq_len,
-                                    num_tokens,
+def instantiate_x_transformer_model(num_tokens=20000,
+                                    max_seq_len=8192,
                                     dim=1024,
-                                    depth=4,
-                                    heads=8,
+                                    depth=32,
+                                    heads=32,
                                     attn_flash=True,
                                     ignore_index=-1,
                                     verbose=True):
@@ -3686,10 +3683,7 @@ def instantiate_x_transformer_model(max_seq_len,
                                     ignore_index=ignore_index
                                     )
 
-    if torch.cuda.is_available():
-        model.cuda()
-    else:
-        model.cpu()
+    model.cuda()
 
     if verbose:
         print('Done!')
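
A quick sketch of the updated entry point (the values shown are simply the new defaults and can all be overridden; note that the helper now calls model.cuda() unconditionally, so a CUDA device is required):

    model = instantiate_x_transformer_model()  # num_tokens=20000, max_seq_len=8192, dim=1024, depth=32, heads=32
    small = instantiate_x_transformer_model(num_tokens=512, max_seq_len=2048, depth=8, heads=8)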
@@ -3700,12 +3694,14 @@ def instantiate_x_transformer_model(max_seq_len,
 
 #===============================================================================
 
 def save_x_transformer_model(model,
-                             number_of_tokens,
-                             max_seq_len,
+                             checkpoint_dir='./',
+                             checkpoint_name='model_checkpoint',
+                             number_of_tokens=20000,
+                             ignore_index=-1,
+                             max_seq_len=8192,
                              dim=1024,
-                             depth=4,
-                             heads=8,
-                             ignore_index=-1,
+                             depth=32,
+                             heads=32,
                              use_flash_attn=True,
                              batch_size=4,
                              grad_acc_rate=4,
@@ -3714,8 +3710,6 @@ def save_x_transformer_model(model,
                              num_steps=1,
                              loss=0,
                              accuracy=1,
-                             checkpoint_dir='./',
-                             checkpoint_name='model_checkpoint',
                              verbose=True
                              ):
 
@@ -3783,11 +3777,7 @@ def load_x_transformer_model(checkpoint_file_path,
                                         attn_layers=attn_layers)
     model = class_(transformer_model, ignore_index=checkpoint['ignore_index'])
     model.load_state_dict(checkpoint['model_state_dict'])
-
-    if torch.cuda.is_available():
-        model.cuda()
-    else:
-        model.cpu()
+    model.cuda()
 
     if verbose:
         print('Done!')
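
A matching save/load round trip (keyword values are the new defaults; the checkpoint file name follows the module's own header example, and loading likewise moves the model to CUDA):

    save_x_transformer_model(model, checkpoint_dir='./', checkpoint_name='model_checkpoint')
    model = load_x_transformer_model('model_checkpoint_1_epochs_1_steps_0_loss_1_acc.pth')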
@@ -3809,367 +3799,6 @@ def load_x_transformer_model(checkpoint_file_path,
         print('Model accuracy:', checkpoint['accuracy'])
         print('=' * 70)
 
-    return model
-
-################################################################################
-
-def generate_from_x_transformer_model(model=None,
-                                      num_tokens_to_generate=32,
-                                      prime_tokens_list=[0],
-                                      return_prime=False,
-                                      batch_size=1,
-                                      temperature=0.9,
-                                      precision='bfloat16',
-                                      device='cuda',
-                                      verbose=True
-                                      ):
-
-    if model is not None:
-
-        device_options = ['cuda', 'cpu', 'cuda:0']
-
-        if device not in device_options or not torch.cuda.is_available():
-            device_type = 'cpu'
-        else:
-            device_type = device
-
-        precision_options = ['float32', 'bfloat16', 'float16']
-
-        if precision == 'bfloat16' and device_type != 'cpu' and not torch.cuda.is_bf16_supported():
-            precision = 'float16'
-
-        if precision in precision_options:
-            ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[precision]
-        else:
-            ptdtype = torch.bfloat16
-
-        ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
-
-        model.to(device_type)
-
-        model.eval()
-
-        if verbose:
-            print('=' * 70)
-            print('Generation information')
-            print('=' * 70)
-            print('Device:', device)
-            print('Precision:', precision)
-            print('=' * 70)
-            print('Prime tokens sample:', prime_tokens_list[:10])
-            print('=' * 70)
-            print('Model will generate', batch_size, 'batches', num_tokens_to_generate, 'tokens each', )
-            print('Total number of tokens to generate:', num_tokens_to_generate * batch_size)
-            print('=' * 70)
-            print('Model temeperature', temperature)
-            print('=' * 70)
-
-        input = torch.tensor([prime_tokens_list] * batch_size, dtype=torch.long, device=device_type)
-
-        with ctx:
-            out = model.generate(input,
-                                 num_tokens_to_generate,
-                                 temperature=temperature,
-                                 return_prime=return_prime,
-                                 verbose=verbose
-                                 )
-        if verbose:
-            print('=' * 70)
-            print('Done!')
-            print('=' * 70)
-
-        return out.tolist()
-
-    else:
-        print('=' * 70)
-        print('Please check the model!')
-        print('=' * 70)
-
-################################################################################
-
-from torch.utils.data import Dataset
-
-class X_Transformer_Dataset(Dataset):
-    def __init__(self, data, seq_len, batch_size):
-        super().__init__()
-        self.data = data
-        self.seq_len = seq_len
-        self.batch_size = batch_size
-
-    def __getitem__(self, index):
-
-        full_seq = torch.Tensor(self.data[index][:self.seq_len+1]).long()
-
-        return full_seq.cuda()
-
-    def __len__(self):
-        return (len(self.data) // self.batch_size) * self.batch_size
-
-################################################################################
-
-import tqdm
-import pickle
-import matplotlib.pyplot as plt
-
-#===============================================================================
-
-def save_data(data, filename):
-    with open(filename, 'wb') as f:
-        pickle.dump(data, f)
-
-def cycle_train_data(loader):
-    while True:
-        for data in loader:
-            yield data
-
-def default_output_func(output):
-    print(output)
-
-#===============================================================================
-
-def train_x_transformer_model(model,
-                              model_sequence_length,
-                              model_number_of_tokens,
-                              model_name,
-                              training_data,
-                              model_ignore_index=-1,
-                              model_dimension=1024,
-                              model_depth=4,
-                              model_number_of_heads=8,
-                              model_uses_flash_attention=True,
-                              training_data_batch_size=1,
-                              training_learning_rate=1e-4,
-                              accumulate_gradients_every=4,
-                              number_of_training_epochs=1,
-                              validate_every=100,
-                              save_every=500,
-                              generate_every=100,
-                              generate_length=100,
-                              generate_num_prime_tokens=512,
-                              generate_output_custom_func=default_output_func,
-                              print_stats_every=20,
-                              device='cuda',
-                              precision='float16',
-                              clip_grad_norm_value=1.0,
-                              scaler_enabled=True,
-                              save_directory='./',
-                              plot_statistics=True,
-                              verbose=True
-                              ):
-
-    #===========================================================================
-
-    device_options = ['cuda', 'cpu', 'cuda:0']
-
-    if device not in device_options or not torch.cuda.is_available():
-        device_type = 'cpu'
-    else:
-        device_type = device
-
-    precision_options = ['float32', 'bfloat16', 'float16']
-
-    if precision == 'bfloat16' and device_type != 'cpu' and not torch.cuda.is_bf16_supported():
-        precision = 'float16'
-
-    if precision in precision_options:
-        ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[precision]
-    else:
-        ptdtype = torch.bfloat16
-
-    ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
-
-    model.to(device_type)
-
-    optim = torch.optim.Adam(model.parameters(), lr=training_learning_rate)
-
-    scaler = torch.cuda.amp.GradScaler(enabled=scaler_enabled)
-
-    #===========================================================================
-
-    train_losses = []
-    val_losses = []
-
-    train_accs = []
-    val_accs = []
-
-    nsteps = 0
-
-    for ep in range(number_of_training_epochs):
-
-        print('=' * 70)
-        print('Epoch #', ep)
-        print('=' * 70)
-
-        random.shuffle(training_data)
-
-        train_dataset = X_Transformer_Dataset(training_data, model_sequence_length, training_data_batch_size)
-        val_dataset = X_Transformer_Dataset(training_data, model_sequence_length, training_data_batch_size)
-        train_loader = cycle_train_data(DataLoader(train_dataset, batch_size = training_data_batch_size))
-        val_loader = cycle_train_data(DataLoader(val_dataset, batch_size = training_data_batch_size))
-
-        NUM_BATCHES = len(training_data) // training_data_batch_size // accumulate_gradients_every
-
-        for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='Training'):
-            model.train()
-
-            for __ in range(accumulate_gradients_every):
-                with ctx:
-                    loss, acc = model(next(train_loader))
-                    loss = loss / accumulate_gradients_every
-                scaler.scale(loss).backward(torch.ones(loss.shape).cuda())
-
-            if i % print_stats_every == 0:
-                print(f'Training loss: {loss.mean().item() * accumulate_gradients_every}')
-                print(f'Training acc: {acc.mean().item()}')
-
-            train_losses.append(loss.mean().item() * accumulate_gradients_every)
-            train_accs.append(acc.mean().item())
-
-            scaler.unscale_(optim)
-            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm_value)
-            scaler.step(optim)
-            scaler.update()
-            optim.zero_grad(set_to_none=True)
-
-            nsteps += 1
-
-            if i % validate_every == 0:
-                model.eval()
-                with torch.no_grad():
-                    with ctx:
-                        val_loss, val_acc = model(next(val_loader))
-
-                print(f'Validation loss: {val_loss.mean().item()}')
-                print(f'Validation acc: {val_acc.mean().item()}')
-
-                val_losses.append(val_loss.mean().item())
-                val_accs.append(val_acc.mean().item())
-
-                if plot_statistics:
-
-                    print('Plotting training loss graph...')
-
-                    tr_loss_list = train_losses
-                    plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
-                    plt.show()
-                    plt.close()
-                    print('Done!')
-
-                    print('Plotting training acc graph...')
-
-                    tr_loss_list = train_accs
-                    plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
-                    plt.show()
-                    plt.close()
-                    print('Done!')
-
-                    print('Plotting validation loss graph...')
-                    tr_loss_list = val_losses
-                    plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
-                    plt.show()
-                    plt.close()
-                    print('Done!')
-
-                    print('Plotting validation acc graph...')
-                    tr_loss_list = val_accs
-                    plt.plot([i for i in range(len(tr_loss_list))] ,tr_loss_list, 'b')
-                    plt.show()
-                    plt.close()
-                    print('Done!')
-
-            #=====================================================================
-
-            if i % generate_every == 0:
-                model.eval()
-
-                inp = random.choice(val_dataset)[:generate_num_prime_tokens]
-
-                print(inp)
-
-                with ctx:
-
-                    sample = model.generate(inp[None, ...], generate_length)
-
-                generate_output_custom_func(sample.tolist())
-
-            #=====================================================================
-
-            if i % save_every == 0:
-
-                print('Saving model progress. Please wait...')
-                print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth')
-
-                fname = save_directory+'/model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth'
-
-                save_x_transformer_model(model,
-                                         checkpoint_dir=save_directory,
-                                         checkpoint_name=model_name,
-                                         number_of_tokens=model_number_of_tokens,
-                                         ignore_index=model_ignore_index,
-                                         max_seq_len=model_sequence_length,
-                                         dim=model_dimension,
-                                         depth=model_depth,
-                                         heads=model_number_of_heads,
-                                         use_flash_attn=model_uses_flash_attention,
-                                         batch_size=training_data_batch_size,
-                                         grad_acc_rate=accumulate_gradients_every,
-                                         learning_rate=training_learning_rate,
-                                         num_epochs=number_of_training_epochs,
-                                         num_steps=nsteps,
-                                         loss=str(round(float(train_losses[-1]), 4)),
-                                         accuracy=str(round(float(train_accs[-1]), 4)),
-                                         verbose=verbose)
-
-                data = [train_losses, train_accs, val_losses, val_accs]
-
-                save_data(data, save_directory+'losses_accuracies.pickle')
-
-                print('Done!')
-
-    #===========================================================================
-
-    print('Saving model progress. Please wait...')
-    print('model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth')
-
-    fname = save_directory+'model_checkpoint_' + str(nsteps) + '_steps_' + str(round(float(train_losses[-1]), 4)) + '_loss_' + str(round(float(train_accs[-1]), 4)) + '_acc.pth'
-
-    torch.save(model.state_dict(), fname)
-
-    print('Done!')
-
-    data = [train_losses, train_accs, val_losses, val_accs]
-
-    save_data(data, save_directory+'losses_accuracies')
-
-    # Save training loss graph
-
-    plt.plot([i for i in range(len(train_losses))] ,train_losses, 'b')
-    plt.savefig(save_directory+'training_loss_graph.png')
-    plt.close()
-    print('Done!')
-
-    # Save training acc graph
-
-    plt.plot([i for i in range(len(train_accs))] ,train_accs, 'b')
-    plt.savefig(save_directory+'training_accuracy_graph.png')
-    plt.close()
-    print('Done!')
-
-    # Save validation loss graph
-
-    plt.plot([i for i in range(len(val_losses))] ,val_losses, 'b')
-    plt.savefig(save_directory+'validation_loss_graph.png')
-    plt.close()
-    print('Done!')
-
-    # Save validation acc graph
-
-    plt.plot([i for i in range(len(val_accs))] ,val_accs, 'b')
-    plt.savefig(save_directory+'validation_accuracy_graph.png')
-    plt.close()
-    print('Done!')
-
 ################################################################################
-
 # This is the end of x-transformer Python module
 ################################################################################
 