mnauf committed
Commit fc36c8d · 1 Parent(s): b68a035

First deployment, all works locally
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="redgpt" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/redditGPT.iml" filepath="$PROJECT_DIR$/.idea/redditGPT.iml" />
+     </modules>
+   </component>
+ </project>
.idea/redditGPT.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="jdk" jdkName="redgpt" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="" vcs="Git" />
+   </component>
+ </project>
__pycache__/model.cpython-38.pyc ADDED
Binary file (13.1 kB).
 
__pycache__/sample.cpython-38.pyc ADDED
Binary file (2.89 kB).
 
app.py ADDED
@@ -0,0 +1,49 @@
+ import gradio as gr
+ from sample import generate_text
+
+ badges = """
+ <div style="display: flex">
+ <span style="margin-right: 5px">
+ <a href="https://www.linkedin.com/in/mnauf/" target="_blank"> <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/0/01/LinkedIn_Logo.svg/2560px-LinkedIn_Logo.svg.png" alt="Linkedin" width=100 height=auto> </a>
+ </span>
+ <span style="margin-right: 5px">
+ <a href="https://twitter.com/MNaufil" target="_blank"> <img src="https://img.shields.io/badge/Twitter-%231DA1F2.svg?style=for-the-badge&logo=Twitter&logoColor=white" alt="Twitter"> </a>
+ </span>
+ </div>
+ """
+
+ description="""GPT2 finetuned on recent public anonymous conversations from Reddit to capture public sentiment regarding the recent unfolding events in Pakistan. Since the general public is afraid of speaking publicly with their identities exposed because of the crackdown, Reddit is the most genuine source we can get to understand public sentiment. Data is collected from the Pakistan, AskMiddleEast and WorldNews Reddit communities from last year until 25th May 2023."""
+ with gr.Blocks() as block:
+     # gr.Markdown("""![Imgur](https://i.imgur.com/iPZlUa8.png)""")
+     gr.HTML("<img src=https://i.imgur.com/iPZlUa8.png width=auto height=200>")
+     gr.Markdown(badges)
+     gr.Markdown(description)
+     with gr.Row():
+         input_text = gr.Textbox(
+             label="Input Text",
+             lines=1,
+             value="Imran Khan arrest",
+             elem_id="input_text"
+         )
+
+         output_text = gr.Textbox(
+             label="Output",
+             lines=10,
+             value="",
+             elem_id="input_text"
+         )
+
+     inputs = [input_text]
+     outputs = [output_text]
+
+     run_button = gr.Button(
+         value="Generate Text",
+     )
+
+     run_button.click(
+         fn=generate_text,
+         inputs=inputs,
+         outputs=outputs,
+         queue=True
+     )
+ block.queue(concurrency_count=3).launch(server_name="localhost")
configurator.py ADDED
@@ -0,0 +1,47 @@
+ """
+ Poor Man's Configurator. Probably a terrible idea. Example usage:
+ $ python train.py config/override_file.py --batch_size=32
+ this will first run config/override_file.py, then override batch_size to 32
+
+ The code in this file will be run as follows from e.g. train.py:
+ >>> exec(open('configurator.py').read())
+
+ So it's not a Python module, it's just shuttling this code away from train.py
+ The code in this script then overrides the globals()
+
+ I know people are not going to love this, I just really dislike configuration
+ complexity and having to prepend config. to every single variable. If someone
+ comes up with a better simple Python solution I am all ears.
+ """
+
+ import sys
+ from ast import literal_eval
+
+ for arg in sys.argv[1:]:
+     if '=' not in arg:
+         # assume it's the name of a config file
+         assert not arg.startswith('--')
+         config_file = arg
+         print(f"Overriding config with {config_file}:")
+         with open(config_file) as f:
+             print(f.read())
+         exec(open(config_file).read())
+     else:
+         # assume it's a --key=value argument
+         assert arg.startswith('--')
+         key, val = arg.split('=')
+         key = key[2:]
+         if key in globals():
+             try:
+                 # attempt to eval it (e.g. if bool, number, etc.)
+                 attempt = literal_eval(val)
+             except (SyntaxError, ValueError):
+                 # if that goes wrong, just use the string
+                 attempt = val
+             # ensure the types match ok
+             assert type(attempt) == type(globals()[key])
+             # cross fingers
+             print(f"Overriding: {key} = {attempt}")
+             globals()[key] = attempt
+         else:
+             raise ValueError(f"Unknown config key: {key}")
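To make the override mechanism concrete, here is a minimal sketch (not part of the commit) of how the globals defined in sample.py, which exec()s this file, can be overridden. The config filename config/cpu_sample.py is hypothetical; device, compile, and temperature are variables sample.py actually defines.

# config/cpu_sample.py (hypothetical override file)
device = 'cpu'
compile = False

# Invoked as:  python sample.py config/cpu_sample.py --temperature=0.6
# configurator.py first exec()s config/cpu_sample.py (setting device and compile),
# then parses --temperature=0.6, literal_eval()s "0.6" to a float, checks that the
# type matches the existing global, and finally sets globals()['temperature'] = 0.6.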
model.py ADDED
@@ -0,0 +1,337 @@
+ """
+ Full definition of a GPT Language Model, all of it in this single file.
+ References:
+ 1) the official GPT-2 TensorFlow implementation released by OpenAI:
+ https://github.com/openai/gpt-2/blob/master/src/model.py
+ 2) huggingface/transformers PyTorch implementation:
+ https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py
+ """
+
+ import math
+ import inspect
+ from dataclasses import dataclass
+
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ # @torch.jit.script # good to enable when not using torch.compile, disable when using (our default)
+ def new_gelu(x):
+     """
+     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
+     Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
+     """
+     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
+
+ class LayerNorm(nn.Module):
+     """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
+
+     def __init__(self, ndim, bias):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(ndim))
+         self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+
+     def forward(self, input):
+         return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+         # regularization
+         self.attn_dropout = nn.Dropout(config.dropout)
+         self.resid_dropout = nn.Dropout(config.dropout)
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.dropout = config.dropout
+         # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
+         if not self.flash:
+             print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+             # causal mask to ensure that attention is only applied to the left in the input sequence
+             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+                                         .view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+         if self.flash:
+             # efficient attention using Flash Attention CUDA kernels
+             y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
+         else:
+             # manual implementation of attention
+             att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+             att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+             att = F.softmax(att, dim=-1)
+             att = self.attn_dropout(att)
+             y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+         # output projection
+         y = self.resid_dropout(self.c_proj(y))
+         return y
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = new_gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024
+     vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
+     n_layer: int = 12
+     n_head: int = 12
+     n_embd: int = 768
+     dropout: float = 0.0
+     bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             drop = nn.Dropout(config.dropout),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = LayerNorm(config.n_embd, bias=config.bias),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         # with weight tying when using torch.compile() some warnings get generated:
+         # "UserWarning: functional_call was passed multiple values for tied weights.
+         # This behavior is deprecated and will be an error in future versions"
+         # not 100% sure what this is, so far seems to be harmless. TODO investigate
+         self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
+
+         # init all weights
+         self.apply(self._init_weights)
+         # apply special scaled init to the residual projections, per GPT-2 paper
+         for pn, p in self.named_parameters():
+             if pn.endswith('c_proj.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
+
+         # report number of parameters
+         print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
+
+     def get_num_params(self, non_embedding=True):
+         """
+         Return the number of parameters in the model.
+         For non-embedding count (default), the position embeddings get subtracted.
+         The token embeddings would too, except due to the parameter sharing these
+         params are actually used as weights in the final layer, so we include them.
+         """
+         n_params = sum(p.numel() for p in self.parameters())
+         if non_embedding:
+             n_params -= self.transformer.wpe.weight.numel()
+         return n_params
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         device = idx.device
+         b, t = idx.size()
+         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+         pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
+
+         # forward the GPT model itself
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
+         x = self.transformer.drop(tok_emb + pos_emb)
+         for block in self.transformer.h:
+             x = block(x)
+         x = self.transformer.ln_f(x)
+
+         if targets is not None:
+             # if we are given some desired targets also calculate the loss
+             logits = self.lm_head(x)
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the lm_head on the very last position
+             logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
+             loss = None
+
+         return logits, loss
+
+     def crop_block_size(self, block_size):
+         # model surgery to decrease the block size if necessary
+         # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
+         # but want to use a smaller block size for some smaller, simpler model
+         assert block_size <= self.config.block_size
+         self.config.block_size = block_size
+         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
+         for block in self.transformer.h:
+             if hasattr(block.attn, 'bias'):
+                 block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
+
+     @classmethod
+     def from_pretrained(cls, model_type, override_args=None):
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         override_args = override_args or {} # default to empty dict
+         # only dropout can be overridden see more notes below
+         assert all(k == 'dropout' for k in override_args)
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         print("forcing vocab_size=50257, block_size=1024, bias=True")
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         config_args['bias'] = True # always True for GPT model checkpoints
+         # we can override the dropout rate, if desired
+         if 'dropout' in override_args:
+             print(f"overriding dropout rate to {override_args['dropout']}")
+             config_args['dropout'] = override_args['dropout']
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+         # start with all of the candidate parameters
+         param_dict = {pn: p for pn, p in self.named_parameters()}
+         # filter out those that do not require grad
+         param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
+         # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.
+         # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
+         decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
+         nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
+         optim_groups = [
+             {'params': decay_params, 'weight_decay': weight_decay},
+             {'params': nodecay_params, 'weight_decay': 0.0}
+         ]
+         num_decay_params = sum(p.numel() for p in decay_params)
+         num_nodecay_params = sum(p.numel() for p in nodecay_params)
+         print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
+         print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
+         # Create AdamW optimizer and use the fused version if it is available
+         fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
+         use_fused = fused_available and device_type == 'cuda'
+         extra_args = dict(fused=True) if use_fused else dict()
+         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+         print(f"using fused AdamW: {use_fused}")
+
+         return optimizer
+
+     def estimate_mfu(self, fwdbwd_per_iter, dt):
+         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
+         # first estimate the number of flops we do per iteration.
+         # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
+         N = self.get_num_params()
+         cfg = self.config
+         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
+         flops_per_token = 6*N + 12*L*H*Q*T
+         flops_per_fwdbwd = flops_per_token * T
+         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+         # express our flops throughput as ratio of A100 bfloat16 peak flops
+         flops_achieved = flops_per_iter * (1.0/dt) # per second
+         flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
+         mfu = flops_achieved / flops_promised
+         return mfu
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         """
+         Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
+         the sequence max_new_tokens times, feeding the predictions back into the model each time.
+         Most likely you'll want to make sure to be in model.eval() mode of operation for this.
+         """
+         for _ in range(max_new_tokens):
+             # if the sequence context is growing too long we must crop it at block_size
+             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+             # forward the model to get the logits for the index in the sequence
+             logits, _ = self(idx_cond)
+             # pluck the logits at the final step and scale by desired temperature
+             logits = logits[:, -1, :] / temperature
+             # optionally crop the logits to only the top k options
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             # apply softmax to convert logits to (normalized) probabilities
+             probs = F.softmax(logits, dim=-1)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # append sampled index to the running sequence and continue
+             idx = torch.cat((idx, idx_next), dim=1)
+
+         return idx
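For orientation, a minimal sketch (not part of this commit) of how the module's two public pieces, GPTConfig and GPT, fit together; the tiny hyperparameters below are illustrative, not the values stored in model/ckpt.pt.

import torch
from model import GPTConfig, GPT

config = GPTConfig(block_size=128, vocab_size=50304, n_layer=2, n_head=2, n_embd=64, dropout=0.0, bias=True)
model = GPT(config).eval()
idx = torch.randint(0, config.vocab_size, (1, 16))  # (batch, time) tensor of token ids
logits, loss = model(idx)                           # no targets: logits for the last position only, loss is None
print(logits.shape)                                 # torch.Size([1, 1, 50304])
out = model.generate(idx, max_new_tokens=8, temperature=0.8, top_k=50)
print(out.shape)                                    # torch.Size([1, 24]) -- prompt plus 8 sampled tokens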
model/ckpt.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14c4618927621adda0f0c2f7dd0e735be8f15f7202bc5011c1ad5f47b315c212
+ size 1493431654
sample.py ADDED
@@ -0,0 +1,87 @@
+ """
+ Sample from a trained model
+ """
+ import os
+ import pickle
+ from contextlib import nullcontext
+ import torch
+ import tiktoken
+ from model import GPTConfig, GPT
+ import gradio as gr
+ # -----------------------------------------------------------------------------
+ init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
+ out_dir = 'model' # ignored if init_from is not 'resume'
+ start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
+ num_samples = 1 # number of samples to draw
+ max_new_tokens = 500 # number of tokens generated in each sample
+ temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
+ top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
+ seed = 1337
+ device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
+ dtype = 'bfloat16' # 'float32' or 'bfloat16' or 'float16'
+ compile = False # use PyTorch 2.0 to compile the model to be faster
+ exec(open('configurator.py').read()) # overrides from command line or config file
+ # -----------------------------------------------------------------------------
+
+ # torch.manual_seed(seed)
+ # torch.cuda.manual_seed(seed)
+ torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
+ torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
+ device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+ # model
+ if init_from == 'resume':
+     # init from a model saved in a specific directory
+     ckpt_path = os.path.join(out_dir, 'ckpt.pt')
+     checkpoint = torch.load(ckpt_path, map_location=device)
+     gptconf = GPTConfig(**checkpoint['model_args'])
+     model = GPT(gptconf)
+     state_dict = checkpoint['model']
+     unwanted_prefix = '_orig_mod.'
+     for k,v in list(state_dict.items()):
+         if k.startswith(unwanted_prefix):
+             state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
+     model.load_state_dict(state_dict)
+ elif init_from.startswith('gpt2'):
+     # init from a given GPT-2 model
+     model = GPT.from_pretrained(init_from, dict(dropout=0.0))
+
+ model.eval()
+ model.to(device)
+ if compile:
+     model = torch.compile(model) # requires PyTorch 2.0 (optional)
+
+ # look for the meta pickle in case it is available in the dataset folder
+ load_meta = False
+ if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these...
+     meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
+     load_meta = os.path.exists(meta_path)
+ if load_meta:
+     print(f"Loading meta from {meta_path}...")
+     with open(meta_path, 'rb') as f:
+         meta = pickle.load(f)
+     # TODO want to make this more general to arbitrary encoder/decoder schemes
+     stoi, itos = meta['stoi'], meta['itos']
+     encode = lambda s: [stoi[c] for c in s]
+     decode = lambda l: ''.join([itos[i] for i in l])
+ else:
+     # ok let's assume gpt-2 encodings by default
+     print("No meta.pkl found, assuming GPT-2 encodings...")
+     enc = tiktoken.get_encoding("gpt2")
+     encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
+     decode = lambda l: enc.decode(l)
+
+
+ def generate_text(start):
+     start_ids = encode(start)
+     x = (torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...])
+     output = ""
+     # run generation
+     with torch.no_grad():
+         with ctx:
+             for k in range(num_samples):
+                 y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
+                 output += decode(y[0].tolist())
+     return output
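A brief, hedged usage sketch of the generate_text entry point that app.py binds to its button. The prompt is the same default app.py uses; importing sample triggers the module-level checkpoint loading above, so it assumes model/ckpt.pt has been pulled via Git LFS and that a CUDA device is available unless device is overridden through configurator.py.

from sample import generate_text                # loads model/ckpt.pt at import time

text = generate_text("Imran Khan arrest")       # one sample, up to max_new_tokens (500) new tokens
print(text)                                     # decoded with the GPT-2 BPE unless a meta.pkl is found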