anilbhatt1 committed
Commit 3b51c85
1 Parent(s): c6fdff9

Initial Commit

Files changed (2)
  1. app.py +198 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,198 @@
+ import gradio as gr
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ import os
+ from datetime import datetime
+
+ with open('input.txt', 'r', encoding='utf-8') as f:
+     text = f.read()
+
+ # here are all the unique characters that occur in this text
+ chars = sorted(list(set(text)))
+ vocab_size = len(chars)
+ # create a mapping from characters to integers
+ stoi = { ch:i for i,ch in enumerate(chars) }
+ itos = { i:ch for i,ch in enumerate(chars) }
+ encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
+ decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
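+ # round-trip example: decode(encode('hello')) == 'hello'; note that encode raises
+ # KeyError for any character that never occurs in input.txt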
+
+ n_embd = 64
+ block_size = 64 # what is the maximum context length for predictions?
+ n_layer = 4
+ n_head = 4
+ dropout = 0.0
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
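+ # => a small character-level GPT: 4 layers of 4-head attention over 64-dim
+ #    embeddings, with a 64-token context window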
+
+ class Head(nn.Module):
+     """ one head of self-attention """
+
+     def __init__(self, head_size):
+         super().__init__()
+         self.key = nn.Linear(n_embd, head_size, bias=False)
+         self.query = nn.Linear(n_embd, head_size, bias=False)
+         self.value = nn.Linear(n_embd, head_size, bias=False)
+         # lower-triangular mask: each position attends only to itself and the past
+         self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         B,T,C = x.shape
+         k = self.key(x)   # (B,T,head_size)
+         q = self.query(x) # (B,T,head_size)
+         # compute attention scores ("affinities"); note the scale uses C (=n_embd),
+         # not the canonical head_size**-0.5 -- left unchanged, since the loaded
+         # checkpoint was presumably trained with this scaling
+         wei = q @ k.transpose(-2,-1) * C**-0.5 # (B,T,head_size) @ (B,head_size,T) -> (B,T,T)
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
+         wei = F.softmax(wei, dim=-1) # (B, T, T)
+         wei = self.dropout(wei)
+         # perform the weighted aggregation of the values
+         v = self.value(x) # (B,T,head_size)
+         out = wei @ v # (B, T, T) @ (B, T, head_size) -> (B, T, head_size)
+         return out
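+ # with n_embd=64 and n_head=4, head_size=16: each head maps (B,T,64) -> (B,T,16)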
+
+ class MultiHeadAttention(nn.Module):
+     """ multiple heads of self-attention in parallel """
+
+     def __init__(self, num_heads, head_size):
+         super().__init__()
+         self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+         self.proj = nn.Linear(n_embd, n_embd)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         out = torch.cat([h(x) for h in self.heads], dim=-1)
+         out = self.dropout(self.proj(out))
+         return out
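+ # the 4 concatenated 16-dim heads restore the full 64 = n_embd width, which
+ # self.proj then mixes back together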
+
+ class FeedForward(nn.Module):
+     """ a simple MLP: linear layer, non-linearity, then projection back """
+
+     def __init__(self, n_embd):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+             nn.Dropout(dropout),
+         )
+
+     def forward(self, x):
+         return self.net(x)
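+ # the standard transformer MLP: expand 4x (64 -> 256), apply ReLU, project back to 64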
+
+ class Block(nn.Module):
+     """ Transformer block: communication followed by computation """
+
+     def __init__(self, n_embd, n_head):
+         # n_embd: embedding dimension, n_head: the number of heads we'd like
+         super().__init__()
+         head_size = n_embd // n_head
+         self.sa = MultiHeadAttention(n_head, head_size)
+         self.ffwd = FeedForward(n_embd)
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+
+     def forward(self, x):
+         x = x + self.sa(self.ln1(x))
+         x = x + self.ffwd(self.ln2(x))
+         return x
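+ # pre-norm residual layout: LayerNorm is applied before each sub-layer and the
+ # sub-layer's output is added back onto the stream, i.e. x = x + sublayer(ln(x))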
+
+ # gpt model
+ class GPTModel(nn.Module):
+
+     def __init__(self):
+         super().__init__()
+         # each token directly reads off the logits for the next token from a lookup table
+         self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
+         self.position_embedding_table = nn.Embedding(block_size, n_embd)
+         self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
+         self.ln_f = nn.LayerNorm(n_embd) # final layer norm
+         self.lm_head = nn.Linear(n_embd, vocab_size)
+
+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+
+         # idx and targets are both (B,T) tensors of integers
+         tok_emb = self.token_embedding_table(idx) # (B,T,C)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
+         x = tok_emb + pos_emb # (B,T,C)
+         x = self.blocks(x) # (B,T,C)
+         x = self.ln_f(x) # (B,T,C)
+         logits = self.lm_head(x) # (B,T,vocab_size)
+
+         if targets is None:
+             loss = None
+         else:
+             # flatten to (B*T, C) so cross_entropy can compare each position's
+             # logits against its target token
+             B, T, C = logits.shape
+             logits = logits.view(B*T, C)
+             targets = targets.view(B*T)
+             loss = F.cross_entropy(logits, targets)
+
+         return logits, loss
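+     # forward returns per-position next-token logits; loss is computed only
+     # when targets are supplied (training) and stays None at generation time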
+
+     @torch.no_grad()  # sampling only needs inference, so skip gradient tracking
+     def generate(self, idx, max_new_tokens):
+         # idx is (B, T) array of indices in the current context
+         # max_new_tokens is treated as the TOTAL output length: the prompt length
+         # is subtracted so the returned sequence has max_new_tokens tokens in all
+         context_length = idx.shape[-1]
+         max_new_tokens -= context_length
+         for _ in range(max_new_tokens):
+             # crop idx to the last block_size tokens
+             idx_cond = idx[:, -block_size:]
+             # get the predictions
+             logits, loss = self(idx_cond)
+             # focus only on the last time step
+             logits = logits[:, -1, :] # becomes (B, C)
+             # apply softmax to get probabilities
+             probs = F.softmax(logits, dim=-1) # (B, C)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
+             # append sampled index to the running sequence
+             idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
+         return idx
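+ # multinomial sampling (rather than argmax) keeps the generated text varied from run to run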
+
+ model = GPTModel()
+ m = model.to(device)
+ model_pth = 'checkpoint_epoch-199999_26.10.2023_13:20:27_cpu.pt'
+ # map_location lets the CPU-saved checkpoint load cleanly on either device
+ model.load_state_dict(torch.load(model_pth, map_location=device))
+ model.eval()  # inference mode (a no-op for dropout here, since dropout=0.0)
+
+ def generate_text(given_text_context, max_text_length):
+
+     # drop any characters the model's vocabulary has never seen, so encode() cannot fail
+     given_text_context = ''.join(c for c in given_text_context if c in stoi)
+
+     if not given_text_context or given_text_context[-1] != ' ':
+         given_text_context += ' '
+
+     if max_text_length > 5000:
+         max_text_length = 5000
+
+     if max_text_length < 40:
+         max_text_length = 40
+
+     if len(given_text_context) > max_text_length:
+         given_text_context = given_text_context[:max_text_length]
+
+     context = given_text_context
+     # Encode the context
+     en_context = encode(context)
+     # Convert the Python list to a tensor of token ids on the model's device
+     en_tensor = torch.tensor(en_context, dtype=torch.long, device=device)
+     en_tensor = en_tensor.view(1, len(en_context))
+
+     output_msg = decode(m.generate(en_tensor, max_new_tokens=max_text_length)[0].tolist())
+
+     return output_msg
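+ # e.g. generate_text('I would like to', 200) should return a passage of roughly
+ # 200 characters that begins with the given context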
+
+
+ def gradio_fn(given_text_context, num_chars):
+     num_chars = int(num_chars)
+     output_txt_msg = generate_text(given_text_context, num_chars)
+     return output_txt_msg
+
+
+ demo = gr.Interface(fn=gradio_fn,
+                     inputs=[gr.Textbox(info="Start your passage with e.g. 'I would like to'"),
+                             gr.Number(value=200, minimum=40, maximum=5000,
+                                       info="Number of characters for the passage (min=40, max=5000)")],
+                     outputs=gr.Textbox(),
+                     title="Text Gen with GPT",
+                     description="- GPT model that generates a passage of the given "
+                                 "character length from the given text context")
+
+
+ demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ # These requirements are for GPU
+ # (+cu118 wheels are typically installed with --extra-index-url https://download.pytorch.org/whl/cu118)
+ torch==2.1.0+cu118
+ gradio==3.50.2