Upload PrateritumGPT.py
PrateritumGPT.py +119 -0
ADDED
@@ -0,0 +1,119 @@
import csv
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import math

# Character vocabulary: Latin letters (AZERTY order), German umlauts, eszett, dashes, and space
tokens = list("azertyuiopqsdfghjklmwxcvbnäüöß—– ")
tokensdict = {}

# One-hot vector for each character (built here but never used later in the script)
for i in range(len(tokens)):
    tokensdict.update({tokens[i]: [0] * i + [1] + [0] * (len(tokens) - (i + 1))})

# Open the CSV file
with open("C:\\Users\\marc2\\Downloads\\7eaaf0e22461b505c749e268c0b72bc4-12ebe211a929f039791dfeaa1a019b64cadddaf1\\7eaaf0e22461b505c749e268c0b72bc4-12ebe211a929f039791dfeaa1a019b64cadddaf1\\top-german-verbs.csv", 'r', encoding="utf-8") as file:
    # Create a CSV reader and drop the header row
    reader = [i for i in csv.reader(file)][1:]

class CSVDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        sample = self.features[idx], self.labels[idx]
        return sample

# Suppose the data is available as plain lists
features = []
labels = []

# Encode each verb as character indices, padded with index len(tokens) + 1 up to length 25
# (column 2 is used as the source form, column 8 as the target, presumably the Präteritum)
for i in reader:
    k = []
    for j in i[2]:
        k += [tokens.index(j)]
    k += [len(tokens) + 1] * (25 - len(k))
    features += [torch.Tensor(k)]
    k = []
    for j in i[8]:
        k += [tokens.index(j)]
    k += [len(tokens) + 1] * (25 - len(k))
    labels += [torch.Tensor(k)]

MyDataset = CSVDataset(features=features, labels=labels)

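# Quick illustration of the encoding above (a hypothetical check, not part of the
# original script): with this vocabulary, "gehen" maps to [14, 2, 15, 2, 25]
# followed by copies of the padding index 34.
_example = [tokens.index(c) for c in "gehen"] + [len(tokens) + 1] * (25 - len("gehen"))
assert _example[:6] == [14, 2, 15, 2, 25, 34]
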
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, emb_dim, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout=0.1):
        super().__init__()
        self.custom_embedding = nn.Embedding(vocab_size, emb_dim)
        self.pos_encoder = PositionalEncoding(emb_dim, dropout)
        # batch_first=True so the layers accept (batch, seq, emb) tensors, matching the DataLoader output
        encoder_layer = nn.TransformerEncoderLayer(emb_dim, nhead, dim_feedforward, dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers)
        decoder_layer = nn.TransformerDecoderLayer(emb_dim, nhead, dim_feedforward, dropout, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self.output_layer = nn.Linear(emb_dim, vocab_size)

    def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
        src_emb = self.pos_encoder(self.custom_embedding(src.long()))
        tgt_emb = self.pos_encoder(self.custom_embedding(tgt.long()))
        encoder_output = self.transformer_encoder(src_emb, src_mask, src_key_padding_mask)
        decoder_output = self.transformer_decoder(tgt_emb, encoder_output, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask)
        output = self.output_layer(decoder_output)
        return output

# Definition of the PositionalEncoding class (identical to the previous example)
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, d_model), broadcast over the batch dimension
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq, d_model); add the encoding of each sequence position
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)

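# Optional shape sanity check (an illustrative sketch, not part of the original
# pipeline): with batch-first layers, a (2, 25) source batch and a (2, 24)
# shifted-target batch should produce logits of shape (2, 24, vocab_size).
_demo = TransformerModel(vocab_size=len(tokens) + 2, emb_dim=64, nhead=4,
                         num_encoder_layers=1, num_decoder_layers=1, dim_feedforward=128)
print(_demo(torch.zeros(2, 25), torch.zeros(2, 24)).shape)  # torch.Size([2, 24, 35])
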
# Data preparation
def collate_fn(batch):
    inputs = [item[0] for item in batch]
    targets = [item[1] for item in batch]
    # Sequences were already padded to length 25 above; pad_sequence keeps the collate robust anyway
    inputs = pad_sequence(inputs, batch_first=True, padding_value=len(tokens) + 1)
    targets = pad_sequence(targets, batch_first=True, padding_value=len(tokens) + 1)
    return inputs, targets

train_loader = DataLoader(MyDataset, batch_size=32, shuffle=True, collate_fn=collate_fn)

# Define the model, the loss function, and the optimizer
model = TransformerModel(vocab_size=len(tokens) + 2, emb_dim=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=2048)
# ignore_index keeps padded positions from contributing to the loss
loss_fn = nn.CrossEntropyLoss(ignore_index=len(tokens) + 1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

epochs = 10

for epoch in range(epochs):
    total_loss = 0.0

    for batch_idx, (inputs, targets) in enumerate(train_loader):
        optimizer.zero_grad()
        # Causal mask so the decoder cannot peek at future target characters
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(targets.size(1) - 1)
        output = model(inputs, targets[:, :-1], tgt_mask=tgt_mask)  # Shifted targets (teacher forcing)
        output = output.transpose(1, 2)  # (batch, vocab, seq), as CrossEntropyLoss expects
        loss = loss_fn(output, targets[:, 1:].long())  # Shifted targets
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

        if batch_idx % 100 == 0:
            print(f"Epoch {epoch + 1}/{epochs}, Batch {batch_idx}/{len(train_loader)}, Loss: {total_loss / (batch_idx + 1)}")

    print(f"Epoch {epoch + 1}/{epochs}, Loss: {total_loss / len(train_loader)}")
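
# A minimal greedy-decoding sketch (predict_prateritum is a hypothetical helper,
# not part of the original script). Assumption: the script trains without a
# dedicated start token, so decoding is seeded with a known first character of
# the target form.
def predict_prateritum(verb, first_char, max_len=25):
    model.eval()
    with torch.no_grad():
        src = torch.Tensor([tokens.index(c) for c in verb]
                           + [len(tokens) + 1] * (max_len - len(verb))).unsqueeze(0)
        out_ids = [tokens.index(first_char)]
        for _ in range(max_len - 1):
            tgt = torch.Tensor(out_ids).unsqueeze(0)
            tgt_mask = nn.Transformer.generate_square_subsequent_mask(len(out_ids))
            logits = model(src, tgt, tgt_mask=tgt_mask)  # (1, len(out_ids), vocab)
            next_id = int(logits[0, -1].argmax())
            if next_id >= len(tokens):  # padding / unused index marks the end
                break
            out_ids.append(next_id)
        return "".join(tokens[i] for i in out_ids)

# Example usage after training: predict_prateritum("gehen", "g")  # ideally "ging"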