"""Train a simple dense classifier on Fashion-MNIST.

Dataset: https://keras.io/api/datasets/fashion_mnist/#load_data-function
Saves the best model (by validation loss) to ../my_models/fashion_model.keras.
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten

# Load the dataset: train split for fitting, test split held out for evaluation.
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()

# Sanity check of the pixel range: min = 0 and max = 255.
# (Original code called np.min twice — the second call must be np.max.)
print(np.min(x_train), np.max(x_train))

# The 10 clothing categories, indexed by label value 0..9.
class_names = ['top', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']

# Normalize pixel intensities from [0, 255] to [0.0, 1.0].
X_train = x_train / 255.0
X_test = x_test / 255.0

model = Sequential()
# Flatten each 28x28 image into a 784-element vector.
model.add(Flatten(input_shape=(28, 28)))
# Two hidden layers with ReLU activation.
model.add(Dense(128, activation='relu'))
model.add(Dense(256, activation='relu'))
# Output layer: 10 neurons (one per category) with softmax,
# because this is a multiclass (more than 2 classes) classification problem.
model.add(Dense(10, activation='softmax'))

print(model.summary())

# Loss choice guide:
#   integer labels (0,1,2,...)  -> sparse_categorical_crossentropy  (our case)
#   one-hot encoded labels      -> categorical_crossentropy
#   binary labels (0/1)         -> binary_crossentropy
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Checkpoint callback: keep only the best model by validation loss.
# (Original code monitored 'val_loss' but set save_best_only=False and passed
# no validation data to fit(), so val_loss never existed.)
model_checkpoint_best_callback = keras.callbacks.ModelCheckpoint(
    filepath='../my_models/fashion_model.keras',
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    save_weights_only=False,
)

# Train for 10 epochs with mini-batches of 32 samples; hold out 10% of the
# training data as a validation split so 'val_loss' is available to monitor.
model.fit(X_train, y_train,
          epochs=10,
          batch_size=32,
          validation_split=0.1,
          callbacks=[model_checkpoint_best_callback])